summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2019-08-16 11:32:46 +0200
committerMichaël Zasso <targos@protonmail.com>2019-08-19 09:25:23 +0200
commite31f0a7d25668d3c1531294d2ef44a9f3bde4ef4 (patch)
tree6c6bed9804be9df6162b2483f0a56f371f66464d
parentec16fdae540adaf710b1a86c620170b2880088f0 (diff)
downloadandroid-node-v8-e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4.tar.gz
android-node-v8-e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4.tar.bz2
android-node-v8-e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4.zip
deps: update V8 to 7.7.299.4
PR-URL: https://github.com/nodejs/node/pull/28918 Reviewed-By: Colin Ihrig <cjihrig@gmail.com> Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl> Reviewed-By: Jiawen Geng <technicalcute@gmail.com> Reviewed-By: Rich Trott <rtrott@gmail.com>
-rw-r--r--deps/v8/.flake811
-rw-r--r--deps/v8/.gitignore18
-rw-r--r--deps/v8/AUTHORS5
-rw-r--r--deps/v8/BUILD.gn244
-rw-r--r--deps/v8/ChangeLog1535
-rw-r--r--deps/v8/DEPS44
-rw-r--r--deps/v8/INTL_OWNERS3
-rw-r--r--deps/v8/OWNERS8
-rw-r--r--deps/v8/benchmarks/OWNERS (renamed from deps/v8/test/wasm-api-tests/OWNERS)0
-rw-r--r--deps/v8/gni/proto_library.gni3
-rw-r--r--deps/v8/gni/v8.gni2
-rw-r--r--deps/v8/include/APIDesign.md3
-rw-r--r--deps/v8/include/OWNERS7
-rw-r--r--deps/v8/include/js_protocol-1.2.json (renamed from deps/v8/src/inspector/js_protocol-1.2.json)0
-rw-r--r--deps/v8/include/js_protocol-1.3.json (renamed from deps/v8/src/inspector/js_protocol-1.3.json)0
-rw-r--r--deps/v8/include/js_protocol.pdl (renamed from deps/v8/src/inspector/js_protocol.pdl)0
-rw-r--r--deps/v8/include/libplatform/v8-tracing.h11
-rw-r--r--deps/v8/include/v8-inspector.h6
-rw-r--r--deps/v8/include/v8-platform.h8
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h184
-rw-r--r--deps/v8/include/v8config.h6
-rw-r--r--deps/v8/infra/mb/gn_isolate_map.pyl4
-rw-r--r--deps/v8/infra/mb/mb_config.pyl12
-rw-r--r--deps/v8/infra/testing/PRESUBMIT.py4
-rw-r--r--deps/v8/infra/testing/builders.pyl287
-rw-r--r--deps/v8/samples/process.cc14
-rw-r--r--deps/v8/src/DEPS6
-rw-r--r--deps/v8/src/OWNERS10
-rw-r--r--deps/v8/src/api/OWNERS11
-rw-r--r--deps/v8/src/api/api-natives.cc33
-rw-r--r--deps/v8/src/api/api.cc428
-rw-r--r--deps/v8/src/api/api.h2
-rw-r--r--deps/v8/src/asmjs/OWNERS2
-rw-r--r--deps/v8/src/asmjs/asm-js.cc12
-rw-r--r--deps/v8/src/asmjs/asm-js.h2
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc26
-rw-r--r--deps/v8/src/asmjs/asm-parser.h6
-rw-r--r--deps/v8/src/ast/OWNERS2
-rw-r--r--deps/v8/src/ast/ast.cc15
-rw-r--r--deps/v8/src/ast/ast.h53
-rw-r--r--deps/v8/src/ast/modules.cc102
-rw-r--r--deps/v8/src/ast/modules.h16
-rw-r--r--deps/v8/src/ast/prettyprinter.cc26
-rw-r--r--deps/v8/src/ast/scopes.cc81
-rw-r--r--deps/v8/src/ast/scopes.h15
-rw-r--r--deps/v8/src/ast/variables.h53
-rw-r--r--deps/v8/src/base/adapters.h2
-rw-r--r--deps/v8/src/base/lsan.h29
-rw-r--r--deps/v8/src/base/memory.h (renamed from deps/v8/src/common/v8memory.h)30
-rw-r--r--deps/v8/src/base/platform/OWNERS2
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc4
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc6
-rw-r--r--deps/v8/src/base/small-vector.h23
-rw-r--r--deps/v8/src/base/vlq-base64.cc58
-rw-r--r--deps/v8/src/base/vlq-base64.h23
-rw-r--r--deps/v8/src/builtins/OWNERS3
-rw-r--r--deps/v8/src/builtins/accessors.cc5
-rw-r--r--deps/v8/src/builtins/arguments.tq4
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc43
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc39
-rw-r--r--deps/v8/src/builtins/array-copywithin.tq2
-rw-r--r--deps/v8/src/builtins/array-every.tq29
-rw-r--r--deps/v8/src/builtins/array-filter.tq31
-rw-r--r--deps/v8/src/builtins/array-find.tq34
-rw-r--r--deps/v8/src/builtins/array-findindex.tq35
-rw-r--r--deps/v8/src/builtins/array-foreach.tq30
-rw-r--r--deps/v8/src/builtins/array-join.tq39
-rw-r--r--deps/v8/src/builtins/array-lastindexof.tq4
-rw-r--r--deps/v8/src/builtins/array-map.tq49
-rw-r--r--deps/v8/src/builtins/array-of.tq5
-rw-r--r--deps/v8/src/builtins/array-reduce-right.tq53
-rw-r--r--deps/v8/src/builtins/array-reduce.tq50
-rw-r--r--deps/v8/src/builtins/array-reverse.tq2
-rw-r--r--deps/v8/src/builtins/array-shift.tq2
-rw-r--r--deps/v8/src/builtins/array-slice.tq9
-rw-r--r--deps/v8/src/builtins/array-some.tq30
-rw-r--r--deps/v8/src/builtins/array-splice.tq19
-rw-r--r--deps/v8/src/builtins/array-unshift.tq2
-rw-r--r--deps/v8/src/builtins/array.tq36
-rw-r--r--deps/v8/src/builtins/base.tq625
-rw-r--r--deps/v8/src/builtins/bigint.tq206
-rw-r--r--deps/v8/src/builtins/boolean.tq29
-rw-r--r--deps/v8/src/builtins/builtins-api.cc16
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.cc3
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc17
-rw-r--r--deps/v8/src/builtins/builtins-array.cc5
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc41
-rw-r--r--deps/v8/src/builtins/builtins-bigint-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-bigint-gen.h80
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc4
-rw-r--r--deps/v8/src/builtins/builtins-boolean-gen.cc19
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc11
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc13
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc60
-rw-r--r--deps/v8/src/builtins/builtins-console.cc3
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc36
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.h12
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc24
-rw-r--r--deps/v8/src/builtins/builtins-data-view-gen.h8
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h19
-rw-r--r--deps/v8/src/builtins/builtins-error.cc9
-rw-r--r--deps/v8/src/builtins/builtins-global.cc18
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc38
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc117
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc40
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc31
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h29
-rw-r--r--deps/v8/src/builtins/builtins-math.cc26
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-number.cc20
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc44
-rw-r--r--deps/v8/src/builtins/builtins-object.cc72
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc32
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc97
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.h29
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc127
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc141
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h36
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc2
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc48
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-symbol-gen.cc16
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc73
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h10
-rw-r--r--deps/v8/src/builtins/builtins-weak-refs.cc80
-rw-r--r--deps/v8/src/builtins/collections.tq2
-rw-r--r--deps/v8/src/builtins/data-view.tq383
-rw-r--r--deps/v8/src/builtins/extras-utils.tq7
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc46
-rw-r--r--deps/v8/src/builtins/internal-coverage.tq2
-rw-r--r--deps/v8/src/builtins/iterator.tq8
-rw-r--r--deps/v8/src/builtins/math.tq48
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc76
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc74
-rw-r--r--deps/v8/src/builtins/object-fromentries.tq7
-rw-r--r--deps/v8/src/builtins/object.tq138
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc195
-rw-r--r--deps/v8/src/builtins/proxy-constructor.tq9
-rw-r--r--deps/v8/src/builtins/proxy-delete-property.tq67
-rw-r--r--deps/v8/src/builtins/proxy-get-property.tq32
-rw-r--r--deps/v8/src/builtins/proxy-get-prototype-of.tq70
-rw-r--r--deps/v8/src/builtins/proxy-has-property.tq6
-rw-r--r--deps/v8/src/builtins/proxy-is-extensible.tq56
-rw-r--r--deps/v8/src/builtins/proxy-prevent-extensions.tq66
-rw-r--r--deps/v8/src/builtins/proxy-revocable.tq8
-rw-r--r--deps/v8/src/builtins/proxy-revoke.tq2
-rw-r--r--deps/v8/src/builtins/proxy-set-property.tq22
-rw-r--r--deps/v8/src/builtins/proxy-set-prototype-of.tq77
-rw-r--r--deps/v8/src/builtins/proxy.tq30
-rw-r--r--deps/v8/src/builtins/reflect.tq82
-rw-r--r--deps/v8/src/builtins/regexp-replace.tq11
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc188
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc25
-rw-r--r--deps/v8/src/builtins/string-endswith.tq9
-rw-r--r--deps/v8/src/builtins/string-html.tq47
-rw-r--r--deps/v8/src/builtins/string-iterator.tq10
-rw-r--r--deps/v8/src/builtins/string-repeat.tq2
-rw-r--r--deps/v8/src/builtins/string-slice.tq2
-rw-r--r--deps/v8/src/builtins/string-startswith.tq16
-rw-r--r--deps/v8/src/builtins/string-substring.tq2
-rw-r--r--deps/v8/src/builtins/string.tq32
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq158
-rw-r--r--deps/v8/src/builtins/typed-array-every.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-filter.tq2
-rw-r--r--deps/v8/src/builtins/typed-array-find.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-findindex.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-foreach.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-reduce.tq10
-rw-r--r--deps/v8/src/builtins/typed-array-reduceright.tq10
-rw-r--r--deps/v8/src/builtins/typed-array-slice.tq2
-rw-r--r--deps/v8/src/builtins/typed-array-some.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-subarray.tq3
-rw-r--r--deps/v8/src/builtins/typed-array.tq31
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc46
-rw-r--r--deps/v8/src/codegen/DEPS9
-rw-r--r--deps/v8/src/codegen/OWNERS8
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc13
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h3
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc26
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h5
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h130
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc423
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.h247
-rw-r--r--deps/v8/src/codegen/arm64/constants-arm64.h4
-rw-r--r--deps/v8/src/codegen/arm64/cpu-arm64.cc2
-rw-r--r--deps/v8/src/codegen/arm64/decoder-arm64.h2
-rw-r--r--deps/v8/src/codegen/arm64/instructions-arm64.h1
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc34
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h25
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h2
-rw-r--r--deps/v8/src/codegen/assembler.cc36
-rw-r--r--deps/v8/src/codegen/assembler.h33
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc871
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h391
-rw-r--r--deps/v8/src/codegen/compiler.cc155
-rw-r--r--deps/v8/src/codegen/compiler.h19
-rw-r--r--deps/v8/src/codegen/constant-pool.cc249
-rw-r--r--deps/v8/src/codegen/constant-pool.h190
-rw-r--r--deps/v8/src/codegen/cpu-features.h1
-rw-r--r--deps/v8/src/codegen/external-reference.cc37
-rw-r--r--deps/v8/src/codegen/external-reference.h7
-rw-r--r--deps/v8/src/codegen/handler-table.cc38
-rw-r--r--deps/v8/src/codegen/handler-table.h10
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc7
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h1
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc14
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h5
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc5
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h12
-rw-r--r--deps/v8/src/codegen/label.h2
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc3
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.h16
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc33
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h15
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc3
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.h4
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc33
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h15
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc12
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h21
-rw-r--r--deps/v8/src/codegen/pending-optimization-table.cc97
-rw-r--r--deps/v8/src/codegen/pending-optimization-table.h44
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc22
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h27
-rw-r--r--deps/v8/src/codegen/ppc/code-stubs-ppc.cc28
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc39
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h5
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc23
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.h29
-rw-r--r--deps/v8/src/codegen/s390/code-stubs-s390.cc27
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc22
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h5
-rw-r--r--deps/v8/src/codegen/safepoint-table.h13
-rw-r--r--deps/v8/src/codegen/source-position-table.cc15
-rw-r--r--deps/v8/src/codegen/tick-counter.cc23
-rw-r--r--deps/v8/src/codegen/tick-counter.h28
-rw-r--r--deps/v8/src/codegen/turbo-assembler.h6
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h4
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc220
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h24
-rw-r--r--deps/v8/src/codegen/x64/constants-x64.h3
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc82
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h18
-rw-r--r--deps/v8/src/codegen/x64/sse-instr.h5
-rw-r--r--deps/v8/src/common/OWNERS3
-rw-r--r--deps/v8/src/common/globals.h37
-rw-r--r--deps/v8/src/common/message-template.h (renamed from deps/v8/src/execution/message-template.h)18
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h31
-rw-r--r--deps/v8/src/compiler-dispatcher/OWNERS7
-rw-r--r--deps/v8/src/compiler/OWNERS5
-rw-r--r--deps/v8/src/compiler/STYLE29
-rw-r--r--deps/v8/src/compiler/access-builder.cc28
-rw-r--r--deps/v8/src/compiler/access-builder.h11
-rw-r--r--deps/v8/src/compiler/access-info.cc42
-rw-r--r--deps/v8/src/compiler/access-info.h35
-rw-r--r--deps/v8/src/compiler/add-type-assertions-reducer.cc51
-rw-r--r--deps/v8/src/compiler/add-type-assertions-reducer.h45
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc68
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-codes-arm.h1
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc1
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc9
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc73
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h1
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc1
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc80
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc4
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc64
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h3
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc3
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc9
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h2
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.cc2
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector-impl.h12
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc105
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h22
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc5
-rw-r--r--deps/v8/src/compiler/backend/instruction.h6
-rw-r--r--deps/v8/src/compiler/backend/jump-threading.h13
-rw-r--r--deps/v8/src/compiler/backend/live-range-separator.cc24
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc70
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-codes-mips.h1
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc3
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc9
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc70
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h1
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc3
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc19
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc88
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h1
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc1
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc9
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc121
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.h36
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc63
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc33
-rw-r--r--deps/v8/src/compiler/backend/unwinding-info-writer.h1
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc352
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-codes-x64.h30
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc30
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc122
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc94
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.h45
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc324
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h11
-rw-r--r--deps/v8/src/compiler/code-assembler.cc32
-rw-r--r--deps/v8/src/compiler/code-assembler.h123
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc8
-rw-r--r--deps/v8/src/compiler/common-operator.cc12
-rw-r--r--deps/v8/src/compiler/common-operator.h1
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc56
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h15
-rw-r--r--deps/v8/src/compiler/compilation-dependency.h32
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.cc7
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.h7
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.cc336
-rw-r--r--deps/v8/src/compiler/csa-load-elimination.h118
-rw-r--r--deps/v8/src/compiler/decompression-elimination.cc37
-rw-r--r--deps/v8/src/compiler/decompression-elimination.h5
-rw-r--r--deps/v8/src/compiler/diamond.h4
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc320
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc54
-rw-r--r--deps/v8/src/compiler/escape-analysis.h11
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc9
-rw-r--r--deps/v8/src/compiler/graph-assembler.h38
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc12
-rw-r--r--deps/v8/src/compiler/graph-reducer.h8
-rw-r--r--deps/v8/src/compiler/heap-refs.h906
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc37
-rw-r--r--deps/v8/src/compiler/int64-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc1265
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h17
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc20
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc2
-rw-r--r--deps/v8/src/compiler/js-graph.cc8
-rw-r--r--deps/v8/src/compiler/js-graph.h52
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc909
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h831
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.cc3
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc66
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h2
-rw-r--r--deps/v8/src/compiler/js-inlining.cc17
-rw-r--r--deps/v8/src/compiler/js-inlining.h3
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc320
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h35
-rw-r--r--deps/v8/src/compiler/js-operator.cc15
-rw-r--r--deps/v8/src/compiler/js-operator.h14
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc58
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc18
-rw-r--r--deps/v8/src/compiler/linkage.cc8
-rw-r--r--deps/v8/src/compiler/linkage.h2
-rw-r--r--deps/v8/src/compiler/load-elimination.cc28
-rw-r--r--deps/v8/src/compiler/load-elimination.h2
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc20
-rw-r--r--deps/v8/src/compiler/loop-analysis.h6
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc13
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc3
-rw-r--r--deps/v8/src/compiler/machine-operator.cc55
-rw-r--r--deps/v8/src/compiler/machine-operator.h42
-rw-r--r--deps/v8/src/compiler/map-inference.cc25
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc45
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h6
-rw-r--r--deps/v8/src/compiler/node-properties.cc3
-rw-r--r--deps/v8/src/compiler/node-properties.h3
-rw-r--r--deps/v8/src/compiler/node.cc8
-rw-r--r--deps/v8/src/compiler/opcodes.h53
-rw-r--r--deps/v8/src/compiler/operation-typer.cc31
-rw-r--r--deps/v8/src/compiler/operation-typer.h6
-rw-r--r--deps/v8/src/compiler/pipeline.cc307
-rw-r--r--deps/v8/src/compiler/pipeline.h15
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc13
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc4
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h5
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc3
-rw-r--r--deps/v8/src/compiler/representation-change.cc192
-rw-r--r--deps/v8/src/compiler/representation-change.h51
-rw-r--r--deps/v8/src/compiler/scheduler.cc38
-rw-r--r--deps/v8/src/compiler/scheduler.h10
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.cc1402
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.h329
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc29
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h9
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc175
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h8
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc112
-rw-r--r--deps/v8/src/compiler/simplified-operator.h29
-rw-r--r--deps/v8/src/compiler/state-values-utils.cc8
-rw-r--r--deps/v8/src/compiler/state-values-utils.h4
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc16
-rw-r--r--deps/v8/src/compiler/store-store-elimination.h6
-rw-r--r--deps/v8/src/compiler/typer.cc30
-rw-r--r--deps/v8/src/compiler/typer.h7
-rw-r--r--deps/v8/src/compiler/types.cc14
-rw-r--r--deps/v8/src/compiler/types.h3
-rw-r--r--deps/v8/src/compiler/verifier.cc46
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc712
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h51
-rw-r--r--deps/v8/src/d8/d8.cc87
-rw-r--r--deps/v8/src/date/OWNERS3
-rw-r--r--deps/v8/src/debug/OWNERS2
-rw-r--r--deps/v8/src/debug/debug-coverage.cc15
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc9
-rw-r--r--deps/v8/src/debug/debug-evaluate.h5
-rw-r--r--deps/v8/src/debug/debug-frames.cc15
-rw-r--r--deps/v8/src/debug/debug-frames.h11
-rw-r--r--deps/v8/src/debug/debug-interface.h5
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.h1
-rw-r--r--deps/v8/src/debug/debug-scopes.cc60
-rw-r--r--deps/v8/src/debug/debug-scopes.h2
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc5
-rw-r--r--deps/v8/src/debug/debug.cc20
-rw-r--r--deps/v8/src/debug/debug.h11
-rw-r--r--deps/v8/src/debug/liveedit.cc40
-rw-r--r--deps/v8/src/deoptimizer/OWNERS2
-rw-r--r--deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc2
-rw-r--r--deps/v8/src/deoptimizer/deoptimize-reason.h1
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc90
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h26
-rw-r--r--deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc10
-rw-r--r--deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc2
-rw-r--r--deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc2
-rw-r--r--deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc42
-rw-r--r--deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc2
-rw-r--r--deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc10
-rw-r--r--deps/v8/src/diagnostics/DEPS3
-rw-r--r--deps/v8/src/diagnostics/eh-frame.cc8
-rw-r--r--deps/v8/src/diagnostics/eh-frame.h14
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.cc87
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc124
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc294
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc82
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc31
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc108
-rw-r--r--deps/v8/src/execution/OWNERS5
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.cc4
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.cc6
-rw-r--r--deps/v8/src/execution/execution.cc350
-rw-r--r--deps/v8/src/execution/execution.h177
-rw-r--r--deps/v8/src/execution/frame-constants.h9
-rw-r--r--deps/v8/src/execution/frames-inl.h33
-rw-r--r--deps/v8/src/execution/frames.cc46
-rw-r--r--deps/v8/src/execution/frames.h25
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.cc6
-rw-r--r--deps/v8/src/execution/interrupts-scope.cc42
-rw-r--r--deps/v8/src/execution/interrupts-scope.h72
-rw-r--r--deps/v8/src/execution/isolate-data.h8
-rw-r--r--deps/v8/src/execution/isolate-inl.h7
-rw-r--r--deps/v8/src/execution/isolate-utils-inl.h64
-rw-r--r--deps/v8/src/execution/isolate-utils.h31
-rw-r--r--deps/v8/src/execution/isolate.cc231
-rw-r--r--deps/v8/src/execution/isolate.h83
-rw-r--r--deps/v8/src/execution/messages.cc312
-rw-r--r--deps/v8/src/execution/messages.h21
-rw-r--r--deps/v8/src/execution/microtask-queue.cc2
-rw-r--r--deps/v8/src/execution/mips/frame-constants-mips.cc7
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.cc372
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.h18
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.cc3
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.cc372
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.h20
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc2
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc90
-rw-r--r--deps/v8/src/execution/stack-guard.cc345
-rw-r--r--deps/v8/src/execution/stack-guard.h186
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.cc1
-rw-r--r--deps/v8/src/extensions/OWNERS1
-rw-r--r--deps/v8/src/extensions/cputracemark-extension.cc56
-rw-r--r--deps/v8/src/extensions/cputracemark-extension.h38
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc7
-rw-r--r--deps/v8/src/flags/OWNERS1
-rw-r--r--deps/v8/src/flags/flag-definitions.h81
-rw-r--r--deps/v8/src/handles/OWNERS3
-rw-r--r--deps/v8/src/handles/handles.cc4
-rw-r--r--deps/v8/src/heap/OWNERS2
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h2
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h2
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.cc54
-rw-r--r--deps/v8/src/heap/basic-memory-chunk.h229
-rw-r--r--deps/v8/src/heap/code-stats.cc6
-rw-r--r--deps/v8/src/heap/combined-heap.cc10
-rw-r--r--deps/v8/src/heap/combined-heap.h20
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc30
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc6
-rw-r--r--deps/v8/src/heap/embedder-tracing.h21
-rw-r--r--deps/v8/src/heap/factory-inl.h9
-rw-r--r--deps/v8/src/heap/factory.cc280
-rw-r--r--deps/v8/src/heap/factory.h48
-rw-r--r--deps/v8/src/heap/gc-tracer.cc29
-rw-r--r--deps/v8/src/heap/gc-tracer.h2
-rw-r--r--deps/v8/src/heap/heap-controller.cc21
-rw-r--r--deps/v8/src/heap/heap-controller.h7
-rw-r--r--deps/v8/src/heap/heap-inl.h17
-rw-r--r--deps/v8/src/heap/heap-write-barrier-inl.h48
-rw-r--r--deps/v8/src/heap/heap-write-barrier.h2
-rw-r--r--deps/v8/src/heap/heap.cc649
-rw-r--r--deps/v8/src/heap/heap.h214
-rw-r--r--deps/v8/src/heap/incremental-marking.cc39
-rw-r--r--deps/v8/src/heap/incremental-marking.h12
-rw-r--r--deps/v8/src/heap/item-parallel-job.cc7
-rw-r--r--deps/v8/src/heap/item-parallel-job.h6
-rw-r--r--deps/v8/src/heap/mark-compact.cc194
-rw-r--r--deps/v8/src/heap/object-stats.cc2
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h28
-rw-r--r--deps/v8/src/heap/objects-visiting.h5
-rw-r--r--deps/v8/src/heap/read-only-heap-inl.h31
-rw-r--r--deps/v8/src/heap/read-only-heap.cc85
-rw-r--r--deps/v8/src/heap/read-only-heap.h23
-rw-r--r--deps/v8/src/heap/remembered-set.h4
-rw-r--r--deps/v8/src/heap/scavenger-inl.h24
-rw-r--r--deps/v8/src/heap/scavenger.cc22
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc16
-rw-r--r--deps/v8/src/heap/spaces-inl.h36
-rw-r--r--deps/v8/src/heap/spaces.cc574
-rw-r--r--deps/v8/src/heap/spaces.h1103
-rw-r--r--deps/v8/src/heap/store-buffer.cc11
-rw-r--r--deps/v8/src/heap/stress-marking-observer.cc8
-rw-r--r--deps/v8/src/heap/stress-marking-observer.h4
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.cc24
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.h4
-rw-r--r--deps/v8/src/heap/sweeper.cc12
-rw-r--r--deps/v8/src/ic/OWNERS2
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc741
-rw-r--r--deps/v8/src/ic/accessor-assembler.h113
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc28
-rw-r--r--deps/v8/src/ic/call-optimization.cc3
-rw-r--r--deps/v8/src/ic/ic-inl.h24
-rw-r--r--deps/v8/src/ic/ic.cc286
-rw-r--r--deps/v8/src/ic/ic.h37
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc87
-rw-r--r--deps/v8/src/ic/stub-cache.cc46
-rw-r--r--deps/v8/src/ic/stub-cache.h14
-rw-r--r--deps/v8/src/init/OWNERS11
-rw-r--r--deps/v8/src/init/bootstrapper.cc162
-rw-r--r--deps/v8/src/init/heap-symbols.h15
-rw-r--r--deps/v8/src/init/isolate-allocator.cc2
-rw-r--r--deps/v8/src/init/setup-isolate-deserialize.cc1
-rw-r--r--deps/v8/src/inspector/BUILD.gn15
-rw-r--r--deps/v8/src/inspector/DEPS3
-rw-r--r--deps/v8/src/inspector/OWNERS8
-rw-r--r--deps/v8/src/inspector/injected-script.cc42
-rw-r--r--deps/v8/src/inspector/inspector_protocol_config.json10
-rw-r--r--deps/v8/src/inspector/string-16.cc17
-rw-r--r--deps/v8/src/inspector/string-16.h15
-rw-r--r--deps/v8/src/inspector/string-util.cc6
-rw-r--r--deps/v8/src/inspector/string-util.h25
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc21
-rw-r--r--deps/v8/src/inspector/v8-console.cc10
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc54
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc4
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc3
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc31
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc93
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h3
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc92
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc15
-rw-r--r--deps/v8/src/inspector/v8-schema-agent-impl.cc9
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.cc15
-rw-r--r--deps/v8/src/inspector/v8-stack-trace-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-string-conversions.cc7
-rw-r--r--deps/v8/src/inspector/value-mirror.cc86
-rw-r--r--deps/v8/src/interpreter/OWNERS2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc92
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h33
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc4
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.cc12
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.h7
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc12
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.cc10
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc169
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h17
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h3
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc6
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h2
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc23
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc6
-rw-r--r--deps/v8/src/interpreter/interpreter.cc17
-rw-r--r--deps/v8/src/interpreter/interpreter.h6
-rw-r--r--deps/v8/src/json/OWNERS3
-rw-r--r--deps/v8/src/json/json-parser.cc4
-rw-r--r--deps/v8/src/json/json-stringifier.cc24
-rw-r--r--deps/v8/src/libplatform/tracing/OWNERS1
-rw-r--r--deps/v8/src/libplatform/tracing/json-trace-event-listener.cc4
-rw-r--r--deps/v8/src/libplatform/tracing/json-trace-event-listener.h5
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-consumer.cc44
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-consumer.h80
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-producer.cc45
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-producer.h70
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc28
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-shared-memory.h45
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tasks.cc52
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tasks.h55
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc130
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h86
-rw-r--r--deps/v8/src/libplatform/tracing/trace-event-listener.cc27
-rw-r--r--deps/v8/src/libplatform/tracing/trace-event-listener.h9
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc147
-rw-r--r--deps/v8/src/libsampler/OWNERS3
-rw-r--r--deps/v8/src/logging/counters-definitions.h9
-rw-r--r--deps/v8/src/logging/counters.h4
-rw-r--r--deps/v8/src/logging/log.cc57
-rw-r--r--deps/v8/src/numbers/OWNERS6
-rw-r--r--deps/v8/src/numbers/conversions.cc4
-rw-r--r--deps/v8/src/objects/OWNERS3
-rw-r--r--deps/v8/src/objects/api-callbacks-inl.h8
-rw-r--r--deps/v8/src/objects/api-callbacks.h16
-rw-r--r--deps/v8/src/objects/arguments-inl.h10
-rw-r--r--deps/v8/src/objects/arguments.h4
-rw-r--r--deps/v8/src/objects/bigint.cc183
-rw-r--r--deps/v8/src/objects/bigint.h10
-rw-r--r--deps/v8/src/objects/code-inl.h50
-rw-r--r--deps/v8/src/objects/code.cc22
-rw-r--r--deps/v8/src/objects/code.h50
-rw-r--r--deps/v8/src/objects/compressed-slots-inl.h54
-rw-r--r--deps/v8/src/objects/compressed-slots.h29
-rw-r--r--deps/v8/src/objects/contexts-inl.h8
-rw-r--r--deps/v8/src/objects/contexts.cc40
-rw-r--r--deps/v8/src/objects/contexts.h52
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h114
-rw-r--r--deps/v8/src/objects/descriptor-array.h45
-rw-r--r--deps/v8/src/objects/dictionary-inl.h37
-rw-r--r--deps/v8/src/objects/dictionary.h11
-rw-r--r--deps/v8/src/objects/elements.cc251
-rw-r--r--deps/v8/src/objects/elements.h11
-rw-r--r--deps/v8/src/objects/embedder-data-slot-inl.h6
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h19
-rw-r--r--deps/v8/src/objects/feedback-vector.cc73
-rw-r--r--deps/v8/src/objects/feedback-vector.h14
-rw-r--r--deps/v8/src/objects/field-index-inl.h12
-rw-r--r--deps/v8/src/objects/field-index.h6
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h78
-rw-r--r--deps/v8/src/objects/fixed-array.h45
-rw-r--r--deps/v8/src/objects/free-space-inl.h36
-rw-r--r--deps/v8/src/objects/free-space.h3
-rw-r--r--deps/v8/src/objects/hash-table-inl.h20
-rw-r--r--deps/v8/src/objects/hash-table.h9
-rw-r--r--deps/v8/src/objects/heap-number-inl.h4
-rw-r--r--deps/v8/src/objects/heap-object-inl.h14
-rw-r--r--deps/v8/src/objects/heap-object.h58
-rw-r--r--deps/v8/src/objects/instance-type.h40
-rw-r--r--deps/v8/src/objects/intl-objects.cc273
-rw-r--r--deps/v8/src/objects/intl-objects.h28
-rw-r--r--deps/v8/src/objects/intl-objects.tq24
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h4
-rw-r--r--deps/v8/src/objects/js-array-buffer.h6
-rw-r--r--deps/v8/src/objects/js-array-inl.h7
-rw-r--r--deps/v8/src/objects/js-array.h6
-rw-r--r--deps/v8/src/objects/js-break-iterator-inl.h12
-rw-r--r--deps/v8/src/objects/js-break-iterator.cc20
-rw-r--r--deps/v8/src/objects/js-break-iterator.h32
-rw-r--r--deps/v8/src/objects/js-collator-inl.h2
-rw-r--r--deps/v8/src/objects/js-collator.cc29
-rw-r--r--deps/v8/src/objects/js-collator.h14
-rw-r--r--deps/v8/src/objects/js-collection-iterator.h1
-rw-r--r--deps/v8/src/objects/js-collection.h4
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc221
-rw-r--r--deps/v8/src/objects/js-date-time-format.h6
-rw-r--r--deps/v8/src/objects/js-list-format-inl.h4
-rw-r--r--deps/v8/src/objects/js-list-format.cc64
-rw-r--r--deps/v8/src/objects/js-list-format.h18
-rw-r--r--deps/v8/src/objects/js-locale.cc13
-rw-r--r--deps/v8/src/objects/js-locale.h9
-rw-r--r--deps/v8/src/objects/js-number-format-inl.h11
-rw-r--r--deps/v8/src/objects/js-number-format.cc326
-rw-r--r--deps/v8/src/objects/js-number-format.h37
-rw-r--r--deps/v8/src/objects/js-objects-inl.h351
-rw-r--r--deps/v8/src/objects/js-objects.cc320
-rw-r--r--deps/v8/src/objects/js-objects.h263
-rw-r--r--deps/v8/src/objects/js-plural-rules-inl.h7
-rw-r--r--deps/v8/src/objects/js-plural-rules.cc229
-rw-r--r--deps/v8/src/objects/js-plural-rules.h20
-rw-r--r--deps/v8/src/objects/js-proxy-inl.h7
-rw-r--r--deps/v8/src/objects/js-proxy.h19
-rw-r--r--deps/v8/src/objects/js-regexp.h57
-rw-r--r--deps/v8/src/objects/js-relative-time-format-inl.h4
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc39
-rw-r--r--deps/v8/src/objects/js-relative-time-format.h19
-rw-r--r--deps/v8/src/objects/js-segment-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-segment-iterator.cc21
-rw-r--r--deps/v8/src/objects/js-segmenter-inl.h2
-rw-r--r--deps/v8/src/objects/js-segmenter.cc31
-rw-r--r--deps/v8/src/objects/js-segmenter.h13
-rw-r--r--deps/v8/src/objects/js-weak-refs-inl.h14
-rw-r--r--deps/v8/src/objects/js-weak-refs.h68
-rw-r--r--deps/v8/src/objects/keys.cc5
-rw-r--r--deps/v8/src/objects/layout-descriptor-inl.h4
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h72
-rw-r--r--deps/v8/src/objects/literal-objects.cc75
-rw-r--r--deps/v8/src/objects/literal-objects.h19
-rw-r--r--deps/v8/src/objects/lookup-inl.h55
-rw-r--r--deps/v8/src/objects/lookup.cc487
-rw-r--r--deps/v8/src/objects/lookup.h10
-rw-r--r--deps/v8/src/objects/map-inl.h156
-rw-r--r--deps/v8/src/objects/map-updater.cc27
-rw-r--r--deps/v8/src/objects/map-updater.h5
-rw-r--r--deps/v8/src/objects/map.cc185
-rw-r--r--deps/v8/src/objects/map.h91
-rw-r--r--deps/v8/src/objects/maybe-object.h4
-rw-r--r--deps/v8/src/objects/module-inl.h97
-rw-r--r--deps/v8/src/objects/module.cc768
-rw-r--r--deps/v8/src/objects/module.h220
-rw-r--r--deps/v8/src/objects/name-inl.h31
-rw-r--r--deps/v8/src/objects/name.h37
-rw-r--r--deps/v8/src/objects/object-list-macros.h28
-rw-r--r--deps/v8/src/objects/object-macros-undef.h7
-rw-r--r--deps/v8/src/objects/object-macros.h185
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h13
-rw-r--r--deps/v8/src/objects/objects-definitions.h35
-rw-r--r--deps/v8/src/objects/objects-inl.h378
-rw-r--r--deps/v8/src/objects/objects.cc252
-rw-r--r--deps/v8/src/objects/objects.h67
-rw-r--r--deps/v8/src/objects/oddball-inl.h6
-rw-r--r--deps/v8/src/objects/ordered-hash-table-inl.h4
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc6
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h10
-rw-r--r--deps/v8/src/objects/property-array-inl.h35
-rw-r--r--deps/v8/src/objects/property-array.h6
-rw-r--r--deps/v8/src/objects/property-cell.h2
-rw-r--r--deps/v8/src/objects/property.cc3
-rw-r--r--deps/v8/src/objects/prototype-inl.h9
-rw-r--r--deps/v8/src/objects/scope-info.cc71
-rw-r--r--deps/v8/src/objects/scope-info.h23
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h22
-rw-r--r--deps/v8/src/objects/shared-function-info.h31
-rw-r--r--deps/v8/src/objects/slots.h12
-rw-r--r--deps/v8/src/objects/source-text-module.cc661
-rw-r--r--deps/v8/src/objects/source-text-module.h220
-rw-r--r--deps/v8/src/objects/stack-frame-info-inl.h4
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc313
-rw-r--r--deps/v8/src/objects/stack-frame-info.h35
-rw-r--r--deps/v8/src/objects/string-inl.h77
-rw-r--r--deps/v8/src/objects/string.cc46
-rw-r--r--deps/v8/src/objects/string.h91
-rw-r--r--deps/v8/src/objects/synthetic-module.cc108
-rw-r--r--deps/v8/src/objects/synthetic-module.h69
-rw-r--r--deps/v8/src/objects/tagged-field-inl.h162
-rw-r--r--deps/v8/src/objects/tagged-field.h76
-rw-r--r--deps/v8/src/objects/tagged-impl-inl.h44
-rw-r--r--deps/v8/src/objects/tagged-impl.h39
-rw-r--r--deps/v8/src/objects/tagged-value-inl.h31
-rw-r--r--deps/v8/src/objects/tagged-value.h6
-rw-r--r--deps/v8/src/objects/template-objects.cc4
-rw-r--r--deps/v8/src/objects/template-objects.h2
-rw-r--r--deps/v8/src/objects/templates-inl.h8
-rw-r--r--deps/v8/src/objects/templates.h2
-rw-r--r--deps/v8/src/objects/transitions-inl.h7
-rw-r--r--deps/v8/src/objects/transitions.cc2
-rw-r--r--deps/v8/src/objects/transitions.h2
-rw-r--r--deps/v8/src/objects/value-serializer.cc225
-rw-r--r--deps/v8/src/objects/value-serializer.h12
-rw-r--r--deps/v8/src/parsing/OWNERS2
-rw-r--r--deps/v8/src/parsing/expression-scope.h11
-rw-r--r--deps/v8/src/parsing/parse-info.cc2
-rw-r--r--deps/v8/src/parsing/parser-base.h71
-rw-r--r--deps/v8/src/parsing/parser.cc29
-rw-r--r--deps/v8/src/parsing/parser.h30
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.cc29
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.h13
-rw-r--r--deps/v8/src/parsing/preparse-data.cc2
-rw-r--r--deps/v8/src/parsing/preparser.cc8
-rw-r--r--deps/v8/src/parsing/preparser.h81
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc12
-rw-r--r--deps/v8/src/parsing/scanner.cc36
-rw-r--r--deps/v8/src/parsing/scanner.h4
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc16
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc7
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h2
-rw-r--r--deps/v8/src/profiler/tick-sample.cc4
-rw-r--r--deps/v8/src/regexp/OWNERS2
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h86
-rw-r--r--deps/v8/src/regexp/jsregexp.cc7055
-rw-r--r--deps/v8/src/regexp/jsregexp.h1548
-rw-r--r--deps/v8/src/regexp/regexp-ast.h24
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator-inl.h (renamed from deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h)24
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.cc (renamed from deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc)221
-rw-r--r--deps/v8/src/regexp/regexp-bytecode-generator.h (renamed from deps/v8/src/regexp/regexp-macro-assembler-irregexp.h)39
-rw-r--r--deps/v8/src/regexp/regexp-bytecodes.h (renamed from deps/v8/src/regexp/bytecodes-irregexp.h)11
-rw-r--r--deps/v8/src/regexp/regexp-compiler-tonode.cc1678
-rw-r--r--deps/v8/src/regexp/regexp-compiler.cc3551
-rw-r--r--deps/v8/src/regexp/regexp-compiler.h657
-rw-r--r--deps/v8/src/regexp/regexp-dotprinter.cc244
-rw-r--r--deps/v8/src/regexp/regexp-dotprinter.h23
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.cc (renamed from deps/v8/src/regexp/interpreter-irregexp.cc)269
-rw-r--r--deps/v8/src/regexp/regexp-interpreter.h (renamed from deps/v8/src/regexp/interpreter-irregexp.h)18
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-arch.h30
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc9
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h8
-rw-r--r--deps/v8/src/regexp/regexp-nodes.h675
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc66
-rw-r--r--deps/v8/src/regexp/regexp-parser.h10
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc6
-rw-r--r--deps/v8/src/regexp/regexp.cc1018
-rw-r--r--deps/v8/src/regexp/regexp.h177
-rw-r--r--deps/v8/src/roots/OWNERS11
-rw-r--r--deps/v8/src/roots/roots-inl.h3
-rw-r--r--deps/v8/src/roots/roots.h3
-rw-r--r--deps/v8/src/runtime/OWNERS3
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc40
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc31
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc30
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc1
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc44
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc7
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc349
-rw-r--r--deps/v8/src/runtime/runtime-module.cc8
-rw-r--r--deps/v8/src/runtime/runtime-object.cc82
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc12
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc48
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc18
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc1
-rw-r--r--deps/v8/src/runtime/runtime-test.cc127
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc2
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc164
-rw-r--r--deps/v8/src/runtime/runtime-weak-refs.cc3
-rw-r--r--deps/v8/src/runtime/runtime.h92
-rw-r--r--deps/v8/src/snapshot/OWNERS2
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc14
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.cc56
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.h12
-rw-r--r--deps/v8/src/snapshot/deserializer.cc127
-rw-r--r--deps/v8/src/snapshot/deserializer.h19
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc10
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc7
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc6
-rw-r--r--deps/v8/src/snapshot/natives.h1
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.cc3
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc2
-rw-r--r--deps/v8/src/snapshot/references.h61
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.cc66
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.h8
-rw-r--r--deps/v8/src/snapshot/serializer-common.h30
-rw-r--r--deps/v8/src/snapshot/serializer.cc72
-rw-r--r--deps/v8/src/snapshot/serializer.h10
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h8
-rw-r--r--deps/v8/src/snapshot/snapshot.h6
-rw-r--r--deps/v8/src/strings/OWNERS4
-rw-r--r--deps/v8/src/strings/char-predicates-inl.h12
-rw-r--r--deps/v8/src/strings/char-predicates.h6
-rw-r--r--deps/v8/src/strings/string-builder-inl.h11
-rw-r--r--deps/v8/src/strings/string-builder.cc34
-rw-r--r--deps/v8/src/strings/string-stream.cc5
-rw-r--r--deps/v8/src/tasks/OWNERS6
-rw-r--r--deps/v8/src/third_party/siphash/OWNERS3
-rw-r--r--deps/v8/src/third_party/utf8-decoder/OWNERS2
-rw-r--r--deps/v8/src/third_party/valgrind/OWNERS1
-rw-r--r--deps/v8/src/third_party/vtune/OWNERS1
-rw-r--r--deps/v8/src/torque/ast.h226
-rw-r--r--deps/v8/src/torque/constants.h4
-rw-r--r--deps/v8/src/torque/contextual.h18
-rw-r--r--deps/v8/src/torque/csa-generator.cc31
-rw-r--r--deps/v8/src/torque/declarable.h85
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc23
-rw-r--r--deps/v8/src/torque/declaration-visitor.h12
-rw-r--r--deps/v8/src/torque/declarations.cc12
-rw-r--r--deps/v8/src/torque/declarations.h14
-rw-r--r--deps/v8/src/torque/earley-parser.cc25
-rw-r--r--deps/v8/src/torque/earley-parser.h4
-rw-r--r--deps/v8/src/torque/global-context.cc24
-rw-r--r--deps/v8/src/torque/global-context.h35
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc702
-rw-r--r--deps/v8/src/torque/implementation-visitor.h128
-rw-r--r--deps/v8/src/torque/ls/json-parser.cc2
-rw-r--r--deps/v8/src/torque/ls/message-handler.cc42
-rw-r--r--deps/v8/src/torque/ls/message-handler.h4
-rw-r--r--deps/v8/src/torque/ls/message-pipe.h2
-rw-r--r--deps/v8/src/torque/ls/message.h10
-rw-r--r--deps/v8/src/torque/ls/torque-language-server.cc6
-rw-r--r--deps/v8/src/torque/server-data.h1
-rw-r--r--deps/v8/src/torque/source-positions.cc59
-rw-r--r--deps/v8/src/torque/source-positions.h32
-rw-r--r--deps/v8/src/torque/torque-compiler.cc33
-rw-r--r--deps/v8/src/torque/torque-compiler.h3
-rw-r--r--deps/v8/src/torque/torque-parser.cc280
-rw-r--r--deps/v8/src/torque/torque.cc22
-rw-r--r--deps/v8/src/torque/type-oracle.cc10
-rw-r--r--deps/v8/src/torque/type-oracle.h30
-rw-r--r--deps/v8/src/torque/type-visitor.cc94
-rw-r--r--deps/v8/src/torque/types.cc22
-rw-r--r--deps/v8/src/torque/types.h23
-rw-r--r--deps/v8/src/torque/utils.cc38
-rw-r--r--deps/v8/src/torque/utils.h16
-rw-r--r--deps/v8/src/tracing/OWNERS2
-rw-r--r--deps/v8/src/tracing/trace-event.h4
-rw-r--r--deps/v8/src/trap-handler/OWNERS2
-rw-r--r--deps/v8/src/utils/OWNERS2
-rw-r--r--deps/v8/src/utils/allocation.cc4
-rw-r--r--deps/v8/src/utils/allocation.h11
-rw-r--r--deps/v8/src/utils/splay-tree-inl.h292
-rw-r--r--deps/v8/src/utils/splay-tree.h194
-rw-r--r--deps/v8/src/utils/utils.h63
-rw-r--r--deps/v8/src/utils/vector.h5
-rw-r--r--deps/v8/src/wasm/OWNERS2
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h17
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h11
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h12
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h16
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc251
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h32
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h50
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h40
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h139
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h139
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h12
-rw-r--r--deps/v8/src/wasm/c-api.cc1058
-rw-r--r--deps/v8/src/wasm/decoder.h4
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h510
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc9
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h2
-rw-r--r--deps/v8/src/wasm/function-compiler.cc116
-rw-r--r--deps/v8/src/wasm/function-compiler.h40
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc18
-rw-r--r--deps/v8/src/wasm/js-to-wasm-wrapper-cache.h41
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc24
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h100
-rw-r--r--deps/v8/src/wasm/memory-tracing.cc6
-rw-r--r--deps/v8/src/wasm/module-compiler.cc335
-rw-r--r--deps/v8/src/wasm/module-compiler.h30
-rw-r--r--deps/v8/src/wasm/module-decoder.cc223
-rw-r--r--deps/v8/src/wasm/module-decoder.h4
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc212
-rw-r--r--deps/v8/src/wasm/value-type.h86
-rw-r--r--deps/v8/src/wasm/wasm-arguments.h73
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc103
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h19
-rw-r--r--deps/v8/src/wasm/wasm-constants.h6
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc23
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc74
-rw-r--r--deps/v8/src/wasm/wasm-engine.h7
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc5
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.cc5
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.h4
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc488
-rw-r--r--deps/v8/src/wasm/wasm-js.cc35
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc3
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc314
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h54
-rw-r--r--deps/v8/src/wasm/wasm-module.h10
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h104
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc479
-rw-r--r--deps/v8/src/wasm/wasm-objects.h117
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc42
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h54
-rw-r--r--deps/v8/src/wasm/wasm-result.cc18
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc2
-rw-r--r--deps/v8/src/wasm/wasm-text.cc175
-rw-r--r--deps/v8/src/wasm/wasm-text.h11
-rw-r--r--deps/v8/src/wasm/wasm-value.h50
-rw-r--r--deps/v8/src/zone/OWNERS3
-rw-r--r--deps/v8/src/zone/zone-allocator.h36
-rw-r--r--deps/v8/src/zone/zone-splay-tree.h38
-rw-r--r--deps/v8/src/zone/zone.cc11
-rw-r--r--deps/v8/src/zone/zone.h8
-rw-r--r--deps/v8/test/OWNERS2
-rw-r--r--deps/v8/test/cctest/BUILD.gn57
-rw-r--r--deps/v8/test/cctest/DEPS3
-rw-r--r--deps/v8/test/cctest/OWNERS12
-rw-r--r--deps/v8/test/cctest/cctest.cc16
-rw-r--r--deps/v8/test/cctest/cctest.status20
-rw-r--r--deps/v8/test/cctest/compiler/serializer-tester.cc16
-rw-r--r--deps/v8/test/cctest/compiler/test-code-assembler.cc17
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc9
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc26
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-analysis.cc13
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc15
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc38
-rw-r--r--deps/v8/test/cctest/disasm-regex-helper.cc291
-rw-r--r--deps/v8/test/cctest/disasm-regex-helper.h318
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc1
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc5
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc75
-rw-r--r--deps/v8/test/cctest/heap/test-invalidated-slots.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-iterators.cc50
-rw-r--r--deps/v8/test/cctest/heap/test-page-promotion.cc3
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc1
-rw-r--r--deps/v8/test/cctest/heap/test-weak-references.cc178
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc9
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h3
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden14
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden42
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden50
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden56
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden110
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden14
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden133
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc144
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc22
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc47
-rw-r--r--deps/v8/test/cctest/libplatform/test-tracing.cc433
-rw-r--r--deps/v8/test/cctest/log-eq-of-logging-and-traversal.js201
-rw-r--r--deps/v8/test/cctest/test-allocation.cc2
-rw-r--r--deps/v8/test/cctest/test-api-array-buffer.cc529
-rw-r--r--deps/v8/test/cctest/test-api-stack-traces.cc4
-rw-r--r--deps/v8/test/cctest/test-api-typed-array.cc661
-rw-r--r--deps/v8/test/cctest/test-api.cc1517
-rw-r--r--deps/v8/test/cctest/test-api.h10
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc3
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc1058
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc10
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc10
-rw-r--r--deps/v8/test/cctest/test-assembler-s390.cc117
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc49
-rw-r--r--deps/v8/test/cctest/test-compiler.cc4
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc1
-rw-r--r--deps/v8/test/cctest/test-debug.cc26
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc13
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc2
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc198
-rw-r--r--deps/v8/test/cctest/test-hashcode.cc8
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc27
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc16
-rw-r--r--deps/v8/test/cctest/test-inspector.cc15
-rw-r--r--deps/v8/test/cctest/test-intl.cc2
-rw-r--r--deps/v8/test/cctest/test-js-weak-refs.cc4
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc2
-rw-r--r--deps/v8/test/cctest/test-lockers.cc8
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc1
-rw-r--r--deps/v8/test/cctest/test-log.cc75
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm64.cc97
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc10
-rw-r--r--deps/v8/test/cctest/test-parsing.cc24
-rw-r--r--deps/v8/test/cctest/test-poison-disasm-arm.cc283
-rw-r--r--deps/v8/test/cctest/test-poison-disasm-arm64.cc158
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc9
-rw-r--r--deps/v8/test/cctest/test-regexp.cc212
-rw-r--r--deps/v8/test/cctest/test-roots.cc3
-rw-r--r--deps/v8/test/cctest/test-serialize.cc138
-rw-r--r--deps/v8/test/cctest/test-smi-lexicographic-compare.cc2
-rw-r--r--deps/v8/test/cctest/test-strings.cc10
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc42
-rw-r--r--deps/v8/test/cctest/test-transitions.cc2
-rw-r--r--deps/v8/test/cctest/test-typedarrays.cc7
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc6
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc88
-rw-r--r--deps/v8/test/cctest/wasm/test-c-wasm-entry.cc44
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc70
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc13
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc371
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc39
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc544
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc46
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-codegen.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc13
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc13
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc29
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc8
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc33
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h5
-rw-r--r--deps/v8/test/common/types-fuzz.h5
-rw-r--r--deps/v8/test/common/wasm/OWNERS1
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h10
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h23
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc8
-rw-r--r--deps/v8/test/debugger/OWNERS1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-function-var.js42
-rw-r--r--deps/v8/test/debugger/debug/debug-modules-set-variable-value.mjs (renamed from deps/v8/test/debugger/debug/debug-modules-set-variable-value.js)6
-rw-r--r--deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.mjs (renamed from deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.mjs (renamed from deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.js)7
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-5279.js1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-387599.js6
-rw-r--r--deps/v8/test/debugger/regress/regress-7421.js1
-rw-r--r--deps/v8/test/debugger/regress/regress-crbug-760225.js30
-rw-r--r--deps/v8/test/debugger/test-api.js3
-rw-r--r--deps/v8/test/debugger/testcfg.py13
-rw-r--r--deps/v8/test/fuzzer/multi-return.cc28
-rw-r--r--deps/v8/test/fuzzer/regexp-builtins.cc11
-rw-r--r--deps/v8/test/fuzzer/regexp.cc5
-rw-r--r--deps/v8/test/fuzzer/wasm-code.cc17
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc166
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc4
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.h6
-rw-r--r--deps/v8/test/inspector/DEPS1
-rw-r--r--deps/v8/test/inspector/OWNERS4
-rw-r--r--deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt24
-rw-r--r--deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt2
-rw-r--r--deps/v8/test/inspector/debugger/restart-frame-expected.txt10
-rw-r--r--deps/v8/test/inspector/debugger/restart-frame.js33
-rw-r--r--deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt5
-rw-r--r--deps/v8/test/inspector/debugger/wasm-clone-module.js40
-rw-r--r--deps/v8/test/inspector/debugger/wasm-imports-expected.txt4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-expected.txt4
-rw-r--r--deps/v8/test/inspector/inspector-test.cc7
-rw-r--r--deps/v8/test/inspector/isolate-data.cc34
-rw-r--r--deps/v8/test/inspector/runtime/call-function-on-async-expected.txt4
-rw-r--r--deps/v8/test/inspector/runtime/es6-module-expected.txt44
-rw-r--r--deps/v8/test/inspector/runtime/es6-module.js76
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-async-expected.txt6
-rw-r--r--deps/v8/test/inspector/runtime/exception-thrown-expected.txt10
-rw-r--r--deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt4
-rw-r--r--deps/v8/test/inspector/runtime/query-objects-expected.txt3
-rw-r--r--deps/v8/test/inspector/runtime/query-objects.js44
-rw-r--r--deps/v8/test/inspector/runtime/run-script-async-expected.txt6
-rw-r--r--deps/v8/test/intl/general/supported-locales-of.js6
-rw-r--r--deps/v8/test/intl/intl.status3
-rw-r--r--deps/v8/test/intl/number-format/property-override.js2
-rw-r--r--deps/v8/test/intl/number-format/unified/constructor-order.js16
-rw-r--r--deps/v8/test/intl/number-format/unified/currency-display.js2
-rw-r--r--deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js175
-rw-r--r--deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js177
-rw-r--r--deps/v8/test/intl/number-format/unified/notation.js58
-rw-r--r--deps/v8/test/intl/number-format/unified/percent.js65
-rw-r--r--deps/v8/test/intl/number-format/unified/sign-display.js2
-rw-r--r--deps/v8/test/intl/number-format/unified/style-unit.js2
-rw-r--r--deps/v8/test/intl/number-format/unified/unit-display.js2
-rw-r--r--deps/v8/test/intl/regress-8866.js11
-rw-r--r--deps/v8/test/intl/regress-9312.js32
-rw-r--r--deps/v8/test/intl/regress-9408.js28
-rw-r--r--deps/v8/test/intl/regress-9513.js28
-rw-r--r--deps/v8/test/intl/relative-time-format/format-en.js24
-rw-r--r--deps/v8/test/intl/testcfg.py3
-rw-r--r--deps/v8/test/js-perf-test/BigInt/add.js142
-rw-r--r--deps/v8/test/js-perf-test/BigInt/as-uint-n.js86
-rw-r--r--deps/v8/test/js-perf-test/BigInt/bigint-util.js65
-rw-r--r--deps/v8/test/js-perf-test/BigInt/run.js3
-rw-r--r--deps/v8/test/js-perf-test/BigInt/test-config.js10
-rw-r--r--deps/v8/test/js-perf-test/BytecodeHandlers/LdaKeyedProperty.js40
-rw-r--r--deps/v8/test/js-perf-test/BytecodeHandlers/LdaNamedProperty.js60
-rw-r--r--deps/v8/test/js-perf-test/InterpreterEntryTrampoline/arguments.js119
-rw-r--r--deps/v8/test/js-perf-test/InterpreterEntryTrampoline/locals.js326
-rw-r--r--deps/v8/test/js-perf-test/InterpreterEntryTrampoline/run.js26
-rw-r--r--deps/v8/test/js-perf-test/JSTests1.json43
-rw-r--r--deps/v8/test/js-perf-test/JSTests3.json60
-rw-r--r--deps/v8/test/js-perf-test/JSTests4.json10
-rw-r--r--deps/v8/test/js-perf-test/JSTests5.json11
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/array-map.js32
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/array-reduce.js59
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/has-own-property.js23
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/run.js3
-rw-r--r--deps/v8/test/js-perf-test/Proxies/proxies.js158
-rw-r--r--deps/v8/test/js-perf-test/RegExp.json2
-rw-r--r--deps/v8/test/js-perf-test/RegExp/RegExpTests.json2
-rw-r--r--deps/v8/test/js-perf-test/RegExp/complex_case_test.js46
-rw-r--r--deps/v8/test/js-perf-test/RegExp/run.js1
-rw-r--r--deps/v8/test/message/fail/arrow-bare-rest-param.out4
-rw-r--r--deps/v8/test/message/fail/arrow-missing.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-in-module.mjs (renamed from deps/v8/test/message/fail/class-fields-private-throw-in-module.js)2
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-in-module.out2
-rw-r--r--deps/v8/test/message/fail/class-methods-private-throw-write.js13
-rw-r--r--deps/v8/test/message/fail/class-methods-private-throw-write.out6
-rw-r--r--deps/v8/test/message/fail/class-spread-property.out4
-rw-r--r--deps/v8/test/message/fail/export-duplicate-as.mjs (renamed from deps/v8/test/message/fail/export-duplicate-as.js)2
-rw-r--r--deps/v8/test/message/fail/export-duplicate-as.out2
-rw-r--r--deps/v8/test/message/fail/export-duplicate-default.mjs (renamed from deps/v8/test/message/fail/export-duplicate-default.js)2
-rw-r--r--deps/v8/test/message/fail/export-duplicate-default.out2
-rw-r--r--deps/v8/test/message/fail/export-duplicate.mjs (renamed from deps/v8/test/message/fail/export-duplicate.js)2
-rw-r--r--deps/v8/test/message/fail/export-duplicate.out2
-rw-r--r--deps/v8/test/message/fail/formal-parameters-trailing-comma.out4
-rw-r--r--deps/v8/test/message/fail/import-as-eval.mjs (renamed from deps/v8/test/message/fail/import-as-eval.js)2
-rw-r--r--deps/v8/test/message/fail/import-as-eval.out5
-rw-r--r--deps/v8/test/message/fail/import-as-redeclaration.mjs (renamed from deps/v8/test/message/fail/import-as-redeclaration.js)2
-rw-r--r--deps/v8/test/message/fail/import-as-redeclaration.out5
-rw-r--r--deps/v8/test/message/fail/import-as-reserved-word.mjs (renamed from deps/v8/test/message/fail/import-as-reserved-word.js)2
-rw-r--r--deps/v8/test/message/fail/import-as-reserved-word.out5
-rw-r--r--deps/v8/test/message/fail/import-blah-module.mjs5
-rw-r--r--deps/v8/test/message/fail/import-blah-module.out4
-rw-r--r--deps/v8/test/message/fail/import-blah-script.js5
-rw-r--r--deps/v8/test/message/fail/import-blah-script.out7
-rw-r--r--deps/v8/test/message/fail/import-eval.mjs (renamed from deps/v8/test/message/fail/import-eval.js)2
-rw-r--r--deps/v8/test/message/fail/import-eval.out5
-rw-r--r--deps/v8/test/message/fail/import-redeclaration.mjs (renamed from deps/v8/test/message/fail/import-redeclaration.js)2
-rw-r--r--deps/v8/test/message/fail/import-redeclaration.out5
-rw-r--r--deps/v8/test/message/fail/import-reserved-word.mjs (renamed from deps/v8/test/message/fail/import-reserved-word.js)2
-rw-r--r--deps/v8/test/message/fail/import-reserved-word.out5
-rw-r--r--deps/v8/test/message/fail/import-script.js6
-rw-r--r--deps/v8/test/message/fail/import-script.out7
-rw-r--r--deps/v8/test/message/fail/invalid-spread.out4
-rw-r--r--deps/v8/test/message/fail/modules-cycle1.mjs (renamed from deps/v8/test/message/fail/modules-cycle1.js)4
-rw-r--r--deps/v8/test/message/fail/modules-cycle1.out6
-rw-r--r--deps/v8/test/message/fail/modules-cycle2.mjs (renamed from deps/v8/test/message/fail/modules-cycle2.js)4
-rw-r--r--deps/v8/test/message/fail/modules-cycle2.out6
-rw-r--r--deps/v8/test/message/fail/modules-cycle3.mjs6
-rw-r--r--deps/v8/test/message/fail/modules-cycle3.out6
-rw-r--r--deps/v8/test/message/fail/modules-cycle4.mjs (renamed from deps/v8/test/message/fail/modules-skip-cycle2.js)2
-rw-r--r--deps/v8/test/message/fail/modules-cycle4.out7
-rw-r--r--deps/v8/test/message/fail/modules-cycle5.mjs6
-rw-r--r--deps/v8/test/message/fail/modules-cycle5.out6
-rw-r--r--deps/v8/test/message/fail/modules-cycle6.js8
-rw-r--r--deps/v8/test/message/fail/modules-cycle6.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-4.js)4
-rw-r--r--deps/v8/test/message/fail/modules-cycle6.out7
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export1.mjs (renamed from deps/v8/test/message/fail/modules-duplicate-export2.js)4
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export1.out2
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export2.mjs (renamed from deps/v8/test/message/fail/modules-duplicate-export1.js)4
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export2.out2
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export3.mjs (renamed from deps/v8/test/message/fail/modules-duplicate-export3.js)2
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export3.out2
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export4.mjs (renamed from deps/v8/test/message/fail/modules-duplicate-export4.js)2
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export4.out2
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export5.mjs (renamed from deps/v8/test/message/fail/modules-duplicate-export5.js)5
-rw-r--r--deps/v8/test/message/fail/modules-duplicate-export5.out5
-rw-r--r--deps/v8/test/message/fail/modules-export-illformed-class.mjs (renamed from deps/v8/test/message/fail/modules-export-illformed-class.js)2
-rw-r--r--deps/v8/test/message/fail/modules-export-illformed-class.out4
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare1.js8
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare1.mjs6
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare1.out4
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare2.js8
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare2.mjs6
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare2.out4
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare3.js8
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare3.mjs6
-rw-r--r--deps/v8/test/message/fail/modules-import-redeclare3.out2
-rw-r--r--deps/v8/test/message/fail/modules-import1.mjs5
-rw-r--r--deps/v8/test/message/fail/modules-import1.out6
-rw-r--r--deps/v8/test/message/fail/modules-import2.js7
-rw-r--r--deps/v8/test/message/fail/modules-import2.mjs5
-rw-r--r--deps/v8/test/message/fail/modules-import2.out6
-rw-r--r--deps/v8/test/message/fail/modules-import3.js7
-rw-r--r--deps/v8/test/message/fail/modules-import3.mjs5
-rw-r--r--deps/v8/test/message/fail/modules-import3.out6
-rw-r--r--deps/v8/test/message/fail/modules-import4.js8
-rw-r--r--deps/v8/test/message/fail/modules-import4.mjs6
-rw-r--r--deps/v8/test/message/fail/modules-import4.out6
-rw-r--r--deps/v8/test/message/fail/modules-import5.js9
-rw-r--r--deps/v8/test/message/fail/modules-import5.mjs (renamed from deps/v8/test/message/fail/modules-cycle3.js)7
-rw-r--r--deps/v8/test/message/fail/modules-import5.out6
-rw-r--r--deps/v8/test/message/fail/modules-import6.mjs (renamed from deps/v8/test/message/fail/modules-import6.js)6
-rw-r--r--deps/v8/test/message/fail/modules-import6.out6
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle2.mjs5
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle3.js6
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle3.mjs6
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle5.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-10.js)2
-rw-r--r--deps/v8/test/message/fail/modules-skip-cycle6.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-9.js)2
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict1.js7
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict1.mjs (renamed from deps/v8/test/message/fail/modules-import1.js)4
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict1.out6
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict2.mjs (renamed from deps/v8/test/message/fail/modules-star-conflict2.js)8
-rw-r--r--deps/v8/test/message/fail/modules-star-conflict2.out6
-rw-r--r--deps/v8/test/message/fail/modules-star-default.mjs (renamed from deps/v8/test/message/fail/modules-star-default.js)4
-rw-r--r--deps/v8/test/message/fail/modules-star-default.out6
-rw-r--r--deps/v8/test/message/fail/modules-undefined-export1.mjs (renamed from deps/v8/test/message/fail/modules-undefined-export1.js)2
-rw-r--r--deps/v8/test/message/fail/modules-undefined-export1.out2
-rw-r--r--deps/v8/test/message/fail/modules-undefined-export2.mjs (renamed from deps/v8/test/message/fail/modules-undefined-export2.js)2
-rw-r--r--deps/v8/test/message/fail/modules-undefined-export2.out2
-rw-r--r--deps/v8/test/message/fail/new-target-assignment.out4
-rw-r--r--deps/v8/test/message/fail/new-target-postfix-op.out4
-rw-r--r--deps/v8/test/message/fail/new-target-prefix-op.out4
-rw-r--r--deps/v8/test/message/fail/redeclaration5.mjs (renamed from deps/v8/test/message/fail/redeclaration5.js)2
-rw-r--r--deps/v8/test/message/fail/redeclaration5.out2
-rw-r--r--deps/v8/test/message/fail/wasm-exception-rethrow.out2
-rw-r--r--deps/v8/test/message/fail/wasm-exception-throw.out2
-rw-r--r--deps/v8/test/message/fail/wasm-function-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-module-and-function-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-module-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-no-name.out2
-rw-r--r--deps/v8/test/message/fail/wasm-trap.out2
-rw-r--r--deps/v8/test/message/fail/weak-refs-unregister.js8
-rw-r--r--deps/v8/test/message/fail/weak-refs-unregister.out6
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert_not_same.js7
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert_not_same.out10
-rw-r--r--deps/v8/test/message/regress/fail/regress-8409.out4
-rw-r--r--deps/v8/test/message/regress/fail/regress-900383.mjs (renamed from deps/v8/test/message/regress/fail/regress-900383.js)2
-rw-r--r--deps/v8/test/message/regress/fail/regress-900383.out2
-rw-r--r--deps/v8/test/message/testcfg.py13
-rw-r--r--deps/v8/test/message/wasm-function-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-function-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-module-and-function-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-module-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-module-name-streaming.out2
-rw-r--r--deps/v8/test/message/wasm-no-name-async.out2
-rw-r--r--deps/v8/test/message/wasm-no-name-streaming.out2
-rw-r--r--deps/v8/test/mjsunit/allocation-folding.js5
-rw-r--r--deps/v8/test/mjsunit/arguments-apply-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/arguments-deopt.js5
-rw-r--r--deps/v8/test/mjsunit/array-bounds-check-removal.js9
-rw-r--r--deps/v8/test/mjsunit/array-constructor-feedback.js3
-rw-r--r--deps/v8/test/mjsunit/array-non-smi-length.js2
-rw-r--r--deps/v8/test/mjsunit/array-pop.js1
-rw-r--r--deps/v8/test/mjsunit/array-push.js2
-rw-r--r--deps/v8/test/mjsunit/array-push4.js1
-rw-r--r--deps/v8/test/mjsunit/array-push6.js2
-rw-r--r--deps/v8/test/mjsunit/array-push8.js3
-rw-r--r--deps/v8/test/mjsunit/array-reduce.js202
-rw-r--r--deps/v8/test/mjsunit/array-shift3.js9
-rw-r--r--deps/v8/test/mjsunit/array-shift5.js96
-rw-r--r--deps/v8/test/mjsunit/array-sort.js15
-rw-r--r--deps/v8/test/mjsunit/asm/regress-9531.js28
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces-realms.js115
-rw-r--r--deps/v8/test/mjsunit/bounds-checks-elimination.js1
-rw-r--r--deps/v8/test/mjsunit/closures.js5
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-noopt.js18
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-opt.js18
-rw-r--r--deps/v8/test/mjsunit/compare-objects.js3
-rw-r--r--deps/v8/test/mjsunit/comparison-ops-and-undefined.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js78
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js36
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-add.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js82
-rw-r--r--deps/v8/test/mjsunit/compiler/bigint-negate.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-add-static.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/dataview-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/generator-jump-targets.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/keyed-load-on-string.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-const-field.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/named-store.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-for-in.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-alignment.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-arguments.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-array-len.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope-func.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope-id.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-for-let.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-forin-nested.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-infinite.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-labeled.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-literals-adapted.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-literals.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-manual1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-manual2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-maze1.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-maze2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-regex-id.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-simple.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-top1.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-top2.js25
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-top3.js27
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-try-catch.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-two.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-while-let.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/promise-constructor.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-607493.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-645851.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9041.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9087.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9137-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9137-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-935092.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-939316.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-944062-1.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-944062-2.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-945187.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-945644.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-946889.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-949435.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-952586.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-957559.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-958021.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-958420.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-958716.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-961986.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-966560-1.js9
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-966560-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-977670.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-965513.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-974474.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-974476.js34
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-v8-9113.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/tagged-template.js51
-rw-r--r--deps/v8/test/mjsunit/compiler/typedarray-keyed.js21
-rw-r--r--deps/v8/test/mjsunit/constant-compare-nil-value.js10
-rw-r--r--deps/v8/test/mjsunit/constant-fold-control-instructions.js2
-rw-r--r--deps/v8/test/mjsunit/constant-folding-2.js1
-rw-r--r--deps/v8/test/mjsunit/cross-realm-global-prototype.js1
-rw-r--r--deps/v8/test/mjsunit/default-nospec.js13
-rw-r--r--deps/v8/test/mjsunit/deopt-global-accessor.js10
-rw-r--r--deps/v8/test/mjsunit/deopt-minus-zero.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-unlinked.js4
-rw-r--r--deps/v8/test/mjsunit/deopt-with-outer-context.js8
-rw-r--r--deps/v8/test/mjsunit/div-mod.js1
-rw-r--r--deps/v8/test/mjsunit/double-truncation.js4
-rw-r--r--deps/v8/test/mjsunit/element-accessor.js44
-rw-r--r--deps/v8/test/mjsunit/elements-kind-depends.js2
-rw-r--r--deps/v8/test/mjsunit/elements-kind.js2
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-2.js10
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-3.js5
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-9.js6
-rw-r--r--deps/v8/test/mjsunit/error-stack.js28
-rw-r--r--deps/v8/test/mjsunit/es6/classes-accesors.js97
-rw-r--r--deps/v8/test/mjsunit/es6/classes-constructor.js131
-rw-r--r--deps/v8/test/mjsunit/es6/classes-name-binding.js98
-rw-r--r--deps/v8/test/mjsunit/es6/classes-proto.js152
-rw-r--r--deps/v8/test/mjsunit/es6/classes-restricted-properties.js165
-rw-r--r--deps/v8/test/mjsunit/es6/classes-test-super.js120
-rw-r--r--deps/v8/test/mjsunit/es6/classes.js823
-rw-r--r--deps/v8/test/mjsunit/es6/iterator-close.js16
-rw-r--r--deps/v8/test/mjsunit/es6/large-classes-methods.js38
-rw-r--r--deps/v8/test/mjsunit/es6/large-classes-properties.js39
-rw-r--r--deps/v8/test/mjsunit/es6/large-classes-static-methods.js44
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-8.js2
-rw-r--r--deps/v8/test/mjsunit/es6/map-iterator-9.js2
-rw-r--r--deps/v8/test/mjsunit/es6/math-trunc.js2
-rw-r--r--deps/v8/test/mjsunit/es6/new-target.js14
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-prevent-extensions.js134
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-8.js2
-rw-r--r--deps/v8/test/mjsunit/es6/set-iterator-9.js2
-rw-r--r--deps/v8/test/mjsunit/es6/symbols.js2
-rw-r--r--deps/v8/test/mjsunit/es6/templates.js20
-rw-r--r--deps/v8/test/mjsunit/es7/exponentiation-operator.js12
-rw-r--r--deps/v8/test/mjsunit/es7/regress/regress-5986.js18
-rw-r--r--deps/v8/test/mjsunit/es9/object-spread-basic.js5
-rw-r--r--deps/v8/test/mjsunit/expose-cputracemark.js37
-rw-r--r--deps/v8/test/mjsunit/external-array.js18
-rw-r--r--deps/v8/test/mjsunit/fast-element-smi-check.js8
-rw-r--r--deps/v8/test/mjsunit/fast-literal.js10
-rw-r--r--deps/v8/test/mjsunit/frozen-array-reduce.js1420
-rw-r--r--deps/v8/test/mjsunit/function-named-self-reference.js7
-rw-r--r--deps/v8/test/mjsunit/global-infinity.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/rematerialize-on-deopt.js28
-rw-r--r--deps/v8/test/mjsunit/harmony/global-configurable.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/global-writable.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/global.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/hashbang-eval.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/module-parsing-eval.mjs (renamed from deps/v8/test/mjsunit/harmony/module-parsing-eval.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-1.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-1.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-10.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-10.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-11.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-11.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-12.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-12.js)4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-13.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-13.js)8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-14.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-14.js)8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-15.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-15.js)8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-16.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-16.mjs36
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-17.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-17.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-2.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-2.js)4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-3.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-3.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-4.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-4.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-5.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-5.js)4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-6.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-6.js)4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-7.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-7.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-8.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-8.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-9.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-9.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-large.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-large.js)6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-meta.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-meta.js)8
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-namespace.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-import-namespace.js)4
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-1.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-1.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-10.mjs (renamed from deps/v8/test/message/fail/modules-skip-cycle6.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-11.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-11.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-12.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-12.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-13.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-13.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-2.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-2.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-3.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-3.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-4.mjs6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-5.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-5.js)6
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-6.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-6.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-7.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-7.js)2
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-8.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-8.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-9.mjs5
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-empty.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-empty.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-large1.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-large1.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-skip-large2.mjs (renamed from deps/v8/test/mjsunit/harmony/modules-skip-large2.js)0
-rw-r--r--deps/v8/test/mjsunit/harmony/object-fromentries.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields-special-object.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/private-methods.js228
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-overriden-exec.js26
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-912504.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/sharedarraybuffer-worker-gc-stress.js22
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/basics.js32
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js31
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js10
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js3
-rw-r--r--deps/v8/test/mjsunit/has-own-property.js133
-rw-r--r--deps/v8/test/mjsunit/hash-code.js6
-rw-r--r--deps/v8/test/mjsunit/ignition/optimized-stack-trace.js4
-rw-r--r--deps/v8/test/mjsunit/ignition/osr-from-bytecode.js2
-rw-r--r--deps/v8/test/mjsunit/ignition/osr-from-generator.js1
-rw-r--r--deps/v8/test/mjsunit/invalid-lhs.js44
-rw-r--r--deps/v8/test/mjsunit/json2.js2
-rw-r--r--deps/v8/test/mjsunit/keyed-has-ic-module-export.mjs (renamed from deps/v8/test/mjsunit/keyed-has-ic-module-export.js)2
-rw-r--r--deps/v8/test/mjsunit/keyed-has-ic-module-import.mjs (renamed from deps/v8/test/mjsunit/keyed-has-ic-module-import.js)4
-rw-r--r--deps/v8/test/mjsunit/keyed-has-ic.js14
-rw-r--r--deps/v8/test/mjsunit/keyed-named-access.js5
-rw-r--r--deps/v8/test/mjsunit/keyed-store-array-literal.js5
-rw-r--r--deps/v8/test/mjsunit/large-object-literal.js13
-rw-r--r--deps/v8/test/mjsunit/math-ceil.js1
-rw-r--r--deps/v8/test/mjsunit/math-floor-negative.js12
-rw-r--r--deps/v8/test/mjsunit/math-floor-of-div.js326
-rw-r--r--deps/v8/test/mjsunit/math-floor-part2.js5
-rw-r--r--deps/v8/test/mjsunit/math-floor-part3.js5
-rw-r--r--deps/v8/test/mjsunit/math-imul.js18
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js18
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status56
-rw-r--r--deps/v8/test/mjsunit/modules-circular-valid.js7
-rw-r--r--deps/v8/test/mjsunit/modules-circular-valid.mjs (renamed from deps/v8/test/message/fail/modules-cycle4.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-cycle.js8
-rw-r--r--deps/v8/test/mjsunit/modules-cycle.mjs6
-rw-r--r--deps/v8/test/mjsunit/modules-default-name1.mjs (renamed from deps/v8/test/mjsunit/modules-default-name1.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default-name2.mjs (renamed from deps/v8/test/mjsunit/modules-default-name2.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default-name3.mjs (renamed from deps/v8/test/mjsunit/modules-default-name6.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default-name4.mjs (renamed from deps/v8/test/mjsunit/modules-default-name5.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default-name5.mjs (renamed from deps/v8/test/mjsunit/modules-default-name4.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default-name6.mjs (renamed from deps/v8/test/mjsunit/modules-default-name8.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default-name7.mjs (renamed from deps/v8/test/mjsunit/modules-default-name7.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default-name8.mjs (renamed from deps/v8/test/mjsunit/modules-default-name3.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default-name9.mjs (renamed from deps/v8/test/mjsunit/modules-default-name9.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-default.mjs (renamed from deps/v8/test/mjsunit/modules-default.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import1.mjs (renamed from deps/v8/test/mjsunit/modules-empty-import1.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import2.mjs (renamed from deps/v8/test/mjsunit/modules-empty-import2.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import3.mjs (renamed from deps/v8/test/mjsunit/modules-empty-import3.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import4.js11
-rw-r--r--deps/v8/test/mjsunit/modules-empty-import4.mjs9
-rw-r--r--deps/v8/test/mjsunit/modules-error-trace.mjs (renamed from deps/v8/test/mjsunit/modules-error-trace.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-export-star-as1.mjs (renamed from deps/v8/test/mjsunit/modules-export-star-as1.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-export-star-as2.mjs (renamed from deps/v8/test/mjsunit/modules-export-star-as2.js)10
-rw-r--r--deps/v8/test/mjsunit/modules-export-star-as3.mjs (renamed from deps/v8/test/mjsunit/modules-export-star-as3.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-exports1.mjs (renamed from deps/v8/test/mjsunit/modules-exports1.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-exports2.mjs (renamed from deps/v8/test/mjsunit/modules-exports2.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-exports3.mjs (renamed from deps/v8/test/mjsunit/modules-exports3.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-imports1.mjs (renamed from deps/v8/test/mjsunit/modules-imports1.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-imports2.mjs (renamed from deps/v8/test/mjsunit/modules-imports2.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-imports3.mjs (renamed from deps/v8/test/mjsunit/modules-imports3.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-imports4.mjs (renamed from deps/v8/test/mjsunit/modules-imports4.js)8
-rw-r--r--deps/v8/test/mjsunit/modules-imports5.mjs (renamed from deps/v8/test/mjsunit/modules-imports5.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-imports6.mjs (renamed from deps/v8/test/mjsunit/modules-imports6.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-imports7.mjs (renamed from deps/v8/test/mjsunit/modules-imports7.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-imports8.mjs (renamed from deps/v8/test/mjsunit/modules-imports8.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-init1.mjs (renamed from deps/v8/test/mjsunit/modules-init1.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-init2.mjs (renamed from deps/v8/test/mjsunit/modules-init2.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-init3.mjs (renamed from deps/v8/test/mjsunit/modules-init3.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-init4.js8
-rw-r--r--deps/v8/test/mjsunit/modules-init4.mjs (renamed from deps/v8/test/message/fail/modules-cycle5.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-namespace-getownproperty1.mjs (renamed from deps/v8/test/mjsunit/modules-namespace-getownproperty1.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-namespace-getownproperty2.mjs (renamed from deps/v8/test/mjsunit/modules-namespace-getownproperty2.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-namespace1.mjs (renamed from deps/v8/test/mjsunit/modules-namespace1.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-namespace2.mjs (renamed from deps/v8/test/mjsunit/modules-namespace2.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-namespace3.mjs (renamed from deps/v8/test/mjsunit/modules-namespace3.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-namespace4.mjs (renamed from deps/v8/test/mjsunit/modules-namespace4.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-preparse.mjs (renamed from deps/v8/test/mjsunit/modules-preparse.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-relative-path.mjs (renamed from deps/v8/test/mjsunit/modules-relative-path.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-skip-1.mjs (renamed from deps/v8/test/mjsunit/modules-skip-1.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-2.mjs (renamed from deps/v8/test/mjsunit/modules-skip-2.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-skip-3.mjs (renamed from deps/v8/test/mjsunit/modules-skip-3.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-4.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-4.mjs6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-5.mjs (renamed from deps/v8/test/mjsunit/modules-skip-5.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-6.mjs (renamed from deps/v8/test/mjsunit/modules-skip-6.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-skip-7.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-7.mjs6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-8.mjs (renamed from deps/v8/test/mjsunit/modules-skip-8.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-9.mjs (renamed from deps/v8/test/mjsunit/modules-skip-9.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-skip-circular-valid.mjs (renamed from deps/v8/test/mjsunit/modules-skip-circular-valid.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-cycle.mjs (renamed from deps/v8/test/message/fail/modules-skip-cycle5.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name1.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name1.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name2.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name2.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name3.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name3.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name4.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name4.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name5.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name5.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name6.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name6.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name7.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name7.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name8.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name8.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-default-name9.mjs (renamed from deps/v8/test/mjsunit/modules-skip-default-name9.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-empty-import-aux.mjs (renamed from deps/v8/test/mjsunit/modules-skip-empty-import-aux.js)0
-rw-r--r--deps/v8/test/mjsunit/modules-skip-empty-import.mjs (renamed from deps/v8/test/mjsunit/modules-skip-empty-import.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-init1.mjs (renamed from deps/v8/test/mjsunit/modules-skip-init1.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-init3.mjs (renamed from deps/v8/test/mjsunit/modules-skip-init3.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-init4a.mjs (renamed from deps/v8/test/mjsunit/modules-skip-init4a.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-init4b.mjs (renamed from deps/v8/test/mjsunit/modules-skip-init4b.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-skip-namespace.mjs (renamed from deps/v8/test/mjsunit/modules-skip-namespace.js)6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-star-exports-conflict.mjs6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-star-exports-cycle.mjs6
-rw-r--r--deps/v8/test/mjsunit/modules-star-exports-cycle.mjs (renamed from deps/v8/test/mjsunit/modules-star-exports-cycle.js)4
-rw-r--r--deps/v8/test/mjsunit/modules-this.mjs (renamed from deps/v8/test/mjsunit/modules-this.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-turbo1.mjs (renamed from deps/v8/test/mjsunit/modules-turbo1.js)2
-rw-r--r--deps/v8/test/mjsunit/modules-turbo2.mjs (renamed from deps/v8/test/mjsunit/modules-turbo2.js)2
-rw-r--r--deps/v8/test/mjsunit/nans.js1
-rw-r--r--deps/v8/test/mjsunit/non-extensible-array-reduce.js1420
-rw-r--r--deps/v8/test/mjsunit/noopt.js6
-rw-r--r--deps/v8/test/mjsunit/number-tostring-subnormal.js92
-rw-r--r--deps/v8/test/mjsunit/object-define-property.js7
-rw-r--r--deps/v8/test/mjsunit/object-freeze.js558
-rw-r--r--deps/v8/test/mjsunit/object-literal-overwrite.js61
-rw-r--r--deps/v8/test/mjsunit/object-literal.js2
-rw-r--r--deps/v8/test/mjsunit/object-prevent-extensions.js475
-rw-r--r--deps/v8/test/mjsunit/object-seal.js529
-rw-r--r--deps/v8/test/mjsunit/omit-constant-mapcheck.js20
-rw-r--r--deps/v8/test/mjsunit/optimized-array-every.js23
-rw-r--r--deps/v8/test/mjsunit/optimized-array-find.js20
-rw-r--r--deps/v8/test/mjsunit/optimized-array-findindex.js20
-rw-r--r--deps/v8/test/mjsunit/optimized-array-some.js22
-rw-r--r--deps/v8/test/mjsunit/optimized-foreach-holey-3.js51
-rw-r--r--deps/v8/test/mjsunit/optimized-foreach.js557
-rw-r--r--deps/v8/test/mjsunit/optimized-reduceright.js91
-rw-r--r--deps/v8/test/mjsunit/optimized-typeof.js12
-rw-r--r--deps/v8/test/mjsunit/outobject-double-for-in.js1
-rw-r--r--deps/v8/test/mjsunit/packed-elements.js1
-rw-r--r--deps/v8/test/mjsunit/polymorph-arrays.js2
-rw-r--r--deps/v8/test/mjsunit/prototype-changes.js18
-rw-r--r--deps/v8/test/mjsunit/readonly.js87
-rw-r--r--deps/v8/test/mjsunit/recursive-store-opt.js4
-rw-r--r--deps/v8/test/mjsunit/regexp.js11
-rw-r--r--deps/v8/test/mjsunit/regress-958725.js7
-rw-r--r--deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js7
-rw-r--r--deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js5
-rw-r--r--deps/v8/test/mjsunit/regress/compare-map-elim1.js14
-rw-r--r--deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js8
-rw-r--r--deps/v8/test/mjsunit/regress/consolidated-holey-load.js6
-rw-r--r--deps/v8/test/mjsunit/regress/cross-script-vars.js7
-rw-r--r--deps/v8/test/mjsunit/regress/internalized-string-not-equal.js7
-rw-r--r--deps/v8/test/mjsunit/regress/math-min.js24
-rw-r--r--deps/v8/test/mjsunit/regress/number-named-call-deopt.js10
-rw-r--r--deps/v8/test/mjsunit/regress/poly_count_operation.js7
-rw-r--r--deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js30
-rw-r--r--deps/v8/test/mjsunit/regress/post-increment-close-context.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-100409.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1079.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-108296.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1099.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-110509.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1106.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1117.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1118.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-115100.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1166.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1167.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1210.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-123512.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1237.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-123919.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-124594.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1323.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1337.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1351.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-137768.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1412.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1423.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1434.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1476.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1521.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1560.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1563.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1582.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1583.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1592.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-164442.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1647.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1650.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-166379.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-171641.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1898.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-201590.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2030.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2045.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2056.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2110.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2170.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2193.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2234.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2250.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2261.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2294.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2315.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-234101.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-247688.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2489.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2499.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2537.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2539.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2595.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2596.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2612.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-264203.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2671-1.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2671.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2758.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2813.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2843.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-298269.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2987.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2989.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3029.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3039.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3158.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3176.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3183.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-318420.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3204.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-320532.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3218915.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-323845.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-330046.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3307.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-331416.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-333594.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-334708.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3359.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3380.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3392.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-343609.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-346343.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-346587.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347262.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347542.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347543.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3476.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347904.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347909.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347914.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-348280.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-348512.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-349885.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-350863.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-351261.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-351263.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-351319.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-351624.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-352982.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-354433.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-355486.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-355523.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3564.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-357105.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-358057.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-358059.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-359441.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-359491.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-361608.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-362128.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-363956.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3650-1.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3650-2.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-368243.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-370827.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-379770.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-380092.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-381313.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-385054.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-385565.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-386034.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3865.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3884.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3969.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-397.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3985.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4023.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-410912.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4121.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-412162.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-416730.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4173.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4266.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4267.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4325.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-435073.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-435477.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-436893.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4374.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-437765.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4388.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-446389.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-447526.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-449291.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4493-1.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4507.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-451322.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4515.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-451958.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4521.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4525.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-457935.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-460917.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-466993.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-467481.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4715.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4788-1.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4788-2.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4800.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-487981.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-488398.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4970.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4971.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5006.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-500831.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5033.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5085.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5205.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5252.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5262.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5275-1.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5275-2.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5286.js56
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5332.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5357.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5404.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-556543.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5636-1.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5636-2.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5638.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5638b.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-572589.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5749.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5767.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5790.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5802.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-585041.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-590074.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-592341.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-592353.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5943.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5972.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599412.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599710.js68
-rw-r--r--deps/v8/test/mjsunit/regress/regress-606021.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6063.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6082.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6121.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-612146.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6248.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-625121.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-632289.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-634-debug.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6373.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-639270.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-642409.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-653407.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-662845.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-662904.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-664087.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-666046.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6681.mjs (renamed from deps/v8/test/mjsunit/regress/regress-6681.js)4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-669024.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-673242.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-681383.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-683617.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6907.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-696651.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-698790.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-709782.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-718891.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-725858.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-727662.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-730254.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-747075.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-747825.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7510.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7740.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-774824.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-775888.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-776309.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-781218.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-782754.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-783051.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-791334.mjs (renamed from deps/v8/test/mjsunit/regress/regress-791334.js)2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-791958.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-794822.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-794825.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-797581.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-802060.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-804837.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-805768.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-818070.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8384.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-838766.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-842612.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-843062-3.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-843543.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8510.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8630.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-865310.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-869735.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8913.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-895691.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-897815.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-899115.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-900585.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-901798.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-904417.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-919340.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-932953.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-936077.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9383.js50
-rw-r--r--deps/v8/test/mjsunit/regress/regress-940361.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9466.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-950328.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-961709-classes-opt.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-961709-classes.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-962.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-963891.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-97116.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-97116b.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-977870.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-980891.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-981236.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-982702.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-add-minus-zero.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-alloc-smi-check.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-arm64-spillslots.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-bce-underflow.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-binop.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-captured-object-no-dummy-use.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-charat-empty.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-convert-hole.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-convert-hole2.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-125148.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-134055.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-134609.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-138887.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-140083.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-142218.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-145961.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-147475.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-150545.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-150729.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-163530.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-173907.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-173907b.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-173974.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-196583.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-217858.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-233737.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-240032.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-242502.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-242870.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-242924.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-243868.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-244461.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-245424.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-245480.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-258519.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-263276.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-272564.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-274438.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-280333.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-285355.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-305309.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-306851.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-309623.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-315252.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-318671.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-319835.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-319860.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-323942.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-329709.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-336148.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-340064.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-344186.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-345715.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-345820.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-346636.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-347903.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-349079.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-349465.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-349878.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-350434.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-351320.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-351658.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-352058.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-352929.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-354391.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-357330.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-374838.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-380512.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-381534.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-382513.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-387636.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-390918.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-397662.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-405517.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-407946.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412208.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412210.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412215.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412319.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-417508.js50
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-425519.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-433332.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-478612.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-485410.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-485548-1.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-485548-2.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-487608.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-489293.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-490021.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-500435.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-500497.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-500824.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-505354.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-510738.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-513471.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-522895.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-523213.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-523307.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-527364.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-530598.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-537444.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-551287.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-557807.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-571064.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-573858.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-577112.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-589792.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-595615.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-598998.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599003.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-601617.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-602595.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-604680.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-608278.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-613494.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-613919.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-614292.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-614644.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-617567.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-621816.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-621868.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-624747.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-624919.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-625547.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-627828.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-629062.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-629435.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-629823.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-630923.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631027.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-635923.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-638551.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-640497.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-642056.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-643073.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-644245.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-644689-1.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-644689-2.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-645103.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-645438.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-647217.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-647887.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-648539.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-648737.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-650404.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-654723.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-655004.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-656037.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-656275.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-657478.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-658185.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-658691.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-659475-1.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-659475-2.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-659915a.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-659915b.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-660379.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-661949.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-662367.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-662410.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-662830.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-663340.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-663402.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-663750.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-664084.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-664942.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-665793.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-667689.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-669451.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-669850.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-671576.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-672792.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-679202.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-679378.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-681983.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-684208.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-685050.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-685506.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-685634.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-685680.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-686102.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-686427.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-686737.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-687029.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-687063.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-687990.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-694416.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-694709.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-696622.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-698607.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-699282.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-700733.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-703610.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-706642.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-708050-1.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-708050-2.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-709537.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-709753.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-711166.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-712802.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-715151.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-715404.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-715862.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-719479.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-722756.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-723455.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-724153.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-724608.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-725201.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-729573-1.js98
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-729573-2.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-732169.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-736575.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-736633.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-740116.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-741078.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-747062.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-747979.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-751715.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-752481.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-752826.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-752846.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-755044.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-757199.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-762874-1.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-762874-2.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-766635.js71
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-768080.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-768367.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-769852.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-770543.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-770581.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-771971.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-772610.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-772672.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-772689.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-772720.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-774459.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-776511.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-779367.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-781583.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-786723.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-801627.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-802333.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-819086.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-819298.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-820820.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-822284.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-825045.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-879560.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-879898.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-880207.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-884933.js128
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-890243.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-891627.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-895199.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-899524.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-900674.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-902395.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-903043.js50
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-905457.js64
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-906043.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-906220.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-906870.js64
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-908309.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-913296.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-930948.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-931664.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-934166.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-935932.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-936302.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-937618.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-937649.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-941743.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-942068.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-944865.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-951400.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-961522.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-964833.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-966450.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-967101.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-967434.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-969368.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-969498.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-971782.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-976256.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-976598.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-976934.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-977012.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-977089.js45
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-979023.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-979401.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-980168.js56
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-980292.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-984344.js34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-985660.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-deep-proto.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js46
-rw-r--r--deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-double-canonicalization.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-empty-fixed-double-array.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-escape-preserve-smi-representation.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-et-clobbers-doubles.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-fast-literal-transition.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-filter-contexts.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-force-constant-representation.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-force-representation.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-freeze.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-fundecl.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-grow-deopt.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-grow-store-smi-check.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-gvn-ftt.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-hoist-load-named-field.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-indirect-push-unchecked.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inline-class-constructor.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inline-constant-load.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inline-getter-near-stack-limit.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inlining-function-literal-context.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-int32-truncation.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-is-smi-repr.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining2.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-load-elements.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-load-field-by-index.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-mul-canoverflow.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-no-dummy-use-for-arguments-object.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-ntl.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-omit-checks.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-opt-typeof-null.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-parseint.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-phi-truncation.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-polymorphic-load.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-polymorphic-store.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-smi-math-floor-round.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-sqrt.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-store-heapobject.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-typedarray-length.js139
-rw-r--r--deps/v8/test/mjsunit/regress/regress-undefined-nan.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-undefined-nan3.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-4839.js60
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5254-1.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5254-2.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5255-1.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5255-2.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5255-3.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5697.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-6515.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-6906.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-7848.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-8070.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-8770.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9233.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9394-2.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9394.js83
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9460.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress_967104.js12
-rw-r--r--deps/v8/test/mjsunit/regress/string-next-encoding.js4
-rw-r--r--deps/v8/test/mjsunit/regress/typed-array-lifetime.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-02256.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-02256b.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9425.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9447.js37
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-968078.js47
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-980007.js14
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-985154.js34
-rw-r--r--deps/v8/test/mjsunit/sealed-array-reduce.js1431
-rw-r--r--deps/v8/test/mjsunit/setters-on-elements.js7
-rw-r--r--deps/v8/test/mjsunit/smi-representation.js10
-rw-r--r--deps/v8/test/mjsunit/strict-mode-implicit-receiver.js2
-rw-r--r--deps/v8/test/mjsunit/strict-mode-opt.js5
-rw-r--r--deps/v8/test/mjsunit/string-charcodeat.js34
-rw-r--r--deps/v8/test/mjsunit/string-fromcharcode.js30
-rw-r--r--deps/v8/test/mjsunit/string-indexof-1.js6
-rw-r--r--deps/v8/test/mjsunit/string-slices.js84
-rw-r--r--deps/v8/test/mjsunit/sum-0-plus-undefined-is-NaN.js7
-rw-r--r--deps/v8/test/mjsunit/switch-opt.js14
-rw-r--r--deps/v8/test/mjsunit/testcfg.py19
-rw-r--r--deps/v8/test/mjsunit/thin-strings.js20
-rw-r--r--deps/v8/test/mjsunit/tools/compiler-trace-flags.js4
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor.js41
-rw-r--r--deps/v8/test/mjsunit/transition-elements-kind.js5
-rw-r--r--deps/v8/test/mjsunit/ubsan-fuzzerbugs.js2
-rw-r--r--deps/v8/test/mjsunit/unbox-double-field-indexed.js4
-rw-r--r--deps/v8/test/mjsunit/unbox-double-field.js4
-rw-r--r--deps/v8/test/mjsunit/unbox-smi-field-indexed.js4
-rw-r--r--deps/v8/test/mjsunit/undetectable-compare.js1
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-table.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics-stress.js810
-rw-r--r--deps/v8/test/mjsunit/wasm/atomics64-stress.js894
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/README19
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast1047
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js445
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast308
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js470
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast130
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js170
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast392
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js505
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast5685
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js13859
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast673
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js440
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast947
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js866
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast1469
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js2651
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast1602
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js2096
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-anyref.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-global.js88
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/export-identity.js46
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter-mixed.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js24
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js37
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js33
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-worker-gc-stress.js27
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js34
-rw-r--r--deps/v8/test/mjsunit/wasm/shared-memory-worker-stress.js27
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js37
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/table-copy-anyref.js73
-rw-r--r--deps/v8/test/mjsunit/wasm/table-fill-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/table-fill.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js45
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection.js98
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js16
-rw-r--r--deps/v8/test/mjsunit/worker-ping-test.js125
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc14
-rw-r--r--deps/v8/test/mozilla/mozilla.status4
-rw-r--r--deps/v8/test/preparser/OWNERS2
-rw-r--r--deps/v8/test/test262/OWNERS2
-rw-r--r--deps/v8/test/test262/harness-adapt.js5
-rw-r--r--deps/v8/test/test262/test262.status64
-rw-r--r--deps/v8/test/test262/testcfg.py9
-rw-r--r--deps/v8/test/torque/OWNERS1
-rw-r--r--deps/v8/test/torque/test-torque.tq234
-rw-r--r--deps/v8/test/unittests/BUILD.gn4
-rw-r--r--deps/v8/test/unittests/api/resource-constraints-unittest.cc56
-rw-r--r--deps/v8/test/unittests/api/v8-object-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc5
-rw-r--r--deps/v8/test/unittests/base/vlq-base64-unittest.cc137
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc119
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/branch-elimination-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc966
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc25
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc17
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h5
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc63
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc13
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/heap-controller-unittest.cc59
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc73
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc8
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc10
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc6
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc18
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc43
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h4
-rw-r--r--deps/v8/test/unittests/logging/counters-unittest.cc48
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc63
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc2
-rw-r--r--deps/v8/test/unittests/test-helpers.cc4
-rw-r--r--deps/v8/test/unittests/torque/earley-parser-unittest.cc2
-rw-r--r--deps/v8/test/unittests/torque/ls-message-unittest.cc85
-rw-r--r--deps/v8/test/unittests/torque/ls-server-data-unittest.cc24
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc108
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc224
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc78
-rw-r--r--deps/v8/test/unittests/wasm/wasm-text-unittest.cc121
-rw-r--r--deps/v8/test/wasm-api-tests/BUILD.gn9
-rw-r--r--deps/v8/test/wasm-api-tests/callbacks.cc299
-rw-r--r--deps/v8/test/wasm-api-tests/finalize.cc77
-rw-r--r--deps/v8/test/wasm-api-tests/globals.cc208
-rw-r--r--deps/v8/test/wasm-api-tests/memory.cc123
-rw-r--r--deps/v8/test/wasm-api-tests/reflect.cc109
-rw-r--r--deps/v8/test/wasm-api-tests/serialize.cc48
-rw-r--r--deps/v8/test/wasm-api-tests/table.cc116
-rw-r--r--deps/v8/test/wasm-api-tests/threads.cc105
-rw-r--r--deps/v8/test/wasm-api-tests/traps.cc60
-rw-r--r--deps/v8/test/wasm-api-tests/wasm-api-test.h162
-rw-r--r--deps/v8/test/wasm-js/OWNERS1
-rw-r--r--deps/v8/test/wasm-js/testcfg.py4
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py15
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status10
-rw-r--r--deps/v8/test/webkit/class-syntax-declaration-expected.txt6
-rw-r--r--deps/v8/test/webkit/class-syntax-declaration.js6
-rw-r--r--deps/v8/test/webkit/class-syntax-expression-expected.txt2
-rw-r--r--deps/v8/test/webkit/class-syntax-expression.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-extends-expected.txt6
-rw-r--r--deps/v8/test/webkit/class-syntax-name-expected.txt8
-rw-r--r--deps/v8/test/webkit/class-syntax-semicolon-expected.txt10
-rw-r--r--deps/v8/test/webkit/class-syntax-semicolon.js10
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt32
-rw-r--r--deps/v8/test/webkit/fast/js/function-constructor-error-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt174
-rw-r--r--deps/v8/test/webkit/fast/js/function-toString-parentheses.js17
-rw-r--r--deps/v8/test/webkit/fast/js/object-extra-comma-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt84
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check.js36
-rw-r--r--deps/v8/test/webkit/fast/regex/toString-expected.txt2
-rw-r--r--deps/v8/test/webkit/function-toString-object-literals-expected.txt10
-rw-r--r--deps/v8/test/webkit/parser-xml-close-comment-expected.txt6
-rw-r--r--deps/v8/testing/OWNERS (renamed from deps/v8/infra/testing/OWNERS)2
-rw-r--r--deps/v8/third_party/binutils/OWNERS1
-rw-r--r--deps/v8/third_party/colorama/OWNERS1
-rw-r--r--deps/v8/third_party/inspector_protocol/BUILD.gn27
-rw-r--r--deps/v8/third_party/inspector_protocol/OWNERS2
-rw-r--r--deps/v8/third_party/inspector_protocol/README.md9
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rw-r--r--deps/v8/third_party/inspector_protocol/bindings/bindings.cc (renamed from deps/v8/test/mjsunit/modules-skip-cycle.js)4
-rw-r--r--deps/v8/third_party/inspector_protocol/bindings/bindings.h81
-rw-r--r--deps/v8/third_party/inspector_protocol/bindings/bindings_test.cc44
-rw-r--r--deps/v8/third_party/inspector_protocol/bindings/bindings_test_helper.h18
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/code_generator.py1215
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/concatenate_protocols.py47
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding.cc60
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding.h32
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc82
-rw-r--r--deps/v8/third_party/inspector_protocol/inspector_protocol.gni7
-rw-r--r--deps/v8/third_party/inspector_protocol/inspector_protocol.gypi34
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Allocator_h.template25
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Array_h.template138
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template3
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Forward_h.template50
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Maybe_h.template139
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template66
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_cpp.template28
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_h.template1
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template4
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template2
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template2201
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/encoding_h.template520
-rw-r--r--deps/v8/third_party/inspector_protocol/pdl.py295
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/roll.py9
-rw-r--r--deps/v8/third_party/v8/builtins/OWNERS3
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq45
-rw-r--r--deps/v8/third_party/wasm-api/LICENSE202
-rw-r--r--deps/v8/third_party/wasm-api/OWNERS2
-rw-r--r--deps/v8/third_party/wasm-api/README.v817
-rw-r--r--deps/v8/third_party/wasm-api/example/callback.c167
-rw-r--r--deps/v8/third_party/wasm-api/example/callback.cc145
-rw-r--r--deps/v8/third_party/wasm-api/example/callback.wasmbin0 -> 102 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/callback.wat10
-rw-r--r--deps/v8/third_party/wasm-api/example/finalize.c75
-rw-r--r--deps/v8/third_party/wasm-api/example/finalize.cc70
-rw-r--r--deps/v8/third_party/wasm-api/example/finalize.wasmbin0 -> 75 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/finalize.wat5
-rw-r--r--deps/v8/third_party/wasm-api/example/global.c222
-rw-r--r--deps/v8/third_party/wasm-api/example/global.cc193
-rw-r--r--deps/v8/third_party/wasm-api/example/global.wasmbin0 -> 576 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/global.wat27
-rw-r--r--deps/v8/third_party/wasm-api/example/hello.c107
-rw-r--r--deps/v8/third_party/wasm-api/example/hello.cc91
-rw-r--r--deps/v8/third_party/wasm-api/example/hello.wasmbin0 -> 71 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/hello.wat4
-rw-r--r--deps/v8/third_party/wasm-api/example/memory.c217
-rw-r--r--deps/v8/third_party/wasm-api/example/memory.cc169
-rw-r--r--deps/v8/third_party/wasm-api/example/memory.wasmbin0 -> 146 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/memory.wat11
-rw-r--r--deps/v8/third_party/wasm-api/example/reflect.c164
-rw-r--r--deps/v8/third_party/wasm-api/example/reflect.cc138
-rw-r--r--deps/v8/third_party/wasm-api/example/reflect.wasmbin0 -> 124 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/reflect.wat6
-rw-r--r--deps/v8/third_party/wasm-api/example/serialize.c122
-rw-r--r--deps/v8/third_party/wasm-api/example/serialize.cc103
-rw-r--r--deps/v8/third_party/wasm-api/example/serialize.wasmbin0 -> 71 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/serialize.wat4
-rw-r--r--deps/v8/third_party/wasm-api/example/table.c208
-rw-r--r--deps/v8/third_party/wasm-api/example/table.cc189
-rw-r--r--deps/v8/third_party/wasm-api/example/table.wasmbin0 -> 139 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/table.wat12
-rw-r--r--deps/v8/third_party/wasm-api/example/threads.c152
-rw-r--r--deps/v8/third_party/wasm-api/example/threads.cc124
-rw-r--r--deps/v8/third_party/wasm-api/example/threads.wasmbin0 -> 84 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/threads.wat5
-rw-r--r--deps/v8/third_party/wasm-api/example/trap.c121
-rw-r--r--deps/v8/third_party/wasm-api/example/trap.cc100
-rw-r--r--deps/v8/third_party/wasm-api/example/trap.wasmbin0 -> 105 bytes
-rw-r--r--deps/v8/third_party/wasm-api/example/trap.wat5
-rw-r--r--deps/v8/third_party/wasm-api/wasm.h677
-rw-r--r--deps/v8/third_party/wasm-api/wasm.hh770
-rw-r--r--deps/v8/tools/OWNERS3
-rwxr-xr-xdeps/v8/tools/callstats.py49
-rw-r--r--deps/v8/tools/clusterfuzz/OWNERS2
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/failure_output.txt4
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt2
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie.py39
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie_test.py10
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_config.py54
-rw-r--r--deps/v8/tools/clusterfuzz/v8_sanity_checks.js11
-rw-r--r--deps/v8/tools/gcmole/BUILD.gn1
-rw-r--r--deps/v8/tools/gdbinit44
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py53
-rwxr-xr-xdeps/v8/tools/get_landmines.py1
-rwxr-xr-xdeps/v8/tools/grokdump.py7
-rw-r--r--deps/v8/tools/heap-stats/categories.js2
-rwxr-xr-xdeps/v8/tools/mb/mb.py2
-rwxr-xr-xdeps/v8/tools/node/fetch_deps.py1
-rw-r--r--deps/v8/tools/run_perf.py20
-rw-r--r--deps/v8/tools/testrunner/OWNERS2
-rw-r--r--deps/v8/tools/testrunner/base_runner.py14
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py22
-rw-r--r--deps/v8/tools/testrunner/local/variants.py1
-rw-r--r--deps/v8/tools/testrunner/outproc/message.py9
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py17
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py40
-rw-r--r--deps/v8/tools/tickprocessor.js46
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py15
-rw-r--r--deps/v8/tools/torque/vim-torque/syntax/torque.vim4
-rw-r--r--deps/v8/tools/torque/vscode-torque/.npmrc1
-rw-r--r--deps/v8/tools/torque/vscode-torque/README.md33
-rw-r--r--deps/v8/tools/torque/vscode-torque/language-configuration.json25
-rw-r--r--deps/v8/tools/torque/vscode-torque/out/extension.js99
-rw-r--r--deps/v8/tools/torque/vscode-torque/package.json90
-rw-r--r--deps/v8/tools/torque/vscode-torque/src/extension.ts104
-rw-r--r--deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json177
-rw-r--r--deps/v8/tools/torque/vscode-torque/tsconfig.json17
-rw-r--r--deps/v8/tools/torque/vscode-torque/tslint.json11
-rwxr-xr-xdeps/v8/tools/try_perf.py18
-rw-r--r--deps/v8/tools/turbolizer/OWNERS1
-rw-r--r--deps/v8/tools/turbolizer/info-view.html4
-rw-r--r--deps/v8/tools/turbolizer/src/graph-view.ts27
-rw-r--r--deps/v8/tools/turbolizer/src/sequence-view.ts14
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results1.json36
-rw-r--r--deps/v8/tools/unittests/testdata/expected_test_results2.json24
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py3
-rw-r--r--deps/v8/tools/v8heapconst.py597
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh98
-rw-r--r--deps/v8/tools/whitespace.txt2
-rw-r--r--deps/v8/tools/windbg.js194
2612 files changed, 77667 insertions, 81074 deletions
diff --git a/deps/v8/.flake8 b/deps/v8/.flake8
new file mode 100644
index 0000000000..c58d00ca05
--- /dev/null
+++ b/deps/v8/.flake8
@@ -0,0 +1,11 @@
+[flake8]
+ignore = E111,E114,E310 # We use 2-space indentation
+exclude =
+ ./third_party/, # third-party code
+ ./build/, # third-party code
+ ./buildtools/, # third-party code
+ ./tools/swarming_client/, # third-party code
+ ./test/wasm-js/, # third-party code
+ ./test/wasm-js/data/, # third-party code
+ ./test/test262/data/, # third-party code
+ ./test/test262/harness/, # third-party code
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 6350393ebf..ce47fa3610 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -26,7 +26,6 @@
.ccls-cache
.cpplint-cache
.cproject
-.d8_history
.gclient_entries
.gdb_history
.landmines
@@ -39,8 +38,7 @@
/build
/buildtools
/check-header-includes
-/hydrogen.cfg
-/obj
+/Default/
/out
/out.gn
/perf.data
@@ -72,6 +70,7 @@
/third_party/googletest/src/googletest/include/gtest/*
!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
!/third_party/v8
+!/third_party/wasm-api
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
@@ -83,6 +82,9 @@
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/swarming_client
+/tools/turbolizer/build
+/tools/turbolizer/.rpt2_cache
+/tools/turbolizer/deploy
/tools/visual_studio/Debug
/tools/visual_studio/Release
/v8.log.ll
@@ -94,23 +96,15 @@ GTAGS
TAGS
bsuite
compile_commands.json
-!/test/mjsunit/d8
-d8_g
gccauses
gcsuspects
gtags.files
-shell
-shell_g
+node_modules
tags
turbo*.cfg
turbo*.dot
turbo*.json
v8.ignition_dispatches_table.json
-/Default/
-node_modules
-tools/turbolizer/build
-tools/turbolizer/.rpt2_cache
-tools/turbolizer/deploy
!/third_party/jinja2
!/third_party/markupsafe
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 5a8628b4cb..827d124b0d 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -128,17 +128,20 @@ Matthew Sporleder <msporleder@gmail.com>
Maxim Mazurok <maxim@mazurok.com>
Maxim Mossienko <maxim.mossienko@gmail.com>
Michael Lutz <michi@icosahedron.de>
+Michael Mclaughlin <m8ch88l@gmail.com>
Michael Smith <mike@w3.org>
Michaël Zasso <mic.besace@gmail.com>
Mike Gilbert <floppymaster@gmail.com>
Mike Pennisi <mike@mikepennisi.com>
Mikhail Gusarov <dottedmag@dottedmag.net>
Milton Chiang <milton.chiang@mediatek.com>
+Mu Tao <pamilty@gmail.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
Niklas Hambüchen <mail@nh2.me>
Noj Vek <nojvek@gmail.com>
Oleksandr Chekhovskyi <oleksandr.chekhovskyi@gmail.com>
+Oliver Dunk <oliver@oliverdunk.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peng Fei <pfgenyun@gmail.com>
@@ -160,6 +163,7 @@ Rob Wu <rob@robwu.nl>
Robert Meijer <robert.s.meijer@gmail.com>
Robert Mustacchi <rm@fingolfin.org>
Robert Nagy <robert.nagy@gmail.com>
+Ross Kirsling <rkirsling@gmail.com>
Ruben Bridgewater <ruben@bridgewater.de>
Ryan Dahl <ry@tinyclouds.org>
Sakthipriyan Vairamani (thefourtheye) <thechargingvolcano@gmail.com>
@@ -168,6 +172,7 @@ Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Seo Sanghyeon <sanxiyn@gmail.com>
Shawn Anastasio <shawnanastasio@gmail.com>
+Shawn Presser <shawnpresser@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
Sylvestre Ledru <sledru@mozilla.com>
Taketoshi Aono <brn@b6n.ch>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 8640517ae5..efca4a626f 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -225,7 +225,7 @@ if (v8_enable_snapshot_native_code_counters == "") {
v8_enable_snapshot_native_code_counters = v8_enable_debugging_features
}
if (v8_enable_shared_ro_heap == "") {
- v8_enable_shared_ro_heap = v8_enable_lite_mode
+ v8_enable_shared_ro_heap = !v8_enable_pointer_compression && v8_use_snapshot
}
if (v8_enable_fast_torque == "") {
v8_enable_fast_torque = v8_enable_fast_mksnapshot
@@ -242,6 +242,8 @@ assert(!v8_enable_lite_mode || v8_use_snapshot,
assert(
!v8_enable_pointer_compression || !v8_enable_shared_ro_heap,
"Pointer compression is not supported with shared read-only heap enabled")
+assert(v8_use_snapshot || !v8_enable_shared_ro_heap,
+ "Shared read-only heap requires snapshot")
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
@@ -408,6 +410,7 @@ config("features") {
if (v8_enable_test_features) {
defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ]
defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ]
+ defines += [ "V8_ENABLE_DOUBLE_CONST_STORE_CHECK" ]
}
if (v8_enable_i18n_support) {
defines += [ "V8_INTL_SUPPORT" ]
@@ -940,6 +943,7 @@ torque_files = [
"src/builtins/array-unshift.tq",
"src/builtins/array.tq",
"src/builtins/base.tq",
+ "src/builtins/bigint.tq",
"src/builtins/boolean.tq",
"src/builtins/collections.tq",
"src/builtins/data-view.tq",
@@ -950,13 +954,20 @@ torque_files = [
"src/builtins/iterator.tq",
"src/builtins/math.tq",
"src/builtins/object-fromentries.tq",
+ "src/builtins/object.tq",
"src/builtins/proxy-constructor.tq",
+ "src/builtins/proxy-delete-property.tq",
"src/builtins/proxy-get-property.tq",
+ "src/builtins/proxy-get-prototype-of.tq",
"src/builtins/proxy-has-property.tq",
+ "src/builtins/proxy-is-extensible.tq",
+ "src/builtins/proxy-prevent-extensions.tq",
"src/builtins/proxy-revocable.tq",
"src/builtins/proxy-revoke.tq",
"src/builtins/proxy-set-property.tq",
+ "src/builtins/proxy-set-prototype-of.tq",
"src/builtins/proxy.tq",
+ "src/builtins/reflect.tq",
"src/builtins/regexp-replace.tq",
"src/builtins/regexp.tq",
"src/builtins/string.tq",
@@ -988,57 +999,6 @@ if (!v8_enable_i18n_support) {
torque_files -= [ "src/objects/intl-objects.tq" ]
}
-torque_namespaces = [
- "arguments",
- "array",
- "array-copywithin",
- "array-filter",
- "array-find",
- "array-findindex",
- "array-foreach",
- "array-join",
- "array-map",
- "array-of",
- "array-reverse",
- "array-shift",
- "array-slice",
- "array-splice",
- "array-unshift",
- "array-lastindexof",
- "base",
- "boolean",
- "collections",
- "data-view",
- "extras-utils",
- "growable-fixed-array",
- "internal-coverage",
- "iterator",
- "math",
- "object",
- "proxy",
- "regexp",
- "regexp-replace",
- "string",
- "string-html",
- "string-iterator",
- "string-repeat",
- "string-slice",
- "string-substring",
- "test",
- "typed-array",
- "typed-array-createtypedarray",
- "typed-array-every",
- "typed-array-filter",
- "typed-array-find",
- "typed-array-findindex",
- "typed-array-foreach",
- "typed-array-reduce",
- "typed-array-reduceright",
- "typed-array-slice",
- "typed-array-some",
- "typed-array-subarray",
-]
-
action("run_torque") {
visibility = [
":*",
@@ -1066,11 +1026,13 @@ action("run_torque") {
"$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc",
"$target_gen_dir/torque-generated/exported-macros-assembler-tq.h",
"$target_gen_dir/torque-generated/csa-types-tq.h",
+ "$target_gen_dir/torque-generated/instance-types-tq.h",
]
- foreach(namespace, torque_namespaces) {
+ foreach(file, torque_files) {
+ filetq = string_replace(file, ".tq", "-tq-csa")
outputs += [
- "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.cc",
- "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.h",
+ "$target_gen_dir/torque-generated/$filetq.cc",
+ "$target_gen_dir/torque-generated/$filetq.h",
]
}
@@ -1080,11 +1042,10 @@ action("run_torque") {
root_build_dir),
"-o",
rebase_path("$target_gen_dir/torque-generated", root_build_dir),
+ "-v8-root",
+ rebase_path(".", root_build_dir),
]
-
- foreach(file, torque_files) {
- args += [ rebase_path(file, root_build_dir) ]
- }
+ args += torque_files
}
group("v8_maybe_icu") {
@@ -1112,10 +1073,11 @@ v8_source_set("torque_generated_initializers") {
"$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc",
"$target_gen_dir/torque-generated/exported-macros-assembler-tq.h",
]
- foreach(namespace, torque_namespaces) {
+ foreach(file, torque_files) {
+ filetq = string_replace(file, ".tq", "-tq-csa")
sources += [
- "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.cc",
- "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.h",
+ "$target_gen_dir/torque-generated/$filetq.cc",
+ "$target_gen_dir/torque-generated/$filetq.h",
]
}
@@ -1515,6 +1477,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-async-generator-gen.cc",
"src/builtins/builtins-async-iterator-gen.cc",
"src/builtins/builtins-bigint-gen.cc",
+ "src/builtins/builtins-bigint-gen.h",
"src/builtins/builtins-boolean-gen.cc",
"src/builtins/builtins-call-gen.cc",
"src/builtins/builtins-call-gen.h",
@@ -1776,6 +1739,8 @@ v8_compiler_sources = [
"src/compiler/control-equivalence.h",
"src/compiler/control-flow-optimizer.cc",
"src/compiler/control-flow-optimizer.h",
+ "src/compiler/csa-load-elimination.cc",
+ "src/compiler/csa-load-elimination.h",
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/decompression-elimination.cc",
@@ -1913,6 +1878,8 @@ v8_compiler_sources = [
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.cc",
"src/compiler/store-store-elimination.h",
+ "src/compiler/add-type-assertions-reducer.cc",
+ "src/compiler/add-type-assertions-reducer.h",
"src/compiler/type-cache.cc",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.cc",
@@ -2123,6 +2090,8 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/macro-assembler.h",
"src/codegen/optimized-compilation-info.cc",
"src/codegen/optimized-compilation-info.h",
+ "src/codegen/pending-optimization-table.cc",
+ "src/codegen/pending-optimization-table.h",
"src/codegen/register-arch.h",
"src/codegen/register-configuration.cc",
"src/codegen/register-configuration.h",
@@ -2139,6 +2108,8 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/source-position.h",
"src/codegen/string-constants.cc",
"src/codegen/string-constants.h",
+ "src/codegen/tick-counter.cc",
+ "src/codegen/tick-counter.h",
"src/codegen/turbo-assembler.cc",
"src/codegen/turbo-assembler.h",
"src/codegen/unoptimized-compilation-info.cc",
@@ -2148,7 +2119,6 @@ v8_source_set("v8_base_without_compiler") {
"src/common/checks.h",
"src/common/ptr-compr-inl.h",
"src/common/ptr-compr.h",
- "src/common/v8memory.h",
"src/compiler-dispatcher/compiler-dispatcher.cc",
"src/compiler-dispatcher/compiler-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
@@ -2212,8 +2182,11 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/frames.h",
"src/execution/futex-emulation.cc",
"src/execution/futex-emulation.h",
+ "src/execution/interrupts-scope.cc",
+ "src/execution/interrupts-scope.h",
"src/execution/isolate-data.h",
"src/execution/isolate-inl.h",
+ "src/execution/isolate-utils.h",
"src/execution/isolate.cc",
"src/execution/isolate.h",
"src/execution/message-template.h",
@@ -2226,6 +2199,8 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/simulator-base.cc",
"src/execution/simulator-base.h",
"src/execution/simulator.h",
+ "src/execution/stack-guard.cc",
+ "src/execution/stack-guard.h",
"src/execution/thread-id.cc",
"src/execution/thread-id.h",
"src/execution/thread-local-top.cc",
@@ -2234,6 +2209,8 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/v8threads.h",
"src/execution/vm-state-inl.h",
"src/execution/vm-state.h",
+ "src/extensions/cputracemark-extension.cc",
+ "src/extensions/cputracemark-extension.h",
"src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc",
@@ -2262,6 +2239,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/array-buffer-tracker.cc",
"src/heap/array-buffer-tracker.h",
"src/heap/barrier.h",
+ "src/heap/basic-memory-chunk.cc",
+ "src/heap/basic-memory-chunk.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/combined-heap.cc",
@@ -2308,6 +2287,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
+ "src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",
"src/heap/remembered-set.h",
@@ -2623,6 +2603,8 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/slots-atomic-inl.h",
"src/objects/slots-inl.h",
"src/objects/slots.h",
+ "src/objects/source-text-module.cc",
+ "src/objects/source-text-module.h",
"src/objects/stack-frame-info-inl.h",
"src/objects/stack-frame-info.cc",
"src/objects/stack-frame-info.h",
@@ -2635,6 +2617,10 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/string.h",
"src/objects/struct-inl.h",
"src/objects/struct.h",
+ "src/objects/synthetic-module.cc",
+ "src/objects/synthetic-module.h",
+ "src/objects/tagged-field-inl.h",
+ "src/objects/tagged-field.h",
"src/objects/tagged-impl-inl.h",
"src/objects/tagged-impl.cc",
"src/objects/tagged-impl.h",
@@ -2709,23 +2695,27 @@ v8_source_set("v8_base_without_compiler") {
"src/profiler/tick-sample.h",
"src/profiler/tracing-cpu-profiler.cc",
"src/profiler/tracing-cpu-profiler.h",
- "src/regexp/bytecodes-irregexp.h",
- "src/regexp/interpreter-irregexp.cc",
- "src/regexp/interpreter-irregexp.h",
- "src/regexp/jsregexp-inl.h",
- "src/regexp/jsregexp.cc",
- "src/regexp/jsregexp.h",
"src/regexp/property-sequences.cc",
"src/regexp/property-sequences.h",
"src/regexp/regexp-ast.cc",
"src/regexp/regexp-ast.h",
- "src/regexp/regexp-macro-assembler-irregexp-inl.h",
- "src/regexp/regexp-macro-assembler-irregexp.cc",
- "src/regexp/regexp-macro-assembler-irregexp.h",
+ "src/regexp/regexp-bytecode-generator-inl.h",
+ "src/regexp/regexp-bytecode-generator.cc",
+ "src/regexp/regexp-bytecode-generator.h",
+ "src/regexp/regexp-bytecodes.h",
+ "src/regexp/regexp-compiler-tonode.cc",
+ "src/regexp/regexp-compiler.cc",
+ "src/regexp/regexp-compiler.h",
+ "src/regexp/regexp-dotprinter.cc",
+ "src/regexp/regexp-dotprinter.h",
+ "src/regexp/regexp-interpreter.cc",
+ "src/regexp/regexp-interpreter.h",
+ "src/regexp/regexp-macro-assembler-arch.h",
"src/regexp/regexp-macro-assembler-tracer.cc",
"src/regexp/regexp-macro-assembler-tracer.h",
"src/regexp/regexp-macro-assembler.cc",
"src/regexp/regexp-macro-assembler.h",
+ "src/regexp/regexp-nodes.h",
"src/regexp/regexp-parser.cc",
"src/regexp/regexp-parser.h",
"src/regexp/regexp-special-case.h",
@@ -2733,6 +2723,8 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/regexp-stack.h",
"src/regexp/regexp-utils.cc",
"src/regexp/regexp-utils.h",
+ "src/regexp/regexp.cc",
+ "src/regexp/regexp.h",
"src/roots/roots-inl.h",
"src/roots/roots.cc",
"src/roots/roots.h",
@@ -2866,8 +2858,6 @@ v8_source_set("v8_base_without_compiler") {
"src/utils/ostreams.cc",
"src/utils/ostreams.h",
"src/utils/pointer-with-payload.h",
- "src/utils/splay-tree-inl.h",
- "src/utils/splay-tree.h",
"src/utils/utils-inl.h",
"src/utils/utils.cc",
"src/utils/utils.h",
@@ -2889,7 +2879,6 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/function-compiler.h",
"src/wasm/graph-builder-interface.cc",
"src/wasm/graph-builder-interface.h",
- "src/wasm/js-to-wasm-wrapper-cache.h",
"src/wasm/jump-table-assembler.cc",
"src/wasm/jump-table-assembler.h",
"src/wasm/leb-helper.h",
@@ -2909,6 +2898,7 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/streaming-decoder.cc",
"src/wasm/streaming-decoder.h",
"src/wasm/value-type.h",
+ "src/wasm/wasm-arguments.h",
"src/wasm/wasm-code-manager.cc",
"src/wasm/wasm-code-manager.h",
"src/wasm/wasm-constants.h",
@@ -2956,7 +2946,6 @@ v8_source_set("v8_base_without_compiler") {
"src/zone/zone-list-inl.h",
"src/zone/zone-segment.cc",
"src/zone/zone-segment.h",
- "src/zone/zone-splay-tree.h",
"src/zone/zone.cc",
"src/zone/zone.h",
]
@@ -3348,6 +3337,7 @@ v8_source_set("torque_base") {
"src/torque/declarations.h",
"src/torque/earley-parser.cc",
"src/torque/earley-parser.h",
+ "src/torque/global-context.cc",
"src/torque/global-context.h",
"src/torque/implementation-visitor.cc",
"src/torque/implementation-visitor.h",
@@ -3379,6 +3369,9 @@ v8_source_set("torque_base") {
":v8_libbase",
]
+ # The use of exceptions for Torque in violation of the Chromium style-guide
+ # is justified by the fact that it is only used from the non-essential
+ # language server and can be removed anytime if it causes problems.
configs = [
":internal_config",
"//build/config/compiler:exceptions",
@@ -3421,6 +3414,9 @@ v8_source_set("torque_ls_base") {
":torque_base",
]
+ # The use of exceptions for Torque in violation of the Chromium style-guide
+ # is justified by the fact that it is only used from the non-essential
+ # language server and can be removed anytime if it causes problems.
configs = [
":internal_config",
"//build/config/compiler:exceptions",
@@ -3476,7 +3472,9 @@ v8_component("v8_libbase") {
"src/base/list.h",
"src/base/logging.cc",
"src/base/logging.h",
+ "src/base/lsan.h",
"src/base/macros.h",
+ "src/base/memory.h",
"src/base/once.cc",
"src/base/once.h",
"src/base/optional.h",
@@ -3506,6 +3504,8 @@ v8_component("v8_libbase") {
"src/base/type-traits.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
+ "src/base/vlq-base64.cc",
+ "src/base/vlq-base64.h",
]
configs = [ ":internal_config_base" ]
@@ -3671,21 +3671,15 @@ v8_component("v8_libplatform") {
sources += [
"src/libplatform/tracing/json-trace-event-listener.cc",
"src/libplatform/tracing/json-trace-event-listener.h",
- "src/libplatform/tracing/perfetto-consumer.cc",
- "src/libplatform/tracing/perfetto-consumer.h",
- "src/libplatform/tracing/perfetto-producer.cc",
- "src/libplatform/tracing/perfetto-producer.h",
- "src/libplatform/tracing/perfetto-shared-memory.cc",
- "src/libplatform/tracing/perfetto-shared-memory.h",
- "src/libplatform/tracing/perfetto-tasks.cc",
- "src/libplatform/tracing/perfetto-tasks.h",
- "src/libplatform/tracing/perfetto-tracing-controller.cc",
- "src/libplatform/tracing/perfetto-tracing-controller.h",
+ "src/libplatform/tracing/trace-event-listener.cc",
"src/libplatform/tracing/trace-event-listener.h",
]
deps += [
- "//third_party/perfetto:libperfetto",
+ "//third_party/perfetto/protos/perfetto/trace:lite",
"//third_party/perfetto/protos/perfetto/trace/chrome:minimal_complete_lite",
+ "//third_party/perfetto/protos/perfetto/trace/chrome:zero",
+ "//third_party/perfetto/src/tracing:client_api",
+ "//third_party/perfetto/src/tracing:platform_posix",
]
}
}
@@ -3846,6 +3840,9 @@ if (current_toolchain == v8_snapshot_toolchain) {
"//build/win:default_exe_manifest",
]
+ # The use of exceptions for Torque in violation of the Chromium style-guide
+ # is justified by the fact that it is only used from the non-essential
+ # language server and can be removed anytime if it causes problems.
configs = [
":internal_config",
"//build/config/compiler:exceptions",
@@ -3876,6 +3873,9 @@ v8_executable("torque-language-server") {
"//build/win:default_exe_manifest",
]
+ # The use of exceptions for Torque in violation of the Chromium style-guide
+ # is justified by the fact that it is only used from the non-essential
+ # language server and can be removed anytime if it causes problems.
configs = [
":internal_config",
"//build/config/compiler:exceptions",
@@ -3892,48 +3892,51 @@ v8_executable("torque-language-server") {
}
}
-if (current_toolchain == v8_generator_toolchain) {
- v8_executable("gen-regexp-special-case") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
+if (v8_enable_i18n_support) {
+ if (current_toolchain == v8_generator_toolchain) {
+ v8_executable("gen-regexp-special-case") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
- sources = [
- "src/regexp/gen-regexp-special-case.cc",
- ]
+ sources = [
+ "src/regexp/gen-regexp-special-case.cc",
+ ]
- deps = [
- ":v8_libbase",
- "//build/win:default_exe_manifest",
- "//third_party/icu",
- ]
+ deps = [
+ ":v8_libbase",
+ "//build/win:default_exe_manifest",
+ "//third_party/icu",
+ ]
- configs = [ ":internal_config" ]
+ configs = [ ":internal_config" ]
+ }
}
-}
-action("run_gen-regexp-special-case") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
+ action("run_gen-regexp-special-case") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
- script = "tools/run.py"
+ script = "tools/run.py"
- sources = v8_extra_library_files
+ sources = v8_extra_library_files
- deps = [
- ":gen-regexp-special-case($v8_generator_toolchain)",
- ]
+ deps = [
+ ":gen-regexp-special-case($v8_generator_toolchain)",
+ ]
- output_file = "$target_gen_dir/src/regexp/special-case.cc"
+ output_file = "$target_gen_dir/src/regexp/special-case.cc"
- outputs = [
- output_file,
- ]
+ outputs = [
+ output_file,
+ ]
- args = [
- "./" + rebase_path(
- get_label_info(":gen-regexp-special-case($v8_generator_toolchain)",
- "root_out_dir") + "/gen-regexp-special-case",
- root_build_dir),
- rebase_path(output_file, root_build_dir),
- ]
+ args = [
+ "./" + rebase_path(
+ get_label_info(
+ ":gen-regexp-special-case($v8_generator_toolchain)",
+ "root_out_dir") + "/gen-regexp-special-case",
+ root_build_dir),
+ rebase_path(output_file, root_build_dir),
+ ]
+ }
}
###############################################################################
@@ -4146,6 +4149,10 @@ v8_executable("d8") {
if (v8_enable_vtunejit) {
deps += [ "src/third_party/vtune:v8_vtune" ]
}
+
+ if (v8_use_perfetto) {
+ deps += [ "//third_party/perfetto/include/perfetto/tracing" ]
+ }
}
v8_executable("v8_hello_world") {
@@ -4451,7 +4458,6 @@ group("v8_generated_cc_files") {
":js2c_extras",
":run_torque",
"src/inspector:v8_generated_cc_files",
- "test/cctest:v8_generated_cc_files",
]
}
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index c21ac11760..27afc18a51 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1538 @@
+2019-07-23: Version 7.7.299
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-23: Version 7.7.298
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-23: Version 7.7.297
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-23: Version 7.7.296
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-22: Version 7.7.295
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-22: Version 7.7.294
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-22: Version 7.7.293
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-22: Version 7.7.292
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-22: Version 7.7.291
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-21: Version 7.7.290
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-20: Version 7.7.289
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-20: Version 7.7.288
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.287
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.286
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.285
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.284
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.283
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.282
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.281
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.280
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.279
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.278
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-19: Version 7.7.277
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-18: Version 7.7.276
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-18: Version 7.7.275
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-18: Version 7.7.274
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-18: Version 7.7.273
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-18: Version 7.7.272
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-18: Version 7.7.271
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-18: Version 7.7.270
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-18: Version 7.7.269
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.268
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.267
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.266
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.265
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.264
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.263
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.262
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.261
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.260
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.259
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.258
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.257
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.256
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-17: Version 7.7.255
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.254
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.253
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.252
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.251
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.250
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.249
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.248
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.247
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.246
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.245
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.244
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.243
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-16: Version 7.7.242
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.241
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.240
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.239
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.238
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.237
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.236
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.235
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.234
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.233
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.232
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-15: Version 7.7.231
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-14: Version 7.7.230
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-14: Version 7.7.229
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-13: Version 7.7.228
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-13: Version 7.7.227
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-13: Version 7.7.226
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-12: Version 7.7.225
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-12: Version 7.7.224
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-12: Version 7.7.223
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-12: Version 7.7.222
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-12: Version 7.7.221
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-12: Version 7.7.220
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-12: Version 7.7.219
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-12: Version 7.7.218
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-11: Version 7.7.217
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-11: Version 7.7.216
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-11: Version 7.7.215
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-10: Version 7.7.214
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-09: Version 7.7.213
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-09: Version 7.7.212
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-09: Version 7.7.211
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-09: Version 7.7.210
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-09: Version 7.7.209
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-09: Version 7.7.208
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-09: Version 7.7.207
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-09: Version 7.7.206
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.205
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.204
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.203
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.202
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.201
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.200
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.199
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.198
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.197
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.196
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.195
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.194
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-08: Version 7.7.193
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-06: Version 7.7.192
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-06: Version 7.7.191
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-05: Version 7.7.190
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-05: Version 7.7.189
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-05: Version 7.7.188
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-05: Version 7.7.187
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-05: Version 7.7.186
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-03: Version 7.7.185
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-03: Version 7.7.184
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-03: Version 7.7.183
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-03: Version 7.7.182
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-03: Version 7.7.181
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-03: Version 7.7.180
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-02: Version 7.7.179
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-02: Version 7.7.178
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-02: Version 7.7.177
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-02: Version 7.7.176
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-02: Version 7.7.175
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-02: Version 7.7.174
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-02: Version 7.7.173
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-02: Version 7.7.172
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-01: Version 7.7.171
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-01: Version 7.7.170
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-01: Version 7.7.169
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-01: Version 7.7.168
+
+ Performance and stability improvements on all platforms.
+
+
+2019-07-01: Version 7.7.167
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-28: Version 7.7.166
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-28: Version 7.7.165
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-28: Version 7.7.164
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.163
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.162
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.161
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.160
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.159
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.158
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.157
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.156
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.155
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-27: Version 7.7.154
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-26: Version 7.7.153
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-26: Version 7.7.152
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-26: Version 7.7.151
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-26: Version 7.7.150
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.149
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.148
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.147
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.146
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.145
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.144
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.143
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.142
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-25: Version 7.7.141
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-24: Version 7.7.140
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-24: Version 7.7.139
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-24: Version 7.7.138
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-24: Version 7.7.137
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-24: Version 7.7.136
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-23: Version 7.7.135
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-23: Version 7.7.134
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-22: Version 7.7.133
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-21: Version 7.7.132
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-21: Version 7.7.131
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-21: Version 7.7.130
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-21: Version 7.7.129
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-21: Version 7.7.128
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-21: Version 7.7.127
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-21: Version 7.7.126
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-21: Version 7.7.125
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-20: Version 7.7.124
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-20: Version 7.7.123
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-20: Version 7.7.122
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-20: Version 7.7.121
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-20: Version 7.7.120
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.119
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.118
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.117
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.116
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.115
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.114
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.113
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.112
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.111
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.110
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.109
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.108
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-19: Version 7.7.107
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.106
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.105
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.104
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.103
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.102
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.101
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.100
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.99
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.98
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.97
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.96
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-18: Version 7.7.95
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-17: Version 7.7.94
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-17: Version 7.7.93
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-17: Version 7.7.92
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-17: Version 7.7.91
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-17: Version 7.7.90
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-17: Version 7.7.89
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-17: Version 7.7.88
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-17: Version 7.7.87
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-16: Version 7.7.86
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-16: Version 7.7.85
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.84
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.83
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.82
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.81
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.80
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.79
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.78
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.77
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.76
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.75
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.74
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-14: Version 7.7.73
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.72
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.71
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.70
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.69
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.68
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.67
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.66
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.65
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.64
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.63
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-13: Version 7.7.62
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.61
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.60
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.59
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.58
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.57
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.56
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.55
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.54
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.53
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.52
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-12: Version 7.7.51
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.50
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.49
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.48
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.47
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.46
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.45
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.44
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.43
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.42
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-11: Version 7.7.41
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-10: Version 7.7.40
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-10: Version 7.7.39
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-10: Version 7.7.38
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-10: Version 7.7.37
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-09: Version 7.7.36
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-09: Version 7.7.35
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-09: Version 7.7.34
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-08: Version 7.7.33
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-08: Version 7.7.32
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-08: Version 7.7.31
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-08: Version 7.7.30
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-07: Version 7.7.29
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-07: Version 7.7.28
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-07: Version 7.7.27
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-07: Version 7.7.26
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-07: Version 7.7.25
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-07: Version 7.7.24
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-05: Version 7.7.23
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-05: Version 7.7.22
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-05: Version 7.7.21
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-05: Version 7.7.20
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-05: Version 7.7.19
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-05: Version 7.7.18
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-04: Version 7.7.17
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-04: Version 7.7.16
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-04: Version 7.7.15
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-04: Version 7.7.14
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-04: Version 7.7.13
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.12
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.11
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.10
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.9
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.8
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.7
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.6
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.5
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.4
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.3
+
+ Performance and stability improvements on all platforms.
+
+
+2019-06-03: Version 7.7.2
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-31: Version 7.7.1
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-29: Version 7.6.311
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-29: Version 7.6.310
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-29: Version 7.6.309
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-29: Version 7.6.308
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.307
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.306
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.305
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.304
+
+ Performance and stability improvements on all platforms.
+
+
2019-05-28: Version 7.6.303
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index bca59b724f..986264356f 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -12,10 +12,10 @@ vars = {
'check_v8_header_includes': False,
# GN CIPD package version.
- 'gn_version': 'git_revision:81ee1967d3fcbc829bac1c005c3da59739c88df9',
+ 'gn_version': 'git_revision:972ed755f8e6d31cae9ba15fcd08136ae1a7886f',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:25958d48e89e980e2a97daeddc977fb5e2e1fb8c',
+ 'luci_go': 'git_revision:7d11fd9e66407c49cb6c8546a2ae45ea993a240c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -24,11 +24,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_emulator_version
# and whatever else without interference from each other.
- 'android_sdk_emulator_version': 'ki7EDQRAiZAUYlnTWR1XmI6cJTk65fJ-DNZUU1zrtS8C',
+ 'android_sdk_emulator_version': 'xhyuoquVvBTcJelgRjMKZeoBVSQRjB7pLVJPt5C9saIC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_extras_version
# and whatever else without interference from each other.
- 'android_sdk_extras_version': 'iIwhhDox5E-mHgwUhCz8JACWQCpUjdqt5KTY9VLugKQC',
+ 'android_sdk_extras_version': 'ppQ4TnqDvBHQ3lXx5KPq97egzF5X2FFyOrVHkGmiTMQC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_patcher_version
# and whatever else without interference from each other.
@@ -36,7 +36,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platform-tools_version
# and whatever else without interference from each other.
- 'android_sdk_platform-tools_version': '4Y2Cb2LGzoc-qt-oIUIlhySotJaKeE3ELFedSVe6Uk8C',
+ 'android_sdk_platform-tools_version': 'MSnxgXN7IurL-MQs1RrTkSFSb8Xd1UtZjLArI8Ty1FgC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
@@ -57,15 +57,15 @@ vars = {
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '4cebfa34c79bcfbce6a3f55d1b4f7628bb70ea8a',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '1e5d7d692f816af8136c738b79fe9e8dde8057f6',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '26af0d34d281440ad0dc6d2e43fe60f32ef62da0',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ee7b9dda90e409fb92031d511151debe5db7db9f',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '64e5d7d43a1ff205e3787ab6150bbc1a1837332b',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'fd97d4326fac6da84452b2d5fe75ff0949368dab',
'v8/third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a959e4f0cb643003f2d75d179cede449979e3e77',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b1c3ca20848c117eb935b02c25d441f03e6fbc5e',
'v8/buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '0218c0f9ac9fdba00e5c27b5aca94d3a64c74f34',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '67b293ca1316d06f7f00160ce35c92b8849a9dc9',
'v8/buildtools/clang_format/script':
Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
'v8/buildtools/linux64': {
@@ -110,11 +110,6 @@ deps = {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884',
'condition': 'checkout_android',
},
- # This is deprecated.
- 'v8/third_party/android_tools': {
- 'url': Var('chromium_url') + '/android_tools.git' + '@' + '347a7c8078a009e98995985b7ab6ec6b35696dea',
- 'condition': 'checkout_android',
- },
'v8/third_party/android_sdk/public': {
'packages': [
{
@@ -158,7 +153,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'a7b33124672f301cebe0ca94a67ca7d0362e3d6a',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '53913cecb11a3ef993f6496b9110964e2e2aeec3',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -166,23 +161,23 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'ae68779f84fc36bd88ba4fe0ff78ed9ea3c91d73',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5fd29151cf35c0813c33cc368a7c78389e3f5caa',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'f71fb4f9a912ec945401cc49a287a759b6131026',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6077f444da944d96d311d358d761164261f1cdd0',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '779c4f0f8488c64587b75dbb001d18c3c0c4cda9',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '96f125709acfd0b48fc1e5dae7d6ea42291726ac',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a9abd418ccc7999b00b8c7df60b25620a7d3c541',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '26a2268436f28f64c4539d9aab9ebd0f0b7c99c5',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b',
'v8/third_party/qemu-linux-x64': {
@@ -206,7 +201,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'fe8ba88894e4b3927d3cd9e24274a0f1a688cf71',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'f485a21a9cb05494161d97d545c3b29447610ffb',
'v8/tools/luci-go': {
'packages': [
{
@@ -236,15 +231,12 @@ deps = {
'dep_type': 'cipd',
},
'v8/test/wasm-js/data':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'bc7d3006bbda0de5031c2a1b9266a62fa7895019',
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '1a411f713d9850ce7da24719aba5bb80c535f562',
'v8/third_party/perfetto':
- Var('android_url') + '/platform/external/perfetto.git' + '@' + '10c98fe0cfae669f71610d97e9da94260a6da173',
+ Var('android_url') + '/platform/external/perfetto.git' + '@' + '0e8281399fd854de13461f2c1c9f2fb0b8e9c3ae',
'v8/third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91',
}
-recursedeps = [
- 'v8/third_party/android_tools',
-]
include_rules = [
# Everybody can use some things.
diff --git a/deps/v8/INTL_OWNERS b/deps/v8/INTL_OWNERS
new file mode 100644
index 0000000000..dbe6f3b7b5
--- /dev/null
+++ b/deps/v8/INTL_OWNERS
@@ -0,0 +1,3 @@
+cira@chromium.org
+mnita@google.com
+jshin@chromium.org
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index c428ba6d0b..be36096666 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -2,20 +2,20 @@
# Disagreement among owners should be escalated to eng reviewers.
file://ENG_REVIEW_OWNERS
-# TODO(9247) remove this.
-file://COMMON_OWNERS
-
per-file .clang-format=file://INFRA_OWNERS
per-file .clang-tidy=file://INFRA_OWNERS
per-file .editorconfig=file://INFRA_OWNERS
+per-file .flake8=file://INFRA_OWNERS
per-file .git-blame-ignore-revs=file://INFRA_OWNERS
per-file .gitattributes=file://INFRA_OWNERS
per-file .gitignore=file://INFRA_OWNERS
per-file .gn=file://INFRA_OWNERS
per-file .vpython=file://INFRA_OWNERS
per-file .ycm_extra_conf.py=file://INFRA_OWNERS
-per-file BUILD.gn=file://INFRA_OWNERS
+per-file BUILD.gn=file://COMMON_OWNERS
per-file DEPS=file://INFRA_OWNERS
+# For Test262 rolls.
+per-file DEPS=mathias@chromium.org
per-file PRESUBMIT=file://INFRA_OWNERS
per-file codereview.settings=file://INFRA_OWNERS
diff --git a/deps/v8/test/wasm-api-tests/OWNERS b/deps/v8/benchmarks/OWNERS
index 852d438bb0..852d438bb0 100644
--- a/deps/v8/test/wasm-api-tests/OWNERS
+++ b/deps/v8/benchmarks/OWNERS
diff --git a/deps/v8/gni/proto_library.gni b/deps/v8/gni/proto_library.gni
index cf581ed46e..b16d8f93bd 100644
--- a/deps/v8/gni/proto_library.gni
+++ b/deps/v8/gni/proto_library.gni
@@ -187,7 +187,10 @@ template("proto_library") {
"visibility",
])
+ # Exclude the config.descriptor file which is an output for some reason.
+ set_sources_assignment_filter([ "*.descriptor" ])
sources = get_target_outputs(":$action_name")
+ set_sources_assignment_filter(sources_assignment_filter)
# configs -= [ "//gn/standalone:extra_warnings" ]
if (defined(invoker.extra_configs)) {
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 506b8428ee..e55c4cf346 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -92,7 +92,7 @@ if (v8_enable_backtrace == "") {
# subdirectories.
v8_path_prefix = get_path_info("../", "abspath")
-v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.pdl"
+v8_inspector_js_protocol = v8_path_prefix + "/include/js_protocol.pdl"
###############################################################################
# Templates
diff --git a/deps/v8/include/APIDesign.md b/deps/v8/include/APIDesign.md
index 8830fff7d1..fe42c8ed5d 100644
--- a/deps/v8/include/APIDesign.md
+++ b/deps/v8/include/APIDesign.md
@@ -67,3 +67,6 @@ which in turn guarantees long-term stability of the API.
# The V8 inspector
All debugging capabilities of V8 should be exposed via the inspector protocol.
+The exception to this are profiling features exposed via v8-profiler.h.
+Changes to the inspector protocol need to ensure backwards compatibility and
+commitment to maintain.
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index 7953cfe133..7ffbf74ce9 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -1,16 +1,17 @@
-set noparent
-
adamk@chromium.org
danno@chromium.org
ulan@chromium.org
yangguo@chromium.org
-per-file v8-internal.h=file://OWNERS
+per-file *DEPS=file://COMMON_OWNERS
+per-file v8-internal.h=file://COMMON_OWNERS
per-file v8-inspector.h=dgozman@chromium.org
per-file v8-inspector.h=pfeldman@chromium.org
per-file v8-inspector.h=kozyatinskiy@chromium.org
per-file v8-inspector-protocol.h=dgozman@chromium.org
per-file v8-inspector-protocol.h=pfeldman@chromium.org
per-file v8-inspector-protocol.h=kozyatinskiy@chromium.org
+per-file js_protocol.pdl=dgozman@chromium.org
+per-file js_protocol.pdl=pfeldman@chromium.org
# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/inspector/js_protocol-1.2.json b/deps/v8/include/js_protocol-1.2.json
index aff6806222..aff6806222 100644
--- a/deps/v8/src/inspector/js_protocol-1.2.json
+++ b/deps/v8/include/js_protocol-1.2.json
diff --git a/deps/v8/src/inspector/js_protocol-1.3.json b/deps/v8/include/js_protocol-1.3.json
index ea573d11a6..ea573d11a6 100644
--- a/deps/v8/src/inspector/js_protocol-1.3.json
+++ b/deps/v8/include/js_protocol-1.3.json
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index c4ff51b060..c4ff51b060 100644
--- a/deps/v8/src/inspector/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index ccdca0a8c5..e7cd8bfcdb 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -14,6 +14,10 @@
#include "libplatform/libplatform-export.h"
#include "v8-platform.h" // NOLINT(build/include)
+namespace perfetto {
+class TracingSession;
+}
+
namespace v8 {
namespace base {
@@ -23,8 +27,8 @@ class Mutex;
namespace platform {
namespace tracing {
-class PerfettoTracingController;
class TraceEventListener;
+class JSONTraceEventListener;
const int kTraceMaxNumArgs = 2;
@@ -292,11 +296,10 @@ class V8_PLATFORM_EXPORT TracingController
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
std::atomic_bool recording_{false};
#ifdef V8_USE_PERFETTO
- std::atomic_bool perfetto_recording_{false};
- std::unique_ptr<PerfettoTracingController> perfetto_tracing_controller_;
std::ostream* output_stream_ = nullptr;
- std::unique_ptr<TraceEventListener> json_listener_;
+ std::unique_ptr<JSONTraceEventListener> json_listener_;
TraceEventListener* listener_for_testing_ = nullptr;
+ std::unique_ptr<perfetto::TracingSession> tracing_session_;
#endif
// Disallow copy and assign
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index b96a6e29ac..cfa2aaba96 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -109,6 +109,8 @@ class V8_EXPORT V8StackTrace {
virtual ~V8StackTrace() = default;
virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
buildInspectorObject() const = 0;
+ virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
+ buildInspectorObject(int maxAsyncDepth) const = 0;
virtual std::unique_ptr<StringBuffer> toString() const = 0;
// Safe to pass between threads, drops async chain.
@@ -130,10 +132,6 @@ class V8_EXPORT V8InspectorSession {
// Dispatching protocol messages.
static bool canDispatchMethod(const StringView& method);
virtual void dispatchProtocolMessage(const StringView& message) = 0;
- virtual V8_DEPRECATED("Use state() instead",
- std::unique_ptr<StringBuffer> stateJSON()) {
- return nullptr;
- }
virtual std::vector<uint8_t> state() = 0;
virtual std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
supportedDomains() = 0;
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index b707fafc49..7e43b0d9db 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -439,6 +439,14 @@ class Platform {
*/
virtual void DumpWithoutCrashing() {}
+ /**
+ * Lets the embedder to add crash keys.
+ */
+ virtual void AddCrashKey(int id, const char* name, uintptr_t value) {
+ // "noop" is a valid implementation if the embedder doesn't care to log
+ // additional data for crashes.
+ }
+
protected:
/**
* Default implementation of current wall-clock time in milliseconds
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 483bdd166f..91d7633b05 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 6
-#define V8_BUILD_NUMBER 303
-#define V8_PATCH_LEVEL 28
+#define V8_MINOR_VERSION 7
+#define V8_BUILD_NUMBER 299
+#define V8_PATCH_LEVEL 4
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 6a6dbb5193..ca96c32088 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1326,6 +1326,37 @@ class V8_EXPORT Module {
* kEvaluated or kErrored.
*/
Local<UnboundModuleScript> GetUnboundModuleScript();
+
+ /*
+ * Callback defined in the embedder. This is responsible for setting
+ * the module's exported values with calls to SetSyntheticModuleExport().
+ * The callback must return a Value to indicate success (where no
+ * exception was thrown) and return an empy MaybeLocal to indicate falure
+ * (where an exception was thrown).
+ */
+ typedef MaybeLocal<Value> (*SyntheticModuleEvaluationSteps)(
+ Local<Context> context, Local<Module> module);
+
+ /**
+ * Creates a new SyntheticModule with the specified export names, where
+ * evaluation_steps will be executed upon module evaluation.
+ * export_names must not contain duplicates.
+ * module_name is used solely for logging/debugging and doesn't affect module
+ * behavior.
+ */
+ static Local<Module> CreateSyntheticModule(
+ Isolate* isolate, Local<String> module_name,
+ const std::vector<Local<String>>& export_names,
+ SyntheticModuleEvaluationSteps evaluation_steps);
+
+ /**
+ * Set this module's exported value for the name export_name to the specified
+ * export_value. This method must be called only on Modules created via
+ * CreateSyntheticModule. export_name must be one of the export_names that
+ * were passed in that CreateSyntheticModule call.
+ */
+ void SetSyntheticModuleExport(Local<String> export_name,
+ Local<Value> export_value);
};
/**
@@ -3289,8 +3320,6 @@ enum class IntegrityLevel { kFrozen, kSealed };
*/
class V8_EXPORT Object : public Value {
public:
- V8_DEPRECATED("Use maybe version",
- bool Set(Local<Value> key, Local<Value> value));
/**
* Set only return Just(true) or Empty(), so if it should never fail, use
* result.Check().
@@ -3298,8 +3327,6 @@ class V8_EXPORT Object : public Value {
V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context,
Local<Value> key, Local<Value> value);
- V8_DEPRECATED("Use maybe version",
- bool Set(uint32_t index, Local<Value> value));
V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context, uint32_t index,
Local<Value> value);
@@ -3341,13 +3368,12 @@ class V8_EXPORT Object : public Value {
//
// Returns true on success.
V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
- Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
+ Local<Context> context, Local<Name> key,
+ PropertyDescriptor& descriptor); // NOLINT(runtime/references)
- V8_DEPRECATED("Use maybe version", Local<Value> Get(Local<Value> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
Local<Value> key);
- V8_DEPRECATED("Use maybe version", Local<Value> Get(uint32_t index));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
uint32_t index);
@@ -5320,6 +5346,8 @@ class V8_EXPORT RegExp : public Object {
kDotAll = 1 << 5,
};
+ static constexpr int kFlagCount = 6;
+
/**
* Creates a regular expression from the given pattern string and
* the flags bit field. May throw a JavaScript exception as
@@ -6406,7 +6434,19 @@ V8_INLINE Local<Boolean> False(Isolate* isolate);
*/
class V8_EXPORT ResourceConstraints {
public:
- ResourceConstraints();
+ /**
+ * Configures the constraints with reasonable default values based on the
+ * provided heap size limit. The heap size includes both the young and
+ * the old generation.
+ *
+ * \param maximum_heap_size_in_bytes The hard limit for the heap size.
+ * When the heap size approaches this limit, V8 will perform series of
+ * garbage collections and invoke the NearHeapLimitCallback.
+ * If the garbage collections do not help and the callback does not
+ * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory.
+ */
+ void ConfigureDefaultsFromHeapSize(size_t initial_heap_size_in_bytes,
+ size_t maximum_heap_size_in_bytes);
/**
* Configures the constraints with reasonable default values based on the
@@ -6420,26 +6460,81 @@ class V8_EXPORT ResourceConstraints {
void ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit);
- // Returns the max semi-space size in KB.
- size_t max_semi_space_size_in_kb() const {
- return max_semi_space_size_in_kb_;
+ /**
+ * The address beyond which the VM's stack may not grow.
+ */
+ uint32_t* stack_limit() const { return stack_limit_; }
+ void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+
+ /**
+ * The amount of virtual memory reserved for generated code. This is relevant
+ * for 64-bit architectures that rely on code range for calls in code.
+ */
+ size_t code_range_size_in_bytes() const { return code_range_size_; }
+ void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; }
+
+ /**
+ * The maximum size of the old generation.
+ * When the old generation approaches this limit, V8 will perform series of
+ * garbage collections and invoke the NearHeapLimitCallback.
+ * If the garbage collections do not help and the callback does not
+ * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory.
+ */
+ size_t max_old_generation_size_in_bytes() const {
+ return max_old_generation_size_;
+ }
+ void set_max_old_generation_size_in_bytes(size_t limit) {
+ max_old_generation_size_ = limit;
}
- // Sets the max semi-space size in KB.
- void set_max_semi_space_size_in_kb(size_t limit_in_kb) {
- max_semi_space_size_in_kb_ = limit_in_kb;
+ /**
+ * The maximum size of the young generation, which consists of two semi-spaces
+ * and a large object space. This affects frequency of Scavenge garbage
+ * collections and should be typically much smaller that the old generation.
+ */
+ size_t max_young_generation_size_in_bytes() const {
+ return max_young_generation_size_;
+ }
+ void set_max_young_generation_size_in_bytes(size_t limit) {
+ max_young_generation_size_ = limit;
}
- size_t max_old_space_size() const { return max_old_space_size_; }
- void set_max_old_space_size(size_t limit_in_mb) {
- max_old_space_size_ = limit_in_mb;
+ size_t initial_old_generation_size_in_bytes() const {
+ return initial_old_generation_size_;
}
- uint32_t* stack_limit() const { return stack_limit_; }
- // Sets an address beyond which the VM's stack may not grow.
- void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
- size_t code_range_size() const { return code_range_size_; }
- void set_code_range_size(size_t limit_in_mb) {
- code_range_size_ = limit_in_mb;
+ void set_initial_old_generation_size_in_bytes(size_t initial_size) {
+ initial_old_generation_size_ = initial_size;
+ }
+
+ size_t initial_young_generation_size_in_bytes() const {
+ return initial_young_generation_size_;
+ }
+ void set_initial_young_generation_size_in_bytes(size_t initial_size) {
+ initial_young_generation_size_ = initial_size;
+ }
+
+ /**
+ * Deprecated functions. Do not use in new code.
+ */
+ V8_DEPRECATE_SOON("Use code_range_size_in_bytes.",
+ size_t code_range_size() const) {
+ return code_range_size_ / kMB;
+ }
+ V8_DEPRECATE_SOON("Use set_code_range_size_in_bytes.",
+ void set_code_range_size(size_t limit_in_mb)) {
+ code_range_size_ = limit_in_mb * kMB;
+ }
+ V8_DEPRECATE_SOON("Use max_young_generation_size_in_bytes.",
+ size_t max_semi_space_size_in_kb() const);
+ V8_DEPRECATE_SOON("Use set_max_young_generation_size_in_bytes.",
+ void set_max_semi_space_size_in_kb(size_t limit_in_kb));
+ V8_DEPRECATE_SOON("Use max_old_generation_size_in_bytes.",
+ size_t max_old_space_size() const) {
+ return max_old_generation_size_ / kMB;
+ }
+ V8_DEPRECATE_SOON("Use set_max_old_generation_size_in_bytes.",
+ void set_max_old_space_size(size_t limit_in_mb)) {
+ max_old_generation_size_ = limit_in_mb * kMB;
}
V8_DEPRECATE_SOON("Zone does not pool memory any more.",
size_t max_zone_pool_size() const) {
@@ -6451,14 +6546,14 @@ class V8_EXPORT ResourceConstraints {
}
private:
- // max_semi_space_size_ is in KB
- size_t max_semi_space_size_in_kb_;
-
- // The remaining limits are in MB
- size_t max_old_space_size_;
- uint32_t* stack_limit_;
- size_t code_range_size_;
- size_t max_zone_pool_size_;
+ static constexpr size_t kMB = 1048576u;
+ size_t code_range_size_ = 0;
+ size_t max_old_generation_size_ = 0;
+ size_t max_young_generation_size_ = 0;
+ size_t max_zone_pool_size_ = 0;
+ size_t initial_old_generation_size_ = 0;
+ size_t initial_young_generation_size_ = 0;
+ uint32_t* stack_limit_ = nullptr;
};
@@ -6617,7 +6712,8 @@ class PromiseRejectMessage {
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtasks Callbacks ---
-typedef void (*MicrotasksCompletedCallback)(Isolate*);
+V8_DEPRECATE_SOON("Use *WithData version.",
+ typedef void (*MicrotasksCompletedCallback)(Isolate*));
typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*);
typedef void (*MicrotaskCallback)(void* data);
@@ -6770,6 +6866,8 @@ typedef void (*FailedAccessCheckCallback)(Local<Object> target,
*/
typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context,
Local<String> source);
+typedef MaybeLocal<String> (*ModifyCodeGenerationFromStringsCallback)(
+ Local<Context> context, Local<Value> source);
// --- WebAssembly compilation callbacks ---
typedef bool (*ExtensionCallback)(const FunctionCallbackInfo<Value>&);
@@ -7230,12 +7328,13 @@ class V8_EXPORT EmbedderHeapTracer {
void GarbageCollectionForTesting(EmbedderStackState stack_state);
/*
- * Called by the embedder to signal newly allocated memory. Not bound to
- * tracing phases. Embedders should trade off when increments are reported as
- * V8 may consult global heuristics on whether to trigger garbage collection
- * on this change.
+ * Called by the embedder to signal newly allocated or freed memory. Not bound
+ * to tracing phases. Embedders should trade off when increments are reported
+ * as V8 may consult global heuristics on whether to trigger garbage
+ * collection on this change.
*/
void IncreaseAllocatedSize(size_t bytes);
+ void DecreaseAllocatedSize(size_t bytes);
/*
* Returns the v8::Isolate this tracer is attached too and |nullptr| if it
@@ -7563,6 +7662,8 @@ class V8_EXPORT Isolate {
kRegExpMatchIsFalseishOnJSRegExp = 73,
kDateGetTimezoneOffset = 74,
kStringNormalize = 75,
+ kCallSiteAPIGetFunctionSloppyCall = 76,
+ kCallSiteAPIGetThisSloppyCall = 77,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
@@ -8367,6 +8468,8 @@ class V8_EXPORT Isolate {
*/
void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback);
+ void SetModifyCodeGenerationFromStringsCallback(
+ ModifyCodeGenerationFromStringsCallback callback);
/**
* Set the callback to invoke to check if wasm code generation should
@@ -9402,6 +9505,15 @@ class V8_EXPORT Context {
V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
/**
+ * If callback is set, abort any attempt to execute JavaScript in this
+ * context, call the specified callback, and throw an exception.
+ * To unset abort, pass nullptr as callback.
+ */
+ typedef void (*AbortScriptExecutionCallback)(Isolate* isolate,
+ Local<Context> context);
+ void SetAbortScriptExecution(AbortScriptExecutionCallback callback);
+
+ /**
* Stack-allocated class which sets the execution context for all
* operations executed within a local scope.
*/
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index 5ec0480cf5..7bd2938225 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -353,6 +353,12 @@
#define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
+#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
+#error Inconsistent build configuration: To build the V8 shared library \
+set BUILDING_V8_SHARED, to include its headers for linking against the \
+V8 shared library set USING_V8_SHARED.
+#endif
+
#ifdef V8_OS_WIN
// Setup for Windows DLL export/import. When building the V8 DLL the
diff --git a/deps/v8/infra/mb/gn_isolate_map.pyl b/deps/v8/infra/mb/gn_isolate_map.pyl
index 05b147d503..110b36c500 100644
--- a/deps/v8/infra/mb/gn_isolate_map.pyl
+++ b/deps/v8/infra/mb/gn_isolate_map.pyl
@@ -31,6 +31,10 @@
"label": "//test:v8_d8_default",
"type": "script",
},
+ "generate-bytecode-expectations": {
+ "label": "//test/cctest:generate-bytecode-expectations",
+ "type": "script",
+ },
"mjsunit": {
"label": "//test/mjsunit:v8_mjsunit",
"type": "script",
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 354415ef43..d5d192fb20 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -95,6 +95,8 @@
'V8 iOS - sim': 'release_x64_ios_simulator',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
'V8 Linux64 - pointer compression': 'release_x64_pointer_compression',
+ 'V8 Linux64 - pointer compression without dchecks':
+ 'release_x64_pointer_compression_without_dchecks',
'V8 Linux64 - arm64 - sim - pointer compression - builder':
'release_simulate_arm64_pointer_compression',
'V8 Linux - noembed': 'release_x86_noembed',
@@ -201,6 +203,7 @@
'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa',
'v8_linux_nodcheck_rel_ng': 'release_x86_minimal_symbols',
'v8_linux_dbg_ng': 'debug_x86_trybot',
+ 'v8_linux_noi18n_compile_dbg': 'debug_x86_no_i18n',
'v8_linux_noi18n_rel_ng': 'release_x86_no_i18n_trybot',
'v8_linux_gc_stress_dbg': 'debug_x86_trybot',
'v8_linux_nosnap_rel': 'release_x86_no_snap_trybot',
@@ -458,6 +461,8 @@
'release_x64_pointer_compression': [
'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks',
'v8_enable_pointer_compression'],
+ 'release_x64_pointer_compression_without_dchecks': [
+ 'release_bot', 'x64', 'v8_enable_pointer_compression'],
'release_x64_trybot': [
'release_trybot', 'x64'],
'release_x64_test_features_trybot': [
@@ -491,7 +496,7 @@
'debug_x64_fuchsia': [
'debug_bot', 'x64', 'fuchsia'],
'debug_x64_gcc': [
- 'debug_bot', 'x64', 'gcc'],
+ 'debug_bot', 'x64', 'gcc', 'v8_check_header_includes'],
'debug_x64_header_includes': [
'debug_bot', 'x64', 'v8_check_header_includes'],
'debug_x64_jumbo': [
@@ -535,9 +540,10 @@
'release_x86_noembed_trybot': [
'release_trybot', 'x86', 'v8_no_enable_embedded_builtins'],
'release_x86_gcc': [
- 'release_bot', 'x86', 'gcc'],
+ 'release_bot', 'x86', 'gcc', 'v8_check_header_includes'],
'release_x86_gcc_minimal_symbols': [
- 'release_bot', 'x86', 'gcc', 'minimal_symbols'],
+ 'release_bot', 'x86', 'gcc', 'minimal_symbols',
+ 'v8_check_header_includes'],
'release_x86_gcmole': [
'release_bot', 'x86', 'gcmole'],
'release_x86_gcmole_trybot': [
diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py
index b8e059724e..f1a64707b9 100644
--- a/deps/v8/infra/testing/PRESUBMIT.py
+++ b/deps/v8/infra/testing/PRESUBMIT.py
@@ -33,7 +33,9 @@ SUPPORTED_SWARMING_DIMENSIONS = [
]
# This is not an exhaustive list. It only reflects what we currently use. If
-# there's need to specify a different property, just add it here.
+# there's need to specify a different property, add it here and update the
+# properties passed to swarming in:
+# //build/scripts/slave/recipe_modules/v8/testing.py.
SUPPORTED_SWARMING_TASK_ATTRS = [
'expiration',
'hard_timeout',
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 0d39ea31f7..13a73f3e94 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -51,7 +51,7 @@
'v8_linux_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -67,7 +67,7 @@
},
'v8_linux_gc_stress_dbg': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit', 'variant': 'slow_path', 'test_args': ['--gc-stress'], 'shards': 2},
@@ -85,7 +85,7 @@
'v8_linux_nodcheck_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -100,7 +100,7 @@
},
'v8_linux_noembed_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -108,7 +108,7 @@
},
'v8_linux_noi18n_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
@@ -118,7 +118,7 @@
},
'v8_linux_nosnap_rel': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 6},
@@ -135,7 +135,7 @@
'v8_linux_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -155,7 +155,7 @@
'v8_linux_optional_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
# Code serializer.
@@ -210,7 +210,7 @@
},
'v8_linux_verify_csa_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -220,7 +220,7 @@
# Linux32 with arm simulators
'v8_linux_arm_dbg': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -233,7 +233,7 @@
},
'v8_linux_arm_lite_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
@@ -241,7 +241,7 @@
},
'v8_linux_arm_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access', 'shards': 2},
@@ -256,7 +256,7 @@
# Linux64
'v8_linux64_asan_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'test262_variants', 'shards': 7},
@@ -267,7 +267,7 @@
},
'v8_linux64_cfi_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -280,7 +280,7 @@
'v8_linux64_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -297,7 +297,7 @@
},
'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{
@@ -309,7 +309,7 @@
},
'v8_linux64_fyi_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
# Stress sampling.
@@ -322,7 +322,7 @@
},
'v8_linux64_msan_rel': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'test262', 'shards': 2},
@@ -332,23 +332,28 @@
'v8_linux64_nodcheck_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
+ {'name': 'benchmarks', 'variant': 'assert_types'},
{'name': 'benchmarks', 'variant': 'extra'},
+ {'name': 'check-bytecode-baseline'},
{'name': 'mozilla'},
+ {'name': 'mozilla', 'variant': 'assert_types'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'perf_integration'},
{'name': 'test262_variants', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'assert_types', 'shards': 2},
{'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'shards': 2},
+ {'name': 'v8testing', 'variant': 'assert_types'},
{'name': 'v8testing', 'variant': 'extra'},
],
},
'v8_linux64_perfetto_dbg_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -356,7 +361,7 @@
},
'v8_linux64_pointer_compression_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -365,7 +370,7 @@
'v8_linux64_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
# TODO(machenbach): Add benchmarks.
@@ -386,7 +391,7 @@
'v8_linux64_rel_xg': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8initializers'},
@@ -395,7 +400,7 @@
},
'v8_linux64_sanitizer_coverage_rel': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -403,7 +408,7 @@
},
'v8_linux64_tsan_rel': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -416,7 +421,7 @@
},
'v8_linux64_tsan_isolates_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7},
@@ -424,7 +429,7 @@
},
'v8_linux64_ubsan_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -432,7 +437,7 @@
},
'v8_linux64_verify_csa_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -442,7 +447,7 @@
# Linux64 with arm64 simulators
'v8_linux_arm64_dbg': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -455,7 +460,7 @@
},
'v8_linux_arm64_gc_stress_dbg': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 10},
@@ -463,7 +468,7 @@
},
'v8_linux_arm64_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access', 'shards': 2},
@@ -476,7 +481,7 @@
},
'v8_linux64_arm64_pointer_compression_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -632,7 +637,7 @@
# Main.
'V8 Fuzzer': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -646,7 +651,7 @@
'V8 Linux': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -688,7 +693,7 @@
},
'V8 Linux - arm64 - sim - MSAN': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'test262', 'shards': 3},
@@ -698,7 +703,7 @@
'V8 Linux - debug': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -759,7 +764,7 @@
},
'V8 Linux - noembed': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing'},
@@ -767,7 +772,7 @@
},
'V8 Linux - noembed - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -775,7 +780,7 @@
},
'V8 Linux - full debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -788,7 +793,7 @@
},
'V8 Linux - gc stress': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{
@@ -806,7 +811,7 @@
},
'V8 Linux - noi18n - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
@@ -816,7 +821,7 @@
},
'V8 Linux - nosnap': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -831,7 +836,7 @@
},
'V8 Linux - nosnap - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -844,7 +849,7 @@
},
'V8 Linux - predictable': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -854,7 +859,7 @@
},
'V8 Linux - shared': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -864,7 +869,7 @@
},
'V8 Linux - verify csa': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing'},
@@ -881,20 +886,25 @@
'V8 Linux64': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
+ {'name': 'benchmarks', 'variant': 'assert_types'},
{'name': 'benchmarks', 'variant': 'extra'},
+ {'name': 'check-bytecode-baseline'},
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
+ {'name': 'mozilla', 'variant': 'assert_types'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
{'name': 'perf_integration'},
{'name': 'test262_variants', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'assert_types'},
{'name': 'test262_variants', 'variant': 'extra'},
{'name': 'v8initializers'},
{'name': 'v8testing'},
+ {'name': 'v8testing', 'variant': 'assert_types'},
{'name': 'v8testing', 'variant': 'extra'},
{'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
# Noavx.
@@ -917,7 +927,7 @@
},
'V8 Linux64 - cfi': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -929,7 +939,7 @@
},
'V8 Linux64 - custom snapshot - debug': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit', 'test_args': ['--no-harness']},
@@ -938,7 +948,7 @@
'V8 Linux64 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -974,7 +984,7 @@
},
'V8 Linux64 - debug - fyi': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
# Infra staging.
@@ -986,7 +996,7 @@
},
'V8 Linux64 - debug - perfetto': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -999,7 +1009,7 @@
},
'V8 Linux64 - fyi': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
# Infra staging.
@@ -1011,7 +1021,7 @@
},
'V8 Linux64 - gcov coverage': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing'},
@@ -1019,7 +1029,7 @@
},
'V8 Linux64 - internal snapshot': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing'},
@@ -1027,7 +1037,7 @@
},
'V8 Linux64 - pointer compression': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -1035,7 +1045,7 @@
},
'V8 Linux64 - shared': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1045,7 +1055,7 @@
},
'V8 Linux64 - verify csa': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing'},
@@ -1053,7 +1063,7 @@
},
'V8 Linux64 ASAN': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'test262_variants', 'shards': 5},
@@ -1064,7 +1074,7 @@
},
'V8 Linux64 GC Stress - custom snapshot': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{
@@ -1076,7 +1086,7 @@
},
'V8 Linux64 TSAN': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -1089,7 +1099,7 @@
},
'V8 Linux64 TSAN - concurrent marking': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1119,7 +1129,7 @@
},
'V8 Linux64 TSAN - isolates': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7},
@@ -1127,7 +1137,7 @@
},
'V8 Linux64 UBSan': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1156,8 +1166,8 @@
'tests': [
{'name': 'mozilla'},
{'name': 'test262', 'shards': 2},
- {'name': 'v8testing', 'shards': 3},
- {'name': 'v8testing', 'variant': 'extra'},
+ {'name': 'v8testing', 'shards': 4},
+ {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
],
},
'V8 Mac64 ASAN': {
@@ -1266,74 +1276,44 @@
},
'V8 Arm': {
'swarming_dimensions': {
- 'cores': '2',
- 'cpu': 'armv7l',
- 'os': 'Ubuntu-14.04',
+ 'cores': '8',
+ 'cpu': 'armv7l-32-ODROID-XU4',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 21600,
'hard_timeout': 5400,
},
'tests': [
- {'name': 'benchmarks'},
- {'name': 'optimize_for_size'},
- {'name': 'v8testing', 'shards': 2},
# Odroid.
{
'name': 'benchmarks',
'suffix': 'ODROID',
- 'swarming_dimensions': {
- 'cores': '8',
- 'os': 'Ubuntu-16.04',
- 'cpu': 'armv7l-32-ODROID-XU4',
- },
# Less parallelism to prevent OOMs in benchmarks.
'test_args': ['-j2'],
},
{
'name': 'optimize_for_size',
'suffix': 'ODROID',
- 'swarming_dimensions': {
- 'cores': '8',
- 'os': 'Ubuntu-16.04',
- 'cpu': 'armv7l-32-ODROID-XU4',
- }
},
{
'name': 'v8testing',
'suffix': 'ODROID',
'shards': 2,
- 'swarming_dimensions': {
- 'cores': '8',
- 'os': 'Ubuntu-16.04',
- 'cpu': 'armv7l-32-ODROID-XU4',
- }
},
],
},
'V8 Arm - debug': {
'swarming_dimensions': {
- 'cores': '2',
- 'cpu': 'armv7l',
- 'os': 'Ubuntu-14.04',
+ 'cores': '8',
+ 'cpu': 'armv7l-32-ODROID-XU4',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 21600,
'hard_timeout': 3600,
},
'tests': [
- {
- 'name': 'optimize_for_size',
- 'variant': 'default',
- 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'],
- 'shards': 2
- },
- {
- 'name': 'v8testing',
- 'variant': 'default',
- 'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'],
- 'shards': 3
- },
# Odroid.
{
'name': 'optimize_for_size',
@@ -1341,11 +1321,6 @@
'variant': 'default',
'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'],
'shards': 2,
- 'swarming_dimensions': {
- 'cores': '8',
- 'os': 'Ubuntu-16.04',
- 'cpu': 'armv7l-32-ODROID-XU4',
- }
},
{
'name': 'v8testing',
@@ -1353,19 +1328,14 @@
'variant': 'default',
'test_args': ['--extra-flags=--verify-heap-skip-remembered-set'],
'shards': 3,
- 'swarming_dimensions': {
- 'cores': '8',
- 'os': 'Ubuntu-16.04',
- 'cpu': 'armv7l-32-ODROID-XU4',
- }
},
],
},
'V8 Arm GC Stress': {
'swarming_dimensions': {
- 'cores': '2',
- 'cpu': 'armv7l',
- 'os': 'Ubuntu-14.04',
+ 'cores': '8',
+ 'cpu': 'armv7l-32-ODROID-XU4',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 21600,
@@ -1374,27 +1344,16 @@
'tests': [
{
'name': 'd8testing',
- 'variant': 'default',
- 'test_args': ['--gc-stress', '--extra-flags=--verify-heap-skip-remembered-set'],
- 'shards': 3
- },
- {
- 'name': 'd8testing',
'suffix': 'ODROID',
'variant': 'default',
'test_args': ['--gc-stress', '--extra-flags=--verify-heap-skip-remembered-set'],
'shards': 3,
- 'swarming_dimensions': {
- 'cores': '8',
- 'os': 'Ubuntu-16.04',
- 'cpu': 'armv7l-32-ODROID-XU4',
- }
},
],
},
'V8 Linux - arm - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -1433,7 +1392,7 @@
},
'V8 Linux - arm - sim - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -1446,12 +1405,14 @@
{
'name': 'mozilla',
'suffix': 'armv8-a',
- 'test_args': ['--extra-flags', '--enable-armv8']
+ 'test_args': ['--extra-flags', '--enable-armv8'],
+ 'shards': 2,
},
{
'name': 'test262',
'suffix': 'armv8-a',
- 'test_args': ['--extra-flags', '--enable-armv8']
+ 'test_args': ['--extra-flags', '--enable-armv8'],
+ 'shards': 2,
},
{
'name': 'v8testing',
@@ -1483,7 +1444,7 @@
},
'V8 Linux - arm - sim - lite': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 2},
@@ -1491,7 +1452,7 @@
},
'V8 Linux - arm - sim - lite - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
@@ -1499,7 +1460,7 @@
},
'V8 Linux - arm64 - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -1512,7 +1473,7 @@
},
'V8 Linux - arm64 - sim - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
# TODO(machenbach): Remove longer timeout when this builder scales better.
'swarming_task_attrs': {
@@ -1529,7 +1490,7 @@
},
'V8 Linux - arm64 - sim - gc stress': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1546,7 +1507,7 @@
},
'V8 Linux - mips64el - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1560,7 +1521,7 @@
},
'V8 Linux - mipsel - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1574,7 +1535,7 @@
},
'V8 Linux - ppc64 - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1587,7 +1548,7 @@
},
'V8 Linux - s390x - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1600,7 +1561,7 @@
},
'V8 Linux64 - arm64 - sim - pointer compression': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1628,7 +1589,7 @@
# Clusterfuzz.
'V8 NumFuzz': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 13800,
@@ -1645,7 +1606,7 @@
},
'V8 NumFuzz - TSAN': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 13800,
@@ -1693,7 +1654,7 @@
},
'V8 NumFuzz - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'swarming_task_attrs': {
'expiration': 13800,
@@ -1750,7 +1711,7 @@
# Branches.
'V8 Linux - beta branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1760,7 +1721,7 @@
},
'V8 Linux - beta branch - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1770,7 +1731,7 @@
},
'V8 Linux - stable branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1780,7 +1741,7 @@
},
'V8 Linux - stable branch - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1790,7 +1751,7 @@
},
'V8 Linux64 - beta branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1800,7 +1761,7 @@
},
'V8 Linux64 - beta branch - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1810,7 +1771,7 @@
},
'V8 Linux64 - stable branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1820,7 +1781,7 @@
},
'V8 Linux64 - stable branch - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1830,7 +1791,7 @@
},
'V8 arm - sim - beta branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1840,7 +1801,7 @@
},
'V8 arm - sim - beta branch - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1850,7 +1811,7 @@
},
'V8 arm - sim - stable branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1860,7 +1821,7 @@
},
'V8 arm - sim - stable branch - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1870,7 +1831,7 @@
},
'V8 mips64el - sim - beta branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'unittests'},
@@ -1878,7 +1839,7 @@
},
'V8 mips64el - sim - stable branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'unittests'},
@@ -1886,7 +1847,7 @@
},
'V8 mipsel - sim - beta branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 4},
@@ -1894,7 +1855,7 @@
},
'V8 mipsel - sim - stable branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'v8testing', 'shards': 4},
@@ -1902,7 +1863,7 @@
},
'V8 ppc64 - sim - beta branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'unittests'},
@@ -1910,7 +1871,7 @@
},
'V8 ppc64 - sim - stable branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'unittests'},
@@ -1918,7 +1879,7 @@
},
'V8 s390x - sim - beta branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'unittests'},
@@ -1926,7 +1887,7 @@
},
'V8 s390x - sim - stable branch': {
'swarming_dimensions': {
- 'os': 'Ubuntu-14.04',
+ 'os': 'Ubuntu-16.04',
},
'tests': [
{'name': 'unittests'},
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index 9af1c0b23b..e4f6fd9cee 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -676,19 +676,17 @@ StringHttpRequest kSampleRequests[kSampleSize] = {
StringHttpRequest("/", "localhost", "yahoo.com", "firefox")
};
-
-bool ProcessEntries(v8::Platform* platform, HttpRequestProcessor* processor,
- int count, StringHttpRequest* reqs) {
+bool ProcessEntries(v8::Isolate* isolate, v8::Platform* platform,
+ HttpRequestProcessor* processor, int count,
+ StringHttpRequest* reqs) {
for (int i = 0; i < count; i++) {
bool result = processor->Process(&reqs[i]);
- while (v8::platform::PumpMessageLoop(platform, Isolate::GetCurrent()))
- continue;
+ while (v8::platform::PumpMessageLoop(platform, isolate)) continue;
if (!result) return false;
}
return true;
}
-
void PrintMap(map<string, string>* m) {
for (map<string, string>::iterator i = m->begin(); i != m->end(); i++) {
pair<string, string> entry = *i;
@@ -727,7 +725,9 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "Error initializing processor.\n");
return 1;
}
- if (!ProcessEntries(platform.get(), &processor, kSampleSize, kSampleRequests))
+ if (!ProcessEntries(isolate, platform.get(), &processor, kSampleSize,
+ kSampleRequests)) {
return 1;
+ }
PrintMap(&output);
}
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index d24e647b24..1ae6a569e7 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -16,6 +16,7 @@ include_rules = [
"+src/heap/heap-inl.h",
"+src/heap/heap-write-barrier-inl.h",
"+src/heap/heap-write-barrier.h",
+ "+src/heap/read-only-heap-inl.h",
"+src/heap/read-only-heap.h",
"-src/inspector",
"-src/interpreter",
@@ -29,6 +30,10 @@ include_rules = [
"+src/interpreter/interpreter.h",
"+src/interpreter/interpreter-generator.h",
"+src/interpreter/setup-interpreter.h",
+ "-src/regexp",
+ "+src/regexp/regexp.h",
+ "+src/regexp/regexp-stack.h",
+ "+src/regexp/regexp-utils.h",
"-src/trap-handler",
"+src/trap-handler/handler-inside-posix.h",
"+src/trap-handler/handler-inside-win.h",
@@ -44,5 +49,6 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
"+include/libplatform/v8-tracing.h",
+ "+perfetto/tracing.h"
],
}
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index abad5274c8..c6881f2321 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -1,9 +1,5 @@
-per-file intl.*=cira@chromium.org
-per-file intl.*=mnita@google.com
-per-file intl.*=jshin@chromium.org
-per-file typing-asm.*=aseemgarg@chromium.org
-per-file objects-body-descriptors*=hpayer@chromium.org
-per-file objects-body-descriptors*=mlippautz@chromium.org
-per-file objects-body-descriptors*=ulan@chromium.org
+per-file *DEPS=file://COMMON_OWNERS
+per-file intl-*=file://INTL_OWNERS
+per-file *-intl*=file://INTL_OWNERS
# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/api/OWNERS b/deps/v8/src/api/OWNERS
new file mode 100644
index 0000000000..ce6fb20af8
--- /dev/null
+++ b/deps/v8/src/api/OWNERS
@@ -0,0 +1,11 @@
+file://include/OWNERS
+clemensh@chromium.org
+ishell@chromium.org
+jkummerow@chromium.org
+leszeks@chromium.org
+mlippautz@chromium.org
+mslekova@chromium.org
+mstarzinger@chromium.org
+verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>API
diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc
index c22b7c47f9..cd380d3cda 100644
--- a/deps/v8/src/api/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -5,8 +5,8 @@
#include "src/api/api-natives.h"
#include "src/api/api-inl.h"
+#include "src/common/message-template.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/lookup.h"
@@ -39,7 +39,6 @@ class InvokeScope {
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target,
- bool is_hidden_prototype,
bool is_prototype);
MaybeHandle<JSFunction> InstantiateFunction(
@@ -54,7 +53,7 @@ MaybeHandle<Object> Instantiate(
isolate, Handle<FunctionTemplateInfo>::cast(data), maybe_name);
} else if (data->IsObjectTemplateInfo()) {
return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
- Handle<JSReceiver>(), false, false);
+ Handle<JSReceiver>(), false);
} else {
return data;
}
@@ -129,7 +128,7 @@ void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
// Copy map so it won't interfere constructor's initial map.
Handle<Map> new_map = Map::Copy(isolate, old_map, "DisableAccessChecks");
new_map->set_is_access_check_needed(false);
- JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
+ JSObject::MigrateToMap(isolate, Handle<JSObject>::cast(object), new_map);
}
void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
@@ -138,7 +137,7 @@ void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
Handle<Map> new_map = Map::Copy(isolate, old_map, "EnableAccessChecks");
new_map->set_is_access_check_needed(true);
new_map->set_may_have_interesting_symbols(true);
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
}
class AccessCheckDisableScope {
@@ -178,8 +177,7 @@ Object GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
template <typename TemplateInfoT>
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
- Handle<TemplateInfoT> data,
- bool is_hidden_prototype) {
+ Handle<TemplateInfoT> data) {
HandleScope scope(isolate);
// Disable access checks while instantiating the object.
AccessCheckDisableScope access_check_scope(isolate, obj);
@@ -246,11 +244,10 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
} else {
auto getter = handle(properties->get(i++), isolate);
auto setter = handle(properties->get(i++), isolate);
- RETURN_ON_EXCEPTION(
- isolate,
- DefineAccessorProperty(isolate, obj, name, getter, setter,
- attributes, is_hidden_prototype),
- JSObject);
+ RETURN_ON_EXCEPTION(isolate,
+ DefineAccessorProperty(isolate, obj, name, getter,
+ setter, attributes, false),
+ JSObject);
}
} else {
// Intrinsic data property --- Get appropriate value from the current
@@ -364,7 +361,6 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info,
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
Handle<JSReceiver> new_target,
- bool is_hidden_prototype,
bool is_prototype) {
Handle<JSFunction> constructor;
int serial_number = Smi::ToInt(info->serial_number());
@@ -413,8 +409,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
if (is_prototype) JSObject::OptimizeAsPrototype(object);
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject);
+ isolate, result, ConfigureInstance(isolate, object, info), JSObject);
if (info->immutable_proto()) {
JSObject::SetImmutableProto(object);
}
@@ -486,7 +481,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
InstantiateObject(
isolate,
handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
- Handle<JSReceiver>(), false, true),
+ Handle<JSReceiver>(), true),
JSFunction);
}
Object parent = data->GetParentTemplate();
@@ -514,8 +509,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited,
function);
}
- MaybeHandle<JSObject> result =
- ConfigureInstance(isolate, function, data, false);
+ MaybeHandle<JSObject> result = ConfigureInstance(isolate, function, data);
if (result.is_null()) {
// Uncache on error.
if (serial_number) {
@@ -560,8 +554,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateObject(
Isolate* isolate, Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target) {
InvokeScope invoke_scope(isolate);
- return ::v8::internal::InstantiateObject(isolate, data, new_target, false,
- false);
+ return ::v8::internal::InstantiateObject(isolate, data, new_target, false);
}
MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index 90ff932215..e02c74416b 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -237,18 +237,10 @@ namespace v8 {
#define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \
EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing<T>())
-#define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \
- return maybe_local.FromMaybe(Local<T>());
-
#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
namespace {
-Local<Context> ContextFromNeverReadOnlySpaceObject(
- i::Handle<i::JSReceiver> obj) {
- return reinterpret_cast<v8::Isolate*>(obj->GetIsolate())->GetCurrentContext();
-}
-
class InternalEscapableScope : public v8::EscapableHandleScope {
public:
explicit inline InternalEscapableScope(i::Isolate* isolate)
@@ -447,7 +439,7 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
heap_stats.end_marker = &end_marker;
if (isolate->heap()->HasBeenSetUp()) {
// BUG(1718): Don't use the take_snapshot since we don't support
- // HeapIterator here without doing a special GC.
+ // HeapObjectIterator here without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
char* first_newline = strchr(last_few_messages, '\n');
if (first_newline == nullptr || first_newline[1] == '\0')
@@ -764,9 +756,9 @@ StartupData SnapshotCreator::CreateBlob(
std::vector<i::Handle<i::SharedFunctionInfo>> sfis_to_clear;
{ // Heap allocation is disallowed within this scope.
- i::HeapIterator heap_iterator(isolate->heap());
- for (i::HeapObject current_obj = heap_iterator.next();
- !current_obj.is_null(); current_obj = heap_iterator.next()) {
+ i::HeapObjectIterator heap_iterator(isolate->heap());
+ for (i::HeapObject current_obj = heap_iterator.Next();
+ !current_obj.is_null(); current_obj = heap_iterator.Next()) {
if (current_obj.IsSharedFunctionInfo()) {
i::SharedFunctionInfo shared =
i::SharedFunctionInfo::cast(current_obj);
@@ -810,17 +802,19 @@ StartupData SnapshotCreator::CreateBlob(
i::SerializedHandleChecker handle_checker(isolate, &contexts);
CHECK(handle_checker.CheckGlobalAndEternalHandles());
- i::HeapIterator heap_iterator(isolate->heap());
- for (i::HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
- current_obj = heap_iterator.next()) {
+ i::HeapObjectIterator heap_iterator(isolate->heap());
+ for (i::HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null();
+ current_obj = heap_iterator.Next()) {
if (current_obj.IsJSFunction()) {
i::JSFunction fun = i::JSFunction::cast(current_obj);
// Complete in-object slack tracking for all functions.
fun.CompleteInobjectSlackTrackingIfActive();
+ fun.ResetIfBytecodeFlushed();
+
// Also, clear out feedback vectors, or any optimized code.
- if (!fun.raw_feedback_cell().value().IsUndefined()) {
+ if (fun.IsOptimized() || fun.IsInterpreted()) {
fun.raw_feedback_cell().set_value(
i::ReadOnlyRoots(isolate).undefined_value());
fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
@@ -963,42 +957,57 @@ Extension::Extension(const char* name, const char* source, int dep_count,
CHECK(source != nullptr || source_length_ == 0);
}
-ResourceConstraints::ResourceConstraints()
- : max_semi_space_size_in_kb_(0),
- max_old_space_size_(0),
- stack_limit_(nullptr),
- code_range_size_(0),
- max_zone_pool_size_(0) {}
+void ResourceConstraints::ConfigureDefaultsFromHeapSize(
+ size_t initial_heap_size_in_bytes, size_t maximum_heap_size_in_bytes) {
+ CHECK_LE(initial_heap_size_in_bytes, maximum_heap_size_in_bytes);
+ if (maximum_heap_size_in_bytes == 0) {
+ return;
+ }
+ size_t young_generation, old_generation;
+ i::Heap::GenerationSizesFromHeapSize(maximum_heap_size_in_bytes,
+ &young_generation, &old_generation);
+ set_max_young_generation_size_in_bytes(
+ i::Max(young_generation, i::Heap::MinYoungGenerationSize()));
+ set_max_old_generation_size_in_bytes(
+ i::Max(old_generation, i::Heap::MinOldGenerationSize()));
+ if (initial_heap_size_in_bytes > 0) {
+ i::Heap::GenerationSizesFromHeapSize(initial_heap_size_in_bytes,
+ &young_generation, &old_generation);
+ // We do not set lower bounds for the initial sizes.
+ set_initial_young_generation_size_in_bytes(young_generation);
+ set_initial_old_generation_size_in_bytes(old_generation);
+ }
+ if (i::kRequiresCodeRange) {
+ set_code_range_size_in_bytes(
+ i::Min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes));
+ }
+}
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
- set_max_semi_space_size_in_kb(
- i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
- set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
+ size_t heap_size = i::Heap::HeapSizeFromPhysicalMemory(physical_memory);
+ size_t young_generation, old_generation;
+ i::Heap::GenerationSizesFromHeapSize(heap_size, &young_generation,
+ &old_generation);
+ set_max_young_generation_size_in_bytes(young_generation);
+ set_max_old_generation_size_in_bytes(old_generation);
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
- // Reserve no more than 1/8 of the memory for the code range, but at most
- // kMaximalCodeRangeSize.
- set_code_range_size(
- i::Min(i::kMaximalCodeRangeSize / i::MB,
- static_cast<size_t>((virtual_memory_limit >> 3) / i::MB)));
+ set_code_range_size_in_bytes(
+ i::Min(i::kMaximalCodeRangeSize,
+ static_cast<size_t>(virtual_memory_limit / 8)));
}
}
-void SetResourceConstraints(i::Isolate* isolate,
- const ResourceConstraints& constraints) {
- size_t semi_space_size = constraints.max_semi_space_size_in_kb();
- size_t old_space_size = constraints.max_old_space_size();
- size_t code_range_size = constraints.code_range_size();
- if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
- isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
- code_range_size);
- }
+size_t ResourceConstraints::max_semi_space_size_in_kb() const {
+ return i::Heap::SemiSpaceSizeFromYoungGenerationSize(
+ max_young_generation_size_) /
+ i::KB;
+}
- if (constraints.stack_limit() != nullptr) {
- uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
- isolate->stack_guard()->SetStackLimit(limit);
- }
+void ResourceConstraints::set_max_semi_space_size_in_kb(size_t limit_in_kb) {
+ set_max_young_generation_size_in_bytes(
+ i::Heap::YoungGenerationSizeFromSemiSpaceSize(limit_in_kb * i::KB));
}
i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
@@ -1369,29 +1378,28 @@ static Local<ObjectTemplate> ObjectTemplateNew(
bool do_not_cache);
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
- i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* i_isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::Object> result(Utils::OpenHandle(this)->GetPrototypeTemplate(),
- i_isolate);
+ i::Handle<i::Object> result(self->GetPrototypeTemplate(), i_isolate);
if (result->IsUndefined(i_isolate)) {
// Do not cache prototype objects.
result = Utils::OpenHandle(
*ObjectTemplateNew(i_isolate, Local<FunctionTemplate>(), true));
- i::FunctionTemplateInfo::SetPrototypeTemplate(
- i_isolate, Utils::OpenHandle(this), result);
+ i::FunctionTemplateInfo::SetPrototypeTemplate(i_isolate, self, result);
}
return ToApiHandle<ObjectTemplate>(result);
}
void FunctionTemplate::SetPrototypeProviderTemplate(
Local<FunctionTemplate> prototype_provider) {
- i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* i_isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> result = Utils::OpenHandle(*prototype_provider);
- auto info = Utils::OpenHandle(this);
- CHECK(info->GetPrototypeTemplate().IsUndefined(i_isolate));
- CHECK(info->GetParentTemplate().IsUndefined(i_isolate));
- i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, info,
+ CHECK(self->GetPrototypeTemplate().IsUndefined(i_isolate));
+ CHECK(self->GetParentTemplate().IsUndefined(i_isolate));
+ i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, self,
result);
}
@@ -1420,17 +1428,21 @@ static Local<FunctionTemplate> FunctionTemplateNew(
i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld);
i::Handle<i::FunctionTemplateInfo> obj =
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
- InitializeFunctionTemplate(obj);
- obj->set_do_not_cache(do_not_cache);
- int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber;
- if (!do_not_cache) {
- next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
+ {
+ // Disallow GC until all fields of obj have acceptable types.
+ i::DisallowHeapAllocation no_gc;
+ InitializeFunctionTemplate(obj);
+ obj->set_length(length);
+ obj->set_do_not_cache(do_not_cache);
+ int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber;
+ if (!do_not_cache) {
+ next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
+ }
+ obj->set_serial_number(i::Smi::FromInt(next_serial_number));
}
- obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != nullptr) {
Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type);
}
- obj->set_length(length);
obj->set_undetectable(false);
obj->set_needs_access_check(false);
obj->set_accept_any_receiver(true);
@@ -2000,9 +2012,10 @@ bool ObjectTemplate::IsImmutableProto() {
}
void ObjectTemplate::SetImmutableProto() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- Utils::OpenHandle(this)->set_immutable_proto(true);
+ self->set_immutable_proto(true);
}
// --- S c r i p t s ---
@@ -2222,29 +2235,40 @@ Local<Value> Module::GetException() const {
int Module::GetModuleRequestsLength() const {
i::Handle<i::Module> self = Utils::OpenHandle(this);
- return self->info().module_requests().length();
+ if (self->IsSyntheticModule()) return 0;
+ return i::Handle<i::SourceTextModule>::cast(self)
+ ->info()
+ .module_requests()
+ .length();
}
Local<String> Module::GetModuleRequest(int i) const {
CHECK_GE(i, 0);
i::Handle<i::Module> self = Utils::OpenHandle(this);
+ CHECK(self->IsSourceTextModule());
i::Isolate* isolate = self->GetIsolate();
- i::Handle<i::FixedArray> module_requests(self->info().module_requests(),
- isolate);
+ i::Handle<i::FixedArray> module_requests(
+ i::Handle<i::SourceTextModule>::cast(self)->info().module_requests(),
+ isolate);
CHECK_LT(i, module_requests->length());
return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
}
Location Module::GetModuleRequestLocation(int i) const {
CHECK_GE(i, 0);
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope scope(isolate);
i::Handle<i::Module> self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ i::HandleScope scope(isolate);
+ CHECK(self->IsSourceTextModule());
i::Handle<i::FixedArray> module_request_positions(
- self->info().module_request_positions(), isolate);
+ i::Handle<i::SourceTextModule>::cast(self)
+ ->info()
+ .module_request_positions(),
+ isolate);
CHECK_LT(i, module_request_positions->length());
int position = i::Smi::ToInt(module_request_positions->get(i));
- i::Handle<i::Script> script(self->script(), isolate);
+ i::Handle<i::Script> script(
+ i::Handle<i::SourceTextModule>::cast(self)->script(), isolate);
i::Script::PositionInfo info;
i::Script::GetPositionInfo(script, position, &info, i::Script::WITH_OFFSET);
return v8::Location(info.line, info.column);
@@ -2265,8 +2289,10 @@ Local<UnboundModuleScript> Module::GetUnboundModuleScript() {
GetStatus() < kEvaluating, "v8::Module::GetUnboundScript",
"v8::Module::GetUnboundScript must be used on an unevaluated module");
i::Handle<i::Module> self = Utils::OpenHandle(this);
+ CHECK(self->IsSourceTextModule());
return ToApiHandle<UnboundModuleScript>(i::Handle<i::SharedFunctionInfo>(
- self->GetSharedFunctionInfo(), self->GetIsolate()));
+ i::Handle<i::SourceTextModule>::cast(self)->GetSharedFunctionInfo(),
+ self->GetIsolate()));
}
int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
@@ -2301,6 +2327,37 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
RETURN_ESCAPED(result);
}
+Local<Module> Module::CreateSyntheticModule(
+ Isolate* isolate, Local<String> module_name,
+ const std::vector<Local<v8::String>>& export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) {
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::String> i_module_name = Utils::OpenHandle(*module_name);
+ i::Handle<i::FixedArray> i_export_names = i_isolate->factory()->NewFixedArray(
+ static_cast<int>(export_names.size()));
+ for (int i = 0; i < i_export_names->length(); ++i) {
+ i::Handle<i::String> str = Utils::OpenHandle(*export_names[i]);
+ i_export_names->set(i, *str);
+ }
+ return v8::Utils::ToLocal(
+ i::Handle<i::Module>(i_isolate->factory()->NewSyntheticModule(
+ i_module_name, i_export_names, evaluation_steps)));
+}
+
+void Module::SetSyntheticModuleExport(Local<String> export_name,
+ Local<v8::Value> export_value) {
+ i::Handle<i::String> i_export_name = Utils::OpenHandle(*export_name);
+ i::Handle<i::Object> i_export_value = Utils::OpenHandle(*export_value);
+ i::Handle<i::Module> self = Utils::OpenHandle(this);
+ Utils::ApiCheck(self->IsSyntheticModule(),
+ "v8::Module::SetSyntheticModuleExport",
+ "v8::Module::SetSyntheticModuleExport must only be called on "
+ "a SyntheticModule");
+ i::SyntheticModule::SetExport(self->GetIsolate(),
+ i::Handle<i::SyntheticModule>::cast(self),
+ i_export_name, i_export_value);
+}
+
namespace {
i::Compiler::ScriptDetails GetScriptDetails(
@@ -2411,7 +2468,7 @@ MaybeLocal<Module> ScriptCompiler::CompileModule(
if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
i::Handle<i::SharedFunctionInfo> shared = Utils::OpenHandle(*unbound);
- return ToApiHandle<Module>(i_isolate->factory()->NewModule(shared));
+ return ToApiHandle<Module>(i_isolate->factory()->NewSourceTextModule(shared));
}
namespace {
@@ -2745,11 +2802,12 @@ void v8::TryCatch::SetCaptureMessage(bool value) { capture_message_ = value; }
// --- M e s s a g e ---
Local<String> Message::Get() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(isolate, obj);
+ i::Handle<i::String> raw_result =
+ i::MessageHandler::GetMessage(isolate, self);
Local<String> result = Utils::ToLocal(raw_result);
return scope.Escape(result);
}
@@ -2760,10 +2818,10 @@ v8::Isolate* Message::GetIsolate() const {
}
ScriptOrigin Message::GetScriptOrigin() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Script> script(message->script(), isolate);
+ i::Handle<i::Script> script(self->script(), isolate);
return GetScriptOriginForScript(isolate, script);
}
@@ -2772,11 +2830,11 @@ v8::Local<Value> Message::GetScriptResourceName() const {
}
v8::Local<v8::StackTrace> Message::GetStackTrace() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
+ i::Handle<i::Object> stackFramesObj(self->stack_frames(), isolate);
if (!stackFramesObj->IsFixedArray()) return v8::Local<v8::StackTrace>();
auto stackTrace = i::Handle<i::FixedArray>::cast(stackFramesObj);
return scope.Escape(Utils::StackTraceToLocal(stackTrace));
@@ -2845,18 +2903,17 @@ Maybe<int> Message::GetEndColumn(Local<Context> context) const {
}
bool Message::IsSharedCrossOrigin() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- return Utils::OpenHandle(this)
- ->script()
- .origin_options()
- .IsSharedCrossOrigin();
+ return self->script().origin_options().IsSharedCrossOrigin();
}
bool Message::IsOpaque() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- return Utils::OpenHandle(this)->script().origin_options().IsOpaque();
+ return self->script().origin_options().IsOpaque();
}
MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
@@ -2903,11 +2960,11 @@ Local<StackTrace> StackTrace::CurrentStackTrace(Isolate* isolate,
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
- return i::StackTraceFrame::GetLineNumber(Utils::OpenHandle(this));
+ return i::StackTraceFrame::GetOneBasedLineNumber(Utils::OpenHandle(this));
}
int StackFrame::GetColumn() const {
- return i::StackTraceFrame::GetColumnNumber(Utils::OpenHandle(this));
+ return i::StackTraceFrame::GetOneBasedColumnNumber(Utils::OpenHandle(this));
}
int StackFrame::GetScriptId() const {
@@ -2915,30 +2972,31 @@ int StackFrame::GetScriptId() const {
}
Local<String> StackFrame::GetScriptName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> name =
- i::StackTraceFrame::GetFileName(Utils::OpenHandle(this));
+ i::Handle<i::Object> name = i::StackTraceFrame::GetFileName(self);
return name->IsString()
? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
}
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::Object> name =
- i::StackTraceFrame::GetScriptNameOrSourceUrl(Utils::OpenHandle(this));
+ i::StackTraceFrame::GetScriptNameOrSourceUrl(self);
return name->IsString()
? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
}
Local<String> StackFrame::GetFunctionName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::Object> name =
- i::StackTraceFrame::GetFunctionName(Utils::OpenHandle(this));
+ i::Handle<i::Object> name = i::StackTraceFrame::GetFunctionName(self);
return name->IsString()
? scope.Escape(Local<String>::Cast(Utils::ToLocal(name)))
: Local<String>();
@@ -3518,8 +3576,7 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
}
i::Isolate* i::IsolateFromNeverReadOnlySpaceObject(i::Address obj) {
- return i::NeverReadOnlySpaceObject::GetIsolate(
- i::HeapObject::cast(i::Object(obj)));
+ return i::GetIsolateFromWritableObject(i::HeapObject::cast(i::Object(obj)));
}
bool i::ShouldThrowOnError(i::Isolate* isolate) {
@@ -3866,11 +3923,6 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
return Just(true);
}
-bool v8::Object::Set(v8::Local<Value> key, v8::Local<Value> value) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return Set(context, key, value).FromMaybe(false);
-}
-
Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
v8::Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -3884,11 +3936,6 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
return Just(true);
}
-bool v8::Object::Set(uint32_t index, v8::Local<Value> value) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- return Set(context, index, value).FromMaybe(false);
-}
-
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
v8::Local<Value> value) {
@@ -4106,11 +4153,6 @@ MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
RETURN_ESCAPED(Utils::ToLocal(result));
}
-Local<Value> v8::Object::Get(v8::Local<Value> key) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Get(context, key), Value);
-}
-
MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
PREPARE_FOR_EXECUTION(context, Object, Get, Value);
auto self = Utils::OpenHandle(this);
@@ -4121,11 +4163,6 @@ MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
RETURN_ESCAPED(Utils::ToLocal(result));
}
-Local<Value> v8::Object::Get(uint32_t index) {
- auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Get(context, index), Value);
-}
-
MaybeLocal<Value> v8::Object::GetPrivate(Local<Context> context,
Local<Private> key) {
return Get(context, Local<Value>(reinterpret_cast<Value*>(*key)));
@@ -4171,8 +4208,8 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
}
Local<Value> v8::Object::GetPrototype() {
- auto isolate = Utils::OpenHandle(this)->GetIsolate();
auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
i::PrototypeIterator iter(isolate, self);
return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter));
}
@@ -4424,10 +4461,10 @@ void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
AccessControl settings) {
// TODO(verwaest): Remove |settings|.
DCHECK_EQ(v8::DEFAULT, settings);
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
- auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return;
i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
@@ -4637,9 +4674,9 @@ Local<v8::Context> v8::Object::CreationContext() {
int v8::Object::GetIdentityHash() {
i::DisallowHeapAllocation no_gc;
- auto isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope scope(isolate);
auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
+ i::HandleScope scope(isolate);
return self->GetOrCreateIdentityHash(isolate).value();
}
@@ -4825,9 +4862,9 @@ Local<Value> Function::GetDebugName() const {
}
Local<Value> Function::GetDisplayName() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (!self->IsJSFunction()) {
return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
@@ -5358,20 +5395,15 @@ Local<Value> Symbol::Name() const {
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
i::Isolate* isolate;
- if (!i::GetIsolateFromWritableObject(*sym, &isolate)) {
- // If the Symbol is in RO_SPACE, then its name must be too. Since RO_SPACE
- // objects are immovable we can use the Handle(Address*) constructor with
- // the address of the name field in the Symbol object without needing an
- // isolate.
-#ifdef V8_COMPRESS_POINTERS
- // Compressed fields can't serve as handle locations.
- // TODO(ishell): get Isolate as a parameter.
- isolate = i::Isolate::Current();
-#else
+ if (!i::GetIsolateFromHeapObject(*sym, &isolate)) {
+ // Symbol is in RO_SPACE, which means that its name is also in RO_SPACE.
+ // Since RO_SPACE objects are immovable we can use the Handle(Address*)
+ // constructor with the address of the name field in the Symbol object
+ // without needing an isolate.
+ DCHECK(!COMPRESS_POINTERS_BOOL);
i::Handle<i::HeapObject> ro_name(reinterpret_cast<i::Address*>(
sym->GetFieldAddress(i::Symbol::kNameOffset)));
return Utils::ToLocal(ro_name);
-#endif
}
i::Handle<i::Object> name(sym->name(), isolate);
@@ -5917,6 +5949,19 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
+void Context::SetAbortScriptExecution(
+ Context::AbortScriptExecutionCallback callback) {
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
+ if (callback == nullptr) {
+ context->set_script_execution_callback(
+ i::ReadOnlyRoots(isolate).undefined_value());
+ } else {
+ SET_FIELD_WRAPPED(isolate, context, set_script_execution_callback,
+ callback);
+ }
+}
+
namespace {
i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate,
i::FixedArray list, size_t index) {
@@ -6218,8 +6263,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
// It is safe to call GetIsolateFromWritableHeapObject because
// SupportsExternalization already checked that the object is writable.
- i::Isolate* isolate;
- i::GetIsolateFromWritableObject(obj, &isolate);
+ i::Isolate* isolate = i::GetIsolateFromWritableObject(obj);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
CHECK(resource && resource->data());
@@ -6246,8 +6290,7 @@ bool v8::String::MakeExternal(
// It is safe to call GetIsolateFromWritableHeapObject because
// SupportsExternalization already checked that the object is writable.
- i::Isolate* isolate;
- i::GetIsolateFromWritableObject(obj, &isolate);
+ i::Isolate* isolate = i::GetIsolateFromWritableObject(obj);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
CHECK(resource && resource->data());
@@ -6364,10 +6407,11 @@ Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
double v8::NumberObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, NumberObject, NumberValue);
- return jsvalue->value().Number();
+ return js_primitive_wrapper->value().Number();
}
Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
@@ -6382,11 +6426,12 @@ Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
Local<v8::BigInt> v8::BigIntObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, BigIntObject, BigIntValue);
- return Utils::ToLocal(
- i::Handle<i::BigInt>(i::BigInt::cast(jsvalue->value()), isolate));
+ return Utils::ToLocal(i::Handle<i::BigInt>(
+ i::BigInt::cast(js_primitive_wrapper->value()), isolate));
}
Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
@@ -6404,10 +6449,11 @@ Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
bool v8::BooleanObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, BooleanObject, BooleanValue);
- return jsvalue->value().IsTrue(isolate);
+ return js_primitive_wrapper->value().IsTrue(isolate);
}
Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
@@ -6423,11 +6469,12 @@ Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
Local<v8::String> v8::StringObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, StringObject, StringValue);
- return Utils::ToLocal(
- i::Handle<i::String>(i::String::cast(jsvalue->value()), isolate));
+ return Utils::ToLocal(i::Handle<i::String>(
+ i::String::cast(js_primitive_wrapper->value()), isolate));
}
Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
@@ -6442,11 +6489,12 @@ Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
- i::Isolate* isolate = jsvalue->GetIsolate();
+ i::Handle<i::JSPrimitiveWrapper> js_primitive_wrapper =
+ i::Handle<i::JSPrimitiveWrapper>::cast(obj);
+ i::Isolate* isolate = js_primitive_wrapper->GetIsolate();
LOG_API(isolate, SymbolObject, SymbolValue);
- return Utils::ToLocal(
- i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value()), isolate));
+ return Utils::ToLocal(i::Handle<i::Symbol>(
+ i::Symbol::cast(js_primitive_wrapper->value()), isolate));
}
MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
@@ -7839,7 +7887,12 @@ void Isolate::Initialize(Isolate* isolate,
i_isolate->set_api_external_references(params.external_references);
i_isolate->set_allow_atomics_wait(params.allow_atomics_wait);
- SetResourceConstraints(i_isolate, params.constraints);
+ i_isolate->heap()->ConfigureHeap(params.constraints);
+ if (params.constraints.stack_limit() != nullptr) {
+ uintptr_t limit =
+ reinterpret_cast<uintptr_t>(params.constraints.stack_limit());
+ i_isolate->stack_guard()->SetStackLimit(limit);
+ }
// TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(isolate);
if (!i::Snapshot::Initialize(i_isolate)) {
@@ -8291,9 +8344,9 @@ void Isolate::LowMemoryNotification() {
i::GarbageCollectionReason::kLowMemoryNotification);
}
{
- i::HeapIterator iterator(isolate->heap());
- for (i::HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ i::HeapObjectIterator iterator(isolate->heap());
+ for (i::HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsAbstractCode()) {
i::AbstractCode::cast(obj).DropStackFrameCache();
}
@@ -8304,9 +8357,14 @@ void Isolate::LowMemoryNotification() {
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!dependant_context) {
- // We left the current context, we can abort all WebAssembly compilations on
- // that isolate.
- isolate->wasm_engine()->DeleteCompileJobsOnIsolate(isolate);
+ if (!isolate->context().is_null()) {
+ // We left the current context, we can abort all WebAssembly compilations
+ // of that context.
+ // A handle scope for the native context.
+ i::HandleScope handle_scope(isolate);
+ isolate->wasm_engine()->DeleteCompileJobsOnContext(
+ isolate->native_context());
+ }
}
// TODO(ahaas): move other non-heap activity out of the heap call.
return isolate->heap()->NotifyContextDisposed(dependant_context);
@@ -8408,6 +8466,9 @@ CALLBACK_SETTER(FatalErrorHandler, FatalErrorCallback, exception_behavior)
CALLBACK_SETTER(OOMErrorHandler, OOMErrorCallback, oom_behavior)
CALLBACK_SETTER(AllowCodeGenerationFromStringsCallback,
AllowCodeGenerationFromStringsCallback, allow_code_gen_callback)
+CALLBACK_SETTER(ModifyCodeGenerationFromStringsCallback,
+ ModifyCodeGenerationFromStringsCallback,
+ modify_code_gen_callback)
CALLBACK_SETTER(AllowWasmCodeGenerationCallback,
AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback)
@@ -8839,9 +8900,9 @@ std::vector<int> debug::Script::LineEnds() const {
}
MaybeLocal<String> debug::Script::Name() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->name(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
return Utils::ToLocal(
@@ -8849,9 +8910,9 @@ MaybeLocal<String> debug::Script::Name() const {
}
MaybeLocal<String> debug::Script::SourceURL() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->source_url(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
return Utils::ToLocal(
@@ -8859,9 +8920,9 @@ MaybeLocal<String> debug::Script::SourceURL() const {
}
MaybeLocal<String> debug::Script::SourceMappingURL() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->source_mapping_url(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
return Utils::ToLocal(
@@ -8869,18 +8930,18 @@ MaybeLocal<String> debug::Script::SourceMappingURL() const {
}
Maybe<int> debug::Script::ContextId() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Object value = script->context_data();
if (value.IsSmi()) return Just(i::Smi::ToInt(value));
return Nothing<int>();
}
MaybeLocal<String> debug::Script::Source() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope handle_scope(isolate);
i::Handle<i::Object> value(script->source(), isolate);
if (!value->IsString()) return MaybeLocal<String>();
return Utils::ToLocal(
@@ -10171,6 +10232,17 @@ void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) {
}
}
+void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) {
+ if (isolate_) {
+ i::LocalEmbedderHeapTracer* const tracer =
+ reinterpret_cast<i::Isolate*>(isolate_)
+ ->heap()
+ ->local_embedder_heap_tracer();
+ DCHECK_NOT_NULL(tracer);
+ tracer->DecreaseAllocatedSize(bytes);
+ }
+}
+
void EmbedderHeapTracer::RegisterEmbedderReference(
const TracedGlobal<v8::Value>& ref) {
if (ref.IsEmpty()) return;
@@ -10360,8 +10432,7 @@ void InvokeAccessorGetterCallback(
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback) {
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kInvokeFunctionCallback);
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
Address callback_address = reinterpret_cast<Address>(callback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, callback_address);
@@ -10382,7 +10453,6 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
#undef EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE
#undef RETURN_ON_FAILED_EXECUTION
#undef RETURN_ON_FAILED_EXECUTION_PRIMITIVE
-#undef RETURN_TO_LOCAL_UNCHECKED
#undef RETURN_ESCAPED
#undef SET_FIELD_WRAPPED
#undef NEW_STRING
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index e041a5daf0..6135a7dfc6 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -14,9 +14,9 @@
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-proxy.h"
-#include "src/objects/module.h"
#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/source-text-module.h"
#include "src/utils/detachable-vector.h"
#include "src/objects/templates.h"
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index d4103ae0c1..08f39f8d6a 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
ahaas@chromium.org
clemensh@chromium.org
mstarzinger@chromium.org
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 5a38eeef36..7433b6a12c 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -12,9 +12,9 @@
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
#include "src/common/assert-scope.h"
+#include "src/common/message-template.h"
#include "src/execution/execution.h"
#include "src/execution/isolate.h"
-#include "src/execution/message-template.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
@@ -249,9 +249,9 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
return FAILED;
}
module_ = new (compile_zone) wasm::ZoneBuffer(compile_zone);
- parser.module_builder()->WriteTo(*module_);
+ parser.module_builder()->WriteTo(module_);
asm_offsets_ = new (compile_zone) wasm::ZoneBuffer(compile_zone);
- parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets_);
+ parser.module_builder()->WriteAsmJsOffsetTable(asm_offsets_);
stdlib_uses_ = *parser.stdlib_uses();
size_t compile_zone_size =
@@ -287,7 +287,7 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(
isolate, &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()),
- uses_bitset)
+ uses_bitset, shared_info->language_mode())
.ToHandleChecked();
DCHECK(!thrower.error());
compile_time_ = compile_timer.Elapsed().InMillisecondsF();
@@ -319,10 +319,10 @@ void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) {
translation_throughput);
}
-UnoptimizedCompilationJob* AsmJs::NewCompilationJob(
+std::unique_ptr<UnoptimizedCompilationJob> AsmJs::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator) {
- return new AsmJsCompilationJob(parse_info, literal, allocator);
+ return base::make_unique<AsmJsCompilationJob>(parse_info, literal, allocator);
}
namespace {
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index 46dd3f2e34..3e714cba7a 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -23,7 +23,7 @@ class UnoptimizedCompilationJob;
// Interface to compile and instantiate for asm.js modules.
class AsmJs {
public:
- static UnoptimizedCompilationJob* NewCompilationJob(
+ static std::unique_ptr<UnoptimizedCompilationJob> NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 3d290a1fe1..6ac39dc89c 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -253,7 +253,7 @@ void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable,
const WasmInitExpr& init) {
info->kind = VarKind::kGlobal;
info->type = type;
- info->index = module_builder_->AddGlobal(vtype, false, true, init);
+ info->index = module_builder_->AddGlobal(vtype, true, init);
info->mutable_variable = mutable_variable;
}
@@ -385,7 +385,8 @@ void AsmJsParser::ValidateModule() {
module_builder_->MarkStartFunction(start);
for (auto& global_import : global_imports_) {
uint32_t import_index = module_builder_->AddGlobalImport(
- global_import.import_name, global_import.value_type);
+ global_import.import_name, global_import.value_type,
+ false /* mutability */);
start->EmitWithI32V(kExprGetGlobal, import_index);
start->EmitWithI32V(kExprSetGlobal, VarIndex(global_import.var_info));
}
@@ -754,7 +755,7 @@ void AsmJsParser::ValidateFunction() {
// Record start of the function, used as position for the stack check.
current_function_builder_->SetAsmFunctionStartPosition(scanner_.Position());
- CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
+ CachedVector<AsmType*> params(&cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
// Check against limit on number of parameters.
@@ -762,7 +763,7 @@ void AsmJsParser::ValidateFunction() {
FAIL("Number of parameters exceeds internal limit");
}
- CachedVector<ValueType> locals(cached_valuetype_vectors_);
+ CachedVector<ValueType> locals(&cached_valuetype_vectors_);
ValidateFunctionLocals(params.size(), &locals);
function_temp_locals_offset_ = static_cast<uint32_t>(
@@ -837,7 +838,7 @@ void AsmJsParser::ValidateFunctionParams(ZoneVector<AsmType*>* params) {
scanner_.EnterLocalScope();
EXPECT_TOKEN('(');
CachedVector<AsmJsScanner::token_t> function_parameters(
- cached_token_t_vectors_);
+ &cached_token_t_vectors_);
while (!failed_ && !Peek(')')) {
if (!scanner_.IsLocal()) {
FAIL("Expected parameter name");
@@ -969,7 +970,8 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
if (negate) {
dvalue = -dvalue;
}
- current_function_builder_->EmitF32Const(dvalue);
+ float fvalue = DoubleToFloat32(dvalue);
+ current_function_builder_->EmitF32Const(fvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
if (uvalue > 0x7FFFFFFF) {
@@ -1314,7 +1316,7 @@ void AsmJsParser::SwitchStatement() {
Begin(pending_label_);
pending_label_ = 0;
// TODO(bradnelson): Make less weird.
- CachedVector<int32_t> cases(cached_int_vectors_);
+ CachedVector<int32_t> cases(&cached_int_vectors_);
GatherCases(&cases);
EXPECT_TOKEN('{');
size_t count = cases.size() + 1;
@@ -2108,7 +2110,11 @@ AsmType* AsmJsParser::ValidateCall() {
// need to match the information stored at this point.
base::Optional<TemporaryVariableScope> tmp;
if (Check('[')) {
- RECURSEn(EqualityExpression());
+ AsmType* index = nullptr;
+ RECURSEn(index = EqualityExpression());
+ if (!index->IsA(AsmType::Intish())) {
+ FAILn("Expected intish index");
+ }
EXPECT_TOKENn('&');
uint32_t mask = 0;
if (!CheckForUnsigned(&mask)) {
@@ -2161,8 +2167,8 @@ AsmType* AsmJsParser::ValidateCall() {
}
// Parse argument list and gather types.
- CachedVector<AsmType*> param_types(cached_asm_type_p_vectors_);
- CachedVector<AsmType*> param_specific_types(cached_asm_type_p_vectors_);
+ CachedVector<AsmType*> param_types(&cached_asm_type_p_vectors_);
+ CachedVector<AsmType*> param_specific_types(&cached_asm_type_p_vectors_);
EXPECT_TOKENn('(');
while (!failed_ && !Peek(')')) {
AsmType* t;
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index 8740cdad11..c7bf30c29e 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -154,9 +154,9 @@ class AsmJsParser {
template <typename T>
class CachedVector final : public ZoneVector<T> {
public:
- explicit CachedVector(CachedVectors<T>& cache)
- : ZoneVector<T>(cache.zone()), cache_(&cache) {
- cache.fill(this);
+ explicit CachedVector(CachedVectors<T>* cache)
+ : ZoneVector<T>(cache->zone()), cache_(cache) {
+ cache->fill(this);
}
~CachedVector() { cache_->reuse(this); }
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index e95afc8afa..e6daa80ec9 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
adamk@chromium.org
bmeurer@chromium.org
gsathya@chromium.org
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index a930a374b8..9987eb2844 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -49,8 +49,6 @@ static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) {
return "UnknownIntrinsicIndex";
}
-void AstNode::Print() { Print(Isolate::Current()); }
-
void AstNode::Print(Isolate* isolate) {
AllowHandleDereference allow_deref;
AstPrinter::PrintOut(isolate, this);
@@ -132,6 +130,10 @@ bool Expression::ToBooleanIsFalse() const {
return IsLiteral() && AsLiteral()->ToBooleanIsFalse();
}
+bool Expression::IsPrivateName() const {
+ return IsVariableProxy() && AsVariableProxy()->IsPrivateName();
+}
+
bool Expression::IsValidReferenceExpression() const {
return IsProperty() ||
(IsVariableProxy() && AsVariableProxy()->IsValidReferenceExpression());
@@ -176,7 +178,7 @@ void VariableProxy::BindTo(Variable* var) {
set_var(var);
set_is_resolved();
var->set_is_used();
- if (is_assigned()) var->set_maybe_assigned();
+ if (is_assigned()) var->SetMaybeAssigned();
}
Assignment::Assignment(NodeType node_type, Token::Value op, Expression* target,
@@ -601,8 +603,8 @@ void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
boilerplate_value = handle(Smi::kZero, isolate);
}
- kind = GetMoreGeneralElementsKind(kind,
- boilerplate_value->OptimalElementsKind());
+ kind = GetMoreGeneralElementsKind(
+ kind, boilerplate_value->OptimalElementsKind(isolate));
fixed_array->set(array_index, *boilerplate_value);
}
@@ -832,6 +834,9 @@ Call::CallType Call::GetCallType() const {
Property* property = expression()->AsProperty();
if (property != nullptr) {
+ if (property->IsPrivateReference()) {
+ return PRIVATE_CALL;
+ }
bool is_super = property->IsSuperAccess();
if (property->key()->IsPropertyName()) {
return is_super ? NAMED_SUPER_PROPERTY_CALL : NAMED_PROPERTY_CALL;
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 27d298c88e..bd52d1b2c0 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -147,7 +147,6 @@ class AstNode: public ZoneObject {
int position() const { return position_; }
#ifdef DEBUG
- void Print();
void Print(Isolate* isolate);
#endif // DEBUG
@@ -205,6 +204,9 @@ class Expression : public AstNode {
// True iff the expression is a valid reference expression.
bool IsValidReferenceExpression() const;
+ // True iff the expression is a private name.
+ bool IsPrivateName() const;
+
// Helpers for ToBoolean conversion.
bool ToBooleanIsTrue() const;
bool ToBooleanIsFalse() const;
@@ -1421,32 +1423,6 @@ class ObjectLiteral final : public AggregateLiteral {
: public BitField<bool, FastElementsField::kNext, 1> {};
};
-
-// A map from property names to getter/setter pairs allocated in the zone.
-class AccessorTable
- : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
- bool (*)(void*, void*),
- ZoneAllocationPolicy> {
- public:
- explicit AccessorTable(Zone* zone)
- : base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
- bool (*)(void*, void*), ZoneAllocationPolicy>(
- Literal::Match, ZoneAllocationPolicy(zone)),
- zone_(zone) {}
-
- Iterator lookup(Literal* literal) {
- Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
- if (it->second == nullptr) {
- it->second = new (zone_) ObjectLiteral::Accessors();
- }
- return it;
- }
-
- private:
- Zone* zone_;
-};
-
-
// An array literal has a literals object that is used
// for minimizing the work when constructing it at runtime.
class ArrayLiteral final : public AggregateLiteral {
@@ -1533,7 +1509,7 @@ class VariableProxy final : public Expression {
void set_is_assigned() {
bit_field_ = IsAssignedField::update(bit_field_, true);
if (is_resolved()) {
- var()->set_maybe_assigned();
+ var()->SetMaybeAssigned();
}
}
@@ -1635,11 +1611,12 @@ class VariableProxy final : public Expression {
// Otherwise, the assignment is to a non-property (a global, a local slot, a
// parameter slot, or a destructuring pattern).
enum AssignType {
- NON_PROPERTY,
- NAMED_PROPERTY,
- KEYED_PROPERTY,
- NAMED_SUPER_PROPERTY,
- KEYED_SUPER_PROPERTY
+ NON_PROPERTY, // destructuring
+ NAMED_PROPERTY, // obj.key
+ KEYED_PROPERTY, // obj[key]
+ NAMED_SUPER_PROPERTY, // super.key
+ KEYED_SUPER_PROPERTY, // super[key]
+ PRIVATE_METHOD // obj.#key: #key is a private method
};
class Property final : public Expression {
@@ -1650,10 +1627,19 @@ class Property final : public Expression {
Expression* key() const { return key_; }
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
+ bool IsPrivateReference() const { return key()->IsPrivateName(); }
// Returns the properties assign type.
static AssignType GetAssignType(Property* property) {
if (property == nullptr) return NON_PROPERTY;
+ if (property->IsPrivateReference()) {
+ DCHECK(!property->IsSuperAccess());
+ VariableProxy* proxy = property->key()->AsVariableProxy();
+ DCHECK_NOT_NULL(proxy);
+ Variable* var = proxy->var();
+ // Use KEYED_PROPERTY for private fields.
+ return var->requires_brand_check() ? PRIVATE_METHOD : KEYED_PROPERTY;
+ }
bool super_access = property->IsSuperAccess();
return (property->key()->IsPropertyName())
? (super_access ? NAMED_SUPER_PROPERTY : NAMED_PROPERTY)
@@ -1715,6 +1701,7 @@ class Call final : public Expression {
KEYED_PROPERTY_CALL,
NAMED_SUPER_PROPERTY_CALL,
KEYED_SUPER_PROPERTY_CALL,
+ PRIVATE_CALL,
SUPER_CALL,
RESOLVED_PROPERTY_CALL,
OTHER_CALL
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 5e9bbc6332..261b72c352 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
-bool ModuleDescriptor::AstRawStringComparer::operator()(
+bool SourceTextModuleDescriptor::AstRawStringComparer::operator()(
const AstRawString* lhs, const AstRawString* rhs) const {
// Fast path for equal pointers: a pointer is not strictly less than itself.
if (lhs == rhs) return false;
@@ -27,12 +27,10 @@ bool ModuleDescriptor::AstRawStringComparer::operator()(
return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length()) < 0;
}
-void ModuleDescriptor::AddImport(const AstRawString* import_name,
- const AstRawString* local_name,
- const AstRawString* module_request,
- const Scanner::Location loc,
- const Scanner::Location specifier_loc,
- Zone* zone) {
+void SourceTextModuleDescriptor::AddImport(
+ const AstRawString* import_name, const AstRawString* local_name,
+ const AstRawString* module_request, const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
entry->import_name = import_name;
@@ -40,38 +38,34 @@ void ModuleDescriptor::AddImport(const AstRawString* import_name,
AddRegularImport(entry);
}
-void ModuleDescriptor::AddStarImport(const AstRawString* local_name,
- const AstRawString* module_request,
- const Scanner::Location loc,
- const Scanner::Location specifier_loc,
- Zone* zone) {
+void SourceTextModuleDescriptor::AddStarImport(
+ const AstRawString* local_name, const AstRawString* module_request,
+ const Scanner::Location loc, const Scanner::Location specifier_loc,
+ Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddNamespaceImport(entry, zone);
}
-void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request,
- const Scanner::Location specifier_loc) {
+void SourceTextModuleDescriptor::AddEmptyImport(
+ const AstRawString* module_request, const Scanner::Location specifier_loc) {
AddModuleRequest(module_request, specifier_loc);
}
-
-void ModuleDescriptor::AddExport(
- const AstRawString* local_name, const AstRawString* export_name,
- Scanner::Location loc, Zone* zone) {
+void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name,
+ const AstRawString* export_name,
+ Scanner::Location loc, Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->export_name = export_name;
entry->local_name = local_name;
AddRegularExport(entry);
}
-void ModuleDescriptor::AddExport(const AstRawString* import_name,
- const AstRawString* export_name,
- const AstRawString* module_request,
- const Scanner::Location loc,
- const Scanner::Location specifier_loc,
- Zone* zone) {
+void SourceTextModuleDescriptor::AddExport(
+ const AstRawString* import_name, const AstRawString* export_name,
+ const AstRawString* module_request, const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone) {
DCHECK_NOT_NULL(import_name);
DCHECK_NOT_NULL(export_name);
Entry* entry = new (zone) Entry(loc);
@@ -81,10 +75,9 @@ void ModuleDescriptor::AddExport(const AstRawString* import_name,
AddSpecialExport(entry, zone);
}
-void ModuleDescriptor::AddStarExport(const AstRawString* module_request,
- const Scanner::Location loc,
- const Scanner::Location specifier_loc,
- Zone* zone) {
+void SourceTextModuleDescriptor::AddStarExport(
+ const AstRawString* module_request, const Scanner::Location loc,
+ const Scanner::Location specifier_loc, Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddSpecialExport(entry, zone);
@@ -98,24 +91,25 @@ Handle<Object> ToStringOrUndefined(Isolate* isolate, const AstRawString* s) {
}
} // namespace
-Handle<ModuleInfoEntry> ModuleDescriptor::Entry::Serialize(
+Handle<SourceTextModuleInfoEntry> SourceTextModuleDescriptor::Entry::Serialize(
Isolate* isolate) const {
CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier?
- return ModuleInfoEntry::New(
+ return SourceTextModuleInfoEntry::New(
isolate, ToStringOrUndefined(isolate, export_name),
ToStringOrUndefined(isolate, local_name),
ToStringOrUndefined(isolate, import_name), module_request, cell_index,
location.beg_pos, location.end_pos);
}
-Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
- Zone* zone) const {
+Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
+ Isolate* isolate, Zone* zone) const {
// We serialize regular exports in a way that lets us later iterate over their
// local names and for each local name immediately access all its export
// names. (Regular exports have neither import name nor module request.)
ZoneVector<Handle<Object>> data(
- ModuleInfo::kRegularExportLength * regular_exports_.size(), zone);
+ SourceTextModuleInfo::kRegularExportLength * regular_exports_.size(),
+ zone);
int index = 0;
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
@@ -130,12 +124,13 @@ Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
} while (next != regular_exports_.end() && next->first == it->first);
Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(count);
- data[index + ModuleInfo::kRegularExportLocalNameOffset] =
+ data[index + SourceTextModuleInfo::kRegularExportLocalNameOffset] =
it->second->local_name->string();
- data[index + ModuleInfo::kRegularExportCellIndexOffset] =
+ data[index + SourceTextModuleInfo::kRegularExportCellIndexOffset] =
handle(Smi::FromInt(it->second->cell_index), isolate);
- data[index + ModuleInfo::kRegularExportExportNamesOffset] = export_names;
- index += ModuleInfo::kRegularExportLength;
+ data[index + SourceTextModuleInfo::kRegularExportExportNamesOffset] =
+ export_names;
+ index += SourceTextModuleInfo::kRegularExportLength;
// Collect the export names.
int i = 0;
@@ -159,7 +154,7 @@ Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
return result;
}
-void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
+void SourceTextModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
Entry* entry = it->second;
DCHECK_NOT_NULL(entry->local_name);
@@ -191,14 +186,14 @@ void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
}
}
-ModuleDescriptor::CellIndexKind ModuleDescriptor::GetCellIndexKind(
- int cell_index) {
+SourceTextModuleDescriptor::CellIndexKind
+SourceTextModuleDescriptor::GetCellIndexKind(int cell_index) {
if (cell_index > 0) return kExport;
if (cell_index < 0) return kImport;
return kInvalid;
}
-void ModuleDescriptor::AssignCellIndices() {
+void SourceTextModuleDescriptor::AssignCellIndices() {
int export_index = 1;
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
auto current_key = it->first;
@@ -230,10 +225,11 @@ void ModuleDescriptor::AssignCellIndices() {
namespace {
-const ModuleDescriptor::Entry* BetterDuplicate(
- const ModuleDescriptor::Entry* candidate,
- ZoneMap<const AstRawString*, const ModuleDescriptor::Entry*>& export_names,
- const ModuleDescriptor::Entry* current_duplicate) {
+const SourceTextModuleDescriptor::Entry* BetterDuplicate(
+ const SourceTextModuleDescriptor::Entry* candidate,
+ ZoneMap<const AstRawString*, const SourceTextModuleDescriptor::Entry*>&
+ export_names,
+ const SourceTextModuleDescriptor::Entry* current_duplicate) {
DCHECK_NOT_NULL(candidate->export_name);
DCHECK(candidate->location.IsValid());
auto insert_result =
@@ -249,11 +245,11 @@ const ModuleDescriptor::Entry* BetterDuplicate(
} // namespace
-const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport(
- Zone* zone) const {
- const ModuleDescriptor::Entry* duplicate = nullptr;
- ZoneMap<const AstRawString*, const ModuleDescriptor::Entry*> export_names(
- zone);
+const SourceTextModuleDescriptor::Entry*
+SourceTextModuleDescriptor::FindDuplicateExport(Zone* zone) const {
+ const SourceTextModuleDescriptor::Entry* duplicate = nullptr;
+ ZoneMap<const AstRawString*, const SourceTextModuleDescriptor::Entry*>
+ export_names(zone);
for (const auto& elem : regular_exports_) {
duplicate = BetterDuplicate(elem.second, export_names, duplicate);
}
@@ -264,9 +260,9 @@ const ModuleDescriptor::Entry* ModuleDescriptor::FindDuplicateExport(
return duplicate;
}
-bool ModuleDescriptor::Validate(ModuleScope* module_scope,
- PendingCompilationErrorHandler* error_handler,
- Zone* zone) {
+bool SourceTextModuleDescriptor::Validate(
+ ModuleScope* module_scope, PendingCompilationErrorHandler* error_handler,
+ Zone* zone) {
DCHECK_EQ(this, module_scope->module());
DCHECK_NOT_NULL(error_handler);
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index c3aa2bd0ad..4921d41932 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -13,13 +13,13 @@ namespace internal {
class AstRawString;
-class ModuleInfo;
-class ModuleInfoEntry;
+class SourceTextModuleInfo;
+class SourceTextModuleInfoEntry;
class PendingCompilationErrorHandler;
-class ModuleDescriptor : public ZoneObject {
+class SourceTextModuleDescriptor : public ZoneObject {
public:
- explicit ModuleDescriptor(Zone* zone)
+ explicit SourceTextModuleDescriptor(Zone* zone)
: module_requests_(zone),
special_exports_(zone),
namespace_imports_(zone),
@@ -84,9 +84,9 @@ class ModuleDescriptor : public ZoneObject {
const AstRawString* import_name;
// The module_request value records the order in which modules are
- // requested. It also functions as an index into the ModuleInfo's array of
- // module specifiers and into the Module's array of requested modules. A
- // negative value means no module request.
+ // requested. It also functions as an index into the SourceTextModuleInfo's
+ // array of module specifiers and into the Module's array of requested
+ // modules. A negative value means no module request.
int module_request;
// Import/export entries that are associated with a MODULE-allocated
@@ -107,7 +107,7 @@ class ModuleDescriptor : public ZoneObject {
module_request(-1),
cell_index(0) {}
- Handle<ModuleInfoEntry> Serialize(Isolate* isolate) const;
+ Handle<SourceTextModuleInfoEntry> Serialize(Isolate* isolate) const;
};
enum CellIndexKind { kInvalid, kExport, kImport };
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index eca091d61f..c0fe3baff3 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -1278,14 +1278,24 @@ void AstPrinter::VisitProperty(Property* node) {
IndentedScope indent(this, buf.begin(), node->position());
Visit(node->obj());
- AssignType property_kind = Property::GetAssignType(node);
- if (property_kind == NAMED_PROPERTY ||
- property_kind == NAMED_SUPER_PROPERTY) {
- PrintLiteralIndented("NAME", node->key()->AsLiteral(), false);
- } else {
- DCHECK(property_kind == KEYED_PROPERTY ||
- property_kind == KEYED_SUPER_PROPERTY);
- PrintIndentedVisit("KEY", node->key());
+ AssignType type = Property::GetAssignType(node);
+ switch (type) {
+ case NAMED_PROPERTY:
+ case NAMED_SUPER_PROPERTY: {
+ PrintLiteralIndented("NAME", node->key()->AsLiteral(), false);
+ break;
+ }
+ case PRIVATE_METHOD: {
+ PrintIndentedVisit("PRIVATE_METHOD", node->key());
+ break;
+ }
+ case KEYED_PROPERTY:
+ case KEYED_SUPER_PROPERTY: {
+ PrintIndentedVisit("KEY", node->key());
+ break;
+ }
+ case NON_PROPERTY:
+ UNREACHABLE();
}
}
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index e45303c64b..237d98ec60 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -9,7 +9,7 @@
#include "src/ast/ast.h"
#include "src/base/optional.h"
#include "src/builtins/accessors.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/module-inl.h"
@@ -40,6 +40,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag,
+ RequiresBrandCheckFlag requires_brand_check,
bool* was_added) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
@@ -51,8 +52,9 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
if (*was_added) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
- Variable* variable = new (zone) Variable(
- scope, name, mode, kind, initialization_flag, maybe_assigned_flag);
+ Variable* variable =
+ new (zone) Variable(scope, name, mode, kind, initialization_flag,
+ maybe_assigned_flag, requires_brand_check);
p->value = variable;
}
return reinterpret_cast<Variable*>(p->value);
@@ -128,7 +130,7 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
AstValueFactory* avfactory)
: DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, kModule),
module_descriptor_(new (avfactory->zone())
- ModuleDescriptor(avfactory->zone())) {
+ SourceTextModuleDescriptor(avfactory->zone())) {
set_language_mode(LanguageMode::kStrict);
DeclareThis(avfactory);
}
@@ -262,7 +264,6 @@ void Scope::SetDefaults() {
is_debug_evaluate_scope_ = false;
inner_scope_calls_eval_ = false;
- force_context_allocation_ = false;
force_context_allocation_for_parameters_ = false;
is_declaration_scope_ = false;
@@ -506,8 +507,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(is_being_lazily_parsed_);
bool was_added;
Variable* var = DeclareVariableName(name, VariableMode::kVar, &was_added);
- if (sloppy_block_function->init() == Token::ASSIGN)
- var->set_maybe_assigned();
+ if (sloppy_block_function->init() == Token::ASSIGN) {
+ var->SetMaybeAssigned();
+ }
}
}
}
@@ -785,11 +787,13 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck;
{
location = VariableLocation::CONTEXT;
index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
+ &init_flag, &maybe_assigned_flag,
+ &requires_brand_check);
found = index >= 0;
}
@@ -814,9 +818,9 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name, Scope* cache) {
}
bool was_added;
- Variable* var =
- cache->variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
- init_flag, maybe_assigned_flag, &was_added);
+ Variable* var = cache->variables_.Declare(
+ zone(), this, name, mode, NORMAL_VARIABLE, init_flag, maybe_assigned_flag,
+ requires_brand_check, &was_added);
DCHECK(was_added);
var->AllocateTo(location, index);
return var;
@@ -889,7 +893,7 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
// assigned because they might be accessed by a lazily parsed top-level
// function, which, for efficiency, we preparse without variable tracking.
if (is_script_scope() || is_module_scope()) {
- if (mode != VariableMode::kConst) var->set_maybe_assigned();
+ if (mode != VariableMode::kConst) var->SetMaybeAssigned();
var->set_is_used();
}
@@ -938,7 +942,7 @@ Variable* Scope::DeclareVariable(
DCHECK(*was_added);
}
} else {
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
if (V8_UNLIKELY(IsLexicalVariableMode(mode) ||
IsLexicalVariableMode(var->mode()))) {
// The name was declared in this scope before; check for conflicting
@@ -1009,7 +1013,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
}
// Sloppy block function redefinition.
}
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
}
var->set_is_used();
return var;
@@ -1040,7 +1044,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
bool was_added;
return cache->variables_.Declare(
zone(), this, name, VariableMode::kDynamicGlobal, kind,
- kCreatedInitialized, kNotAssigned, &was_added);
+ kCreatedInitialized, kNotAssigned, kNoBrandCheck, &was_added);
// TODO(neis): Mark variable as maybe-assigned?
}
@@ -1063,7 +1067,7 @@ Variable* Scope::NewTemporary(const AstRawString* name,
Variable* var = new (zone()) Variable(scope, name, VariableMode::kTemporary,
NORMAL_VARIABLE, kCreatedInitialized);
scope->AddLocal(var);
- if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned();
+ if (maybe_assigned == kMaybeAssigned) var->SetMaybeAssigned();
return var;
}
@@ -1401,7 +1405,7 @@ void Scope::AnalyzePartially(DeclarationScope* max_outer_scope,
}
} else {
var->set_is_used();
- if (proxy->is_assigned()) var->set_maybe_assigned();
+ if (proxy->is_assigned()) var->SetMaybeAssigned();
}
}
@@ -1592,6 +1596,10 @@ void PrintVar(int indent, Variable* var) {
if (comma) PrintF(", ");
PrintF("hole initialization elided");
}
+ if (var->requires_brand_check()) {
+ if (comma) PrintF(", ");
+ PrintF("requires brand check");
+ }
PrintF("\n");
}
@@ -1766,9 +1774,9 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
// Declare a new non-local.
DCHECK(IsDynamicVariableMode(mode));
bool was_added;
- Variable* var =
- variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
- kCreatedInitialized, kNotAssigned, &was_added);
+ Variable* var = variables_.Declare(zone(), this, name, mode, NORMAL_VARIABLE,
+ kCreatedInitialized, kNotAssigned,
+ kNoBrandCheck, &was_added);
// Allocate it by giving it a dynamic lookup.
var->AllocateTo(VariableLocation::LOOKUP, -1);
return var;
@@ -1879,11 +1887,14 @@ Variable* Scope::LookupWith(VariableProxy* proxy, Scope* scope,
DCHECK(!scope->already_resolved_);
var->set_is_used();
var->ForceContextAllocation();
- if (proxy->is_assigned()) var->set_maybe_assigned();
+ if (proxy->is_assigned()) var->SetMaybeAssigned();
}
if (entry_point != nullptr) entry_point->variables_.Remove(var);
Scope* target = entry_point == nullptr ? scope : entry_point;
- return target->NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+ Variable* dynamic =
+ target->NonLocal(proxy->raw_name(), VariableMode::kDynamic);
+ dynamic->set_local_if_not_shadowed(var);
+ return dynamic;
}
Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope,
@@ -1912,7 +1923,7 @@ Variable* Scope::LookupSloppyEval(VariableProxy* proxy, Scope* scope,
// script scope are always dynamic.
if (var->IsGlobalObjectProperty()) {
Scope* target = entry_point == nullptr ? scope : entry_point;
- return target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal);
+ var = target->NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal);
}
if (var->is_dynamic()) return var;
@@ -2010,7 +2021,7 @@ void Scope::ResolvePreparsedVariable(VariableProxy* proxy, Scope* scope,
var->set_is_used();
if (!var->is_dynamic()) {
var->ForceContextAllocation();
- if (proxy->is_assigned()) var->set_maybe_assigned();
+ if (proxy->is_assigned()) var->SetMaybeAssigned();
return;
}
}
@@ -2054,7 +2065,7 @@ bool Scope::MustAllocate(Variable* var) {
if (!var->raw_name()->IsEmpty() &&
(inner_scope_calls_eval_ || is_catch_scope() || is_script_scope())) {
var->set_is_used();
- if (inner_scope_calls_eval_) var->set_maybe_assigned();
+ if (inner_scope_calls_eval_) var->SetMaybeAssigned();
}
DCHECK(!var->has_forced_context_allocation() || var->is_used());
// Global variables do not need to be allocated.
@@ -2124,7 +2135,7 @@ void DeclarationScope::AllocateParameterLocals() {
DCHECK_EQ(this, var->scope());
if (has_mapped_arguments) {
var->set_is_used();
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
var->ForceContextAllocation();
}
AllocateParameter(var, i);
@@ -2315,12 +2326,13 @@ int Scope::ContextLocalCount() const {
(is_function_var_in_context ? 1 : 0);
}
-Variable* ClassScope::DeclarePrivateName(const AstRawString* name,
- bool* was_added) {
+Variable* ClassScope::DeclarePrivateName(
+ const AstRawString* name, RequiresBrandCheckFlag requires_brand_check,
+ bool* was_added) {
Variable* result = EnsureRareData()->private_name_map.Declare(
zone(), this, name, VariableMode::kConst, NORMAL_VARIABLE,
InitializationFlag::kNeedsInitialization,
- MaybeAssignedFlag::kMaybeAssigned, was_added);
+ MaybeAssignedFlag::kMaybeAssigned, requires_brand_check, was_added);
if (*was_added) {
locals_.Add(result);
}
@@ -2404,8 +2416,10 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- int index = ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
+ RequiresBrandCheckFlag requires_brand_check;
+ int index =
+ ScopeInfo::ContextSlotIndex(*scope_info_, name_handle, &mode, &init_flag,
+ &maybe_assigned_flag, &requires_brand_check);
if (index < 0) {
return nullptr;
}
@@ -2417,7 +2431,7 @@ Variable* ClassScope::LookupPrivateNameInScopeInfo(const AstRawString* name) {
// Add the found private name to the map to speed up subsequent
// lookups for the same name.
bool was_added;
- Variable* var = DeclarePrivateName(name, &was_added);
+ Variable* var = DeclarePrivateName(name, requires_brand_check, &was_added);
DCHECK(was_added);
var->AllocateTo(VariableLocation::CONTEXT, index);
return var;
@@ -2454,8 +2468,7 @@ bool ClassScope::ResolvePrivateNames(ParseInfo* info) {
Scanner::Location loc = proxy->location();
info->pending_error_handler()->ReportMessageAt(
loc.beg_pos, loc.end_pos,
- MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name(),
- kSyntaxError);
+ MessageTemplate::kInvalidPrivateFieldResolution, proxy->raw_name());
return false;
} else {
var->set_is_used();
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 1feaad2a90..932d5c70b9 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -41,7 +41,9 @@ class VariableMap : public ZoneHashMap {
Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag, bool* was_added);
+ MaybeAssignedFlag maybe_assigned_flag,
+ RequiresBrandCheckFlag requires_brand_check,
+ bool* was_added);
V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name);
void Remove(Variable* var);
@@ -556,7 +558,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
MaybeAssignedFlag maybe_assigned_flag, bool* was_added) {
Variable* result =
variables_.Declare(zone, this, name, mode, kind, initialization_flag,
- maybe_assigned_flag, was_added);
+ maybe_assigned_flag, kNoBrandCheck, was_added);
if (*was_added) locals_.Add(result);
return result;
}
@@ -712,7 +714,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// True if one of the inner scopes or the scope itself calls eval.
bool inner_scope_calls_eval_ : 1;
- bool force_context_allocation_ : 1;
bool force_context_allocation_for_parameters_ : 1;
// True if it holds 'var' declarations.
@@ -1155,14 +1156,14 @@ class ModuleScope final : public DeclarationScope {
AstValueFactory* avfactory);
// Returns nullptr in a deserialized scope.
- ModuleDescriptor* module() const { return module_descriptor_; }
+ SourceTextModuleDescriptor* module() const { return module_descriptor_; }
// Set MODULE as VariableLocation for all variables that will live in a
// module's export table.
void AllocateModuleVariables();
private:
- ModuleDescriptor* const module_descriptor_;
+ SourceTextModuleDescriptor* const module_descriptor_;
};
class V8_EXPORT_PRIVATE ClassScope : public Scope {
@@ -1174,7 +1175,9 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
// Declare a private name in the private name map and add it to the
// local variables of this scope.
- Variable* DeclarePrivateName(const AstRawString* name, bool* was_added);
+ Variable* DeclarePrivateName(const AstRawString* name,
+ RequiresBrandCheckFlag requires_brand_check,
+ bool* was_added);
void AddUnresolvedPrivateName(VariableProxy* proxy);
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index df40fee754..7805fa20c8 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -21,7 +21,8 @@ class Variable final : public ZoneObject {
public:
Variable(Scope* scope, const AstRawString* name, VariableMode mode,
VariableKind kind, InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned)
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
+ RequiresBrandCheckFlag requires_brand_check = kNoBrandCheck)
: scope_(scope),
name_(name),
local_if_not_shadowed_(nullptr),
@@ -31,6 +32,7 @@ class Variable final : public ZoneObject {
bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
InitializationFlagField::encode(initialization_flag) |
VariableModeField::encode(mode) |
+ RequiresBrandCheckField::encode(requires_brand_check) |
IsUsedField::encode(false) |
ForceContextAllocationField::encode(false) |
ForceHoleInitializationField::encode(false) |
@@ -69,8 +71,31 @@ class Variable final : public ZoneObject {
MaybeAssignedFlag maybe_assigned() const {
return MaybeAssignedFlagField::decode(bit_field_);
}
- void set_maybe_assigned() {
- bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned);
+ void SetMaybeAssigned() {
+ // If this variable is dynamically shadowing another variable, then that
+ // variable could also be assigned (in the non-shadowing case).
+ if (has_local_if_not_shadowed()) {
+ // Avoid repeatedly marking the same tree of variables by only recursing
+ // when this variable's maybe_assigned status actually changes.
+ if (!maybe_assigned()) {
+ local_if_not_shadowed()->SetMaybeAssigned();
+ }
+ DCHECK(local_if_not_shadowed()->maybe_assigned());
+ }
+ set_maybe_assigned();
+ }
+
+ RequiresBrandCheckFlag get_requires_brand_check_flag() const {
+ return RequiresBrandCheckField::decode(bit_field_);
+ }
+
+ bool requires_brand_check() const {
+ return get_requires_brand_check_flag() == kRequiresBrandCheck;
+ }
+
+ void set_requires_brand_check() {
+ bit_field_ =
+ RequiresBrandCheckField::update(bit_field_, kRequiresBrandCheck);
}
int initializer_position() { return initializer_position_; }
@@ -143,11 +168,16 @@ class Variable final : public ZoneObject {
}
Variable* local_if_not_shadowed() const {
- DCHECK(mode() == VariableMode::kDynamicLocal &&
- local_if_not_shadowed_ != nullptr);
+ DCHECK((mode() == VariableMode::kDynamicLocal ||
+ mode() == VariableMode::kDynamic) &&
+ has_local_if_not_shadowed());
return local_if_not_shadowed_;
}
+ bool has_local_if_not_shadowed() const {
+ return local_if_not_shadowed_ != nullptr;
+ }
+
void set_local_if_not_shadowed(Variable* local) {
local_if_not_shadowed_ = local;
}
@@ -200,15 +230,19 @@ class Variable final : public ZoneObject {
const AstRawString* name_;
// If this field is set, this variable references the stored locally bound
- // variable, but it might be shadowed by variable bindings introduced by
- // sloppy 'eval' calls between the reference scope (inclusive) and the
- // binding scope (exclusive).
+ // variable, but it might be shadowed by variable bindings introduced by with
+ // blocks or sloppy 'eval' calls between the reference scope (inclusive) and
+ // the binding scope (exclusive).
Variable* local_if_not_shadowed_;
Variable* next_;
int index_;
int initializer_position_;
uint16_t bit_field_;
+ void set_maybe_assigned() {
+ bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned);
+ }
+
class VariableModeField : public BitField16<VariableMode, 0, 3> {};
class VariableKindField
: public BitField16<VariableKind, VariableModeField::kNext, 3> {};
@@ -225,6 +259,9 @@ class Variable final : public ZoneObject {
class MaybeAssignedFlagField
: public BitField16<MaybeAssignedFlag,
ForceHoleInitializationField::kNext, 1> {};
+ class RequiresBrandCheckField
+ : public BitField16<RequiresBrandCheckFlag, MaybeAssignedFlagField::kNext,
+ 1> {};
Variable** next() { return &next_; }
friend List;
friend base::ThreadedListTraits<Variable>;
diff --git a/deps/v8/src/base/adapters.h b/deps/v8/src/base/adapters.h
index 92c500085d..f684b52ccb 100644
--- a/deps/v8/src/base/adapters.h
+++ b/deps/v8/src/base/adapters.h
@@ -45,7 +45,7 @@ class ReversedAdapter {
// // iterates through v from back to front
// }
template <typename T>
-ReversedAdapter<T> Reversed(T& t) {
+ReversedAdapter<T> Reversed(T&& t) {
return ReversedAdapter<T>(t);
}
diff --git a/deps/v8/src/base/lsan.h b/deps/v8/src/base/lsan.h
new file mode 100644
index 0000000000..fd9bbd21c1
--- /dev/null
+++ b/deps/v8/src/base/lsan.h
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// LeakSanitizer support.
+
+#ifndef V8_BASE_LSAN_H_
+#define V8_BASE_LSAN_H_
+
+#include <type_traits>
+
+// There is no compile time flag for LSan, to enable this whenever ASan is
+// enabled. Note that LSan can be used as part of ASan with 'detect_leaks=1'.
+// On windows, LSan is not implemented yet, so disable it there.
+#if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#include <sanitizer/lsan_interface.h>
+
+#define LSAN_IGNORE_OBJECT(ptr) __lsan_ignore_object(ptr)
+
+#else // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#define LSAN_IGNORE_OBJECT(ptr) \
+ static_assert(std::is_convertible<decltype(ptr), const void*>::value, \
+ "LSAN_IGNORE_OBJECT can only be used with pointer types")
+
+#endif // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
+
+#endif // V8_BASE_LSAN_H_
diff --git a/deps/v8/src/common/v8memory.h b/deps/v8/src/base/memory.h
index 02ba2de848..087f67291d 100644
--- a/deps/v8/src/common/v8memory.h
+++ b/deps/v8/src/base/memory.h
@@ -2,14 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMMON_V8MEMORY_H_
-#define V8_COMMON_V8MEMORY_H_
+#ifndef V8_BASE_MEMORY_H_
+#define V8_BASE_MEMORY_H_
#include "src/base/macros.h"
-#include "src/common/globals.h"
namespace v8 {
-namespace internal {
+namespace base {
+
+using Address = uintptr_t;
+using byte = uint8_t;
// Memory provides an interface to 'raw' memory. It encapsulates the casts
// that typically are needed when incompatible pointer types are used.
@@ -39,22 +41,6 @@ static inline void WriteUnalignedValue(Address p, V value) {
memcpy(reinterpret_cast<void*>(p), &value, sizeof(V));
}
-static inline uint16_t ReadUnalignedUInt16(Address p) {
- return ReadUnalignedValue<uint16_t>(p);
-}
-
-static inline void WriteUnalignedUInt16(Address p, uint16_t value) {
- WriteUnalignedValue(p, value);
-}
-
-static inline uint32_t ReadUnalignedUInt32(Address p) {
- return ReadUnalignedValue<uint32_t>(p);
-}
-
-static inline void WriteUnalignedUInt32(Address p, uint32_t value) {
- WriteUnalignedValue(p, value);
-}
-
template <typename V>
static inline V ReadLittleEndianValue(Address p) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
@@ -93,7 +79,7 @@ static inline void WriteLittleEndianValue(V* p, V value) {
WriteLittleEndianValue<V>(reinterpret_cast<Address>(p), value);
}
-} // namespace internal
+} // namespace base
} // namespace v8
-#endif // V8_COMMON_V8MEMORY_H_
+#endif // V8_BASE_MEMORY_H_
diff --git a/deps/v8/src/base/platform/OWNERS b/deps/v8/src/base/platform/OWNERS
index 7f64f4dedb..bf5455c9af 100644
--- a/deps/v8/src/base/platform/OWNERS
+++ b/deps/v8/src/base/platform/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
hpayer@chromium.org
mlippautz@chromium.org
ulan@chromium.org
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 11499f572c..fa175c3917 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -48,7 +48,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
size_t request_size = size + (alignment - page_size);
zx_handle_t vmo;
- if (zx_vmo_create(request_size, ZX_VMO_NON_RESIZABLE, &vmo) != ZX_OK) {
+ if (zx_vmo_create(request_size, 0, &vmo) != ZX_OK) {
return nullptr;
}
static const char kVirtualMemoryName[] = "v8-virtualmem";
@@ -152,7 +152,7 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
const auto kMicrosPerSecond = 1000000ULL;
zx_time_t nanos_since_thread_started;
zx_status_t status =
- zx_clock_get_new(ZX_CLOCK_THREAD, &nanos_since_thread_started);
+ zx_clock_get(ZX_CLOCK_THREAD, &nanos_since_thread_started);
if (status != ZX_OK) {
return -1;
}
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 7f4ce192db..6da83d7e02 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -199,6 +199,12 @@ void* OS::GetRandomMmapAddr() {
MutexGuard guard(rng_mutex.Pointer());
GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr));
}
+#if defined(__APPLE__)
+#if V8_TARGET_ARCH_ARM64
+ DCHECK_EQ(1 << 14, AllocatePageSize());
+ raw_addr = RoundDown(raw_addr, 1 << 14);
+#endif
+#endif
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
// If random hint addresses interfere with address ranges hard coded in
diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h
index bb024ca87e..b11dfb86b4 100644
--- a/deps/v8/src/base/small-vector.h
+++ b/deps/v8/src/base/small-vector.h
@@ -88,22 +88,29 @@ class SmallVector {
DCHECK_NE(0, size());
return end_[-1];
}
+ const T& back() const {
+ DCHECK_NE(0, size());
+ return end_[-1];
+ }
T& operator[](size_t index) {
DCHECK_GT(size(), index);
return begin_[index];
}
- const T& operator[](size_t index) const {
+ const T& at(size_t index) const {
DCHECK_GT(size(), index);
return begin_[index];
}
+ const T& operator[](size_t index) const { return at(index); }
+
template <typename... Args>
void emplace_back(Args&&... args) {
- if (V8_UNLIKELY(end_ == end_of_storage_)) Grow();
- new (end_) T(std::forward<Args>(args)...);
- ++end_;
+ T* end = end_;
+ if (V8_UNLIKELY(end == end_of_storage_)) end = Grow();
+ new (end) T(std::forward<Args>(args)...);
+ end_ = end + 1;
}
void pop_back(size_t count = 1) {
@@ -135,7 +142,12 @@ class SmallVector {
typename std::aligned_storage<sizeof(T) * kInlineSize, alignof(T)>::type
inline_storage_;
- void Grow(size_t min_capacity = 0) {
+ // Grows the backing store by a factor of two. Returns the new end of the used
+ // storage (this reduces binary size).
+ V8_NOINLINE T* Grow() { return Grow(0); }
+
+ // Grows the backing store by a factor of two, and at least to {min_capacity}.
+ V8_NOINLINE T* Grow(size_t min_capacity) {
size_t in_use = end_ - begin_;
size_t new_capacity =
base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
@@ -145,6 +157,7 @@ class SmallVector {
begin_ = new_storage;
end_ = new_storage + in_use;
end_of_storage_ = new_storage + new_capacity;
+ return end_;
}
bool is_big() const { return begin_ != inline_storage_begin(); }
diff --git a/deps/v8/src/base/vlq-base64.cc b/deps/v8/src/base/vlq-base64.cc
new file mode 100644
index 0000000000..62e63ac872
--- /dev/null
+++ b/deps/v8/src/base/vlq-base64.cc
@@ -0,0 +1,58 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <climits>
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/vlq-base64.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+constexpr int8_t kCharToDigit[] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 0x3e, -1, -1, -1, 0x3f,
+ 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, -1, -1,
+ -1, -1, -1, -1, -1, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
+ 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12,
+ 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, -1, -1, -1, -1, -1,
+ -1, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24,
+ 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x32, 0x33, -1, -1, -1, -1, -1};
+
+constexpr uint32_t kContinueShift = 5;
+constexpr uint32_t kContinueMask = 1 << kContinueShift;
+constexpr uint32_t kDataMask = kContinueMask - 1;
+
+int8_t charToDigitDecode(uint8_t c) { return c < 128u ? kCharToDigit[c] : -1; }
+} // namespace
+
+int8_t charToDigitDecodeForTesting(uint8_t c) { return charToDigitDecode(c); }
+
+int32_t VLQBase64Decode(const char* start, size_t sz, size_t* pos) {
+ uint32_t res = 0;
+ uint64_t shift = 0;
+ int32_t digit;
+
+ do {
+ if (*pos >= sz) {
+ return std::numeric_limits<int32_t>::min();
+ }
+ digit = static_cast<int>(charToDigitDecode(start[*pos]));
+ bool is_last_byte = (shift + kContinueShift >= 32);
+ if (digit == -1 || (is_last_byte && (digit >> 2) != 0)) {
+ return std::numeric_limits<int32_t>::min();
+ }
+ res += (digit & kDataMask) << shift;
+ shift += kContinueShift;
+ (*pos)++;
+ } while (digit & kContinueMask);
+ return (res & 1) ? -static_cast<int32_t>(res >> 1) : (res >> 1);
+}
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/vlq-base64.h b/deps/v8/src/base/vlq-base64.h
new file mode 100644
index 0000000000..5d8633798b
--- /dev/null
+++ b/deps/v8/src/base/vlq-base64.h
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_VLQ_BASE64_H_
+#define V8_BASE_VLQ_BASE64_H_
+
+#include <string>
+
+#include "src/base/base-export.h"
+
+namespace v8 {
+namespace base {
+V8_BASE_EXPORT int8_t charToDigitDecodeForTesting(uint8_t c);
+
+// Decodes a VLQ-Base64-encoded string into 32bit digits. A valid return value
+// is within [-2^31+1, 2^31-1]. This function returns -2^31
+// (std::numeric_limits<int32_t>::min()) when bad input s is passed.
+V8_BASE_EXPORT int32_t VLQBase64Decode(const char* start, size_t sz,
+ size_t* pos);
+} // namespace base
+} // namespace v8
+#endif // V8_BASE_VLQ_BASE64_H_
diff --git a/deps/v8/src/builtins/OWNERS b/deps/v8/src/builtins/OWNERS
new file mode 100644
index 0000000000..450423f878
--- /dev/null
+++ b/deps/v8/src/builtins/OWNERS
@@ -0,0 +1,3 @@
+file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 25d37d73b4..ea6308622d 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -287,7 +287,8 @@ void Accessors::StringLengthGetter(
if (!value.IsString()) {
// Not a string value. That means that we either got a String wrapper or
// a Value with a String wrapper in its prototype chain.
- value = JSValue::cast(*Utils::OpenHandle(*info.Holder())).value();
+ value =
+ JSPrimitiveWrapper::cast(*Utils::OpenHandle(*info.Holder())).value();
}
Object result = Smi::FromInt(String::cast(value).length());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
@@ -305,7 +306,7 @@ Handle<AccessorInfo> Accessors::MakeStringLengthInfo(Isolate* isolate) {
static Handle<Object> GetFunctionPrototype(Isolate* isolate,
Handle<JSFunction> function) {
if (!function->has_prototype()) {
- Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function);
+ Handle<JSObject> proto = isolate->factory()->NewFunctionPrototype(function);
JSFunction::SetPrototype(function, proto);
}
return Handle<Object>(function->prototype(), isolate);
diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq
index add66917c0..6df5f801a3 100644
--- a/deps/v8/src/builtins/arguments.tq
+++ b/deps/v8/src/builtins/arguments.tq
@@ -34,13 +34,13 @@ namespace arguments {
@export
macro GetArgumentsFrameAndCount(implicit context: Context)(f: JSFunction):
ArgumentsInfo {
- let frame: Frame = LoadParentFramePointer();
+ const frame: Frame = LoadParentFramePointer();
assert(frame.function == f);
const shared: SharedFunctionInfo = f.shared_function_info;
const formalParameterCount: bint =
Convert<bint>(Convert<int32>(shared.formal_parameter_count));
- let argumentCount: bint = formalParameterCount;
+ const argumentCount: bint = formalParameterCount;
const adaptor: ArgumentsAdaptorFrame =
Cast<ArgumentsAdaptorFrame>(frame.caller)
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 54c16932fa..9b9956b0fb 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -1093,11 +1093,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r9, Operand(0));
__ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset));
+ BytecodeArray::kOsrNestingLevelOffset));
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
@@ -1509,13 +1509,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ ldr(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
- __ Pop(scratch);
+ Register builtin = temps.Acquire();
+ __ Pop(builtin);
__ add(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(lr);
- __ add(pc, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadEntryFromBuiltinIndex(builtin);
+ __ bx(builtin);
}
} // namespace
@@ -2577,7 +2580,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
- __ stop("Unexpected alignment");
+ __ stop();
__ bind(&alignment_as_expected);
}
}
@@ -2606,7 +2609,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CompareRoot(r3, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ b(eq, &okay);
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -2835,19 +2838,25 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(function_address == r1 || function_address == r2);
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ Move(r9, ExternalReference::is_profiling_address(isolate));
__ ldrb(r9, MemOperand(r9, 0));
__ cmp(r9, Operand(0));
- __ b(eq, &profiler_disabled);
-
- // Additional parameter is the address of the actual callback.
- __ Move(r3, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- __ Move(r3, function_address);
+ __ b(ne, &profiler_enabled);
+ __ Move(r9, ExternalReference::address_of_runtime_stats_flag());
+ __ ldr(r9, MemOperand(r9, 0));
+ __ cmp(r9, Operand(0));
+ __ b(ne, &profiler_enabled);
+ {
+ // Call the api function directly.
+ __ Move(r3, function_address);
+ __ b(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ Move(r3, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index f81a1955ee..bcee8f0b5d 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -1201,10 +1201,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset));
+ BytecodeArray::kOsrNestingLevelOffset));
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
@@ -1683,18 +1683,20 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
- // Load builtin object.
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
UseScratchRegisterScope temps(masm);
Register builtin = temps.AcquireX();
- __ Ldr(builtin,
- MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
+ __ Ldr(
+ builtin,
+ MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinIndexOffset));
// Restore fp, lr.
__ Mov(sp, fp);
__ Pop(fp, lr);
- // Call builtin.
- __ JumpCodeObject(builtin);
+ __ LoadEntryFromBuiltinIndex(builtin);
+ __ Jump(builtin);
}
} // namespace
@@ -3400,16 +3402,23 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(function_address.is(x1) || function_address.is(x2));
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ Mov(x10, ExternalReference::is_profiling_address(isolate));
__ Ldrb(w10, MemOperand(x10));
- __ Cbz(w10, &profiler_disabled);
- __ Mov(x3, thunk_ref);
- __ B(&end_profiler_check);
-
- __ Bind(&profiler_disabled);
- __ Mov(x3, function_address);
+ __ Cbnz(w10, &profiler_enabled);
+ __ Mov(x10, ExternalReference::address_of_runtime_stats_flag());
+ __ Ldrsw(w10, MemOperand(x10));
+ __ Cbnz(w10, &profiler_enabled);
+ {
+ // Call the api function directly.
+ __ Mov(x3, function_address);
+ __ B(&end_profiler_check);
+ }
+ __ Bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ Mov(x3, thunk_ref);
+ }
__ Bind(&end_profiler_check);
// Save the callee-save registers we are going to use.
diff --git a/deps/v8/src/builtins/array-copywithin.tq b/deps/v8/src/builtins/array-copywithin.tq
index bfc95a28bf..94d871e8f7 100644
--- a/deps/v8/src/builtins/array-copywithin.tq
+++ b/deps/v8/src/builtins/array-copywithin.tq
@@ -9,7 +9,7 @@ namespace array_copywithin {
// https://tc39.github.io/ecma262/#sec-array.prototype.copyWithin
transitioning javascript builtin ArrayPrototypeCopyWithin(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// 1. Let O be ? ToObject(this value).
const object: JSReceiver = ToObject_Inline(context, receiver);
diff --git a/deps/v8/src/builtins/array-every.tq b/deps/v8/src/builtins/array-every.tq
index 245b07556c..3451cd769b 100644
--- a/deps/v8/src/builtins/array-every.tq
+++ b/deps/v8/src/builtins/array-every.tq
@@ -4,8 +4,9 @@
namespace array {
transitioning javascript builtin
- ArrayEveryLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArrayEveryLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -25,9 +26,10 @@ namespace array {
}
transitioning javascript builtin
- ArrayEveryLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArrayEveryLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ result: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -51,9 +53,9 @@ namespace array {
}
transitioning builtin ArrayEveryLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
- array: Object, o: JSReceiver, initialK: Number, length: Number,
- initialTo: Object): Object {
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _array: Object, o: JSReceiver, initialK: Number, length: Number,
+ _initialTo: Object): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: Number = initialK; k < length; k++) {
@@ -88,7 +90,7 @@ namespace array {
labels Bailout(Smi) {
let k: Smi = 0;
const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- let fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ const fastO: FastJSArray = Cast<FastJSArray>(o) otherwise goto Bailout(k);
let fastOW = NewFastJSArrayWitness(fastO);
// Build a fast loop over the smi array.
@@ -109,12 +111,10 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.every
transitioning javascript builtin
- ArrayEvery(implicit context: Context)(receiver: Object, ...arguments):
+ ArrayEvery(js-implicit context: Context, receiver: Object)(...arguments):
Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.every');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -144,8 +144,5 @@ namespace array {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.every');
- }
}
}
diff --git a/deps/v8/src/builtins/array-filter.tq b/deps/v8/src/builtins/array-filter.tq
index 4bf175a787..9acd0d04ee 100644
--- a/deps/v8/src/builtins/array-filter.tq
+++ b/deps/v8/src/builtins/array-filter.tq
@@ -4,9 +4,10 @@
namespace array_filter {
transitioning javascript builtin
- ArrayFilterLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, array: Object,
- initialK: Object, length: Object, initialTo: Object): Object {
+ ArrayFilterLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, array: Object, initialK: Object,
+ length: Object, initialTo: Object): Object {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -27,9 +28,10 @@ namespace array_filter {
}
transitioning javascript builtin
- ArrayFilterLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, array: Object,
- initialK: Object, length: Object, valueK: Object, initialTo: Object,
+ ArrayFilterLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, array: Object, initialK: Object,
+ length: Object, valueK: Object, initialTo: Object,
result: Object): Object {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -42,9 +44,9 @@ namespace array_filter {
const numberLength = Cast<Number>(length) otherwise unreachable;
// This custom lazy deopt point is right after the callback. filter() needs
- // to pick up at the next step, which is setting the callback result in
- // the output array. After incrementing k and to, we can glide into the loop
- // continuation builtin.
+ // to pick up at the next step, which is setting the callback
+ // result in the output array. After incrementing k and to, we can glide
+ // into the loop continuation builtin.
if (ToBoolean(result)) {
FastCreateDataProperty(outputArray, numberTo, valueK);
numberTo = numberTo + 1;
@@ -58,7 +60,7 @@ namespace array_filter {
}
transitioning builtin ArrayFilterLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
array: JSReceiver, o: JSReceiver, initialK: Number, length: Number,
initialTo: Number): Object {
let to: Number = initialTo;
@@ -145,12 +147,10 @@ namespace array_filter {
// https://tc39.github.io/ecma262/#sec-array.prototype.filter
transitioning javascript builtin
- ArrayFilter(implicit context: Context)(receiver: Object, ...arguments):
+ ArrayFilter(js-implicit context: Context, receiver: Object)(...arguments):
Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.filter');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -199,8 +199,5 @@ namespace array_filter {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.filter');
- }
}
}
diff --git a/deps/v8/src/builtins/array-find.tq b/deps/v8/src/builtins/array-find.tq
index 28223e4c49..ef54dd4666 100644
--- a/deps/v8/src/builtins/array-find.tq
+++ b/deps/v8/src/builtins/array-find.tq
@@ -4,8 +4,9 @@
namespace array_find {
transitioning javascript builtin
- ArrayFindLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArrayFindLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized find implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -24,9 +25,10 @@ namespace array_find {
}
transitioning javascript builtin
- ArrayFindLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArrayFindLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ _callback: Object, _thisArg: Object, _initialK: Object, _length: Object,
+ _result: Object): Object {
// This deopt continuation point is never actually called, it just
// exists to make stack traces correct from a ThrowTypeError if the
// callback was found to be non-callable.
@@ -37,15 +39,16 @@ namespace array_find {
// happens right after the callback and it's returned value must be handled
// before iteration continues.
transitioning javascript builtin
- ArrayFindLoopAfterCallbackLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, foundValue: Object, isFound: Object): Object {
+ ArrayFindLoopAfterCallbackLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ foundValue: Object, isFound: Object): Object {
// All continuation points in the optimized find implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
const numberLength = Cast<Number>(length) otherwise unreachable;
// This custom lazy deopt point is right after the callback. find() needs
@@ -62,7 +65,7 @@ namespace array_find {
}
transitioning builtin ArrayFindLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
o: JSReceiver, initialK: Number, length: Number): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
@@ -116,12 +119,10 @@ namespace array_find {
// https://tc39.github.io/ecma262/#sec-array.prototype.find
transitioning javascript builtin
- ArrayPrototypeFind(implicit context: Context)(receiver: Object, ...arguments):
- Object {
+ ArrayPrototypeFind(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.find');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -151,8 +152,5 @@ namespace array_find {
label NotCallableError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.find');
- }
}
}
diff --git a/deps/v8/src/builtins/array-findindex.tq b/deps/v8/src/builtins/array-findindex.tq
index 00d8378dfa..5a8bb85fba 100644
--- a/deps/v8/src/builtins/array-findindex.tq
+++ b/deps/v8/src/builtins/array-findindex.tq
@@ -4,8 +4,9 @@
namespace array_findindex {
transitioning javascript builtin
- ArrayFindIndexLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArrayFindIndexLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized findIndex implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -24,9 +25,10 @@ namespace array_findindex {
}
transitioning javascript builtin
- ArrayFindIndexLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArrayFindIndexLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ _callback: Object, _thisArg: Object, _initialK: Object, _length: Object,
+ _result: Object): Object {
// This deopt continuation point is never actually called, it just
// exists to make stack traces correct from a ThrowTypeError if the
// callback was found to be non-callable.
@@ -37,16 +39,16 @@ namespace array_findindex {
// happens right after the callback and it's returned value must be handled
// before iteration continues.
transitioning javascript builtin
- ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(implicit context:
- Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, foundValue: Object, isFound: Object): Object {
+ ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ foundValue: Object, isFound: Object): Object {
// All continuation points in the optimized findIndex implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
const numberLength = Cast<Number>(length) otherwise unreachable;
// This custom lazy deopt point is right after the callback. find() needs
@@ -64,7 +66,7 @@ namespace array_findindex {
transitioning builtin ArrayFindIndexLoopContinuation(implicit context:
Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
o: JSReceiver, initialK: Number, length: Number): Number {
// 5. Let k be 0.
// 6. Repeat, while k < len
@@ -118,12 +120,10 @@ namespace array_findindex {
// https://tc39.github.io/ecma262/#sec-array.prototype.findIndex
transitioning javascript builtin
- ArrayPrototypeFindIndex(implicit context:
- Context)(receiver: Object, ...arguments): Object {
+ ArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.findIndex');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -154,8 +154,5 @@ namespace array_findindex {
label NotCallableError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.findIndex');
- }
}
}
diff --git a/deps/v8/src/builtins/array-foreach.tq b/deps/v8/src/builtins/array-foreach.tq
index d362e95950..f52d944291 100644
--- a/deps/v8/src/builtins/array-foreach.tq
+++ b/deps/v8/src/builtins/array-foreach.tq
@@ -4,8 +4,9 @@
namespace array_foreach {
transitioning javascript builtin
- ArrayForEachLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArrayForEachLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized forEach implemntation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -21,9 +22,10 @@ namespace array_foreach {
}
transitioning javascript builtin
- ArrayForEachLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArrayForEachLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ _result: Object): Object {
// All continuation points in the optimized forEach implemntation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -38,9 +40,9 @@ namespace array_foreach {
}
transitioning builtin ArrayForEachLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
- array: Object, o: JSReceiver, initialK: Number, len: Number,
- to: Object): Object {
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _array: Object, o: JSReceiver, initialK: Number, len: Number,
+ _to: Object): Object {
// variables {array} and {to} are ignored.
// 5. Let k be 0.
@@ -72,7 +74,7 @@ namespace array_foreach {
labels Bailout(Smi) {
let k: Smi = 0;
const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
let fastOW = NewFastJSArrayWitness(fastO);
// Build a fast loop over the smi array.
@@ -90,11 +92,10 @@ namespace array_foreach {
// https://tc39.github.io/ecma262/#sec-array.prototype.foreach
transitioning javascript builtin
- ArrayForEach(context: Context, receiver: Object, ...arguments): Object {
+ ArrayForEach(js-implicit context: Context, receiver: Object)(...arguments):
+ Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.forEach');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -127,8 +128,5 @@ namespace array_foreach {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.forEach');
- }
}
}
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index 72e1a3661e..c04233b222 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -37,7 +37,7 @@ namespace array_join {
const array: JSArray = UnsafeCast<JSArray>(receiver);
const fixedArray: FixedArray = UnsafeCast<FixedArray>(array.elements);
const element: Object = fixedArray.objects[UnsafeCast<Smi>(k)];
- return element == Hole ? kEmptyString : element;
+ return element == TheHole ? kEmptyString : element;
}
LoadJoinElement<array::FastDoubleElements>(
@@ -56,7 +56,7 @@ namespace array_join {
assert(!IsDetachedBuffer(typedArray.buffer));
return typed_array::LoadFixedTypedArrayElementAsTagged(
typedArray.data_ptr, UnsafeCast<Smi>(k),
- typed_array::KindForArrayType<T>(), SMI_PARAMETERS);
+ typed_array::KindForArrayType<T>());
}
transitioning builtin ConvertToLocaleString(
@@ -103,8 +103,8 @@ namespace array_join {
}
CannotUseSameArrayAccessor<JSTypedArray>(implicit context: Context)(
- loadFn: LoadJoinElementFn, receiver: JSReceiver, initialMap: Map,
- initialLen: Number): never
+ _loadFn: LoadJoinElementFn, receiver: JSReceiver, _initialMap: Map,
+ _initialLen: Number): never
labels Cannot, Can {
const typedArray: JSTypedArray = UnsafeCast<JSTypedArray>(receiver);
if (IsDetachedBuffer(typedArray.buffer)) goto Cannot;
@@ -246,7 +246,7 @@ namespace array_join {
case (nofSeparators: Number): {
return StringRepeat(context, sep, nofSeparators);
}
- case (obj: Object): {
+ case (Object): {
unreachable;
}
}
@@ -448,7 +448,7 @@ namespace array_join {
const previouslyVisited: Object = stack.objects[i];
// Add `receiver` to the first open slot
- if (previouslyVisited == Hole) {
+ if (previouslyVisited == TheHole) {
stack.objects[i] = receiver;
return True;
}
@@ -473,7 +473,7 @@ namespace array_join {
try {
const stack: FixedArray = LoadJoinStack()
otherwise IfUninitialized;
- if (stack.objects[0] == Hole) {
+ if (stack.objects[0] == TheHole) {
stack.objects[0] = receiver;
} else if (JoinStackPush(stack, receiver) == False)
deferred {
@@ -504,7 +504,7 @@ namespace array_join {
SetJoinStack(newStack);
}
else {
- stack.objects[i] = Hole;
+ stack.objects[i] = TheHole;
}
return Undefined;
}
@@ -521,7 +521,7 @@ namespace array_join {
// Builtin call was not nested (receiver is the first entry) and
// did not contain other nested arrays that expanded the stack.
if (stack.objects[0] == receiver && len == kMinJoinStackSize) {
- StoreFixedArrayElement(stack, 0, Hole, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(stack, 0, TheHole, SKIP_WRITE_BARRIER);
} else
deferred {
JoinStackPop(stack, receiver);
@@ -535,7 +535,7 @@ namespace array_join {
sepObj: Object, locales: Object, options: Object): Object {
// 3. If separator is undefined, let sep be the single-element String ",".
// 4. Else, let sep be ? ToString(separator).
- let sep: String =
+ const sep: String =
sepObj == Undefined ? ',' : ToString_Inline(context, sepObj);
// If the receiver is not empty and not already being joined, continue with
@@ -557,7 +557,8 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-array.prototype.join
transitioning javascript builtin
- ArrayPrototypeJoin(context: Context, receiver: Object, ...arguments): Object {
+ ArrayPrototypeJoin(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
const separator: Object = arguments[0];
// 1. Let O be ? ToObject(this value).
@@ -566,8 +567,8 @@ namespace array_join {
// 2. Let len be ? ToLength(? Get(O, "length")).
const len: Number = GetLengthProperty(o);
- // Only handle valid array lengths. Although the spec allows larger values,
- // this matches historical V8 behavior.
+ // Only handle valid array lengths. Although the spec allows larger
+ // values, this matches historical V8 behavior.
if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength);
return CycleProtectedArrayJoin<JSArray>(
@@ -576,7 +577,7 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-array.prototype.tolocalestring
transitioning javascript builtin ArrayPrototypeToLocaleString(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
const locales: Object = arguments[0];
const options: Object = arguments[1];
@@ -586,8 +587,8 @@ namespace array_join {
// 2. Let len be ? ToLength(? Get(O, "length")).
const len: Number = GetLengthProperty(o);
- // Only handle valid array lengths. Although the spec allows larger values,
- // this matches historical V8 behavior.
+ // Only handle valid array lengths. Although the spec allows larger
+ // values, this matches historical V8 behavior.
if (len > kMaxArrayIndex + 1) ThrowTypeError(kInvalidArrayLength);
return CycleProtectedArrayJoin<JSArray>(
@@ -596,7 +597,7 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-array.prototype.tostring
transitioning javascript builtin ArrayPrototypeToString(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// 1. Let array be ? ToObject(this value).
const array: JSReceiver = ToObject_Inline(context, receiver);
@@ -617,7 +618,7 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.join
transitioning javascript builtin TypedArrayPrototypeJoin(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
const separator: Object = arguments[0];
// Spec: ValidateTypedArray is applied to the this value prior to evaluating
@@ -632,7 +633,7 @@ namespace array_join {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.tolocalestring
transitioning javascript builtin TypedArrayPrototypeToLocaleString(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
const locales: Object = arguments[0];
const options: Object = arguments[1];
diff --git a/deps/v8/src/builtins/array-lastindexof.tq b/deps/v8/src/builtins/array-lastindexof.tq
index d6213157dc..5ebc451e43 100644
--- a/deps/v8/src/builtins/array-lastindexof.tq
+++ b/deps/v8/src/builtins/array-lastindexof.tq
@@ -12,7 +12,7 @@ namespace array_lastindexof {
labels IfHole {
const elements: FixedArray = UnsafeCast<FixedArray>(elements);
const element: Object = elements.objects[index];
- if (element == Hole) goto IfHole;
+ if (element == TheHole) goto IfHole;
return element;
}
@@ -131,7 +131,7 @@ namespace array_lastindexof {
// https://tc39.github.io/ecma262/#sec-array.prototype.lastIndexOf
transitioning javascript builtin ArrayPrototypeLastIndexOf(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// 1. Let O be ? ToObject(this value).
const object: JSReceiver = ToObject_Inline(context, receiver);
diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq
index 7546f1cd00..dda569c682 100644
--- a/deps/v8/src/builtins/array-map.tq
+++ b/deps/v8/src/builtins/array-map.tq
@@ -4,9 +4,10 @@
namespace array_map {
transitioning javascript builtin
- ArrayMapLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, array: Object,
- initialK: Object, length: Object): Object {
+ ArrayMapLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, array: Object, initialK: Object,
+ length: Object): Object {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -26,9 +27,10 @@ namespace array_map {
}
transitioning javascript builtin
- ArrayMapLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, array: Object,
- initialK: Object, length: Object, result: Object): Object {
+ ArrayMapLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, array: Object, initialK: Object,
+ length: Object, result: Object): Object {
// All continuation points in the optimized filter implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -55,7 +57,7 @@ namespace array_map {
}
transitioning builtin ArrayMapLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
array: JSReceiver, o: JSReceiver, initialK: Number,
length: Number): Object {
// 6. Let k be 0.
@@ -94,7 +96,7 @@ namespace array_map {
}
CreateJSArray(implicit context: Context)(validLength: Smi): JSArray {
- let length: Smi = this.fixedArray.length;
+ const length: Smi = this.fixedArray.length;
assert(validLength <= length);
let kind: ElementsKind = PACKED_SMI_ELEMENTS;
if (!this.onlySmis) {
@@ -114,7 +116,7 @@ namespace array_map {
kind = FastHoleyElementsKind(kind);
}
- let map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+ const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
let a: JSArray;
if (IsDoubleElementsKind(kind)) {
@@ -130,7 +132,7 @@ namespace array_map {
elements.floats[i] = Convert<float64>(n);
}
case (h: HeapObject): {
- assert(h == Hole);
+ assert(h == TheHole);
}
}
}
@@ -182,11 +184,11 @@ namespace array_map {
}
transitioning macro FastArrayMap(implicit context: Context)(
- fastO: FastJSArray, len: Smi, callbackfn: Callable,
+ fastO: FastJSArrayForRead, len: Smi, callbackfn: Callable,
thisArg: Object): JSArray
labels Bailout(JSArray, Smi) {
let k: Smi = 0;
- let fastOW = NewFastJSArrayWitness(fastO);
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
let vector = NewVector(len);
// Build a fast loop over the smi array.
@@ -220,24 +222,12 @@ namespace array_map {
return vector.CreateJSArray(len);
}
- // Bails out if the slow path needs to be taken.
- // It's useful to structure it this way, because the consequences of
- // using the slow path on species creation are interesting to the caller.
- macro FastMapSpeciesCreate(implicit context: Context)(
- receiver: JSReceiver, length: Number): JSArray labels Bailout {
- if (IsArraySpeciesProtectorCellInvalid()) goto Bailout;
- const o = Cast<FastJSArray>(receiver) otherwise Bailout;
- const smiLength = Cast<Smi>(length) otherwise Bailout;
- const newMap: Map =
- LoadJSArrayElementsMap(PACKED_SMI_ELEMENTS, LoadNativeContext(context));
- return AllocateJSArray(PACKED_SMI_ELEMENTS, newMap, smiLength, smiLength);
- }
-
// https://tc39.github.io/ecma262/#sec-array.prototype.map
transitioning javascript builtin
- ArrayMap(implicit context: Context)(receiver: Object, ...arguments): Object {
+ ArrayMap(js-implicit context: Context, receiver: Object)(...arguments):
+ Object {
try {
- if (IsNullOrUndefined(receiver)) goto NullOrUndefinedError;
+ RequireObjectCoercible(receiver, 'Array.prototype.map');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -258,7 +248,7 @@ namespace array_map {
try {
// 5. Let A be ? ArraySpeciesCreate(O, len).
if (IsArraySpeciesProtectorCellInvalid()) goto SlowSpeciesCreate;
- const o: FastJSArray = Cast<FastJSArray>(receiver)
+ const o: FastJSArrayForRead = Cast<FastJSArrayForRead>(receiver)
otherwise SlowSpeciesCreate;
const smiLength: Smi = Cast<Smi>(len)
otherwise SlowSpeciesCreate;
@@ -279,8 +269,5 @@ namespace array_map {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.map');
- }
}
}
diff --git a/deps/v8/src/builtins/array-of.tq b/deps/v8/src/builtins/array-of.tq
index 76123207fd..7293318625 100644
--- a/deps/v8/src/builtins/array-of.tq
+++ b/deps/v8/src/builtins/array-of.tq
@@ -5,7 +5,8 @@
namespace array_of {
// https://tc39.github.io/ecma262/#sec-array.of
transitioning javascript builtin
- ArrayOf(implicit context: Context)(receiver: Object, ...arguments): Object {
+ ArrayOf(js-implicit context: Context, receiver: Object)(...arguments):
+ Object {
// 1. Let len be the actual number of arguments passed to this function.
const len: Smi = Convert<Smi>(arguments.length);
@@ -35,7 +36,7 @@ namespace array_of {
// 7. Repeat, while k < len
while (k < len) {
// a. Let kValue be items[k].
- let kValue: Object = items[Convert<intptr>(k)];
+ const kValue: Object = items[Convert<intptr>(k)];
// b. Let Pk be ! ToString(k).
// c. Perform ? CreateDataPropertyOrThrow(A, Pk, kValue).
diff --git a/deps/v8/src/builtins/array-reduce-right.tq b/deps/v8/src/builtins/array-reduce-right.tq
index 33661c38d1..b1aa71b85b 100644
--- a/deps/v8/src/builtins/array-reduce-right.tq
+++ b/deps/v8/src/builtins/array-reduce-right.tq
@@ -4,8 +4,9 @@
namespace array {
transitioning javascript builtin
- ArrayReduceRightPreLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, length: Object): Object {
+ ArrayReduceRightPreLoopEagerDeoptContinuation(
+ js-implicit context: Context,
+ receiver: Object)(callback: Object, length: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -21,12 +22,13 @@ namespace array {
// the hole. The continuation stub will search for the initial non-hole
// element, rightly throwing an exception if not found.
return ArrayReduceRightLoopContinuation(
- jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength);
+ jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength);
}
transitioning javascript builtin
- ArrayReduceRightLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, initialK: Object, length: Object,
+ ArrayReduceRightLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, initialK: Object, length: Object,
accumulator: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -45,27 +47,28 @@ namespace array {
}
transitioning javascript builtin
- ArrayReduceRightLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, initialK: Object, length: Object,
+ ArrayReduceRightLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, initialK: Object, length: Object,
result: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
const numberLength = Cast<Number>(length) otherwise unreachable;
// The accumulator is the result from the callback call which just occured.
- let r = ArrayReduceRightLoopContinuation(
+ const r = ArrayReduceRightLoopContinuation(
jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
return r;
}
transitioning builtin ArrayReduceRightLoopContinuation(implicit context:
Context)(
- receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
- o: JSReceiver, initialK: Number, length: Number): Object {
+ _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
+ o: JSReceiver, initialK: Number, _length: Number): Object {
let accumulator = initialAccumulator;
// 8b and 9. Repeat, while k >= 0
@@ -82,7 +85,7 @@ namespace array {
// 8b iii and 9c i. Let kValue be ? Get(O, Pk).
const value: Object = GetProperty(o, k);
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
// 8b iii 1.
accumulator = value;
} else {
@@ -99,7 +102,7 @@ namespace array {
// 8c. if kPresent is false, throw a TypeError exception.
// If the accumulator is discovered with the sentinel hole value,
// this means kPresent is false.
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
}
return accumulator;
@@ -111,9 +114,9 @@ namespace array {
labels Bailout(Number, Object) {
let accumulator = initialAccumulator;
const smiLen = Cast<Smi>(len) otherwise goto Bailout(len - 1, accumulator);
- let fastO =
- Cast<FastJSArray>(o) otherwise goto Bailout(len - 1, accumulator);
- let fastOW = NewFastJSArrayWitness(fastO);
+ const fastO = Cast<FastJSArrayForRead>(o)
+ otherwise goto Bailout(len - 1, accumulator);
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
// Build a fast loop over the array.
for (let k: Smi = smiLen - 1; k >= 0; k--) {
@@ -123,7 +126,7 @@ namespace array {
if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
accumulator = value;
} else {
accumulator = Call(
@@ -131,7 +134,7 @@ namespace array {
fastOW.Get());
}
}
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduceRight');
}
return accumulator;
@@ -139,12 +142,10 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.reduceRight
transitioning javascript builtin
- ArrayReduceRight(implicit context: Context)(receiver: Object, ...arguments):
- Object {
+ ArrayReduceRight(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.reduceRight');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -162,7 +163,8 @@ namespace array {
// exception. (This case is handled at the end of
// ArrayReduceRightLoopContinuation).
- const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole;
+ const initialValue: Object =
+ arguments.length > 1 ? arguments[1] : TheHole;
try {
return FastArrayReduceRight(o, len, callbackfn, initialValue)
@@ -176,8 +178,5 @@ namespace array {
label NoCallableError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduceRight');
- }
}
}
diff --git a/deps/v8/src/builtins/array-reduce.tq b/deps/v8/src/builtins/array-reduce.tq
index 67a112fd41..a5f6feb9cc 100644
--- a/deps/v8/src/builtins/array-reduce.tq
+++ b/deps/v8/src/builtins/array-reduce.tq
@@ -4,8 +4,9 @@
namespace array {
transitioning javascript builtin
- ArrayReducePreLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, length: Object): Object {
+ ArrayReducePreLoopEagerDeoptContinuation(
+ js-implicit context: Context,
+ receiver: Object)(callback: Object, length: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -21,12 +22,13 @@ namespace array {
// the hole. The continuation stub will search for the initial non-hole
// element, rightly throwing an exception if not found.
return ArrayReduceLoopContinuation(
- jsreceiver, callbackfn, Hole, jsreceiver, 0, numberLength);
+ jsreceiver, callbackfn, TheHole, jsreceiver, 0, numberLength);
}
transitioning javascript builtin
- ArrayReduceLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, initialK: Object, length: Object,
+ ArrayReduceLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, initialK: Object, length: Object,
accumulator: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -45,25 +47,26 @@ namespace array {
}
transitioning javascript builtin
- ArrayReduceLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, initialK: Object, length: Object,
+ ArrayReduceLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, initialK: Object, length: Object,
result: Object): Object {
// All continuation points in the optimized every implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
const jsreceiver = Cast<JSReceiver>(receiver) otherwise unreachable;
const callbackfn = Cast<Callable>(callback) otherwise unreachable;
- let numberK = Cast<Number>(initialK) otherwise unreachable;
+ const numberK = Cast<Number>(initialK) otherwise unreachable;
const numberLength = Cast<Number>(length) otherwise unreachable;
// The accumulator is the result from the callback call which just occured.
- let r = ArrayReduceLoopContinuation(
+ const r = ArrayReduceLoopContinuation(
jsreceiver, callbackfn, result, jsreceiver, numberK, numberLength);
return r;
}
transitioning builtin ArrayReduceLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
+ _receiver: JSReceiver, callbackfn: Callable, initialAccumulator: Object,
o: JSReceiver, initialK: Number, length: Number): Object {
let accumulator = initialAccumulator;
@@ -81,7 +84,7 @@ namespace array {
// 6c. i. Let kValue be ? Get(O, Pk).
const value: Object = GetProperty(o, k);
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
// 8b.
accumulator = value;
} else {
@@ -98,7 +101,7 @@ namespace array {
// 8c. if kPresent is false, throw a TypeError exception.
// If the accumulator is discovered with the sentinel hole value,
// this means kPresent is false.
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
}
return accumulator;
@@ -110,9 +113,10 @@ namespace array {
labels Bailout(Number, Object) {
const k = 0;
let accumulator = initialAccumulator;
- const smiLen = Cast<Smi>(len) otherwise goto Bailout(k, accumulator);
- let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k, accumulator);
- let fastOW = NewFastJSArrayWitness(fastO);
+ Cast<Smi>(len) otherwise goto Bailout(k, accumulator);
+ const fastO =
+ Cast<FastJSArrayForRead>(o) otherwise goto Bailout(k, accumulator);
+ let fastOW = NewFastJSArrayForReadWitness(fastO);
// Build a fast loop over the array.
for (let k: Smi = 0; k < len; k++) {
@@ -122,7 +126,7 @@ namespace array {
if (k >= fastOW.Get().length) goto Bailout(k, accumulator);
const value: Object = fastOW.LoadElementNoHole(k) otherwise continue;
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
accumulator = value;
} else {
accumulator = Call(
@@ -130,7 +134,7 @@ namespace array {
fastOW.Get());
}
}
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, 'Array.prototype.reduce');
}
return accumulator;
@@ -138,12 +142,10 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.reduce
transitioning javascript builtin
- ArrayReduce(implicit context: Context)(receiver: Object, ...arguments):
+ ArrayReduce(js-implicit context: Context, receiver: Object)(...arguments):
Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.reduce');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -161,7 +163,8 @@ namespace array {
// exception. (This case is handled at the end of
// ArrayReduceLoopContinuation).
- const initialValue: Object = arguments.length > 1 ? arguments[1] : Hole;
+ const initialValue: Object =
+ arguments.length > 1 ? arguments[1] : TheHole;
try {
return FastArrayReduce(o, len, callbackfn, initialValue)
@@ -175,8 +178,5 @@ namespace array {
label NoCallableError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.reduce');
- }
}
}
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index f1ba8fddf7..82d2e6b605 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -165,7 +165,7 @@ namespace array_reverse {
// https://tc39.github.io/ecma262/#sec-array.prototype.reverse
transitioning javascript builtin ArrayPrototypeReverse(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
try {
TryFastPackedArrayReverse(receiver) otherwise Baseline;
return receiver;
diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq
index 3c8c1491bb..4dd82d7b88 100644
--- a/deps/v8/src/builtins/array-shift.tq
+++ b/deps/v8/src/builtins/array-shift.tq
@@ -103,7 +103,7 @@ namespace array_shift {
// https://tc39.github.io/ecma262/#sec-array.prototype.shift
transitioning javascript builtin ArrayPrototypeShift(
- implicit context: Context)(receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
try {
return TryFastArrayShift(receiver, arguments) otherwise Slow;
}
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
index 5162329408..c3a6ac75cb 100644
--- a/deps/v8/src/builtins/array-slice.tq
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -63,7 +63,7 @@ namespace array_slice {
for (let current: Smi = start; current < to; ++current) {
const e: Object =
sloppyElements.objects[current + kSloppyArgumentsParameterMapStart];
- const newElement: Object = e != Hole ?
+ const newElement: Object = e != TheHole ?
argumentsContext[UnsafeCast<Smi>(e)] :
unmappedElements.objects[current];
// It is safe to skip the write barrier here because resultElements was
@@ -105,7 +105,6 @@ namespace array_slice {
return ExtractFastJSArray(context, a, start, count);
}
case (a: JSArgumentsObjectWithLength): {
- const nativeContext: NativeContext = LoadNativeContext(context);
const map: Map = a.map;
if (IsFastAliasedArgumentsMap(map)) {
return HandleFastAliasedSloppyArgumentsSlice(context, a, start, count)
@@ -123,8 +122,8 @@ namespace array_slice {
// https://tc39.github.io/ecma262/#sec-array.prototype.slice
transitioning javascript builtin
- ArrayPrototypeSlice(context: Context, receiver: Object, ...arguments):
- Object {
+ ArrayPrototypeSlice(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// Handle array cloning case if the receiver is a fast array.
if (arguments.length == 0) {
typeswitch (receiver) {
@@ -186,7 +185,7 @@ namespace array_slice {
// 10. Repeat, while k < final
while (k < final) {
// a. Let Pk be ! ToString(k).
- let pK: Number = k;
+ const pK: Number = k;
// b. Let kPresent be ? HasProperty(O, Pk).
const fromPresent: Boolean = HasProperty(o, pK);
diff --git a/deps/v8/src/builtins/array-some.tq b/deps/v8/src/builtins/array-some.tq
index f68ea4ac30..a30af4e47a 100644
--- a/deps/v8/src/builtins/array-some.tq
+++ b/deps/v8/src/builtins/array-some.tq
@@ -4,8 +4,9 @@
namespace array {
transitioning javascript builtin
- ArraySomeLoopEagerDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
+ ArraySomeLoopEagerDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object,
length: Object): Object {
// All continuation points in the optimized some implementation are
// after the ToObject(O) call that ensures we are dealing with a
@@ -25,9 +26,10 @@ namespace array {
}
transitioning javascript builtin
- ArraySomeLoopLazyDeoptContinuation(implicit context: Context)(
- receiver: Object, callback: Object, thisArg: Object, initialK: Object,
- length: Object, result: Object): Object {
+ ArraySomeLoopLazyDeoptContinuation(
+ js-implicit context: Context, receiver: Object)(
+ callback: Object, thisArg: Object, initialK: Object, length: Object,
+ result: Object): Object {
// All continuation points in the optimized some implementation are
// after the ToObject(O) call that ensures we are dealing with a
// JSReceiver.
@@ -51,9 +53,9 @@ namespace array {
}
transitioning builtin ArraySomeLoopContinuation(implicit context: Context)(
- receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
- array: Object, o: JSReceiver, initialK: Number, length: Number,
- initialTo: Object): Object {
+ _receiver: JSReceiver, callbackfn: Callable, thisArg: Object,
+ _array: Object, o: JSReceiver, initialK: Number, length: Number,
+ _initialTo: Object): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: Number = initialK; k < length; k++) {
@@ -88,7 +90,7 @@ namespace array {
labels Bailout(Smi) {
let k: Smi = 0;
const smiLen = Cast<Smi>(len) otherwise goto Bailout(k);
- let fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
+ const fastO = Cast<FastJSArray>(o) otherwise goto Bailout(k);
let fastOW = NewFastJSArrayWitness(fastO);
// Build a fast loop over the smi array.
@@ -109,11 +111,10 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.some
transitioning javascript builtin
- ArraySome(implicit context: Context)(receiver: Object, ...arguments): Object {
+ ArraySome(js-implicit context: Context, receiver: Object)(...arguments):
+ Object {
try {
- if (IsNullOrUndefined(receiver)) {
- goto NullOrUndefinedError;
- }
+ RequireObjectCoercible(receiver, 'Array.prototype.some');
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject_Inline(context, receiver);
@@ -143,8 +144,5 @@ namespace array {
label TypeError deferred {
ThrowTypeError(kCalledNonCallable, arguments[0]);
}
- label NullOrUndefinedError deferred {
- ThrowTypeError(kCalledOnNullOrUndefined, 'Array.prototype.some');
- }
}
}
diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq
index e24b51760c..3b65bb03d4 100644
--- a/deps/v8/src/builtins/array-splice.tq
+++ b/deps/v8/src/builtins/array-splice.tq
@@ -54,8 +54,7 @@ namespace array_splice {
macro FastSplice<FixedArrayType: type, ElementType: type>(implicit context:
Context)(
args: Arguments, a: JSArray, length: Smi, newLength: Smi,
- lengthDelta: Smi, actualStart: Smi, insertCount: Smi,
- actualDeleteCount: Smi): void labels Bailout {
+ actualStart: Smi, insertCount: Smi, actualDeleteCount: Smi): void {
// Make sure elements are writable.
array::EnsureWriteableFastElements(a);
@@ -77,7 +76,7 @@ namespace array_splice {
UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
} else {
// Grow.
- let capacity: Smi = CalculateNewElementsCapacity(newLength);
+ const capacity: Smi = CalculateNewElementsCapacity(newLength);
const newElements: FixedArrayType =
Extract<FixedArrayType>(elements, 0, actualStart, capacity);
a.elements = newElements;
@@ -168,12 +167,12 @@ namespace array_splice {
if (IsFastSmiOrTaggedElementsKind(elementsKind)) {
FastSplice<FixedArray, Object>(
- args, a, length, newLength, lengthDelta, actualStart, insertCount,
- actualDeleteCount) otherwise Bailout;
+ args, a, length, newLength, actualStart, insertCount,
+ actualDeleteCount);
} else {
FastSplice<FixedDoubleArray, Number>(
- args, a, length, newLength, lengthDelta, actualStart, insertCount,
- actualDeleteCount) otherwise Bailout;
+ args, a, length, newLength, actualStart, insertCount,
+ actualDeleteCount);
}
return deletedResult;
@@ -301,8 +300,6 @@ namespace array_splice {
context: Context, arguments: Arguments, o: JSReceiver, len: Number,
actualStart: Number, insertCount: Smi,
actualDeleteCount: Number): Object {
- const affected: Number = len - actualStart - actualDeleteCount;
-
// 9. Let A be ? ArraySpeciesCreate(O, actualDeleteCount).
const a: JSReceiver = ArraySpeciesCreate(context, o, actualDeleteCount);
const itemCount: Number = insertCount;
@@ -353,8 +350,8 @@ namespace array_splice {
// https://tc39.github.io/ecma262/#sec-array.prototype.splice
transitioning javascript builtin
- ArrayPrototypeSplice(context: Context, receiver: Object, ...arguments):
- Object {
+ ArrayPrototypeSplice(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// 1. Let O be ? ToObject(this value).
const o: JSReceiver = ToObject(context, receiver);
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
index b2e746db47..e685d520cd 100644
--- a/deps/v8/src/builtins/array-unshift.tq
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -93,7 +93,7 @@ namespace array_unshift {
// https://tc39.github.io/ecma262/#sec-array.prototype.unshift
transitioning javascript builtin ArrayPrototypeUnshift(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
try {
TryFastArrayUnshift(context, receiver, arguments) otherwise Baseline;
}
diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq
index 9807db19c6..7e044e086b 100644
--- a/deps/v8/src/builtins/array.tq
+++ b/deps/v8/src/builtins/array.tq
@@ -33,18 +33,19 @@ namespace array {
}
macro IsJSArray(implicit context: Context)(o: Object): bool {
- try {
- const array: JSArray = Cast<JSArray>(o) otherwise NotArray;
- return true;
- }
- label NotArray {
- return false;
+ typeswitch (o) {
+ case (JSArray): {
+ return true;
+ }
+ case (Object): {
+ return false;
+ }
}
}
macro LoadElementOrUndefined(a: FixedArray, i: Smi): Object {
const e: Object = a.objects[i];
- return e == Hole ? Undefined : e;
+ return e == TheHole ? Undefined : e;
}
macro LoadElementOrUndefined(a: FixedDoubleArray, i: Smi): NumberOrUndefined {
@@ -62,26 +63,7 @@ namespace array {
}
macro StoreArrayHole(elements: FixedArray, k: Smi): void {
- elements.objects[k] = Hole;
- }
-
- macro CopyArrayElement(
- elements: FixedArray, newElements: FixedArray, from: Smi, to: Smi): void {
- const e: Object = elements.objects[from];
- newElements.objects[to] = e;
- }
-
- macro CopyArrayElement(
- elements: FixedDoubleArray, newElements: FixedDoubleArray, from: Smi,
- to: Smi): void {
- try {
- const floatValue: float64 = LoadDoubleWithHoleCheck(elements, from)
- otherwise FoundHole;
- newElements.floats[to] = floatValue;
- }
- label FoundHole {
- StoreArrayHole(newElements, to);
- }
+ elements.objects[k] = TheHole;
}
extern macro SetPropertyLength(implicit context: Context)(Object, Number);
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 76e1a486c8..4aa1d57837 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -14,9 +14,11 @@
#include 'src/objects/js-generator.h'
#include 'src/objects/js-promise.h'
#include 'src/objects/js-regexp-string-iterator.h'
-#include 'src/objects/module.h'
+#include 'src/objects/js-weak-refs.h'
#include 'src/objects/objects.h'
+#include 'src/objects/source-text-module.h'
#include 'src/objects/stack-frame-info.h'
+#include 'src/objects/synthetic-module.h'
#include 'src/objects/template-objects.h'
type void;
@@ -31,12 +33,16 @@ type PositiveSmi extends Smi;
// The Smi value zero, which is often used as null for HeapObject types.
type Zero extends PositiveSmi;
+// A value with the size of Tagged which may contain arbitrary data.
+type Uninitialized extends Tagged;
+
@abstract
extern class HeapObject extends Tagged {
map: Map;
}
type Object = Smi | HeapObject;
+
type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int31 extends int32
@@ -84,32 +90,33 @@ extern class Oddball extends HeapObject {
extern class HeapNumber extends HeapObject { value: float64; }
type Number = Smi | HeapNumber;
-type BigInt extends HeapObject generates 'TNode<BigInt>';
type Numeric = Number | BigInt;
@abstract
-@noVerifier
+@generateCppClass
extern class Name extends HeapObject {
- hash_field: int32;
+ hash_field: uint32;
}
+@generateCppClass
extern class Symbol extends Name {
flags: int32;
- name: Object;
+ name: Object; // The print name of a symbol, or undefined if none.
}
@abstract
+@generateCppClass
extern class String extends Name {
- length: uint32;
+ length: int32;
}
+@generateCppClass
extern class ConsString extends String {
first: String;
second: String;
}
@abstract
-@noVerifier
extern class ExternalString extends String {
resource: RawPtr;
resource_data: RawPtr;
@@ -118,28 +125,37 @@ extern class ExternalString extends String {
extern class ExternalOneByteString extends ExternalString {}
extern class ExternalTwoByteString extends ExternalString {}
-extern class InternalizedString extends String {}
+@generateCppClass
+extern class InternalizedString extends String {
+}
// TODO(v8:8983): Add declaration for variable-sized region.
@abstract
-@noVerifier
+@generateCppClass
extern class SeqString extends String {
}
-extern class SeqOneByteString extends SeqString {}
-extern class SeqTwoByteString extends SeqString {}
+@generateCppClass
+extern class SeqOneByteString extends SeqString {
+}
+@generateCppClass
+extern class SeqTwoByteString extends SeqString {
+}
+@generateCppClass
extern class SlicedString extends String {
parent: String;
offset: Smi;
}
-extern class ThinString extends String { actual: String; }
+@generateCppClass
+extern class ThinString extends String {
+ actual: String;
+}
// The HeapNumber value NaN
type NaN extends HeapNumber;
@abstract
-@noVerifier
@generatePrint
@generateCppClass
extern class Struct extends HeapObject {
@@ -169,7 +185,6 @@ type DirectString extends String;
type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
@abstract
-@noVerifier
@generateCppClass
extern class FixedArrayBase extends HeapObject {
length: Smi;
@@ -190,9 +205,7 @@ type LayoutDescriptor extends ByteArray
type TransitionArray extends WeakFixedArray
generates 'TNode<TransitionArray>';
-// InstanceType actually extends uint16, but a bunch of methods in
-// CodeStubAssembler expect a TNode<Int32T>, so keeping it signed for now.
-type InstanceType extends int16 constexpr 'InstanceType';
+type InstanceType extends uint16 constexpr 'InstanceType';
extern class Map extends HeapObject {
instance_size_in_words: uint8;
@@ -214,19 +227,21 @@ extern class Map extends HeapObject {
@ifnot(V8_DOUBLE_FIELDS_UNBOXING) layout_descriptor: void;
dependent_code: DependentCode;
prototype_validity_cell: Smi | Cell;
+ // TODO(v8:9108): Misusing "weak" keyword; type should be
+ // Map | Weak<Map> | TransitionArray | PrototypeInfo | Smi.
weak transitions_or_prototype_info: Map | TransitionArray |
PrototypeInfo | Smi;
}
-type BytecodeArray extends FixedArrayBase;
-
@generatePrint
+@generateCppClass
extern class EnumCache extends Struct {
keys: FixedArray;
indices: FixedArray;
}
@generatePrint
+@generateCppClass
extern class SourcePositionTableWithFrameCache extends Struct {
source_position_table: ByteArray;
stack_frame_cache: Object;
@@ -250,8 +265,7 @@ extern class DescriptorArray extends HeapObject {
// than building the definition from C++.
intrinsic %GetAllocationBaseSize<Class: type>(map: Map): intptr;
intrinsic %Allocate<Class: type>(size: intptr): Class;
-intrinsic %AllocateInternalClass<Class: type>(slotCount: constexpr intptr):
- Class;
+intrinsic %GetStructMap(instanceKind: constexpr InstanceType): Map;
intrinsic %AddIndexedFieldSizeToObjectSize<T: type>(
baseSize: intptr, indexSize: T, fieldSize: int32): intptr {
@@ -282,24 +296,35 @@ intrinsic
}
@abstract
-@noVerifier
extern class JSReceiver extends HeapObject {
- properties_or_hash: FixedArrayBase | Smi;
+ properties_or_hash: FixedArrayBase | PropertyArray | Smi;
}
type Constructor extends JSReceiver;
@abstract
@dirtyInstantiatedAbstractClass
+@generateCppClass
extern class JSObject extends JSReceiver {
- @noVerifier elements: FixedArrayBase;
+ // [elements]: The elements (properties with names that are integers).
+ //
+ // Elements can be in two general modes: fast and slow. Each mode
+ // corresponds to a set of object representations of elements that
+ // have something in common.
+ //
+ // In the fast mode elements is a FixedArray and so each element can be
+ // quickly accessed. The elements array can have one of several maps in this
+ // mode: fixed_array_map, fixed_double_array_map,
+ // sloppy_arguments_elements_map or fixed_cow_array_map (for copy-on-write
+ // arrays). In the latter case the elements array may be shared by a few
+ // objects and so before writing to any element the array must be copied. Use
+ // EnsureWritableFastElements in this case.
+ //
+ // In the slow mode the elements is either a NumberDictionary or a
+ // FixedArray parameter map for a (sloppy) arguments object.
+ elements: FixedArrayBase;
}
-macro NewJSObject(
- map: Map, properties: FixedArrayBase | Smi,
- elements: FixedArrayBase): JSObject {
- return new JSObject{map, properties_or_hash: properties, elements};
-}
macro NewJSObject(implicit context: Context)(): JSObject {
const objectFunction: JSFunction = GetObjectFunction();
const map: Map = Cast<Map>(objectFunction.prototype_or_initial_map)
@@ -328,19 +353,33 @@ macro GetDerivedMap(implicit context: Context)(
}
}
+macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map):
+ JSObject {
+ let properties = kEmptyFixedArray;
+ if (IsDictionaryMap(map)) {
+ properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
+ }
+ return AllocateJSObjectFromMap(
+ map, properties, kEmptyFixedArray, kNone, kWithSlackTracking);
+}
+
extern class JSFunction extends JSObject {
shared_function_info: SharedFunctionInfo;
context: Context;
feedback_cell: FeedbackCell;
weak code: Code;
+
+ // Space for the following field may or may not be allocated.
@noVerifier weak prototype_or_initial_map: JSReceiver | Map;
}
+@generateCppClass
extern class JSProxy extends JSReceiver {
- target: Object;
- handler: Object;
+ target: JSReceiver | Null;
+ handler: JSReceiver | Null;
}
+// Just a starting shape for JSObject; properties can move after initialization.
@noVerifier
extern class JSProxyRevocableResult extends JSObject {
proxy: Object;
@@ -358,21 +397,39 @@ macro NewJSProxyRevocableResult(implicit context: Context)(
};
}
-extern class JSGlobalProxy extends JSObject { native_context: Object; }
+@generateCppClass
+extern class JSGlobalProxy extends JSObject {
+ // [native_context]: the owner native context of this global proxy object.
+ // It is null value if this object is not used by any context.
+ native_context: Object;
+}
-extern class JSValue extends JSObject { value: Object; }
+@generateCppClass
+extern class JSPrimitiveWrapper extends JSObject {
+ value: Object;
+}
extern class JSArgumentsObject extends JSObject {}
+
+// Just a starting shape for JSObject; properties can move after initialization.
@noVerifier
@hasSameInstanceTypeAsParent
extern class JSArgumentsObjectWithLength extends JSArgumentsObject {
length: Object;
}
+
+// Just a starting shape for JSObject; properties can move after initialization.
@hasSameInstanceTypeAsParent
extern class JSSloppyArgumentsObject extends JSArgumentsObjectWithLength {
callee: Object;
}
+// Just a starting shape for JSObject; properties can move after initialization.
+@hasSameInstanceTypeAsParent
+@noVerifier
+extern class JSStrictArgumentsObject extends JSArgumentsObjectWithLength {
+}
+
extern class JSArrayIterator extends JSObject {
iterated_object: JSReceiver;
next_index: Number;
@@ -405,20 +462,6 @@ macro NewJSArray(implicit context: Context)(): JSArray {
};
}
-struct HoleIterator {
- Next(): Object labels NoMore() {
- return Hole;
- }
-}
-
-macro NewJSArray(implicit context: Context)(map: Map, length: Smi): JSArray {
- const map = GetFastPackedSmiElementsJSArrayMap();
- const i = HoleIterator{};
- const elements = new FixedArray{map, length, objects: ...i};
- return new
- JSArray{map, properties_or_hash: kEmptyFixedArray, elements, length};
-}
-
// A HeapObject with a JSArray map, and either fast packed elements, or fast
// holey elements when the global NoElementsProtector is not invalidated.
transient type FastJSArray extends JSArray;
@@ -441,18 +484,61 @@ transient type FastJSArrayForReadWithNoCustomIteration extends
type NoSharedNameSentinel extends Smi;
-type JSModuleNamespace extends JSObject;
-type WeakArrayList extends HeapObject;
+@generateCppClass
+extern class CallHandlerInfo extends Struct {
+ callback: Foreign | Undefined;
+ js_callback: Foreign | Undefined;
+ data: Object;
+}
+
+type ObjectHashTable extends FixedArray;
@abstract
+extern class Module extends HeapObject {
+ exports: ObjectHashTable;
+ hash: Smi;
+ status: Smi;
+ module_namespace: JSModuleNamespace | Undefined;
+ exception: Object;
+}
+
+type SourceTextModuleInfo extends FixedArray;
+
+extern class SourceTextModule extends Module {
+ code: SharedFunctionInfo | JSFunction |
+ JSGeneratorObject | SourceTextModuleInfo;
+ regular_exports: FixedArray;
+ regular_imports: FixedArray;
+ requested_modules: FixedArray;
+ script: Script;
+ import_meta: TheHole | JSObject;
+ dfs_index: Smi;
+ dfs_ancestor_index: Smi;
+}
+
+extern class SyntheticModule extends Module {
+ name: String;
+ export_names: FixedArray;
+ evaluation_steps: Foreign;
+}
+
+@abstract
+extern class JSModuleNamespace extends JSObject {
+ module: Module;
+}
+
+@hasSameInstanceTypeAsParent
@noVerifier
+extern class TemplateList extends FixedArray {
+}
+
+@abstract
extern class JSWeakCollection extends JSObject {
table: Object;
}
extern class JSWeakSet extends JSWeakCollection {}
extern class JSWeakMap extends JSWeakCollection {}
-@noVerifier
extern class JSCollectionIterator extends JSObject {
table: Object;
index: Object;
@@ -474,12 +560,20 @@ extern class JSMessageObject extends JSObject {
error_level: Smi;
}
+extern class WeakArrayList extends HeapObject {
+ capacity: Smi;
+ length: Smi;
+ // TODO(v8:8983): declare variable-sized region for contained MaybeObject's
+ // objects[length]: MaybeObject;
+}
+
extern class PrototypeInfo extends Struct {
js_module_namespace: JSModuleNamespace | Undefined;
prototype_users: WeakArrayList | Zero;
registry_slot: Smi;
validity_cell: Object;
- @noVerifier object_create_map: Smi | WeakArrayList;
+ // TODO(v8:9108): Should be Weak<Map> | Undefined.
+ @noVerifier object_create_map: Map | Undefined;
bit_field: Smi;
}
@@ -503,7 +597,7 @@ extern class Script extends Struct {
extern class EmbedderDataArray extends HeapObject { length: Smi; }
-type ScopeInfo extends Object generates 'TNode<ScopeInfo>';
+type ScopeInfo extends HeapObject generates 'TNode<ScopeInfo>';
extern class PreparseData extends HeapObject {
// TODO(v8:8983): Add declaration for variable-sized region.
@@ -527,16 +621,30 @@ extern class SharedFunctionInfo extends HeapObject {
expected_nof_properties: uint16;
function_token_offset: int16;
flags: int32;
+ function_literal_id: int32;
@if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
}
extern class JSBoundFunction extends JSObject {
- bound_target_function: JSReceiver;
+ bound_target_function: Callable;
bound_this: Object;
bound_arguments: FixedArray;
}
-type Callable = JSFunction | JSBoundFunction | JSProxy;
+// Specialized types. The following three type definitions don't correspond to
+// actual C++ classes, but have Is... methods that check additional constraints.
+
+// A Foreign object whose raw pointer is not allowed to be null.
+type NonNullForeign extends Foreign;
+
+// A function built with InstantiateFunction for the public API.
+type CallableApiObject extends HeapObject;
+
+// A JSProxy with the callable bit set.
+type CallableJSProxy extends JSProxy;
+
+type Callable =
+ JSFunction | JSBoundFunction | CallableJSProxy | CallableApiObject;
extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
FixedArrayBase): intptr;
@@ -547,7 +655,7 @@ type NumberDictionary extends HeapObject
extern class FreeSpace extends HeapObject {
size: Smi;
- @noVerifier next: FreeSpace;
+ next: FreeSpace | Uninitialized;
}
// %RawDownCast should *never* be used anywhere in Torque code except for
@@ -609,45 +717,12 @@ extern class JSArrayBufferView extends JSObject {
}
extern class JSTypedArray extends JSArrayBufferView {
- AttachOffHeapBuffer(buffer: JSArrayBuffer, byteOffset: uintptr): void {
- const basePointer: Smi = 0;
-
- // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit
- // platforms are self-limiting, because we can't allocate an array bigger
- // than our 32-bit arithmetic range anyway. 64 bit platforms could
- // theoretically have an offset up to 2^35 - 1.
- const backingStore = buffer.backing_store;
- const externalPointer = backingStore + Convert<intptr>(byteOffset);
-
- // Assert no overflow has occurred. Only assert if the mock array buffer
- // allocator is NOT used. When the mock array buffer is used, impossibly
- // large allocations are allowed that would erroneously cause an overflow
- // and this assertion to fail.
- assert(
- IsMockArrayBufferAllocatorFlag() ||
- Convert<uintptr>(externalPointer) >= Convert<uintptr>(backingStore));
-
- this.elements = kEmptyByteArray;
- this.buffer = buffer;
- this.external_pointer = externalPointer;
- this.base_pointer = basePointer;
- }
-
length: uintptr;
external_pointer: RawPtr;
base_pointer: ByteArray | Smi;
}
-@noVerifier
-extern class JSAccessorPropertyDescriptor extends JSObject {
- get: Object;
- set: Object;
- enumerable: Object;
- configurable: Object;
-}
-
@abstract
-@noVerifier
extern class JSCollection extends JSObject {
table: Object;
}
@@ -681,14 +756,6 @@ extern class JSStringIterator extends JSObject {
next_index: Smi;
}
-@noVerifier
-extern class JSDataPropertyDescriptor extends JSObject {
- value: Object;
- writable: Object;
- enumerable: Object;
- configurable: Object;
-}
-
@abstract
extern class TemplateInfo extends Struct {
tag: Object;
@@ -722,7 +789,7 @@ extern class FunctionTemplateInfo extends TemplateInfo {
function_template_rare_data: Object;
shared_function_info: Object;
flag: Smi;
- @noVerifier length: Smi;
+ length: Smi;
cached_property_name: Object;
}
@@ -749,8 +816,6 @@ type LanguageMode extends Smi constexpr 'LanguageMode';
type ExtractFixedArrayFlags
generates 'TNode<Smi>'
constexpr 'CodeStubAssembler::ExtractFixedArrayFlags';
-type ParameterMode
- generates 'TNode<Int32T>' constexpr 'ParameterMode';
type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
@@ -770,21 +835,21 @@ const UTF32:
extern class Foreign extends HeapObject { foreign_address: RawPtr; }
extern class InterceptorInfo extends Struct {
- @noVerifier getter: Foreign | Zero;
- @noVerifier setter: Foreign | Zero;
- @noVerifier query: Foreign | Zero;
- @noVerifier descriptor: Foreign | Zero;
- @noVerifier deleter: Foreign | Zero;
- @noVerifier enumerator: Foreign | Zero;
- @noVerifier definer: Foreign | Zero;
+ getter: NonNullForeign | Zero | Undefined;
+ setter: NonNullForeign | Zero | Undefined;
+ query: NonNullForeign | Zero | Undefined;
+ descriptor: NonNullForeign | Zero | Undefined;
+ deleter: NonNullForeign | Zero | Undefined;
+ enumerator: NonNullForeign | Zero | Undefined;
+ definer: NonNullForeign | Zero | Undefined;
data: Object;
flags: Smi;
}
extern class AccessCheckInfo extends Struct {
- callback: Foreign | Zero;
- named_interceptor: InterceptorInfo | Zero;
- indexed_interceptor: InterceptorInfo | Zero;
+ callback: Foreign | Zero | Undefined;
+ named_interceptor: InterceptorInfo | Zero | Undefined;
+ indexed_interceptor: InterceptorInfo | Zero | Undefined;
data: Object;
}
@@ -800,6 +865,9 @@ extern class Cell extends HeapObject { value: Object; }
extern class DataHandler extends Struct {
smi_handler: Smi | Code;
validity_cell: Smi | Cell;
+
+ // Space for the following fields may or may not be allocated.
+ // TODO(v8:9108): Misusing "weak" keyword; should be MaybeObject.
@noVerifier weak data_1: Object;
@noVerifier weak data_2: Object;
@noVerifier weak data_3: Object;
@@ -850,17 +918,22 @@ extern class StackFrameInfo extends Struct {
column_number: Smi;
promise_all_index: Smi;
script_id: Smi;
- script_name: Object;
- script_name_or_source_url: Object;
- function_name: Object;
- wasm_module_name: Object;
+ script_name: String | Null | Undefined;
+ script_name_or_source_url: String | Null | Undefined;
+ function_name: String | Null | Undefined;
+ method_name: String | Null | Undefined;
+ type_name: String | Null | Undefined;
+ eval_origin: String | Null | Undefined;
+ wasm_module_name: String | Null | Undefined;
flag: Smi;
}
+type FrameArray extends FixedArray;
+
extern class StackTraceFrame extends Struct {
- frame_array: Object;
+ frame_array: FrameArray | Undefined;
frame_index: Smi;
- frame_info: Object;
+ frame_info: StackFrameInfo | Undefined;
id: Smi;
}
@@ -876,9 +949,20 @@ extern class WasmExportedFunctionData extends Struct {
instance: WasmInstanceObject;
jump_table_offset: Smi;
function_index: Smi;
+ // The remaining fields are for fast calling from C++. The contract is
+ // that they are lazily populated, and either all will be present or none.
+ c_wrapper_code: Object;
+ wasm_call_target: Smi; // Pseudo-smi: one-bit shift on all platforms.
+ packed_args_size: Smi;
}
-extern class WasmJSFunctionData extends Struct { wrapper_code: Code; }
+extern class WasmJSFunctionData extends Struct {
+ callable: JSReceiver;
+ wrapper_code: Code;
+ serialized_return_count: Smi;
+ serialized_parameter_count: Smi;
+ serialized_signature: ByteArray; // PodArray<wasm::ValueType>
+}
extern class WasmCapiFunctionData extends Struct {
call_target: RawPtr;
@@ -887,6 +971,16 @@ extern class WasmCapiFunctionData extends Struct {
serialized_signature: ByteArray; // PodArray<wasm::ValueType>
}
+extern class WasmIndirectFunctionTable extends Struct {
+ size: uint32;
+ @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
+ @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
+ sig_ids: RawPtr;
+ targets: RawPtr;
+ managed_native_allocations: Foreign | Undefined;
+ refs: FixedArray;
+}
+
extern class WasmDebugInfo extends Struct {
instance: WasmInstanceObject;
interpreter_handle: Foreign | Undefined;
@@ -947,9 +1041,9 @@ const kAllowLargeObjectAllocation: constexpr AllocationFlags
generates 'CodeStubAssembler::kAllowLargeObjectAllocation';
const kWithSlackTracking: constexpr SlackTrackingMode
- generates 'SlackTrackingMode::kWithSlackTracking';
+ generates 'CodeStubAssembler::SlackTrackingMode::kWithSlackTracking';
const kNoSlackTracking: constexpr SlackTrackingMode
- generates 'SlackTrackingMode::kNoSlackTracking';
+ generates 'CodeStubAssembler::SlackTrackingMode::kNoSlackTracking';
const kFixedDoubleArrays: constexpr ExtractFixedArrayFlags
generates 'CodeStubAssembler::ExtractFixedArrayFlag::kFixedDoubleArrays';
@@ -977,6 +1071,8 @@ const kCalledNonCallable: constexpr MessageTemplate
generates 'MessageTemplate::kCalledNonCallable';
const kCalledOnNullOrUndefined: constexpr MessageTemplate
generates 'MessageTemplate::kCalledOnNullOrUndefined';
+const kProtoObjectOrNull: constexpr MessageTemplate
+ generates 'MessageTemplate::kProtoObjectOrNull';
const kInvalidOffset: constexpr MessageTemplate
generates 'MessageTemplate::kInvalidOffset';
const kInvalidTypedArrayLength: constexpr MessageTemplate
@@ -1003,13 +1099,17 @@ const kSymbolToString: constexpr MessageTemplate
generates 'MessageTemplate::kSymbolToString';
const kPropertyNotFunction: constexpr MessageTemplate
generates 'MessageTemplate::kPropertyNotFunction';
+const kBigIntMaxLength: constexpr intptr
+ generates 'BigInt::kMaxLength';
+const kBigIntTooBig: constexpr MessageTemplate
+ generates 'MessageTemplate::kBigIntTooBig';
const kMaxArrayIndex:
constexpr uint32 generates 'JSArray::kMaxArrayIndex';
const kArrayBufferMaxByteLength:
constexpr uintptr generates 'JSArrayBuffer::kMaxByteLength';
-const V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP:
- constexpr int31 generates 'V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP';
+const kMaxTypedArrayInHeap:
+ constexpr int31 generates 'JSTypedArray::kMaxSizeInHeap';
const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
const kSmiMaxValue: constexpr uintptr generates 'kSmiMaxValue';
const kSmiMax: uintptr = kSmiMaxValue;
@@ -1054,7 +1154,13 @@ const kStrictReadOnlyProperty: constexpr MessageTemplate
const kString: constexpr PrimitiveType
generates 'PrimitiveType::kString';
-type Hole extends Oddball;
+const kExternalPointerForOnHeapArray: constexpr RawPtr
+ generates 'JSTypedArray::ExternalPointerForOnHeapArray()';
+
+const kNameDictionaryInitialCapacity:
+ constexpr int32 generates 'NameDictionary::kInitialCapacity';
+
+type TheHole extends Oddball;
type Null extends Oddball;
type Undefined extends Oddball;
type True extends Oddball;
@@ -1064,7 +1170,7 @@ type Boolean = True | False;
type NumberOrUndefined = Number | Undefined;
-extern macro TheHoleConstant(): Hole;
+extern macro TheHoleConstant(): TheHole;
extern macro NullConstant(): Null;
extern macro UndefinedConstant(): Undefined;
extern macro TrueConstant(): True;
@@ -1075,7 +1181,7 @@ extern macro EmptyStringConstant(): EmptyString;
extern macro LengthStringConstant(): String;
extern macro NanConstant(): NaN;
-const Hole: Hole = TheHoleConstant();
+const TheHole: TheHole = TheHoleConstant();
const Null: Null = NullConstant();
const Undefined: Undefined = UndefinedConstant();
const True: True = TrueConstant();
@@ -1090,11 +1196,6 @@ const false: constexpr bool generates 'false';
const kStrict: constexpr LanguageMode generates 'LanguageMode::kStrict';
const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy';
-const SMI_PARAMETERS: constexpr ParameterMode
- generates 'CodeStubAssembler::SMI_PARAMETERS';
-const INTPTR_PARAMETERS: constexpr ParameterMode
- generates 'CodeStubAssembler::INTPTR_PARAMETERS';
-
const SKIP_WRITE_BARRIER:
constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER';
const UNSAFE_SKIP_WRITE_BARRIER:
@@ -1107,7 +1208,7 @@ extern class AsyncGeneratorRequest extends Struct {
promise: JSPromise;
}
-extern class ModuleInfoEntry extends Struct {
+extern class SourceTextModuleInfoEntry extends Struct {
export_name: String | Undefined;
local_name: String | Undefined;
import_name: String | Undefined;
@@ -1134,7 +1235,7 @@ extern class PromiseReaction extends Struct {
extern class PromiseReactionJobTask extends Microtask {
argument: Object;
context: Context;
- @noVerifier handler: Callable | Undefined;
+ handler: Callable | Undefined;
promise_or_capability: JSPromise | PromiseCapability | Undefined;
}
@@ -1155,22 +1256,8 @@ extern class JSRegExp extends JSObject {
flags: Smi | Undefined;
}
-@noVerifier
-extern class JSIteratorResult extends JSObject {
- value: Object;
- done: Boolean;
-}
-
-macro NewJSIteratorResult(implicit context: Context)(
- value: Object, done: Boolean): JSIteratorResult {
- return new JSIteratorResult{
- map: GetIteratorResultMap(),
- properties_or_hash: kEmptyFixedArray,
- elements: kEmptyFixedArray,
- value,
- done
- };
-}
+extern transitioning macro AllocateJSIteratorResult(implicit context: Context)(
+ Object, Boolean): JSObject;
// Note: Although a condition for a FastJSRegExp is having a positive smi
// lastIndex (see RegExpBuiltinsAssembler::BranchIfFastRegExp), it is possible
@@ -1230,9 +1317,9 @@ extern class AccessorInfo extends Struct {
name: Object;
flags: Smi;
expected_receiver_type: Object;
- @noVerifier setter: Foreign | Zero;
- @noVerifier getter: Foreign | Zero;
- @noVerifier js_getter: Foreign | Zero;
+ setter: NonNullForeign | Zero;
+ getter: NonNullForeign | Zero;
+ js_getter: NonNullForeign | Zero;
data: Object;
}
@@ -1277,7 +1364,7 @@ extern class FeedbackCell extends Struct {
type AllocationSite extends Struct;
extern class AllocationMemento extends Struct {
- @noVerifier allocation_site: AllocationSite;
+ allocation_site: AllocationSite;
}
extern class WasmModuleObject extends JSObject {
@@ -1303,8 +1390,8 @@ extern class WasmMemoryObject extends JSObject {
}
extern class WasmGlobalObject extends JSObject {
- untagged_buffer: JSArrayBuffer;
- tagged_buffer: FixedArray;
+ untagged_buffer: JSArrayBuffer | Undefined;
+ tagged_buffer: FixedArray | Undefined;
offset: Smi;
flags: Smi;
}
@@ -1314,10 +1401,6 @@ extern class WasmExceptionObject extends JSObject {
exception_tag: HeapObject;
}
-@noVerifier
-extern class WasmExceptionPackage extends JSReceiver {
-}
-
type WasmExportedFunction extends JSFunction;
extern class AsmWasmData extends Struct {
@@ -1327,6 +1410,46 @@ extern class AsmWasmData extends Struct {
uses_bitset: HeapNumber;
}
+extern class JSFinalizationGroup extends JSObject {
+ native_context: NativeContext;
+ cleanup: Object;
+ active_cells: Undefined | WeakCell;
+ cleared_cells: Undefined | WeakCell;
+ key_map: Object;
+ next: Undefined | JSFinalizationGroup;
+ flags: Smi;
+}
+
+extern class JSFinalizationGroupCleanupIterator extends JSObject {
+ finalization_group: JSFinalizationGroup;
+}
+
+extern class WeakCell extends HeapObject {
+ finalization_group: Undefined | JSFinalizationGroup;
+ target: Undefined | JSReceiver;
+ holdings: Object;
+ prev: Undefined | WeakCell;
+ next: Undefined | WeakCell;
+ key: Object;
+ key_list_prev: Undefined | WeakCell;
+ key_list_next: Undefined | WeakCell;
+}
+
+extern class JSWeakRef extends JSObject { target: Undefined | JSReceiver; }
+
+extern class BytecodeArray extends FixedArrayBase {
+ // TODO(v8:8983): bytecode array object sizes vary based on their contents.
+ constant_pool: FixedArray;
+ handler_table: ByteArray;
+ source_position_table: Undefined | ByteArray |
+ SourcePositionTableWithFrameCache;
+ frame_size: int32;
+ parameter_size: int32;
+ incoming_new_target_or_generator_register: int32;
+ osr_nesting_level: int8;
+ bytecode_age: int8;
+}
+
extern macro Is64(): constexpr bool;
extern macro SelectBooleanConstant(bool): Boolean;
@@ -1358,7 +1481,7 @@ extern transitioning builtin SetProperty(implicit context: Context)(
extern transitioning builtin SetPropertyInLiteral(implicit context: Context)(
Object, Object, Object);
extern transitioning builtin DeleteProperty(implicit context: Context)(
- Object, Object, LanguageMode);
+ Object, Object, LanguageMode): Object;
extern transitioning builtin HasProperty(implicit context: Context)(
Object, Object): Boolean;
extern transitioning macro HasProperty_Inline(implicit context: Context)(
@@ -1403,6 +1526,10 @@ extern macro ConstructWithTarget(implicit context: Context)(
extern macro SpeciesConstructor(implicit context: Context)(
Object, JSReceiver): JSReceiver;
+extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool;
+extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
+ NameDictionary;
+
extern builtin ToObject(Context, Object): JSReceiver;
extern macro ToObject_Inline(Context, Object): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
@@ -1598,6 +1725,7 @@ extern operator '==' macro Word32Equal(bool, bool): bool;
extern operator '!=' macro Word32NotEqual(bool, bool): bool;
extern operator '+' macro Float64Add(float64, float64): float64;
+extern operator '-' macro Float64Sub(float64, float64): float64;
extern operator '+' macro NumberAdd(Number, Number): Number;
extern operator '-' macro NumberSub(Number, Number): Number;
@@ -1650,6 +1778,8 @@ extern macro TaggedIsNotSmi(Object): bool;
extern macro TaggedIsPositiveSmi(Object): bool;
extern macro IsValidPositiveSmi(intptr): bool;
+extern macro IsInteger(HeapNumber): bool;
+
extern macro HeapObjectToJSDataView(HeapObject): JSDataView
labels CastError;
extern macro HeapObjectToJSProxy(HeapObject): JSProxy
@@ -1713,7 +1843,7 @@ macro Cast<A: type>(o: HeapObject): A
labels CastError;
Cast<HeapObject>(o: HeapObject): HeapObject
- labels CastError {
+labels _CastError {
return o;
}
@@ -1837,6 +1967,11 @@ Cast<HeapNumber>(o: HeapObject): HeapNumber
goto CastError;
}
+Cast<BigInt>(o: HeapObject): BigInt labels CastError {
+ if (IsBigInt(o)) return %RawDownCast<BigInt>(o);
+ goto CastError;
+}
+
Cast<JSRegExp>(o: HeapObject): JSRegExp
labels CastError {
if (IsJSRegExp(o)) return %RawDownCast<JSRegExp>(o);
@@ -1849,9 +1984,9 @@ Cast<Map>(implicit context: Context)(o: HeapObject): Map
goto CastError;
}
-Cast<JSValue>(o: HeapObject): JSValue
+Cast<JSPrimitiveWrapper>(o: HeapObject): JSPrimitiveWrapper
labels CastError {
- if (IsJSValue(o)) return %RawDownCast<JSValue>(o);
+ if (IsJSPrimitiveWrapper(o)) return %RawDownCast<JSPrimitiveWrapper>(o);
goto CastError;
}
@@ -1915,24 +2050,24 @@ Cast<FastJSArrayForCopy>(implicit context: Context)(o: HeapObject):
FastJSArrayForCopy
labels CastError {
if (IsArraySpeciesProtectorCellInvalid()) goto CastError;
- const a: FastJSArray = Cast<FastJSArray>(o) otherwise CastError;
- return %RawDownCast<FastJSArrayForCopy>(o);
+ const a = Cast<FastJSArray>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayForCopy>(a);
}
Cast<FastJSArrayWithNoCustomIteration>(implicit context: Context)(
o: HeapObject): FastJSArrayWithNoCustomIteration
labels CastError {
if (IsArrayIteratorProtectorCellInvalid()) goto CastError;
- const a: FastJSArray = Cast<FastJSArray>(o) otherwise CastError;
- return %RawDownCast<FastJSArrayWithNoCustomIteration>(o);
+ const a = Cast<FastJSArray>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayWithNoCustomIteration>(a);
}
Cast<FastJSArrayForReadWithNoCustomIteration>(implicit context: Context)(
o: HeapObject): FastJSArrayForReadWithNoCustomIteration
labels CastError {
if (IsArrayIteratorProtectorCellInvalid()) goto CastError;
- const a: FastJSArrayForRead = Cast<FastJSArrayForRead>(o) otherwise CastError;
- return %RawDownCast<FastJSArrayForReadWithNoCustomIteration>(o);
+ const a = Cast<FastJSArrayForRead>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayForReadWithNoCustomIteration>(a);
}
Cast<JSReceiver>(implicit context: Context)(o: HeapObject): JSReceiver
@@ -1990,7 +2125,7 @@ extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
extern macro LoadNativeContext(Context): NativeContext;
extern macro TruncateFloat64ToFloat32(float64): float32;
-extern macro TruncateHeapNumberValueToWord32(Number): int32;
+extern macro TruncateHeapNumberValueToWord32(HeapNumber): int32;
extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map;
extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map;
extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr;
@@ -2007,13 +2142,14 @@ extern macro Float64Constant(constexpr float64): float64;
extern macro SmiConstant(constexpr int31): Smi;
extern macro SmiConstant(constexpr Smi): Smi;
extern macro SmiConstant(constexpr MessageTemplate): Smi;
+extern macro SmiConstant(constexpr LanguageMode): Smi;
extern macro BoolConstant(constexpr bool): bool;
extern macro StringConstant(constexpr string): String;
-extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode;
extern macro Int32Constant(constexpr ElementsKind): ElementsKind;
extern macro IntPtrConstant(constexpr NativeContextSlot): NativeContextSlot;
extern macro IntPtrConstant(constexpr ContextSlot): ContextSlot;
extern macro IntPtrConstant(constexpr intptr): intptr;
+extern macro PointerConstant(constexpr RawPtr): RawPtr;
extern macro SingleCharacterStringConstant(constexpr string): String;
extern macro BitcastWordToTaggedSigned(intptr): Smi;
@@ -2126,6 +2262,9 @@ Convert<Number, int32>(i: int32): Number {
Convert<intptr, int32>(i: int32): intptr {
return ChangeInt32ToIntPtr(i);
}
+Convert<intptr, uint32>(i: uint32): intptr {
+ return Signed(ChangeUint32ToWord(i));
+}
Convert<Smi, int32>(i: int32): Smi {
return SmiFromInt32(i);
}
@@ -2333,10 +2472,6 @@ extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
FixedDoubleArray, intptr, float64): void;
extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
FixedDoubleArray, Smi, float64): void;
-operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
- a: FixedDoubleArray, i: Smi, n: Number): void {
- StoreFixedDoubleArrayElementSmi(a, i, Convert<float64>(n));
-}
operator '[]=' macro StoreFixedDoubleArrayDirect(
a: FixedDoubleArray, i: Smi, v: Number) {
a.floats[i] = Convert<float64>(v);
@@ -2418,7 +2553,7 @@ extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray;
extern macro AllocateJSArray(Map, FixedArrayBase, Smi): JSArray;
extern macro AllocateJSObjectFromMap(Map): JSObject;
extern macro AllocateJSObjectFromMap(
- Map, FixedArray, FixedArray, constexpr AllocationFlags,
+ Map, FixedArray | PropertyArray, FixedArray, constexpr AllocationFlags,
constexpr SlackTrackingMode): JSObject;
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
@@ -2531,10 +2666,10 @@ LoadElementNoHole<FixedArray>(implicit context: Context)(
a: JSArray, index: Smi): Object
labels IfHole {
try {
- let elements: FixedArray =
+ const elements: FixedArray =
Cast<FixedArray>(a.elements) otherwise Unexpected;
- let e: Object = elements.objects[index];
- if (e == Hole) {
+ const e: Object = elements.objects[index];
+ if (e == TheHole) {
goto IfHole;
}
return e;
@@ -2548,9 +2683,10 @@ LoadElementNoHole<FixedDoubleArray>(implicit context: Context)(
a: JSArray, index: Smi): Object
labels IfHole {
try {
- let elements: FixedDoubleArray =
+ const elements: FixedDoubleArray =
Cast<FixedDoubleArray>(a.elements) otherwise Unexpected;
- let e: float64 = LoadDoubleWithHoleCheck(elements, index) otherwise IfHole;
+ const e: float64 =
+ LoadDoubleWithHoleCheck(elements, index) otherwise IfHole;
return AllocateHeapNumberWithValue(e);
}
label Unexpected {
@@ -2594,7 +2730,7 @@ struct FastJSArrayWitness {
} else {
const elements = Cast<FixedArray>(this.unstable.elements)
otherwise unreachable;
- StoreFixedArrayElement(elements, k, Hole);
+ StoreFixedArrayElement(elements, k, TheHole);
}
}
@@ -2638,12 +2774,12 @@ struct FastJSArrayWitness {
MoveElements(dst: intptr, src: intptr, length: intptr) {
assert(this.arrayIsPushable);
if (this.hasDoubles) {
- let elements: FixedDoubleArray =
+ const elements: FixedDoubleArray =
Cast<FixedDoubleArray>(this.unstable.elements)
otherwise unreachable;
TorqueMoveElements(elements, dst, src, length);
} else {
- let elements: FixedArray = Cast<FixedArray>(this.unstable.elements)
+ const elements: FixedArray = Cast<FixedArray>(this.unstable.elements)
otherwise unreachable;
if (this.hasSmis) {
TorqueMoveElementsSmi(elements, dst, src, length);
@@ -2662,17 +2798,62 @@ struct FastJSArrayWitness {
}
macro NewFastJSArrayWitness(array: FastJSArray): FastJSArrayWitness {
- let kind = array.map.elements_kind;
+ const kind = array.map.elements_kind;
return FastJSArrayWitness{
stable: array,
unstable: array,
map: array.map,
- hasDoubles: !IsElementsKindLessThanOrEqual(kind, HOLEY_ELEMENTS),
+ hasDoubles: IsDoubleElementsKind(kind),
hasSmis: IsElementsKindLessThanOrEqual(kind, HOLEY_SMI_ELEMENTS),
arrayIsPushable: false
};
}
+struct FastJSArrayForReadWitness {
+ Get(): FastJSArrayForRead {
+ return this.unstable;
+ }
+
+ Recheck() labels CastError {
+ if (this.stable.map != this.map) goto CastError;
+ // We don't need to check elements kind or whether the prototype
+ // has changed away from the default JSArray prototype, because
+ // if the map remains the same then those properties hold.
+ //
+ // However, we have to make sure there are no elements in the
+ // prototype chain.
+ if (IsNoElementsProtectorCellInvalid()) goto CastError;
+ this.unstable = %RawDownCast<FastJSArrayForRead>(this.stable);
+ }
+
+ LoadElementNoHole(implicit context: Context)(k: Smi): Object
+ labels FoundHole {
+ if (this.hasDoubles) {
+ return LoadElementNoHole<FixedDoubleArray>(this.unstable, k)
+ otherwise FoundHole;
+ } else {
+ return LoadElementNoHole<FixedArray>(this.unstable, k)
+ otherwise FoundHole;
+ }
+ }
+
+ const stable: JSArray;
+ unstable: FastJSArrayForRead;
+ const map: Map;
+ const hasDoubles: bool;
+}
+
+macro NewFastJSArrayForReadWitness(array: FastJSArrayForRead):
+ FastJSArrayForReadWitness {
+ const kind = array.map.elements_kind;
+ return FastJSArrayForReadWitness{
+ stable: array,
+ unstable: array,
+ map: array.map,
+ hasDoubles: IsDoubleElementsKind(kind)
+ };
+}
+
extern macro TransitionElementsKind(
JSObject, Map, constexpr ElementsKind,
constexpr ElementsKind): void labels Bailout;
@@ -2693,6 +2874,7 @@ extern macro IsJSReceiver(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
+extern macro IsBigInt(HeapObject): bool;
extern macro IsFixedArray(HeapObject): bool;
extern macro IsName(HeapObject): bool;
extern macro IsPrivateSymbol(HeapObject): bool;
@@ -2702,7 +2884,7 @@ extern macro IsOddball(HeapObject): bool;
extern macro IsSymbol(HeapObject): bool;
extern macro IsJSArrayMap(Map): bool;
extern macro IsExtensibleMap(Map): bool;
-extern macro IsJSValue(HeapObject): bool;
+extern macro IsJSPrimitiveWrapper(HeapObject): bool;
extern macro IsCustomElementsReceiverInstanceType(int32): bool;
extern macro Typeof(Object): Object;
@@ -2713,7 +2895,7 @@ macro NumberIsNaN(number: Number): bool {
return false;
}
case (hn: HeapNumber): {
- let value: float64 = Convert<float64>(hn);
+ const value: float64 = Convert<float64>(hn);
return value != value;
}
}
@@ -2722,6 +2904,8 @@ macro NumberIsNaN(number: Number): bool {
extern macro GotoIfForceSlowPath() labels Taken;
extern macro BranchIfToBooleanIsTrue(Object): never
labels Taken, NotTaken;
+extern macro BranchIfToBooleanIsFalse(Object): never
+ labels Taken, NotTaken;
macro ToBoolean(obj: Object): bool {
if (BranchIfToBooleanIsTrue(obj)) {
@@ -2731,13 +2915,24 @@ macro ToBoolean(obj: Object): bool {
}
}
+@export
+macro RequireObjectCoercible(implicit context: Context)(
+ value: Object, name: constexpr string): Object {
+ if (IsNullOrUndefined(value)) {
+ ThrowTypeError(kCalledOnNullOrUndefined, name);
+ }
+ return value;
+}
+
+extern macro BranchIfSameValue(Object, Object): never labels Taken, NotTaken;
+
transitioning macro ToIndex(input: Object, context: Context): Number
labels RangeError {
if (input == Undefined) {
return 0;
}
- let value: Number = ToInteger_Inline(context, input, kTruncateMinusZero);
+ const value: Number = ToInteger_Inline(context, input, kTruncateMinusZero);
if (value < 0 || value > kMaxSafeInteger) {
goto RangeError;
}
@@ -2824,19 +3019,6 @@ macro BranchIfFastJSArrayForRead(o: Object, context: Context):
BranchIf<FastJSArrayForRead>(o) otherwise True, False;
}
-macro BranchIfNotFastJSArray(o: Object, context: Context): never
- labels True, False {
- BranchIfNot<FastJSArray>(o) otherwise True, False;
-}
-
-macro BranchIfFastJSArrayForCopy(o: Object, context: Context): never
- labels True, False {
- // Long-term, it's likely not a good idea to have this slow-path test here,
- // since it fundamentally breaks the type system.
- GotoIfForceSlowPath() otherwise False;
- BranchIf<FastJSArrayForCopy>(o) otherwise True, False;
-}
-
@export
macro IsFastJSArrayWithNoCustomIteration(context: Context, o: Object): bool {
return Is<FastJSArrayWithNoCustomIteration>(o);
@@ -2859,7 +3041,7 @@ namespace runtime {
transitioning builtin FastCreateDataProperty(implicit context: Context)(
receiver: JSReceiver, key: Object, value: Object): Object {
try {
- let array = Cast<FastJSArray>(receiver) otherwise Slow;
+ const array = Cast<FastJSArray>(receiver) otherwise Slow;
const index: Smi = Cast<Smi>(key) otherwise goto Slow;
if (index < 0 || index > array.length) goto Slow;
array::EnsureWriteableFastElements(array);
@@ -2929,3 +3111,46 @@ transitioning macro ToStringImpl(context: Context, o: Object): String {
}
unreachable;
}
+
+macro VerifiedUnreachable(): never {
+ StaticAssert(false);
+ unreachable;
+}
+
+macro Float64IsSomeInfinity(value: float64): bool {
+ if (value == V8_INFINITY) {
+ return true;
+ }
+ return value == (Convert<float64>(0) - V8_INFINITY);
+}
+
+@export
+macro IsIntegerOrSomeInfinity(o: Object): bool {
+ typeswitch (o) {
+ case (Smi): {
+ return true;
+ }
+ case (hn: HeapNumber): {
+ if (Float64IsSomeInfinity(Convert<float64>(hn))) {
+ return true;
+ }
+ return IsInteger(hn);
+ }
+ case (Object): {
+ return false;
+ }
+ }
+}
+
+builtin CheckNumberInRange(implicit context: Context)(
+ value: Number, min: Number, max: Number): Undefined {
+ if (IsIntegerOrSomeInfinity(value) && min <= value && value <= max) {
+ return Undefined;
+ } else {
+ Print('Range type assertion failed! (value/min/max)');
+ Print(value);
+ Print(min);
+ Print(max);
+ unreachable;
+ }
+}
diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/bigint.tq
new file mode 100644
index 0000000000..a1b1cb6780
--- /dev/null
+++ b/deps/v8/src/builtins/bigint.tq
@@ -0,0 +1,206 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-bigint-gen.h'
+
+// TODO(nicohartmann): Discuss whether types used by multiple builtins should be
+// in global namespace
+@noVerifier
+extern class BigIntBase extends HeapObject generates 'TNode<BigInt>' {
+}
+
+type BigInt extends BigIntBase;
+
+@noVerifier
+@hasSameInstanceTypeAsParent
+extern class MutableBigInt extends BigIntBase generates 'TNode<BigInt>' {
+}
+
+Convert<BigInt, MutableBigInt>(i: MutableBigInt): BigInt {
+ assert(bigint::IsCanonicalized(i));
+ return %RawDownCast<BigInt>(Convert<BigIntBase>(i));
+}
+
+namespace bigint {
+
+ const kPositiveSign: uint32 = 0;
+ const kNegativeSign: uint32 = 1;
+
+ extern macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+ extern macro BigIntBuiltinsAssembler::CppAbsoluteSubAndCanonicalize(
+ MutableBigInt, BigIntBase, BigIntBase): void;
+ extern macro BigIntBuiltinsAssembler::CppAbsoluteCompare(
+ BigIntBase, BigIntBase): int32;
+
+ extern macro BigIntBuiltinsAssembler::ReadBigIntSign(BigIntBase): uint32;
+ extern macro BigIntBuiltinsAssembler::ReadBigIntLength(BigIntBase): intptr;
+ extern macro BigIntBuiltinsAssembler::WriteBigIntSignAndLength(
+ MutableBigInt, uint32, intptr): void;
+
+ extern macro CodeStubAssembler::AllocateBigInt(intptr): MutableBigInt;
+ extern macro CodeStubAssembler::StoreBigIntDigit(
+ MutableBigInt, intptr, uintptr): void;
+ extern macro CodeStubAssembler::LoadBigIntDigit(BigIntBase, intptr): uintptr;
+
+ @export // Silence unused warning.
+ // TODO(szuend): Remove @export once macros that are only used in
+ // asserts are no longer detected as unused.
+ macro IsCanonicalized(bigint: BigIntBase): bool {
+ const length = ReadBigIntLength(bigint);
+
+ if (length == 0) {
+ return ReadBigIntSign(bigint) == kPositiveSign;
+ }
+
+ return LoadBigIntDigit(bigint, length - 1) != 0;
+ }
+
+ macro InvertSign(sign: uint32): uint32 {
+ return sign == kPositiveSign ? kNegativeSign : kPositiveSign;
+ }
+
+ macro AllocateEmptyBigIntNoThrow(implicit context: Context)(
+ sign: uint32, length: intptr): MutableBigInt labels BigIntTooBig {
+ if (length > kBigIntMaxLength) {
+ goto BigIntTooBig;
+ }
+ const result: MutableBigInt = AllocateBigInt(length);
+
+ WriteBigIntSignAndLength(result, sign, length);
+ return result;
+ }
+
+ macro AllocateEmptyBigInt(implicit context: Context)(
+ sign: uint32, length: intptr): MutableBigInt {
+ try {
+ return AllocateEmptyBigIntNoThrow(sign, length) otherwise BigIntTooBig;
+ }
+ label BigIntTooBig {
+ ThrowRangeError(kBigIntTooBig);
+ }
+ }
+
+ macro MutableBigIntAbsoluteCompare(x: BigIntBase, y: BigIntBase): int32 {
+ return CppAbsoluteCompare(x, y);
+ }
+
+ macro MutableBigIntAbsoluteSub(implicit context: Context)(
+ x: BigInt, y: BigInt, resultSign: uint32): BigInt {
+ const xlength = ReadBigIntLength(x);
+ const ylength = ReadBigIntLength(y);
+ const xsign = ReadBigIntSign(x);
+
+ assert(MutableBigIntAbsoluteCompare(x, y) >= 0);
+ if (xlength == 0) {
+ assert(ylength == 0);
+ return x;
+ }
+
+ if (ylength == 0) {
+ return resultSign == xsign ? x : BigIntUnaryMinus(x);
+ }
+
+ const result = AllocateEmptyBigInt(resultSign, xlength);
+ CppAbsoluteSubAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ }
+
+ macro MutableBigIntAbsoluteAdd(implicit context: Context)(
+ xBigint: BigInt, yBigint: BigInt,
+ resultSign: uint32): BigInt labels BigIntTooBig {
+ let xlength = ReadBigIntLength(xBigint);
+ let ylength = ReadBigIntLength(yBigint);
+
+ let x = xBigint;
+ let y = yBigint;
+ if (xlength < ylength) {
+ // Swap x and y so that x is longer.
+ x = yBigint;
+ y = xBigint;
+ const tempLength = xlength;
+ xlength = ylength;
+ ylength = tempLength;
+ }
+
+ // case: 0n + 0n
+ if (xlength == 0) {
+ assert(ylength == 0);
+ return x;
+ }
+
+ // case: x + 0n
+ if (ylength == 0) {
+ return resultSign == ReadBigIntSign(x) ? x : BigIntUnaryMinus(x);
+ }
+
+ // case: x + y
+ const result = AllocateEmptyBigIntNoThrow(resultSign, xlength + 1)
+ otherwise BigIntTooBig;
+ CppAbsoluteAddAndCanonicalize(result, x, y);
+ return Convert<BigInt>(result);
+ }
+
+ macro BigIntAddImpl(implicit context: Context)(x: BigInt, y: BigInt): BigInt
+ labels BigIntTooBig {
+ const xsign = ReadBigIntSign(x);
+ const ysign = ReadBigIntSign(y);
+ if (xsign == ysign) {
+ // x + y == x + y
+ // -x + -y == -(x + y)
+ return MutableBigIntAbsoluteAdd(x, y, xsign) otherwise BigIntTooBig;
+ }
+
+ // x + -y == x - y == -(y - x)
+ // -x + y == y - x == -(x - y)
+ if (MutableBigIntAbsoluteCompare(x, y) >= 0) {
+ return MutableBigIntAbsoluteSub(x, y, xsign);
+ }
+ return MutableBigIntAbsoluteSub(y, x, InvertSign(xsign));
+ }
+
+ builtin BigIntAddNoThrow(implicit context: Context)(x: BigInt, y: BigInt):
+ Numeric {
+ try {
+ return BigIntAddImpl(x, y) otherwise BigIntTooBig;
+ }
+ label BigIntTooBig {
+ // Smi sentinal is used to signal BigIntTooBig exception.
+ return Convert<Smi>(0);
+ }
+ }
+
+ builtin BigIntAdd(implicit context: Context)(xNum: Numeric, yNum: Numeric):
+ BigInt {
+ try {
+ const x = Cast<BigInt>(xNum) otherwise MixedTypes;
+ const y = Cast<BigInt>(yNum) otherwise MixedTypes;
+
+ return BigIntAddImpl(x, y) otherwise BigIntTooBig;
+ }
+ label MixedTypes {
+ ThrowTypeError(kBigIntMixedTypes);
+ }
+ label BigIntTooBig {
+ ThrowRangeError(kBigIntTooBig);
+ }
+ }
+
+ builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt {
+ const length = ReadBigIntLength(bigint);
+
+ // There is no -0n.
+ if (length == 0) {
+ return bigint;
+ }
+
+ const result =
+ AllocateEmptyBigInt(InvertSign(ReadBigIntSign(bigint)), length);
+ for (let i: intptr = 0; i < length; ++i) {
+ StoreBigIntDigit(result, i, LoadBigIntDigit(bigint, i));
+ }
+ return Convert<BigInt>(result);
+ }
+
+} // namespace bigint
diff --git a/deps/v8/src/builtins/boolean.tq b/deps/v8/src/builtins/boolean.tq
index a41ef76d21..25f9ebd396 100644
--- a/deps/v8/src/builtins/boolean.tq
+++ b/deps/v8/src/builtins/boolean.tq
@@ -3,39 +3,20 @@
// found in the LICENSE file.
namespace boolean {
- const kNameDictionaryInitialCapacity:
- constexpr int32 generates 'NameDictionary::kInitialCapacity';
-
- extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool;
- extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
- NameDictionary;
-
- // TODO(v8:9120): This is a workaround to get access to target and new.target
- // in javascript builtins. Requires cleanup once this is fully supported by
- // torque.
- const NEW_TARGET_INDEX:
- constexpr int32 generates 'Descriptor::kJSNewTarget';
- const TARGET_INDEX: constexpr int32 generates 'Descriptor::kJSTarget';
- extern macro Parameter(constexpr int32): Object;
-
javascript builtin
- BooleanConstructor(context: Context, receiver: Object, ...arguments): Object {
+ BooleanConstructor(
+ js-implicit context: Context, receiver: Object, newTarget: Object,
+ target: JSFunction)(...arguments): Object {
const value = SelectBooleanConstant(ToBoolean(arguments[0]));
- const newTarget = Parameter(NEW_TARGET_INDEX);
if (newTarget == Undefined) {
return value;
}
- const target = UnsafeCast<JSFunction>(Parameter(TARGET_INDEX));
const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
- let properties = kEmptyFixedArray;
- if (IsDictionaryMap(map)) {
- properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
- }
- const obj = UnsafeCast<JSValue>(AllocateJSObjectFromMap(
- map, properties, kEmptyFixedArray, kNone, kWithSlackTracking));
+ const obj =
+ UnsafeCast<JSPrimitiveWrapper>(AllocateFastOrSlowJSObjectFromMap(map));
obj.value = value;
return obj;
}
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index 7ee879ab51..0c30e52154 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -32,14 +32,16 @@ JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info,
JSObject js_obj_receiver = JSObject::cast(receiver);
FunctionTemplateInfo signature = FunctionTemplateInfo::cast(recv_type);
- // Check the receiver. Fast path for receivers with no hidden prototypes.
+ // Check the receiver.
if (signature.IsTemplateFor(js_obj_receiver)) return receiver;
- if (!js_obj_receiver.map().has_hidden_prototype()) return JSReceiver();
- for (PrototypeIterator iter(isolate, js_obj_receiver, kStartAtPrototype,
- PrototypeIterator::END_AT_NON_HIDDEN);
- !iter.IsAtEnd(); iter.Advance()) {
- JSObject current = iter.GetCurrent<JSObject>();
- if (signature.IsTemplateFor(current)) return current;
+
+ // The JSGlobalProxy might have a hidden prototype.
+ if (V8_UNLIKELY(js_obj_receiver.IsJSGlobalProxy())) {
+ HeapObject prototype = js_obj_receiver.map().prototype();
+ if (!prototype.IsNull(isolate)) {
+ JSObject js_obj_prototype = JSObject::cast(prototype);
+ if (signature.IsTemplateFor(js_obj_prototype)) return js_obj_prototype;
+ }
}
return JSReceiver();
}
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 6cc9fd9623..d65d57cc79 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -266,7 +266,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
var_list1, argument_offset, mapped_offset,
[this, elements, &current_argument](Node* offset) {
Increment(&current_argument, kSystemPointerSize);
- Node* arg = LoadBufferObject(current_argument.value(), 0);
+ Node* arg = LoadBufferObject(
+ UncheckedCast<RawPtrT>(current_argument.value()), 0);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
arg);
},
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 29bcae6feb..07f74cb429 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -227,7 +227,7 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
- GotoIf(IsDetachedBuffer(array_buffer), detached);
+ GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
Node* value = LoadFixedTypedArrayElementAsTagged(
data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
@@ -402,7 +402,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
TNode<Object> receiver = args.GetReceiver();
TNode<JSArray> array_receiver;
- Node* kind = nullptr;
+ TNode<Int32T> kind;
Label fast(this);
BranchIfFastJSArray(receiver, context, &fast, &runtime);
@@ -709,19 +709,19 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
iterator_assembler.GetIterator(context, items, iterator_method);
TNode<Context> native_context = LoadNativeContext(context);
- TNode<Object> fast_iterator_result_map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ TNode<Map> fast_iterator_result_map = CAST(
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
Goto(&loop);
BIND(&loop);
{
// Loop while iterator is not done.
- TNode<Object> next = iterator_assembler.IteratorStep(
+ TNode<JSReceiver> next = iterator_assembler.IteratorStep(
context, iterator_record, &loop_done, fast_iterator_result_map);
TVARIABLE(Object, value,
- CAST(iterator_assembler.IteratorValue(
- context, next, fast_iterator_result_map)));
+ iterator_assembler.IteratorValue(context, next,
+ fast_iterator_result_map));
// If a map_function is supplied then call it (using this_arg as
// receiver), on the value returned from the iterator. Exceptions are
@@ -2035,8 +2035,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
&normal_sequence);
{
// Make elements kind holey and update elements kind in the type info.
- var_elements_kind =
- Signed(Word32Or(var_elements_kind.value(), Int32Constant(1)));
+ var_elements_kind = Word32Or(var_elements_kind.value(), Int32Constant(1));
StoreObjectFieldNoWriteBarrier(
allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask)));
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index e6ab965a7e..96c10ed0fd 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -970,8 +970,9 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
}
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS: {
- DCHECK(object->IsJSValue());
- Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+ DCHECK(object->IsJSPrimitiveWrapper());
+ Handle<JSPrimitiveWrapper> js_value =
+ Handle<JSPrimitiveWrapper>::cast(object);
DCHECK(js_value->value().IsString());
Handle<String> string(String::cast(js_value->value()), isolate);
uint32_t length = static_cast<uint32_t>(string->length());
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 03df1aaaad..a95365e425 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -36,6 +36,21 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
TNode<JSAsyncFunctionObject> async_function_object =
CAST(LoadContextElement(context, Context::EXTENSION_INDEX));
+ // Push the promise for the {async_function_object} back onto the catch
+ // prediction stack to handle exceptions thrown after resuming from the
+ // await properly.
+ Label if_instrumentation(this, Label::kDeferred),
+ if_instrumentation_done(this);
+ Branch(IsDebugActive(), &if_instrumentation, &if_instrumentation_done);
+ BIND(&if_instrumentation);
+ {
+ TNode<JSPromise> promise = LoadObjectField<JSPromise>(
+ async_function_object, JSAsyncFunctionObject::kPromiseOffset);
+ CallRuntime(Runtime::kDebugAsyncFunctionResumed, context, promise);
+ Goto(&if_instrumentation_done);
+ }
+ BIND(&if_instrumentation_done);
+
// Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
// unnecessary runtime checks removed.
@@ -80,27 +95,19 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
Signed(IntPtrAdd(WordSar(frame_size, IntPtrConstant(kTaggedSizeLog2)),
formal_parameter_count));
- // Allocate space for the promise, the async function object
- // and the register file.
- TNode<IntPtrT> size = IntPtrAdd(
- IntPtrConstant(JSPromise::kSizeWithEmbedderFields +
- JSAsyncFunctionObject::kSize + FixedArray::kHeaderSize),
- Signed(WordShl(parameters_and_register_length,
- IntPtrConstant(kTaggedSizeLog2))));
- TNode<HeapObject> base = AllocateInNewSpace(size);
-
- // Initialize the register file.
- TNode<FixedArray> parameters_and_registers = UncheckedCast<FixedArray>(
- InnerAllocate(base, JSAsyncFunctionObject::kSize +
- JSPromise::kSizeWithEmbedderFields));
- StoreMapNoWriteBarrier(parameters_and_registers, RootIndex::kFixedArrayMap);
- StoreObjectFieldNoWriteBarrier(parameters_and_registers,
- FixedArray::kLengthOffset,
- SmiFromIntPtr(parameters_and_register_length));
+ // Allocate and initialize the register file.
+ TNode<FixedArrayBase> parameters_and_registers =
+ AllocateFixedArray(HOLEY_ELEMENTS, parameters_and_register_length,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
FillFixedArrayWithValue(HOLEY_ELEMENTS, parameters_and_registers,
IntPtrConstant(0), parameters_and_register_length,
RootIndex::kUndefinedValue);
+ // Allocate space for the promise, the async function object.
+ TNode<IntPtrT> size = IntPtrConstant(JSPromise::kSizeWithEmbedderFields +
+ JSAsyncFunctionObject::kSize);
+ TNode<HeapObject> base = AllocateInNewSpace(size);
+
// Initialize the promise.
TNode<Context> native_context = LoadNativeContext(context);
TNode<JSFunction> promise_function =
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc
index 8a752f2517..d4818f0e01 100644
--- a/deps/v8/src/builtins/builtins-bigint-gen.cc
+++ b/deps/v8/src/builtins/builtins-bigint-gen.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-bigint-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.h b/deps/v8/src/builtins/builtins-bigint-gen.h
new file mode 100644
index 0000000000..288418258b
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-bigint-gen.h
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_BIGINT_GEN_H_
+#define V8_BUILTINS_BUILTINS_BIGINT_GEN_H_
+
+#include "src/codegen/code-stub-assembler.h"
+#include "src/objects/bigint.h"
+
+namespace v8 {
+namespace internal {
+
+class BigIntBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit BigIntBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<IntPtrT> ReadBigIntLength(TNode<BigInt> value) {
+ TNode<Word32T> bitfield = LoadBigIntBitfield(value);
+ return ChangeInt32ToIntPtr(
+ Signed(DecodeWord32<BigIntBase::LengthBits>(bitfield)));
+ }
+
+ TNode<Uint32T> ReadBigIntSign(TNode<BigInt> value) {
+ TNode<Word32T> bitfield = LoadBigIntBitfield(value);
+ return DecodeWord32<BigIntBase::SignBits>(bitfield);
+ }
+
+ void WriteBigIntSignAndLength(TNode<BigInt> bigint, TNode<Uint32T> sign,
+ TNode<IntPtrT> length) {
+ STATIC_ASSERT(BigIntBase::SignBits::kShift == 0);
+ TNode<Uint32T> bitfield = Unsigned(
+ Word32Or(Word32Shl(TruncateIntPtrToInt32(length),
+ Int32Constant(BigIntBase::LengthBits::kShift)),
+ Word32And(sign, Int32Constant(BigIntBase::SignBits::kMask))));
+ StoreBigIntBitfield(bigint, bitfield);
+ }
+
+ void CppAbsoluteAddAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference> mutable_big_int_absolute_add_and_canonicalize =
+ ExternalConstant(
+ ExternalReference::
+ mutable_big_int_absolute_add_and_canonicalize_function());
+ CallCFunction(mutable_big_int_absolute_add_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ void CppAbsoluteSubAndCanonicalize(TNode<BigInt> result, TNode<BigInt> x,
+ TNode<BigInt> y) {
+ TNode<ExternalReference> mutable_big_int_absolute_sub_and_canonicalize =
+ ExternalConstant(
+ ExternalReference::
+ mutable_big_int_absolute_sub_and_canonicalize_function());
+ CallCFunction(mutable_big_int_absolute_sub_and_canonicalize,
+ MachineType::AnyTagged(),
+ std::make_pair(MachineType::AnyTagged(), result),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y));
+ }
+
+ TNode<Int32T> CppAbsoluteCompare(TNode<BigInt> x, TNode<BigInt> y) {
+ TNode<ExternalReference> mutable_big_int_absolute_compare =
+ ExternalConstant(
+ ExternalReference::mutable_big_int_absolute_compare_function());
+ TNode<Int32T> result = UncheckedCast<Int32T>(
+ CallCFunction(mutable_big_int_absolute_compare, MachineType::Int32(),
+ std::make_pair(MachineType::AnyTagged(), x),
+ std::make_pair(MachineType::AnyTagged(), y)));
+ return result;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+#endif // V8_BUILTINS_BUILTINS_BIGINT_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index a8a847ef47..09d71a0562 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -80,10 +80,10 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
// 1. If Type(value) is BigInt, return value.
if (value->IsBigInt()) return Handle<BigInt>::cast(value);
// 2. If Type(value) is Object and value has a [[BigIntData]] internal slot:
- if (value->IsJSValue()) {
+ if (value->IsJSPrimitiveWrapper()) {
// 2a. Assert: value.[[BigIntData]] is a BigInt value.
// 2b. Return value.[[BigIntData]].
- Object data = JSValue::cast(*value).value();
+ Object data = JSPrimitiveWrapper::cast(*value).value();
if (data.IsBigInt()) return handle(BigInt::cast(data), isolate);
}
// 3. Throw a TypeError exception.
diff --git a/deps/v8/src/builtins/builtins-boolean-gen.cc b/deps/v8/src/builtins/builtins-boolean-gen.cc
index 30cf7ba0c1..74474a8918 100644
--- a/deps/v8/src/builtins/builtins-boolean-gen.cc
+++ b/deps/v8/src/builtins/builtins-boolean-gen.cc
@@ -15,22 +15,23 @@ namespace internal {
// ES6 #sec-boolean.prototype.tostring
TF_BUILTIN(BooleanPrototypeToString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* value = ToThisValue(context, receiver, PrimitiveType::kBoolean,
- "Boolean.prototype.toString");
- Node* result = LoadObjectField(value, Oddball::kToStringOffset);
+ TNode<Oddball> value =
+ CAST(ToThisValue(context, receiver, PrimitiveType::kBoolean,
+ "Boolean.prototype.toString"));
+ TNode<String> result = CAST(LoadObjectField(value, Oddball::kToStringOffset));
Return(result);
}
// ES6 #sec-boolean.prototype.valueof
TF_BUILTIN(BooleanPrototypeValueOf, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Node* result = ToThisValue(context, receiver, PrimitiveType::kBoolean,
- "Boolean.prototype.valueOf");
+ TNode<Oddball> result = CAST(ToThisValue(
+ context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf"));
Return(result);
}
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 05142a8f07..deb91dee24 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -475,14 +475,13 @@ TNode<JSReceiver> CallOrConstructBuiltinsAssembler::GetCompatibleReceiver(
BIND(&holder_next);
{
- // Continue with the hidden prototype of the {holder} if it
- // has one, or throw an illegal invocation exception, since
- // the receiver did not pass the {signature} check.
+ // Continue with the hidden prototype of the {holder} if it is a
+ // JSGlobalProxy (the hidden prototype can either be null or a
+ // JSObject in that case), or throw an illegal invocation exception,
+ // since the receiver did not pass the {signature} check.
TNode<Map> holder_map = LoadMap(holder);
var_holder = LoadMapPrototype(holder_map);
- GotoIf(IsSetWord32(LoadMapBitField2(holder_map),
- Map::HasHiddenPrototypeBit::kMask),
- &holder_loop);
+ GotoIf(IsJSGlobalProxyMap(holder_map), &holder_loop);
ThrowTypeError(context, MessageTemplate::kIllegalInvocation);
}
}
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index d98eba4eeb..d1082291ef 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -8,6 +8,7 @@
#include "src/logging/counters.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/stack-frame-info.h"
namespace v8 {
namespace internal {
@@ -76,6 +77,9 @@ BUILTIN(CallSitePrototypeGetFunction) {
StackFrameBase* frame = it.Frame();
if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
+
+ isolate->CountUsage(v8::Isolate::kCallSiteAPIGetFunctionSloppyCall);
+
return *frame->GetFunction();
}
@@ -135,6 +139,9 @@ BUILTIN(CallSitePrototypeGetThis) {
StackFrameBase* frame = it.Frame();
if (frame->IsStrict()) return ReadOnlyRoots(isolate).undefined_value();
+
+ isolate->CountUsage(v8::Isolate::kCallSiteAPIGetThisSloppyCall);
+
return *frame->GetReceiver();
}
@@ -197,9 +204,9 @@ BUILTIN(CallSitePrototypeIsToplevel) {
BUILTIN(CallSitePrototypeToString) {
HandleScope scope(isolate);
CHECK_CALLSITE(recv, "toString");
- FrameArrayIterator it(isolate, GetFrameArray(isolate, recv),
- GetFrameIndex(isolate, recv));
- RETURN_RESULT_OR_FAILURE(isolate, it.Frame()->ToString());
+ Handle<StackTraceFrame> frame = isolate->factory()->NewStackTraceFrame(
+ GetFrameArray(isolate, recv), GetFrameIndex(isolate, recv));
+ RETURN_RESULT_OR_FAILURE(isolate, SerializeStackTraceFrame(isolate, frame));
}
#undef CHECK_CALLSITE
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index b5a9851c70..613e5f10ff 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -66,19 +66,19 @@ class BaseCollectionsAssembler : public CodeStubAssembler {
TNode<Object> iterable);
// Constructs a collection instance. Choosing a fast path when possible.
- TNode<Object> AllocateJSCollection(TNode<Context> context,
- TNode<JSFunction> constructor,
- TNode<Object> new_target);
+ TNode<JSObject> AllocateJSCollection(TNode<Context> context,
+ TNode<JSFunction> constructor,
+ TNode<JSReceiver> new_target);
// Fast path for constructing a collection instance if the constructor
// function has not been modified.
- TNode<Object> AllocateJSCollectionFast(TNode<HeapObject> constructor);
+ TNode<JSObject> AllocateJSCollectionFast(TNode<JSFunction> constructor);
// Fallback for constructing a collection instance if the constructor function
// has been modified.
- TNode<Object> AllocateJSCollectionSlow(TNode<Context> context,
- TNode<JSFunction> constructor,
- TNode<Object> new_target);
+ TNode<JSObject> AllocateJSCollectionSlow(TNode<Context> context,
+ TNode<JSFunction> constructor,
+ TNode<JSReceiver> new_target);
// Allocates the backing store for a collection.
virtual TNode<Object> AllocateTable(Variant variant, TNode<Context> context,
@@ -320,17 +320,17 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object)));
- TNode<Object> fast_iterator_result_map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ TNode<Map> fast_iterator_result_map = CAST(
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
TVARIABLE(Object, var_exception);
Goto(&loop);
BIND(&loop);
{
- TNode<Object> next = iterator_assembler.IteratorStep(
+ TNode<JSReceiver> next = iterator_assembler.IteratorStep(
context, iterator, &exit, fast_iterator_result_map);
- TNode<Object> next_value = CAST(iterator_assembler.IteratorValue(
- context, next, fast_iterator_result_map));
+ TNode<Object> next_value = iterator_assembler.IteratorValue(
+ context, next, fast_iterator_result_map);
AddConstructorEntry(variant, context, collection, add_func, next_value,
nullptr, &if_exception, &var_exception);
Goto(&loop);
@@ -367,33 +367,33 @@ void BaseCollectionsAssembler::GotoIfInitialAddFunctionModified(
GetAddFunctionNameIndex(variant), if_modified);
}
-TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
+TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollection(
TNode<Context> context, TNode<JSFunction> constructor,
- TNode<Object> new_target) {
+ TNode<JSReceiver> new_target) {
TNode<BoolT> is_target_unmodified = WordEqual(constructor, new_target);
- return Select<Object>(is_target_unmodified,
- [=] { return AllocateJSCollectionFast(constructor); },
- [=] {
- return AllocateJSCollectionSlow(context, constructor,
- new_target);
- });
+ return Select<JSObject>(
+ is_target_unmodified,
+ [=] { return AllocateJSCollectionFast(constructor); },
+ [=] {
+ return AllocateJSCollectionSlow(context, constructor, new_target);
+ });
}
-TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionFast(
- TNode<HeapObject> constructor) {
+TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollectionFast(
+ TNode<JSFunction> constructor) {
CSA_ASSERT(this, IsConstructorMap(LoadMap(constructor)));
- TNode<Object> initial_map =
- LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
- return CAST(AllocateJSObjectFromMap(initial_map));
+ TNode<Map> initial_map =
+ CAST(LoadJSFunctionPrototypeOrInitialMap(constructor));
+ return AllocateJSObjectFromMap(initial_map);
}
-TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionSlow(
+TNode<JSObject> BaseCollectionsAssembler::AllocateJSCollectionSlow(
TNode<Context> context, TNode<JSFunction> constructor,
- TNode<Object> new_target) {
+ TNode<JSReceiver> new_target) {
ConstructorBuiltinsAssembler constructor_assembler(this->state());
- return CAST(constructor_assembler.EmitFastNewObject(context, constructor,
- new_target));
+ return constructor_assembler.EmitFastNewObject(context, constructor,
+ new_target);
}
void BaseCollectionsAssembler::GenerateConstructor(
@@ -408,7 +408,7 @@ void BaseCollectionsAssembler::GenerateConstructor(
TNode<Context> native_context = LoadNativeContext(context);
TNode<Object> collection = AllocateJSCollection(
- context, GetConstructor(variant, native_context), new_target);
+ context, GetConstructor(variant, native_context), CAST(new_target));
AddConstructorEntries(variant, context, native_context, collection, iterable);
Return(collection);
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 973f1785d1..9ab3566cec 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -39,7 +39,8 @@ namespace internal {
namespace {
void ConsoleCall(
- Isolate* isolate, internal::BuiltinArguments& args,
+ Isolate* isolate,
+ internal::BuiltinArguments& args, // NOLINT(runtime/references)
void (debug::ConsoleDelegate::*func)(const v8::debug::ConsoleCallArguments&,
const v8::debug::ConsoleContext&)) {
CHECK(!isolate->has_pending_exception());
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index a725f3c4a1..767e626432 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -147,44 +147,40 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
}
TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* target = Parameter(Descriptor::kTarget);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<JSReceiver> new_target = CAST(Parameter(Descriptor::kNewTarget));
Label call_runtime(this);
- Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
+ TNode<JSObject> result =
+ EmitFastNewObject(context, target, new_target, &call_runtime);
Return(result);
BIND(&call_runtime);
TailCallRuntime(Runtime::kNewObject, context, target, new_target);
}
-Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
- Node* target,
- Node* new_target) {
- VARIABLE(var_obj, MachineRepresentation::kTagged);
+compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
+ SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
+ SloppyTNode<JSReceiver> new_target) {
+ TVARIABLE(JSObject, var_obj);
Label call_runtime(this), end(this);
- Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
- var_obj.Bind(result);
+ var_obj = EmitFastNewObject(context, target, new_target, &call_runtime);
Goto(&end);
BIND(&call_runtime);
- var_obj.Bind(CallRuntime(Runtime::kNewObject, context, target, new_target));
+ var_obj = CAST(CallRuntime(Runtime::kNewObject, context, target, new_target));
Goto(&end);
BIND(&end);
return var_obj.value();
}
-Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
- Node* target,
- Node* new_target,
- Label* call_runtime) {
- CSA_ASSERT(this, HasInstanceType(target, JS_FUNCTION_TYPE));
- CSA_ASSERT(this, IsJSReceiver(new_target));
-
+compiler::TNode<JSObject> ConstructorBuiltinsAssembler::EmitFastNewObject(
+ SloppyTNode<Context> context, SloppyTNode<JSFunction> target,
+ SloppyTNode<JSReceiver> new_target, Label* call_runtime) {
// Verify that the new target is a JSFunction.
Label fast(this), end(this);
GotoIf(HasInstanceType(new_target, JS_FUNCTION_TYPE), &fast);
@@ -732,7 +728,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
TNode<JSFunction> target = LoadTargetFromFrame();
Node* result =
CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, n_value);
+ StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, n_value);
args.PopAndReturn(result);
}
}
@@ -798,7 +794,7 @@ TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
Node* result =
CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, s_value);
+ StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, s_value);
args.PopAndReturn(result);
}
}
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 9093a5a77b..9208506c79 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -31,10 +31,14 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Label* call_runtime);
Node* EmitCreateEmptyObjectLiteral(Node* context);
- Node* EmitFastNewObject(Node* context, Node* target, Node* new_target);
-
- Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
- Label* call_runtime);
+ TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
+ SloppyTNode<JSFunction> target,
+ SloppyTNode<JSReceiver> new_target);
+
+ TNode<JSObject> EmitFastNewObject(SloppyTNode<Context> context,
+ SloppyTNode<JSFunction> target,
+ SloppyTNode<JSReceiver> new_target,
+ Label* call_runtime);
};
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index bc7e349ce1..71a9cbf145 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -392,7 +392,8 @@ TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) {
// ES6 section 7.1.13 ToObject (argument)
TF_BUILTIN(ToObject, CodeStubAssembler) {
Label if_smi(this, Label::kDeferred), if_jsreceiver(this),
- if_noconstructor(this, Label::kDeferred), if_wrapjsvalue(this);
+ if_noconstructor(this, Label::kDeferred),
+ if_wrapjs_primitive_wrapper(this);
Node* context = Parameter(Descriptor::kContext);
Node* object = Parameter(Descriptor::kArgument);
@@ -411,27 +412,30 @@ TF_BUILTIN(ToObject, CodeStubAssembler) {
IntPtrConstant(Map::kNoConstructorFunctionIndex)),
&if_noconstructor);
constructor_function_index_var.Bind(constructor_function_index);
- Goto(&if_wrapjsvalue);
+ Goto(&if_wrapjs_primitive_wrapper);
BIND(&if_smi);
constructor_function_index_var.Bind(
IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
- Goto(&if_wrapjsvalue);
+ Goto(&if_wrapjs_primitive_wrapper);
- BIND(&if_wrapjsvalue);
+ BIND(&if_wrapjs_primitive_wrapper);
TNode<Context> native_context = LoadNativeContext(context);
Node* constructor = LoadContextElement(
native_context, constructor_function_index_var.value());
Node* initial_map =
LoadObjectField(constructor, JSFunction::kPrototypeOrInitialMapOffset);
- Node* js_value = Allocate(JSValue::kSize);
- StoreMapNoWriteBarrier(js_value, initial_map);
- StoreObjectFieldRoot(js_value, JSValue::kPropertiesOrHashOffset,
+ Node* js_primitive_wrapper = Allocate(JSPrimitiveWrapper::kSize);
+ StoreMapNoWriteBarrier(js_primitive_wrapper, initial_map);
+ StoreObjectFieldRoot(js_primitive_wrapper,
+ JSPrimitiveWrapper::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+ StoreObjectFieldRoot(js_primitive_wrapper,
+ JSPrimitiveWrapper::kElementsOffset,
RootIndex::kEmptyFixedArray);
- StoreObjectField(js_value, JSValue::kValueOffset, object);
- Return(js_value);
+ StoreObjectField(js_primitive_wrapper, JSPrimitiveWrapper::kValueOffset,
+ object);
+ Return(js_primitive_wrapper);
BIND(&if_noconstructor);
ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject,
diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h
index eeb84f34db..d5c6571880 100644
--- a/deps/v8/src/builtins/builtins-data-view-gen.h
+++ b/deps/v8/src/builtins/builtins-data-view-gen.h
@@ -17,13 +17,13 @@ class DataViewBuiltinsAssembler : public CodeStubAssembler {
explicit DataViewBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- TNode<Int32T> LoadUint8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
- return UncheckedCast<Int32T>(
+ TNode<Uint8T> LoadUint8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
+ return UncheckedCast<Uint8T>(
Load(MachineType::Uint8(), data_pointer, offset));
}
- TNode<Int32T> LoadInt8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
- return UncheckedCast<Int32T>(
+ TNode<Int8T> LoadInt8(TNode<RawPtrT> data_pointer, TNode<UintPtrT> offset) {
+ return UncheckedCast<Int8T>(
Load(MachineType::Int8(), data_pointer, offset));
}
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 3412edb89d..23ab4a88ca 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -103,8 +103,8 @@ namespace internal {
\
/* String helpers */ \
TFC(StringCharAt, StringAt) \
- TFC(StringCodePointAtUTF16, StringAt) \
- TFC(StringCodePointAtUTF32, StringAt) \
+ TFC(StringCodePointAt, StringAt) \
+ TFC(StringFromCodePointAt, StringAtAsString) \
TFC(StringEqual, Compare) \
TFC(StringGreaterThan, Compare) \
TFC(StringGreaterThanOrEqual, Compare) \
@@ -170,7 +170,9 @@ namespace internal {
\
/* Adapters for Turbofan into runtime */ \
TFC(AllocateInYoungGeneration, Allocate) \
+ TFC(AllocateRegularInYoungGeneration, Allocate) \
TFC(AllocateInOldGeneration, Allocate) \
+ TFC(AllocateRegularInOldGeneration, Allocate) \
\
/* TurboFan support builtins */ \
TFS(CopyFastSmiOrObjectElements, kObject) \
@@ -266,7 +268,7 @@ namespace internal {
\
/* Abort */ \
TFC(Abort, Abort) \
- TFC(AbortJS, Abort) \
+ TFC(AbortCSAAssert, Abort) \
\
/* Built-in functions for Javascript */ \
/* Special internal builtins */ \
@@ -726,16 +728,12 @@ namespace internal {
CPP(ObjectGetOwnPropertyDescriptors) \
TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \
CPP(ObjectGetOwnPropertySymbols) \
- CPP(ObjectGetPrototypeOf) \
- CPP(ObjectSetPrototypeOf) \
TFJ(ObjectIs, 2, kReceiver, kLeft, kRight) \
- CPP(ObjectIsExtensible) \
CPP(ObjectIsFrozen) \
CPP(ObjectIsSealed) \
TFJ(ObjectKeys, 1, kReceiver, kObject) \
CPP(ObjectLookupGetter) \
CPP(ObjectLookupSetter) \
- CPP(ObjectPreventExtensions) \
/* ES6 #sec-object.prototype.tostring */ \
TFJ(ObjectPrototypeToString, 0, kReceiver) \
/* ES6 #sec-object.prototype.valueof */ \
@@ -823,16 +821,10 @@ namespace internal {
ASM(ReflectApply, Dummy) \
ASM(ReflectConstruct, Dummy) \
CPP(ReflectDefineProperty) \
- CPP(ReflectDeleteProperty) \
- CPP(ReflectGet) \
CPP(ReflectGetOwnPropertyDescriptor) \
- CPP(ReflectGetPrototypeOf) \
TFJ(ReflectHas, 2, kReceiver, kTarget, kKey) \
- CPP(ReflectIsExtensible) \
CPP(ReflectOwnKeys) \
- CPP(ReflectPreventExtensions) \
CPP(ReflectSet) \
- CPP(ReflectSetPrototypeOf) \
\
/* RegExp */ \
CPP(RegExpCapture1Getter) \
@@ -1150,6 +1142,7 @@ namespace internal {
ASM(StackCheck, Dummy) \
ASM(DoubleToI, Dummy) \
TFC(GetProperty, GetProperty) \
+ TFS(GetPropertyWithReceiver, kObject, kKey, kReceiver, kOnNonExistent) \
TFS(SetProperty, kReceiver, kKey, kValue) \
TFS(SetPropertyInLiteral, kReceiver, kKey, kValue) \
ASM(MemCopyUint8Uint8, CCall) \
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index e099baeb34..3bcc7356d4 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -31,10 +31,11 @@ BUILTIN(ErrorConstructor) {
}
RETURN_RESULT_OR_FAILURE(
- isolate, ErrorUtils::Construct(isolate, args.target(),
- Handle<Object>::cast(args.new_target()),
- args.atOrUndefined(isolate, 1), mode,
- caller, false));
+ isolate,
+ ErrorUtils::Construct(isolate, args.target(),
+ Handle<Object>::cast(args.new_target()),
+ args.atOrUndefined(isolate, 1), mode, caller,
+ ErrorUtils::StackTraceCollection::kDetailed));
}
// static
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 53e974c452..137f7f3402 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -86,17 +86,27 @@ BUILTIN(GlobalEval) {
Handle<Object> x = args.atOrUndefined(isolate, 1);
Handle<JSFunction> target = args.target();
Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
- if (!x->IsString()) return *x;
if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined);
return ReadOnlyRoots(isolate).undefined_value();
}
+
+ // Run embedder pre-checks before executing eval. If the argument is a
+ // non-String (or other object the embedder doesn't know to handle), then
+ // return it directly.
+ MaybeHandle<String> source;
+ bool unhandled_object;
+ std::tie(source, unhandled_object) =
+ Compiler::ValidateDynamicCompilationSource(
+ isolate, handle(target->native_context(), isolate), x);
+ if (unhandled_object) return *x;
+
Handle<JSFunction> function;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, function,
- Compiler::GetFunctionFromString(handle(target->native_context(), isolate),
- Handle<String>::cast(x),
- NO_PARSE_RESTRICTION, kNoSourcePosition));
+ Compiler::GetFunctionFromValidatedString(
+ handle(target->native_context(), isolate), source,
+ NO_PARSE_RESTRICTION, kNoSourcePosition));
RETURN_RESULT_OR_FAILURE(
isolate,
Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index d1b50f2cdc..973356f569 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -28,7 +28,8 @@ class HandlerBuiltinsAssembler : public CodeStubAssembler {
// kind. Use with caution. This produces a *lot* of code.
using ElementsKindSwitchCase = std::function<void(ElementsKind)>;
void DispatchByElementsKind(TNode<Int32T> elements_kind,
- const ElementsKindSwitchCase& case_function);
+ const ElementsKindSwitchCase& case_function,
+ bool handle_typed_elements_kind);
// Dispatches over all possible combinations of {from,to} elements kinds.
using ElementsKindTransitionSwitchCase =
@@ -48,7 +49,7 @@ TF_BUILTIN(LoadIC_StringLength, CodeStubAssembler) {
TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) {
Node* value = Parameter(Descriptor::kReceiver);
- Node* string = LoadJSValueValue(value);
+ Node* string = LoadJSPrimitiveWrapperValue(value);
Return(LoadStringLengthAsSmi(string));
}
@@ -227,7 +228,7 @@ void HandlerBuiltinsAssembler::Generate_ElementsTransitionAndStore(
[=, &miss](ElementsKind from_kind, ElementsKind to_kind) {
TransitionElementsKind(receiver, map, from_kind, to_kind, &miss);
EmitElementStore(receiver, key, value, to_kind, store_mode, &miss,
- context);
+ context, nullptr);
});
Return(value);
}
@@ -280,7 +281,8 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW,
V(BIGINT64_ELEMENTS)
void HandlerBuiltinsAssembler::DispatchByElementsKind(
- TNode<Int32T> elements_kind, const ElementsKindSwitchCase& case_function) {
+ TNode<Int32T> elements_kind, const ElementsKindSwitchCase& case_function,
+ bool handle_typed_elements_kind) {
Label next(this), if_unknown_type(this, Label::kDeferred);
int32_t elements_kinds[] = {
@@ -300,6 +302,8 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
};
STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
+ // TODO(mythria): Do not emit cases for typed elements kind when
+ // handle_typed_elements is false to decrease the size of the jump table.
Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
arraysize(elements_kinds));
@@ -310,6 +314,9 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
IsFrozenOrSealedElementsKindUnchecked(KIND)) { \
/* Disable support for frozen or sealed elements kinds. */ \
Unreachable(); \
+ } else if (!handle_typed_elements_kind && \
+ IsTypedArrayElementsKind(KIND)) { \
+ Unreachable(); \
} else { \
case_function(KIND); \
Goto(&next); \
@@ -340,17 +347,26 @@ void HandlerBuiltinsAssembler::Generate_StoreFastElementIC(
Label miss(this);
+ bool handle_typed_elements_kind =
+ store_mode == STANDARD_STORE || store_mode == STORE_IGNORE_OUT_OF_BOUNDS;
+ // For typed arrays maybe_converted_value contains the value obtained after
+ // calling ToNumber. We should pass the converted value to the runtime to
+ // avoid doing the user visible conversion again.
+ VARIABLE(maybe_converted_value, MachineRepresentation::kTagged, value);
+ maybe_converted_value.Bind(value);
// TODO(v8:8481): Pass elements_kind in feedback vector slots.
- DispatchByElementsKind(LoadElementsKind(receiver),
- [=, &miss](ElementsKind elements_kind) {
- EmitElementStore(receiver, key, value, elements_kind,
- store_mode, &miss, context);
- });
+ DispatchByElementsKind(
+ LoadElementsKind(receiver),
+ [=, &miss, &maybe_converted_value](ElementsKind elements_kind) {
+ EmitElementStore(receiver, key, value, elements_kind, store_mode, &miss,
+ context, &maybe_converted_value);
+ },
+ handle_typed_elements_kind);
Return(value);
BIND(&miss);
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
- receiver, key);
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context,
+ maybe_converted_value.value(), slot, vector, receiver, key);
}
TF_BUILTIN(StoreFastElementIC_Standard, HandlerBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index baaadb722a..8d22767b58 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -614,8 +614,9 @@ class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
Label if_done(this), if_noelements(this),
if_sourcenotjsobject(this, Label::kDeferred);
- // JSValue wrappers for numbers don't have any enumerable own properties,
- // so we can immediately skip the whole operation if {source} is a Smi.
+ // JSPrimitiveWrapper wrappers for numbers don't have any enumerable own
+ // properties, so we can immediately skip the whole operation if {source} is
+ // a Smi.
GotoIf(TaggedIsSmi(source), &if_done);
// Otherwise check if {source} is a proper JSObject, and if not, defer
@@ -809,17 +810,49 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
TNode<IntPtrT> requested_size =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+ TNode<Smi> allocation_flags =
+ SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllowLargeObjectAllocationFlag::encode(true)));
TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
- SmiFromIntPtr(requested_size));
+ SmiFromIntPtr(requested_size), allocation_flags);
+}
+
+TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) {
+ TNode<IntPtrT> requested_size =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+
+ TNode<Smi> allocation_flags =
+ SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllowLargeObjectAllocationFlag::encode(false)));
+ TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+ SmiFromIntPtr(requested_size), allocation_flags);
}
TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) {
TNode<IntPtrT> requested_size =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+
+ TNode<Smi> runtime_flags =
+ SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllowLargeObjectAllocationFlag::encode(true)));
+ TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
+ SmiFromIntPtr(requested_size), runtime_flags);
+}
+
+TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) {
+ TNode<IntPtrT> requested_size =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+ CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+ TNode<Smi> runtime_flags =
+ SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllowLargeObjectAllocationFlag::encode(false)));
TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
- SmiFromIntPtr(requested_size), SmiConstant(0));
+ SmiFromIntPtr(requested_size), runtime_flags);
}
TF_BUILTIN(Abort, CodeStubAssembler) {
@@ -827,9 +860,9 @@ TF_BUILTIN(Abort, CodeStubAssembler) {
TailCallRuntime(Runtime::kAbort, NoContextConstant(), message_id);
}
-TF_BUILTIN(AbortJS, CodeStubAssembler) {
+TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) {
TNode<String> message = CAST(Parameter(Descriptor::kMessageOrMessageId));
- TailCallRuntime(Runtime::kAbortJS, NoContextConstant(), message);
+ TailCallRuntime(Runtime::kAbortCSAAssert, NoContextConstant(), message);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
@@ -907,6 +940,8 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
Node* object = Parameter(Descriptor::kObject);
Node* key = Parameter(Descriptor::kKey);
Node* context = Parameter(Descriptor::kContext);
+ // TODO(duongn): consider tailcalling to GetPropertyWithReceiver(object,
+ // object, key, OnNonExistent::kReturnUndefined).
Label if_notfound(this), if_proxy(this, Label::kDeferred),
if_slow(this, Label::kDeferred);
@@ -932,7 +967,7 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
Goto(if_bailout);
};
- TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ TryPrototypeChainLookup(object, object, key, lookup_property_in_holder,
lookup_element_in_holder, &if_notfound, &if_slow,
&if_proxy);
@@ -955,6 +990,74 @@ TF_BUILTIN(GetProperty, CodeStubAssembler) {
}
}
+// ES6 [[Get]] operation with Receiver.
+TF_BUILTIN(GetPropertyWithReceiver, CodeStubAssembler) {
+ Node* object = Parameter(Descriptor::kObject);
+ Node* key = Parameter(Descriptor::kKey);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* on_non_existent = Parameter(Descriptor::kOnNonExistent);
+ Label if_notfound(this), if_proxy(this, Label::kDeferred),
+ if_slow(this, Label::kDeferred);
+
+ CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+ [=](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name, Label* next_holder,
+ Label* if_bailout) {
+ VARIABLE(var_value, MachineRepresentation::kTagged);
+ Label if_found(this);
+ TryGetOwnProperty(context, receiver, holder, holder_map,
+ holder_instance_type, unique_name, &if_found,
+ &var_value, next_holder, if_bailout);
+ BIND(&if_found);
+ Return(var_value.value());
+ };
+
+ CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+ [=](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* index, Label* next_holder,
+ Label* if_bailout) {
+ // Not supported yet.
+ Use(next_holder);
+ Goto(if_bailout);
+ };
+
+ TryPrototypeChainLookup(receiver, object, key, lookup_property_in_holder,
+ lookup_element_in_holder, &if_notfound, &if_slow,
+ &if_proxy);
+
+ BIND(&if_notfound);
+ Label throw_reference_error(this);
+ GotoIf(WordEqual(on_non_existent,
+ SmiConstant(OnNonExistent::kThrowReferenceError)),
+ &throw_reference_error);
+ CSA_ASSERT(this, WordEqual(on_non_existent,
+ SmiConstant(OnNonExistent::kReturnUndefined)));
+ Return(UndefinedConstant());
+
+ BIND(&throw_reference_error);
+ Return(CallRuntime(Runtime::kThrowReferenceError, context, key));
+
+ BIND(&if_slow);
+ TailCallRuntime(Runtime::kGetPropertyWithReceiver, context, object, key,
+ receiver, on_non_existent);
+
+ BIND(&if_proxy);
+ {
+ // Convert the {key} to a Name first.
+ Node* name = CallBuiltin(Builtins::kToName, context, key);
+
+ // Proxy cannot handle private symbol so bailout.
+ GotoIf(IsPrivateSymbol(name), &if_slow);
+
+ // The {object} is a JSProxy instance, look up the {name} on it, passing
+ // {object} both as receiver and holder. If {name} is absent we can safely
+ // return undefined from here.
+ TailCallBuiltin(Builtins::kProxyGetProperty, context, object, name,
+ receiver, on_non_existent);
+ }
+}
+
// ES6 [[Set]] operation.
TF_BUILTIN(SetProperty, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 882afa3c32..ff8e96f4f5 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -276,15 +276,14 @@ Object LegacyFormatConstructor(BuiltinArguments args, Isolate* isolate,
// 2. Let format be ? OrdinaryCreateFromConstructor(newTarget,
// "%<T>Prototype%", ...).
- Handle<JSObject> obj;
+ Handle<Map> map;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<T> format = Handle<T>::cast(obj);
+ isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
// 3. Perform ? Initialize<T>(Format, locales, options).
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, format, T::Initialize(isolate, format, locales, options));
+ Handle<T> format;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, format,
+ T::New(isolate, map, locales, options));
// 4. Let this be the this value.
Handle<Object> receiver = args.receiver();
@@ -351,21 +350,17 @@ Object DisallowCallConstructor(BuiltinArguments args, Isolate* isolate,
Handle<JSFunction> target = args.target();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<JSObject> obj;
+ Handle<Map> map;
// 2. Let result be OrdinaryCreateFromConstructor(NewTarget,
// "%<T>Prototype%").
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<T> result = Handle<T>::cast(obj);
- result->set_flags(0);
+ isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
Handle<Object> locales = args.atOrUndefined(isolate, 1);
Handle<Object> options = args.atOrUndefined(isolate, 2);
- // 3. Return Initialize<T>(t, locales, options).
- RETURN_RESULT_OR_FAILURE(isolate,
- T::Initialize(isolate, result, locales, options));
+ // 3. Return New<T>(t, locales, options).
+ RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options));
}
/**
@@ -387,14 +382,11 @@ Object CallOrConstructConstructor(BuiltinArguments args, Isolate* isolate) {
Handle<Object> locales = args.atOrUndefined(isolate, 1);
Handle<Object> options = args.atOrUndefined(isolate, 2);
- Handle<JSObject> obj;
+ Handle<Map> map;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<T> result = Handle<T>::cast(obj);
+ isolate, map, JSFunction::GetDerivedMap(isolate, target, new_target));
- RETURN_RESULT_OR_FAILURE(isolate,
- T::Initialize(isolate, result, locales, options));
+ RETURN_RESULT_OR_FAILURE(isolate, T::New(isolate, map, locales, options));
}
} // namespace
@@ -591,12 +583,11 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
Handle<JSFunction> constructor,
Handle<JSReceiver> new_target,
Handle<Object> tag, Handle<Object> options) {
- Handle<JSObject> locale;
+ Handle<Map> map;
// 6. Let locale be ? OrdinaryCreateFromConstructor(NewTarget,
// %LocalePrototype%, internalSlotsList).
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, locale,
- JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
+ isolate, map, JSFunction::GetDerivedMap(isolate, constructor, new_target),
JSLocale);
// 7. If Type(tag) is not String or Object, throw a TypeError exception.
@@ -628,8 +619,7 @@ MaybeHandle<JSLocale> CreateLocale(Isolate* isolate,
Object::ToObject(isolate, options), JSLocale);
}
- return JSLocale::Initialize(isolate, Handle<JSLocale>::cast(locale),
- locale_string, options_object);
+ return JSLocale::New(isolate, map, locale_string, options_object);
}
} // namespace
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 0484501bfb..b3d8e27dbc 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -75,7 +75,7 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
}
}
-TNode<Object> IteratorBuiltinsAssembler::IteratorStep(
+TNode<JSReceiver> IteratorBuiltinsAssembler::IteratorStep(
Node* context, const IteratorRecord& iterator, Label* if_done,
Node* fast_iterator_result_map, Label* if_exception, Variable* exception) {
DCHECK_NOT_NULL(if_done);
@@ -125,23 +125,21 @@ TNode<Object> IteratorBuiltinsAssembler::IteratorStep(
}
BIND(&return_result);
- return UncheckedCast<Object>(result);
+ return CAST(result);
}
-Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
- Node* fast_iterator_result_map,
- Label* if_exception,
- Variable* exception) {
- CSA_ASSERT(this, IsJSReceiver(result));
-
+TNode<Object> IteratorBuiltinsAssembler::IteratorValue(
+ TNode<Context> context, TNode<JSReceiver> result,
+ base::Optional<TNode<Map>> fast_iterator_result_map, Label* if_exception,
+ Variable* exception) {
Label exit(this);
- VARIABLE(var_value, MachineRepresentation::kTagged);
- if (fast_iterator_result_map != nullptr) {
+ TVARIABLE(Object, var_value);
+ if (fast_iterator_result_map) {
// Fast iterator result case:
Label if_generic(this);
Node* map = LoadMap(result);
- GotoIfNot(WordEqual(map, fast_iterator_result_map), &if_generic);
- var_value.Bind(LoadObjectField(result, JSIteratorResult::kValueOffset));
+ GotoIfNot(WordEqual(map, *fast_iterator_result_map), &if_generic);
+ var_value = LoadObjectField(result, JSIteratorResult::kValueOffset);
Goto(&exit);
BIND(&if_generic);
@@ -149,9 +147,10 @@ Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result,
// Generic iterator result case:
{
- Node* value = GetProperty(context, result, factory()->value_string());
+ TNode<Object> value =
+ GetProperty(context, result, factory()->value_string());
GotoIfException(value, if_exception, exception);
- var_value.Bind(value);
+ var_value = value;
Goto(&exit);
}
@@ -217,10 +216,10 @@ TNode<JSArray> IteratorBuiltinsAssembler::IterableToList(
BIND(&loop_start);
{
// a. Set next to ? IteratorStep(iteratorRecord).
- TNode<Object> next = IteratorStep(context, iterator_record, &done);
+ TNode<JSReceiver> next = IteratorStep(context, iterator_record, &done);
// b. If next is not false, then
// i. Let nextValue be ? IteratorValue(next).
- TNode<Object> next_value = CAST(IteratorValue(context, next));
+ TNode<Object> next_value = IteratorValue(context, next);
// ii. Append nextValue to the end of the List values.
values.Push(next_value);
Goto(&loop_start);
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index cf421dc5b7..db86c65385 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -32,18 +32,19 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorstep
- // Returns `false` if the iterator is done, otherwise returns an
- // iterator result.
+ // If the iterator is done, goto {if_done}, otherwise returns an iterator
+ // result.
// `fast_iterator_result_map` refers to the map for the JSIteratorResult
// object, loaded from the native context.
- TNode<Object> IteratorStep(Node* context, const IteratorRecord& iterator,
- Label* if_done,
- Node* fast_iterator_result_map = nullptr,
- Label* if_exception = nullptr,
- Variable* exception = nullptr);
-
- TNode<Object> IteratorStep(Node* context, const IteratorRecord& iterator,
- Node* fast_iterator_result_map, Label* if_done) {
+ TNode<JSReceiver> IteratorStep(Node* context, const IteratorRecord& iterator,
+ Label* if_done,
+ Node* fast_iterator_result_map = nullptr,
+ Label* if_exception = nullptr,
+ Variable* exception = nullptr);
+
+ TNode<JSReceiver> IteratorStep(Node* context, const IteratorRecord& iterator,
+ Node* fast_iterator_result_map,
+ Label* if_done) {
return IteratorStep(context, iterator, if_done, fast_iterator_result_map);
}
@@ -51,10 +52,10 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler {
// Return the `value` field from an iterator.
// `fast_iterator_result_map` refers to the map for the JSIteratorResult
// object, loaded from the native context.
- Node* IteratorValue(Node* context, Node* result,
- Node* fast_iterator_result_map = nullptr,
- Label* if_exception = nullptr,
- Variable* exception = nullptr);
+ TNode<Object> IteratorValue(
+ TNode<Context> context, TNode<JSReceiver> result,
+ base::Optional<TNode<Map>> fast_iterator_result_map = base::nullopt,
+ Label* if_exception = nullptr, Variable* exception = nullptr);
// https://tc39.github.io/ecma262/#sec-iteratorclose
void IteratorCloseOnException(Node* context, const IteratorRecord& iterator,
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index 6d3274a4a5..cce780ab9f 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -20,7 +20,6 @@ BUILTIN(MathHypot) {
if (length == 0) return Smi::kZero;
DCHECK_LT(0, length);
double max = 0;
- bool one_arg_is_nan = false;
std::vector<double> abs_values;
abs_values.reserve(length);
for (int i = 0; i < length; i++) {
@@ -28,29 +27,20 @@ BUILTIN(MathHypot) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x,
Object::ToNumber(isolate, x));
double abs_value = std::abs(x->Number());
-
- if (std::isnan(abs_value)) {
- one_arg_is_nan = true;
- } else {
- abs_values.push_back(abs_value);
- if (max < abs_value) {
- max = abs_value;
- }
+ abs_values.push_back(abs_value);
+ // Use negation here to make sure that {max} is NaN
+ // in the end in case any of the arguments was NaN.
+ if (!(abs_value <= max)) {
+ max = abs_value;
}
}
- if (max == V8_INFINITY) {
- return *isolate->factory()->NewNumber(V8_INFINITY);
- }
-
- if (one_arg_is_nan) {
- return ReadOnlyRoots(isolate).nan_value();
- }
-
if (max == 0) {
return Smi::kZero;
+ } else if (max == V8_INFINITY) {
+ return ReadOnlyRoots(isolate).infinity_value();
}
- DCHECK_GT(max, 0);
+ DCHECK(!(max <= 0));
// Kahan summation to avoid rounding errors.
// Normalize the numbers to the largest one to avoid overflow.
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 5b3af79f00..f5c4477c23 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -315,8 +315,8 @@ TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
// ES6 #sec-number.prototype.valueof
TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* result = ToThisValue(context, receiver, PrimitiveType::kNumber,
"Number.prototype.valueOf");
@@ -538,8 +538,8 @@ TF_BUILTIN(Add, AddStubAssembler) {
BIND(&do_bigint_add);
{
- Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(),
- var_right.value(), SmiConstant(Operation::kAdd)));
+ TailCallBuiltin(Builtins::kBigIntAdd, context, var_left.value(),
+ var_right.value());
}
BIND(&do_double_add);
@@ -996,8 +996,8 @@ TF_BUILTIN(Equal, CodeStubAssembler) {
}
TF_BUILTIN(StrictEqual, CodeStubAssembler) {
- Node* lhs = Parameter(Descriptor::kLeft);
- Node* rhs = Parameter(Descriptor::kRight);
+ TNode<Object> lhs = CAST(Parameter(Descriptor::kLeft));
+ TNode<Object> rhs = CAST(Parameter(Descriptor::kRight));
Return(StrictEqual(lhs, rhs));
}
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 929e686604..d2fb0ff74c 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -25,8 +25,8 @@ BUILTIN(NumberPrototypeToExponential) {
Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -70,8 +70,8 @@ BUILTIN(NumberPrototypeToFixed) {
Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -117,8 +117,8 @@ BUILTIN(NumberPrototypeToLocaleString) {
Handle<Object> value = args.at(0);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
// 1. Let x be ? thisNumberValue(this value)
if (!value->IsNumber()) {
@@ -147,8 +147,8 @@ BUILTIN(NumberPrototypeToPrecision) {
Handle<Object> precision = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -192,8 +192,8 @@ BUILTIN(NumberPrototypeToString) {
Handle<Object> radix = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
- if (value->IsJSValue()) {
- value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ if (value->IsJSPrimitiveWrapper()) {
+ value = handle(Handle<JSPrimitiveWrapper>::cast(value)->value(), isolate);
}
if (!value->IsNumber()) {
THROW_NEW_ERROR_RETURN_FAILURE(
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 314331d498..8d59ee3bd1 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -65,8 +65,6 @@ class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
TNode<BoolT> IsPropertyKindData(TNode<Uint32T> kind);
- TNode<Uint32T> HasHiddenPrototype(TNode<Map> map);
-
TNode<Uint32T> LoadPropertyKind(TNode<Uint32T> details) {
return DecodeWord32<PropertyDetails::KindField>(details);
}
@@ -185,12 +183,6 @@ TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
return Word32Equal(kind, Int32Constant(PropertyKind::kData));
}
-TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
- TNode<Map> map) {
- TNode<Uint32T> bit_field2 = Unsigned(LoadMapBitField2(map));
- return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field2);
-}
-
void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
TNode<Context> context, TNode<Object> maybe_object,
CollectType collect_type) {
@@ -254,7 +246,6 @@ void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
TNode<Map> map, Label* if_slow) {
GotoIf(IsStringWrapperElementsKind(map), if_slow);
GotoIf(IsSpecialReceiverMap(map), if_slow);
- GotoIf(HasHiddenPrototype(map), if_slow);
GotoIf(IsDictionaryMap(map), if_slow);
}
@@ -602,9 +593,19 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
if_fast(this), try_fast(this, Label::kDeferred),
if_slow(this, Label::kDeferred), if_join(this);
- // Check if the {object} has a usable enum cache.
+ // Take the slow path if the {object} IsCustomElementsReceiverInstanceType or
+ // has any elements.
GotoIf(TaggedIsSmi(object), &if_slow);
Node* object_map = LoadMap(object);
+ TNode<Int32T> instance_type = LoadMapInstanceType(object_map);
+ GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_slow);
+ Node* object_elements = LoadElements(object);
+ GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
+ Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
+ &if_slow);
+
+ // Check if the {object} has a usable enum cache.
+ BIND(&if_empty_elements);
Node* object_bit_field3 = LoadMapBitField3(object_map);
Node* object_enum_length =
DecodeWordFromWord32<Map::EnumLengthBits>(object_bit_field3);
@@ -612,15 +613,7 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) {
WordEqual(object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel)),
&try_fast);
- // Ensure that the {object} doesn't have any elements.
- CSA_ASSERT(this, IsJSObjectMap(object_map));
- Node* object_elements = LoadElements(object);
- GotoIf(IsEmptyFixedArray(object_elements), &if_empty_elements);
- Branch(IsEmptySlowElementDictionary(object_elements), &if_empty_elements,
- &if_slow);
-
// Check whether all own properties are enumerable.
- BIND(&if_empty_elements);
Node* number_descriptors =
DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(object_bit_field3);
GotoIfNot(WordEqual(object_enum_length, number_descriptors), &if_slow);
@@ -728,11 +721,11 @@ TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) {
// invoke the ToObject builtin, which raises the appropriate error.
// Otherwise we don't need to invoke ToObject, since {receiver} is
// either already a JSReceiver, in which case ToObject is a no-op,
- // or it's a Primitive and ToObject would allocate a fresh JSValue
+ // or it's a Primitive and ToObject would allocate a fresh JSPrimitiveWrapper
// wrapper, which wouldn't be identical to any existing JSReceiver
// found in the prototype chain of {value}, hence it will return
// false no matter if we search for the Primitive {receiver} or
- // a newly allocated JSValue wrapper for {receiver}.
+ // a newly allocated JSPrimitiveWrapper wrapper for {receiver}.
GotoIf(IsNull(receiver), &if_receiverisnullorundefined);
GotoIf(IsUndefined(receiver), &if_receiverisnullorundefined);
@@ -794,7 +787,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
{JS_SPECIAL_API_OBJECT_TYPE, &if_apiobject},
{JS_PROXY_TYPE, &if_proxy},
{JS_ERROR_TYPE, &if_error},
- {JS_VALUE_TYPE, &if_value}};
+ {JS_PRIMITIVE_WRAPPER_TYPE, &if_value}};
size_t const kNumCases = arraysize(kJumpTable);
Label* case_labels[kNumCases];
int32_t case_values[kNumCases];
@@ -996,7 +989,7 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) {
if_value_is_bigint(this, Label::kDeferred),
if_value_is_string(this, Label::kDeferred);
- Node* receiver_value = LoadJSValueValue(receiver);
+ Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver);
// We need to start with the object to see if the value was a subclass
// which might have interesting properties.
var_holder.Bind(receiver);
@@ -1346,10 +1339,15 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(
result, JSGeneratorObject::kParametersAndRegistersOffset,
parameters_and_registers);
+ Node* resume_mode = SmiConstant(JSGeneratorObject::ResumeMode::kNext);
+ StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kResumeModeOffset,
+ resume_mode);
Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
executing);
- GotoIfNot(HasInstanceType(maybe_map, JS_ASYNC_GENERATOR_OBJECT_TYPE), &done);
+ GotoIfNot(InstanceTypeEqual(LoadMapInstanceType(maybe_map),
+ JS_ASYNC_GENERATOR_OBJECT_TYPE),
+ &done);
StoreObjectFieldNoWriteBarrier(
result, JSAsyncGeneratorObject::kIsAwaitingOffset, SmiConstant(0));
Goto(&done);
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 59e4373f98..1ca5fffd8d 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -5,7 +5,7 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-factory.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/logging/counters.h"
#include "src/objects/keys.h"
@@ -218,52 +218,6 @@ BUILTIN(ObjectFreeze) {
return *object;
}
-// ES section 19.1.2.9 Object.getPrototypeOf ( O )
-BUILTIN(ObjectGetPrototypeOf) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
-
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
-
- RETURN_RESULT_OR_FAILURE(isolate,
- JSReceiver::GetPrototype(isolate, receiver));
-}
-
-// ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
-BUILTIN(ObjectSetPrototypeOf) {
- HandleScope scope(isolate);
-
- // 1. Let O be ? RequireObjectCoercible(O).
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsNullOrUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
- isolate->factory()->NewStringFromAsciiChecked(
- "Object.setPrototypeOf")));
- }
-
- // 2. If Type(proto) is neither Object nor Null, throw a TypeError exception.
- Handle<Object> proto = args.atOrUndefined(isolate, 2);
- if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
- }
-
- // 3. If Type(O) is not Object, return O.
- if (!object->IsJSReceiver()) return *object;
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-
- // 4. Let status be ? O.[[SetPrototypeOf]](proto).
- // 5. If status is false, throw a TypeError exception.
- MAYBE_RETURN(JSReceiver::SetPrototype(receiver, proto, true, kThrowOnError),
- ReadOnlyRoots(isolate).exception());
-
- // 6. Return O.
- return *receiver;
-}
-
// ES6 section B.2.2.1.1 get Object.prototype.__proto__
BUILTIN(ObjectPrototypeGetProto) {
HandleScope scope(isolate);
@@ -332,18 +286,6 @@ BUILTIN(ObjectGetOwnPropertySymbols) {
return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
}
-// ES6 section 19.1.2.11 Object.isExtensible ( O )
-BUILTIN(ObjectIsExtensible) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Maybe<bool> result =
- object->IsJSReceiver()
- ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
- : Just(false);
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return isolate->heap()->ToBoolean(result.FromJust());
-}
-
// ES6 section 19.1.2.12 Object.isFrozen ( O )
BUILTIN(ObjectIsFrozen) {
HandleScope scope(isolate);
@@ -403,18 +345,6 @@ BUILTIN(ObjectGetOwnPropertyDescriptors) {
return *descriptors;
}
-// ES6 section 19.1.2.15 Object.preventExtensions ( O )
-BUILTIN(ObjectPreventExtensions) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsJSReceiver()) {
- MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
- kThrowOnError),
- ReadOnlyRoots(isolate).exception());
- }
- return *object;
-}
-
// ES6 section 19.1.2.17 Object.seal ( O )
BUILTIN(ObjectSeal) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index ad70fb1dd1..1339e2dccd 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -2062,7 +2062,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
TNode<Object> resolve =
GetProperty(native_context, constructor, factory()->resolve_string());
- GotoIfException(resolve, if_exception, var_exception);
+ GotoIfException(resolve, &close_iterator, var_exception);
// 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
// exception.
@@ -2077,9 +2077,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// Let next be IteratorStep(iteratorRecord.[[Iterator]]).
// If next is an abrupt completion, set iteratorRecord.[[Done]] to true.
// ReturnIfAbrupt(next).
- Node* const fast_iterator_result_map =
- LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
- Node* const next = iter_assembler.IteratorStep(
+ TNode<Map> const fast_iterator_result_map = CAST(
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX));
+ TNode<JSReceiver> const next = iter_assembler.IteratorStep(
native_context, iterator, &done_loop, fast_iterator_result_map,
if_exception, var_exception);
@@ -2087,7 +2087,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
// true.
// ReturnIfAbrupt(nextValue).
- Node* const next_value = iter_assembler.IteratorValue(
+ TNode<Object> const next_value = iter_assembler.IteratorValue(
native_context, next, fast_iterator_result_map, if_exception,
var_exception);
@@ -2148,7 +2148,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
&if_slow);
GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow);
GotoIf(TaggedIsSmi(next_value), &if_slow);
- Node* const next_value_map = LoadMap(next_value);
+ Node* const next_value_map = LoadMap(CAST(next_value));
BranchIfPromiseThenLookupChainIntact(native_context, next_value_map,
&if_fast, &if_slow);
@@ -2526,8 +2526,7 @@ TF_BUILTIN(PromiseAllSettledResolveElementClosure, PromiseBuiltinsAssembler) {
LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
TNode<Map> object_function_map = Cast(LoadObjectField(
object_function, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<JSObject> obj =
- Cast(AllocateJSObjectFromMap(object_function_map));
+ TNode<JSObject> obj = AllocateJSObjectFromMap(object_function_map);
// 10. Perform ! CreateDataProperty(obj, "status", "fulfilled").
CallBuiltin(Builtins::kFastCreateDataProperty, context, obj,
@@ -2557,8 +2556,7 @@ TF_BUILTIN(PromiseAllSettledRejectElementClosure, PromiseBuiltinsAssembler) {
LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX));
TNode<Map> object_function_map = Cast(LoadObjectField(
object_function, JSFunction::kPrototypeOrInitialMapOffset));
- TNode<JSObject> obj =
- Cast(AllocateJSObjectFromMap(object_function_map));
+ TNode<JSObject> obj = AllocateJSObjectFromMap(object_function_map);
// 10. Perform ! CreateDataProperty(obj, "status", "rejected").
CallBuiltin(Builtins::kFastCreateDataProperty, context, obj,
@@ -2579,7 +2577,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant());
Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Context> const context = CAST(Parameter(Descriptor::kContext));
ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
"Promise.race");
@@ -2626,11 +2624,11 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
TNode<Object> resolve =
GetProperty(native_context, receiver, factory()->resolve_string());
- GotoIfException(resolve, &reject_promise, &var_exception);
+ GotoIfException(resolve, &close_iterator, &var_exception);
// 4. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
// exception.
- ThrowIfNotCallable(CAST(context), resolve, "resolve");
+ ThrowIfNotCallable(context, resolve, "resolve");
var_promise_resolve_function = resolve;
Goto(&loop);
@@ -2638,13 +2636,13 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
BIND(&loop);
{
- Node* const fast_iterator_result_map = LoadContextElement(
- native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ TNode<Map> const fast_iterator_result_map = CAST(LoadContextElement(
+ native_context, Context::ITERATOR_RESULT_MAP_INDEX));
// Let next be IteratorStep(iteratorRecord.[[Iterator]]).
// If next is an abrupt completion, set iteratorRecord.[[Done]] to true.
// ReturnIfAbrupt(next).
- Node* const next = iter_assembler.IteratorStep(
+ TNode<JSReceiver> const next = iter_assembler.IteratorStep(
context, iterator, &break_loop, fast_iterator_result_map,
&reject_promise, &var_exception);
@@ -2652,7 +2650,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
// true.
// ReturnIfAbrupt(nextValue).
- Node* const next_value =
+ TNode<Object> const next_value =
iter_assembler.IteratorValue(context, next, fast_iterator_result_map,
&reject_promise, &var_exception);
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index a1a2f6308f..948540ea5f 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -13,8 +13,9 @@
namespace v8 {
namespace internal {
-Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
- Node* context) {
+compiler::TNode<JSProxy> ProxiesCodeStubAssembler::AllocateProxy(
+ TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSReceiver> handler) {
VARIABLE(map, MachineRepresentation::kTagged);
Label callable_target(this), constructor_target(this), none_target(this),
@@ -53,7 +54,7 @@ Node* ProxiesCodeStubAssembler::AllocateProxy(Node* target, Node* handler,
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kTargetOffset, target);
StoreObjectFieldNoWriteBarrier(proxy, JSProxy::kHandlerOffset, handler);
- return proxy;
+ return CAST(proxy);
}
Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
@@ -121,8 +122,9 @@ Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
return context;
}
-Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy,
- Node* context) {
+compiler::TNode<JSFunction>
+ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(TNode<Context> context,
+ TNode<JSProxy> proxy) {
Node* const native_context = LoadNativeContext(context);
Node* const proxy_context =
@@ -132,13 +134,8 @@ Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy,
Node* const revoke_info =
LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN);
- return AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
- proxy_context);
-}
-
-Node* ProxiesCodeStubAssembler::GetProxyConstructorJSNewTarget() {
- return CodeAssembler::Parameter(static_cast<int>(
- Builtin_ProxyConstructor_InterfaceDescriptor::kJSNewTarget));
+ return CAST(AllocateFunctionWithMapAndContext(revoke_map, revoke_info,
+ proxy_context));
}
TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
@@ -262,9 +259,11 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
{ ThrowTypeError(context, MessageTemplate::kProxyRevoked, "construct"); }
}
-Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
- Node* context, Node* target, Node* proxy, Node* name, Node* trap_result,
+void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
+ TNode<Context> context, TNode<JSReceiver> target, TNode<JSProxy> proxy,
+ TNode<Name> name, TNode<Object> trap_result,
JSProxy::AccessKind access_kind) {
+ // TODO(mslekova): Think of a better name for the trap_result param.
Node* map = LoadMap(target);
VARIABLE(var_value, MachineRepresentation::kTagged);
VARIABLE(var_details, MachineRepresentation::kWord32);
@@ -273,7 +272,7 @@ Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
Label if_found_value(this), check_in_runtime(this, Label::kDeferred),
check_passed(this);
- GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
+ GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
Node* instance_type = LoadInstanceType(target);
TryGetOwnProperty(context, target, target, map, instance_type, name,
&if_found_value, &var_value, &var_details, &var_raw_value,
@@ -366,12 +365,13 @@ Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
}
BIND(&check_passed);
- return trap_result;
}
}
-Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
- Node* proxy, Node* name) {
+void ProxiesCodeStubAssembler::CheckHasTrapResult(TNode<Context> context,
+ TNode<JSReceiver> target,
+ TNode<JSProxy> proxy,
+ TNode<Name> name) {
Node* target_map = LoadMap(target);
VARIABLE(var_value, MachineRepresentation::kTagged);
VARIABLE(var_details, MachineRepresentation::kWord32);
@@ -383,7 +383,7 @@ Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
check_in_runtime(this, Label::kDeferred);
// 9.a. Let targetDesc be ? target.[[GetOwnProperty]](P).
- GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
+ GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
Node* instance_type = LoadInstanceType(target);
TryGetOwnProperty(context, target, target, target_map, instance_type, name,
&if_found_value, &var_value, &var_details, &var_raw_value,
@@ -419,7 +419,64 @@ Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
}
BIND(&check_passed);
- return FalseConstant();
+}
+
+void ProxiesCodeStubAssembler::CheckDeleteTrapResult(TNode<Context> context,
+ TNode<JSReceiver> target,
+ TNode<JSProxy> proxy,
+ TNode<Name> name) {
+ TNode<Map> target_map = LoadMap(target);
+ TVARIABLE(Object, var_value);
+ TVARIABLE(Uint32T, var_details);
+ TVARIABLE(Object, var_raw_value);
+
+ Label if_found_value(this, Label::kDeferred),
+ throw_non_configurable(this, Label::kDeferred),
+ throw_non_extensible(this, Label::kDeferred), check_passed(this),
+ check_in_runtime(this, Label::kDeferred);
+
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ GotoIfNot(IsUniqueNameNoIndex(name), &check_in_runtime);
+ TNode<Int32T> instance_type = LoadInstanceType(target);
+ TryGetOwnProperty(context, target, target, target_map, instance_type, name,
+ &if_found_value, &var_value, &var_details, &var_raw_value,
+ &check_passed, &check_in_runtime, kReturnAccessorPair);
+
+ // 11. If targetDesc is undefined, return true.
+ BIND(&if_found_value);
+ {
+ // 12. If targetDesc.[[Configurable]] is false, throw a TypeError exception.
+ TNode<BoolT> non_configurable = IsSetWord32(
+ var_details.value(), PropertyDetails::kAttributesDontDeleteMask);
+ GotoIf(non_configurable, &throw_non_configurable);
+
+ // 13. Let extensibleTarget be ? IsExtensible(target).
+ TNode<BoolT> target_extensible = IsExtensibleMap(target_map);
+
+ // 14. If extensibleTarget is false, throw a TypeError exception.
+ GotoIfNot(target_extensible, &throw_non_extensible);
+ Goto(&check_passed);
+ }
+
+ BIND(&throw_non_configurable);
+ {
+ ThrowTypeError(context,
+ MessageTemplate::kProxyDeletePropertyNonConfigurable, name);
+ }
+
+ BIND(&throw_non_extensible);
+ {
+ ThrowTypeError(context, MessageTemplate::kProxyDeletePropertyNonExtensible,
+ name);
+ }
+
+ BIND(&check_in_runtime);
+ {
+ CallRuntime(Runtime::kCheckProxyDeleteTrapResult, context, name, target);
+ Goto(&check_passed);
+ }
+
+ BIND(&check_passed);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index fcaac7df66..cb51faf575 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -17,19 +17,21 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- Node* AllocateProxy(Node* target, Node* handler, Node* context);
- Node* AllocateProxyRevokeFunction(Node* proxy, Node* context);
+ TNode<JSProxy> AllocateProxy(TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSReceiver> handler);
+ TNode<JSFunction> AllocateProxyRevokeFunction(TNode<Context> context,
+ TNode<JSProxy> proxy);
- // Get JSNewTarget parameter for ProxyConstructor builtin (Torque).
- // TODO(v8:9120): Remove this once torque support exists
- Node* GetProxyConstructorJSNewTarget();
+ void CheckGetSetTrapResult(TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSProxy> proxy, TNode<Name> name,
+ TNode<Object> trap_result,
+ JSProxy::AccessKind access_kind);
- Node* CheckGetSetTrapResult(Node* context, Node* target, Node* proxy,
- Node* name, Node* trap_result,
- JSProxy::AccessKind access_kind);
+ void CheckHasTrapResult(TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSProxy> proxy, TNode<Name> name);
- Node* CheckHasTrapResult(Node* context, Node* target, Node* proxy,
- Node* name);
+ void CheckDeleteTrapResult(TNode<Context> context, TNode<JSReceiver> target,
+ TNode<JSProxy> proxy, TNode<Name> name);
protected:
enum ProxyRevokeFunctionContextSlot {
@@ -37,9 +39,10 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
kProxyContextLength,
};
- Node* AllocateJSArrayForCodeStubArguments(Node* context,
- CodeStubArguments& args, Node* argc,
- ParameterMode mode);
+ Node* AllocateJSArrayForCodeStubArguments(
+ Node* context,
+ CodeStubArguments& args, // NOLINT(runtime/references)
+ Node* argc, ParameterMode mode);
private:
Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context);
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index e998652dad..6151fcbd47 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -46,53 +46,6 @@ BUILTIN(ReflectDefineProperty) {
return *isolate->factory()->ToBoolean(result.FromJust());
}
-// ES6 section 26.1.4 Reflect.deleteProperty
-BUILTIN(ReflectDeleteProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at(1);
- Handle<Object> key = args.at(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.deleteProperty")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
- Handle<JSReceiver>::cast(target), name, LanguageMode::kSloppy);
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
-// ES6 section 26.1.6 Reflect.get
-BUILTIN(ReflectGet) {
- HandleScope scope(isolate);
- Handle<Object> target = args.atOrUndefined(isolate, 1);
- Handle<Object> key = args.atOrUndefined(isolate, 2);
- Handle<Object> receiver = args.length() > 3 ? args.at(3) : target;
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.get")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- RETURN_RESULT_OR_FAILURE(
- isolate, Object::GetPropertyOrElement(receiver, name,
- Handle<JSReceiver>::cast(target)));
-}
-
// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
BUILTIN(ReflectGetOwnPropertyDescriptor) {
HandleScope scope(isolate);
@@ -119,42 +72,6 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) {
return *desc.ToObject(isolate);
}
-// ES6 section 26.1.8 Reflect.getPrototypeOf
-BUILTIN(ReflectGetPrototypeOf) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.getPrototypeOf")));
- }
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(target);
- RETURN_RESULT_OR_FAILURE(isolate,
- JSReceiver::GetPrototype(isolate, receiver));
-}
-
-// ES6 section 26.1.10 Reflect.isExtensible
-BUILTIN(ReflectIsExtensible) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.isExtensible")));
- }
-
- Maybe<bool> result =
- JSReceiver::IsExtensible(Handle<JSReceiver>::cast(target));
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
// ES6 section 26.1.11 Reflect.ownKeys
BUILTIN(ReflectOwnKeys) {
HandleScope scope(isolate);
@@ -177,25 +94,6 @@ BUILTIN(ReflectOwnKeys) {
return *isolate->factory()->NewJSArrayWithElements(keys);
}
-// ES6 section 26.1.12 Reflect.preventExtensions
-BUILTIN(ReflectPreventExtensions) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.preventExtensions")));
- }
-
- Maybe<bool> result = JSReceiver::PreventExtensions(
- Handle<JSReceiver>::cast(target), kDontThrow);
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
// ES6 section 26.1.13 Reflect.set
BUILTIN(ReflectSet) {
HandleScope scope(isolate);
@@ -223,30 +121,5 @@ BUILTIN(ReflectSet) {
return *isolate->factory()->ToBoolean(result.FromJust());
}
-// ES6 section 26.1.14 Reflect.setPrototypeOf
-BUILTIN(ReflectSetPrototypeOf) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at(1);
- Handle<Object> proto = args.at(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.setPrototypeOf")));
- }
-
- if (!proto->IsJSReceiver() && !proto->IsNull(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
- }
-
- Maybe<bool> result = JSReceiver::SetPrototype(
- Handle<JSReceiver>::cast(target), proto, true, kDontThrow);
- MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 51ee2796e6..d53518ff7e 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -15,7 +15,7 @@
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
@@ -94,12 +94,12 @@ TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(TNode<Context> context,
TNode<String> pattern = Select<String>(
IsUndefined(maybe_string), [=] { return EmptyStringConstant(); },
[=] { return ToString_Inline(context, maybe_string); });
- TNode<Object> regexp = CAST(AllocateJSObjectFromMap(initial_map));
+ TNode<JSObject> regexp = AllocateJSObjectFromMap(initial_map);
return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp,
pattern, flags);
}
-TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndex(
+TNode<Object> RegExpBuiltinsAssembler::FastLoadLastIndexBeforeSmiCheck(
TNode<JSRegExp> regexp) {
// Load the in-object field.
static const int field_offset =
@@ -121,23 +121,27 @@ TNode<Object> RegExpBuiltinsAssembler::LoadLastIndex(TNode<Context> context,
// The fast-path of StoreLastIndex when regexp is guaranteed to be an unmodified
// JSRegExp instance.
-void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) {
+void RegExpBuiltinsAssembler::FastStoreLastIndex(TNode<JSRegExp> regexp,
+ TNode<Smi> value) {
// Store the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kTaggedSize;
StoreObjectField(regexp, field_offset, value);
}
-void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
- Node* value) {
- Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
- SetPropertyStrict(CAST(context), CAST(regexp), CAST(name), CAST(value));
+void RegExpBuiltinsAssembler::SlowStoreLastIndex(SloppyTNode<Context> context,
+ SloppyTNode<Object> regexp,
+ SloppyTNode<Number> value) {
+ TNode<Name> name = HeapConstant(isolate()->factory()->lastIndex_string());
+ SetPropertyStrict(context, regexp, name, value);
}
-void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
- Node* value, bool is_fastpath) {
+void RegExpBuiltinsAssembler::StoreLastIndex(TNode<Context> context,
+ TNode<Object> regexp,
+ TNode<Number> value,
+ bool is_fastpath) {
if (is_fastpath) {
- FastStoreLastIndex(regexp, value);
+ FastStoreLastIndex(CAST(regexp), CAST(value));
} else {
SlowStoreLastIndex(context, regexp, value);
}
@@ -248,10 +252,10 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<Context> native_context = LoadNativeContext(context);
TNode<Map> map = CAST(LoadContextElement(
native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
- TNode<NameDictionary> properties = AllocateNameDictionary(num_properties);
+ TNode<NameDictionary> properties =
+ AllocateNameDictionary(num_properties, kAllowLargeObjectAllocation);
- TNode<JSObject> group_object =
- CAST(AllocateJSObjectFromMap(map, properties));
+ TNode<JSObject> group_object = AllocateJSObjectFromMap(map, properties);
StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object);
TVARIABLE(IntPtrT, var_i, IntPtrZero());
@@ -534,19 +538,18 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
// We expect exactly one result since we force the called regexp to behave
// as non-global.
TNode<IntPtrT> int_result = ChangeInt32ToIntPtr(result);
+ GotoIf(
+ IntPtrEqual(int_result, IntPtrConstant(RegExp::kInternalRegExpSuccess)),
+ &if_success);
+ GotoIf(
+ IntPtrEqual(int_result, IntPtrConstant(RegExp::kInternalRegExpFailure)),
+ &if_failure);
GotoIf(IntPtrEqual(int_result,
- IntPtrConstant(NativeRegExpMacroAssembler::SUCCESS)),
- &if_success);
- GotoIf(IntPtrEqual(int_result,
- IntPtrConstant(NativeRegExpMacroAssembler::FAILURE)),
- &if_failure);
- GotoIf(IntPtrEqual(int_result,
- IntPtrConstant(NativeRegExpMacroAssembler::EXCEPTION)),
+ IntPtrConstant(RegExp::kInternalRegExpException)),
&if_exception);
- CSA_ASSERT(this,
- IntPtrEqual(int_result,
- IntPtrConstant(NativeRegExpMacroAssembler::RETRY)));
+ CSA_ASSERT(this, IntPtrEqual(int_result,
+ IntPtrConstant(RegExp::kInternalRegExpRetry)));
Goto(&runtime);
}
@@ -755,7 +758,7 @@ RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
GotoIfNot(should_update_last_index, &out);
// Update the new last index from {match_indices}.
- TNode<Number> new_lastindex = CAST(UnsafeLoadFixedArrayElement(
+ TNode<Smi> new_lastindex = CAST(UnsafeLoadFixedArrayElement(
CAST(match_indices), RegExpMatchInfo::kFirstCaptureIndex + 1));
StoreLastIndex(context, regexp, new_lastindex, is_fastpath);
@@ -852,7 +855,7 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- Node* const last_index = FastLoadLastIndex(CAST(object));
+ TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object));
var_result.Bind(TaggedIsPositiveSmi(last_index));
Goto(&out);
@@ -897,7 +900,7 @@ TNode<BoolT> RegExpBuiltinsAssembler::IsFastRegExpWithOriginalExec(
BIND(&check_last_index);
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- TNode<Object> last_index = FastLoadLastIndex(object);
+ TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(object);
var_result = TaggedIsPositiveSmi(last_index);
Goto(&out);
@@ -925,9 +928,9 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(
// This should only be needed for String.p.(split||matchAll), but we are
// conservative here.
- GotoIf(IsRegExpSpeciesProtectorCellInvalid(), if_ismodified);
+ TNode<Context> native_context = LoadNativeContext(context);
+ GotoIf(IsRegExpSpeciesProtectorCellInvalid(native_context), if_ismodified);
- Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
Node* const initial_map =
@@ -954,7 +957,7 @@ void RegExpBuiltinsAssembler::BranchIfFastRegExp(
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- Node* const last_index = FastLoadLastIndex(CAST(object));
+ TNode<Object> last_index = FastLoadLastIndexBeforeSmiCheck(CAST(object));
Branch(TaggedIsPositiveSmi(last_index), if_isunmodified, if_ismodified);
}
@@ -1012,7 +1015,7 @@ TF_BUILTIN(RegExpPrototypeExecSlow, RegExpBuiltinsAssembler) {
// Fast path stub for ATOM regexps. String matching is done by StringIndexOf,
// and {match_info} is updated on success.
-// The slow path is implemented in RegExpImpl::AtomExec.
+// The slow path is implemented in RegExp::AtomExec.
TF_BUILTIN(RegExpExecAtom, RegExpBuiltinsAssembler) {
TNode<JSRegExp> regexp = CAST(Parameter(Descriptor::kRegExp));
TNode<String> subject_string = CAST(Parameter(Descriptor::kString));
@@ -1538,7 +1541,8 @@ TNode<Int32T> RegExpBuiltinsAssembler::FastFlagGetter(TNode<JSRegExp> regexp,
JSRegExp::Flag flag) {
TNode<Smi> flags = CAST(LoadObjectField(regexp, JSRegExp::kFlagsOffset));
TNode<Smi> mask = SmiConstant(flag);
- return SmiToInt32(SmiShr(SmiAnd(flags, mask), JSRegExp::FlagShiftBits(flag)));
+ return SmiToInt32(SmiShr(SmiAnd(flags, mask), base::bits::CountTrailingZeros(
+ static_cast<int>(flag))));
}
// Load through the GetProperty stub.
@@ -1807,10 +1811,9 @@ TF_BUILTIN(RegExpPrototypeTestFast, RegExpBuiltinsAssembler) {
Return(FalseConstant());
}
-Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
- Node* const index,
- Node* const is_unicode,
- bool is_fastpath) {
+TNode<Number> RegExpBuiltinsAssembler::AdvanceStringIndex(
+ SloppyTNode<String> string, SloppyTNode<Number> index,
+ SloppyTNode<BoolT> is_unicode, bool is_fastpath) {
CSA_ASSERT(this, IsString(string));
CSA_ASSERT(this, IsNumberNormalized(index));
if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
@@ -1818,8 +1821,8 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
// Default to last_index + 1.
// TODO(pwong): Consider using TrySmiAdd for the fast path to reduce generated
// code.
- Node* const index_plus_one = NumberInc(index);
- VARIABLE(var_result, MachineRepresentation::kTagged, index_plus_one);
+ TNode<Number> index_plus_one = NumberInc(index);
+ TVARIABLE(Number, var_result, index_plus_one);
// Advancing the index has some subtle issues involving the distinction
// between Smis and HeapNumbers. There's three cases:
@@ -1846,10 +1849,10 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
BIND(&if_isunicode);
{
TNode<IntPtrT> const string_length = LoadStringLengthAsWord(string);
- TNode<IntPtrT> untagged_plus_one = SmiUntag(index_plus_one);
+ TNode<IntPtrT> untagged_plus_one = SmiUntag(CAST(index_plus_one));
GotoIfNot(IntPtrLessThan(untagged_plus_one, string_length), &out);
- Node* const lead = StringCharCodeAt(string, SmiUntag(index));
+ Node* const lead = StringCharCodeAt(string, SmiUntag(CAST(index)));
GotoIfNot(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
Int32Constant(0xD800)),
&out);
@@ -1860,8 +1863,8 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
&out);
// At a surrogate pair, return index + 2.
- Node* const index_plus_two = NumberInc(index_plus_one);
- var_result.Bind(index_plus_two);
+ TNode<Number> index_plus_two = NumberInc(index_plus_one);
+ var_result = index_plus_two;
Goto(&out);
}
@@ -1870,31 +1873,30 @@ Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
return var_result.value();
}
-void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
- Node* const regexp,
+void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(TNode<Context> context,
+ TNode<Object> regexp,
TNode<String> string,
const bool is_fastpath) {
if (is_fastpath) CSA_ASSERT(this, IsFastRegExp(context, regexp));
Node* const is_global =
- FlagGetter(CAST(context), CAST(regexp), JSRegExp::kGlobal, is_fastpath);
+ FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
Label if_isglobal(this), if_isnotglobal(this);
Branch(is_global, &if_isglobal, &if_isnotglobal);
BIND(&if_isnotglobal);
{
- Node* const result =
- is_fastpath
- ? RegExpPrototypeExecBody(CAST(context), CAST(regexp), string, true)
- : RegExpExec(context, regexp, string);
+ Node* const result = is_fastpath ? RegExpPrototypeExecBody(
+ context, CAST(regexp), string, true)
+ : RegExpExec(context, regexp, string);
Return(result);
}
BIND(&if_isglobal);
{
- Node* const is_unicode = FlagGetter(CAST(context), CAST(regexp),
- JSRegExp::kUnicode, is_fastpath);
+ Node* const is_unicode =
+ FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath);
StoreLastIndex(context, regexp, SmiZero(), is_fastpath);
@@ -1935,8 +1937,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
// On the fast path, grab the matching string from the raw match index
// array.
TNode<RegExpMatchInfo> match_indices =
- RegExpPrototypeExecBodyWithoutResult(CAST(context), CAST(regexp),
- string, &if_didnotmatch, true);
+ RegExpPrototypeExecBodyWithoutResult(context, CAST(regexp), string,
+ &if_didnotmatch, true);
Label dosubstring(this), donotsubstring(this);
Branch(var_atom.value(), &donotsubstring, &dosubstring);
@@ -1988,15 +1990,14 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
TNode<Smi> const match_length = LoadStringLengthAsSmi(match);
GotoIfNot(SmiEqual(match_length, SmiZero()), &loop);
- Node* last_index =
- LoadLastIndex(CAST(context), CAST(regexp), is_fastpath);
+ Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
if (is_fastpath) {
CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
} else {
last_index = ToLength_Inline(context, last_index);
}
- Node* const new_last_index =
+ TNode<Number> new_last_index =
AdvanceStringIndex(string, last_index, is_unicode, is_fastpath);
if (is_fastpath) {
@@ -2017,7 +2018,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
{
// Wrap the match in a JSArray.
- Node* const result = array.ToJSArray(CAST(context));
+ Node* const result = array.ToJSArray(context);
Return(result);
}
}
@@ -2034,7 +2035,7 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
ThrowIfNotJSReceiver(context, maybe_receiver,
MessageTemplate::kIncompatibleMethodReceiver,
"RegExp.prototype.@@match");
- Node* const receiver = maybe_receiver;
+ TNode<JSReceiver> receiver = CAST(maybe_receiver);
// Convert {maybe_string} to a String.
TNode<String> const string = ToString_Inline(context, maybe_string);
@@ -2086,7 +2087,8 @@ void RegExpMatchAllAssembler::Generate(TNode<Context> context,
// 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
// 8. Perform ? Set(matcher, "lastIndex", lastIndex, true).
- FastStoreLastIndex(var_matcher.value(), FastLoadLastIndex(fast_regexp));
+ FastStoreLastIndex(CAST(var_matcher.value()),
+ FastLoadLastIndex(fast_regexp));
// 9. If flags contains "g", let global be true.
// 10. Else, let global be false.
@@ -2226,12 +2228,11 @@ TF_BUILTIN(RegExpMatchFast, RegExpBuiltinsAssembler) {
}
void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
- Node* const context, Node* const regexp, Node* const string) {
+ TNode<Context> context, TNode<JSRegExp> regexp, TNode<String> string) {
CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsString(string));
// Grab the initial value of last index.
- Node* const previous_last_index = FastLoadLastIndex(CAST(regexp));
+ TNode<Smi> previous_last_index = FastLoadLastIndex(regexp);
// Ensure last index is 0.
FastStoreLastIndex(regexp, SmiZero());
@@ -2239,7 +2240,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
// Call exec.
Label if_didnotmatch(this);
TNode<RegExpMatchInfo> match_indices = RegExpPrototypeExecBodyWithoutResult(
- CAST(context), CAST(regexp), CAST(string), &if_didnotmatch, true);
+ context, regexp, string, &if_didnotmatch, true);
// Successful match.
{
@@ -2839,16 +2840,14 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
GotoIfNot(IsEmptyString(match_str), &return_result);
// 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
- TNode<Smi> this_index = CAST(FastLoadLastIndex(CAST(iterating_regexp)));
- CSA_ASSERT(this, TaggedIsSmi(this_index));
+ TNode<Smi> this_index = FastLoadLastIndex(CAST(iterating_regexp));
// 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
- TNode<Smi> next_index = CAST(AdvanceStringIndex(
- iterating_string, this_index, HasUnicodeFlag(flags), true));
- CSA_ASSERT(this, TaggedIsSmi(next_index));
+ TNode<Smi> next_index = AdvanceStringIndexFast(
+ iterating_string, this_index, HasUnicodeFlag(flags));
// 3. Perform ? Set(R, "lastIndex", nextIndex, true).
- FastStoreLastIndex(iterating_regexp, next_index);
+ FastStoreLastIndex(CAST(iterating_regexp), next_index);
// iii. Return ! CreateIterResultObject(match, false).
Goto(&return_result);
@@ -2866,8 +2865,8 @@ TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
TNode<Number> this_index = ToLength_Inline(context, last_index);
// 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
- TNode<Object> next_index = CAST(AdvanceStringIndex(
- iterating_string, this_index, HasUnicodeFlag(flags), false));
+ TNode<Number> next_index = AdvanceStringIndex(
+ iterating_string, this_index, HasUnicodeFlag(flags), false);
// 3. Perform ? Set(R, "lastIndex", nextIndex, true).
SlowStoreLastIndex(context, iterating_regexp, next_index);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 88c00095b9..3677314f19 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -7,7 +7,7 @@
#include "src/base/optional.h"
#include "src/codegen/code-stub-assembler.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
namespace v8 {
namespace internal {
@@ -42,15 +42,20 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
TNode<String> input, TNode<FixedArray>* elements_out = nullptr);
- TNode<Object> FastLoadLastIndex(TNode<JSRegExp> regexp);
+ TNode<Object> FastLoadLastIndexBeforeSmiCheck(TNode<JSRegExp> regexp);
+ TNode<Smi> FastLoadLastIndex(TNode<JSRegExp> regexp) {
+ return CAST(FastLoadLastIndexBeforeSmiCheck(regexp));
+ }
TNode<Object> SlowLoadLastIndex(TNode<Context> context, TNode<Object> regexp);
TNode<Object> LoadLastIndex(TNode<Context> context, TNode<Object> regexp,
bool is_fastpath);
- void FastStoreLastIndex(Node* regexp, Node* value);
- void SlowStoreLastIndex(Node* context, Node* regexp, Node* value);
- void StoreLastIndex(Node* context, Node* regexp, Node* value,
- bool is_fastpath);
+ void FastStoreLastIndex(TNode<JSRegExp> regexp, TNode<Smi> value);
+ void SlowStoreLastIndex(SloppyTNode<Context> context,
+ SloppyTNode<Object> regexp,
+ SloppyTNode<Number> value);
+ void StoreLastIndex(TNode<Context> context, TNode<Object> regexp,
+ TNode<Number> value, bool is_fastpath);
// Loads {var_string_start} and {var_string_end} with the corresponding
// offsets into the given {string_data}.
@@ -127,20 +132,23 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* RegExpExec(Node* context, Node* regexp, Node* string);
- Node* AdvanceStringIndex(Node* const string, Node* const index,
- Node* const is_unicode, bool is_fastpath);
+ TNode<Number> AdvanceStringIndex(SloppyTNode<String> string,
+ SloppyTNode<Number> index,
+ SloppyTNode<BoolT> is_unicode,
+ bool is_fastpath);
- Node* AdvanceStringIndexFast(Node* const string, Node* const index,
- Node* const is_unicode) {
- return AdvanceStringIndex(string, index, is_unicode, true);
+ TNode<Smi> AdvanceStringIndexFast(TNode<String> string, TNode<Smi> index,
+ TNode<BoolT> is_unicode) {
+ return CAST(AdvanceStringIndex(string, index, is_unicode, true));
}
- void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
+ void RegExpPrototypeMatchBody(TNode<Context> context, TNode<Object> regexp,
TNode<String> const string,
const bool is_fastpath);
- void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
- Node* const string);
+ void RegExpPrototypeSearchBodyFast(TNode<Context> context,
+ TNode<JSRegExp> regexp,
+ TNode<String> string);
void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp,
Node* const string);
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index 3e0f7182c7..e758782a99 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -6,8 +6,8 @@
#include "src/builtins/builtins.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
+#include "src/regexp/regexp.h"
#include "src/strings/string-builder-inl.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 5689b42619..97dc8ca895 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -545,32 +545,33 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
Return(result);
}
-TF_BUILTIN(StringCodePointAtUTF16, StringBuiltinsAssembler) {
+TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* position = Parameter(Descriptor::kPosition);
+
// TODO(sigurds) Figure out if passing length as argument pays off.
TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
// Load the character code at the {position} from the {receiver}.
TNode<Int32T> code =
- LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16);
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
// And return it as TaggedSigned value.
// TODO(turbofan): Allow builtins to return values untagged.
TNode<Smi> result = SmiFromInt32(code);
Return(result);
}
-TF_BUILTIN(StringCodePointAtUTF32, StringBuiltinsAssembler) {
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* position = Parameter(Descriptor::kPosition);
+TF_BUILTIN(StringFromCodePointAt, StringBuiltinsAssembler) {
+ TNode<String> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<IntPtrT> position =
+ UncheckedCast<IntPtrT>(Parameter(Descriptor::kPosition));
// TODO(sigurds) Figure out if passing length as argument pays off.
TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
// Load the character code at the {position} from the {receiver}.
TNode<Int32T> code =
- LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
- // And return it as TaggedSigned value.
- // TODO(turbofan): Allow builtins to return values untagged.
- TNode<Smi> result = SmiFromInt32(code);
+ LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF16);
+ // Create a String from the UTF16 encoded code point
+ TNode<String> result = StringFromSingleUTF16EncodedCodePoint(code);
Return(result);
}
@@ -952,19 +953,6 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant,
}
}
-void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
- Node* const value,
- const char* method_name) {
- Label out(this), throw_exception(this, Label::kDeferred);
- Branch(IsNullOrUndefined(value), &throw_exception, &out);
-
- BIND(&throw_exception);
- ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined,
- method_name);
-
- BIND(&out);
-}
-
void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
Node* const context, Node* const object, Node* const maybe_string,
Handle<Symbol> symbol, DescriptorIndexAndName symbol_index,
@@ -1072,10 +1060,10 @@ compiler::Node* StringBuiltinsAssembler::GetSubstitution(
TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Label out(this);
- Node* const receiver = Parameter(Descriptor::kReceiver);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* const search = Parameter(Descriptor::kSearch);
Node* const replace = Parameter(Descriptor::kReplace);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Smi> const smi_zero = SmiConstant(0);
@@ -1578,7 +1566,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
- Node* const receiver = args.GetReceiver();
+ TNode<Object> receiver = args.GetReceiver();
Node* const separator = args.GetOptionalArgumentValue(kSeparatorArg);
Node* const limit = args.GetOptionalArgumentValue(kLimitArg);
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -1986,12 +1974,12 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
switch (encoding) {
case UnicodeEncoding::UTF16:
- var_result = Signed(Word32Or(
+ var_result = Word32Or(
// Need to swap the order for big-endian platforms
#if V8_TARGET_BIG_ENDIAN
- Word32Shl(lead, Int32Constant(16)), trail));
+ Word32Shl(lead, Int32Constant(16)), trail);
#else
- Word32Shl(trail, Int32Constant(16)), lead));
+ Word32Shl(trail, Int32Constant(16)), lead);
#endif
break;
@@ -2002,8 +1990,8 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
// (lead << 10) + trail + SURROGATE_OFFSET
- var_result = Signed(Int32Add(Word32Shl(lead, Int32Constant(10)),
- Int32Add(trail, surrogate_offset)));
+ var_result = Int32Add(Word32Shl(lead, Int32Constant(10)),
+ Int32Add(trail, surrogate_offset));
break;
}
}
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 92ebd3803b..679ce0e17f 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -76,9 +76,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<Smi> subject_length,
TNode<Number> limit_number);
- void RequireObjectCoercible(Node* const context, Node* const value,
- const char* method_name);
-
TNode<BoolT> SmiIsNegative(TNode<Smi> value) {
return SmiLessThan(value, SmiConstant(0));
}
diff --git a/deps/v8/src/builtins/builtins-symbol-gen.cc b/deps/v8/src/builtins/builtins-symbol-gen.cc
index 4e8c9f9850..610a8baeb3 100644
--- a/deps/v8/src/builtins/builtins-symbol-gen.cc
+++ b/deps/v8/src/builtins/builtins-symbol-gen.cc
@@ -13,8 +13,8 @@ namespace internal {
// ES #sec-symbol-objects
// ES #sec-symbol.prototype.description
TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.description");
@@ -24,8 +24,8 @@ TF_BUILTIN(SymbolPrototypeDescriptionGetter, CodeStubAssembler) {
// ES6 #sec-symbol.prototype-@@toprimitive
TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype [ @@toPrimitive ]");
@@ -34,8 +34,8 @@ TF_BUILTIN(SymbolPrototypeToPrimitive, CodeStubAssembler) {
// ES6 #sec-symbol.prototype.tostring
TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* value = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.toString");
@@ -45,8 +45,8 @@ TF_BUILTIN(SymbolPrototypeToString, CodeStubAssembler) {
// ES6 #sec-symbol.prototype.valueof
TF_BUILTIN(SymbolPrototypeValueOf, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* result = ToThisValue(context, receiver, PrimitiveType::kSymbol,
"Symbol.prototype.valueOf");
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 8484685a6a..857d33988f 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -18,32 +18,12 @@ using compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
-// This is needed for gc_mole which will compile this file without the full set
-// of GN defined macros.
-#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64
-#endif
-
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
-// Setup the TypedArray which is under construction.
-// - Set the length.
-// - Set the byte_offset.
-// - Set the byte_length.
-// - Set EmbedderFields to 0.
-void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
- TNode<UintPtrT> length,
- TNode<UintPtrT> byte_offset,
- TNode<UintPtrT> byte_length) {
- StoreObjectFieldNoWriteBarrier(holder, JSTypedArray::kLengthOffset, length,
- MachineType::PointerRepresentation());
- StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteOffsetOffset,
- byte_offset,
- MachineType::PointerRepresentation());
- StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteLengthOffset,
- byte_length,
- MachineType::PointerRepresentation());
+// Sets the embedder fields to 0 for a TypedArray which is under construction.
+void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
+ TNode<JSTypedArray> holder) {
for (int offset = JSTypedArray::kHeaderSize;
offset < JSTypedArray::kSizeWithEmbedderFields; offset += kTaggedSize) {
StoreObjectField(holder, offset, SmiConstant(0));
@@ -54,8 +34,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
// elements.
// TODO(bmeurer,v8:4153): Rename this and maybe fix up the implementation a bit.
TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
- TNode<Context> context, TNode<JSTypedArray> holder,
- TNode<UintPtrT> byte_length) {
+ TNode<Context> context, TNode<UintPtrT> byte_length) {
TNode<Context> native_context = LoadNativeContext(context);
TNode<Map> map =
CAST(LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX));
@@ -97,16 +76,6 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
offset < JSArrayBuffer::kSizeWithEmbedderFields; offset += kTaggedSize) {
StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
-
- StoreObjectField(holder, JSTypedArray::kBufferOffset, buffer);
-
- TNode<ByteArray> elements = AllocateByteArray(byte_length);
- StoreObjectField(holder, JSTypedArray::kElementsOffset, elements);
- StoreObjectField(holder, JSTypedArray::kBasePointerOffset, elements);
- StoreObjectFieldNoWriteBarrier(
- holder, JSTypedArray::kExternalPointerOffset,
- PointerConstant(JSTypedArray::ExternalPointerForOnHeapArray()),
- MachineType::PointerRepresentation());
return buffer;
}
@@ -200,13 +169,13 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
Return(ChangeUintPtrToTagged(length));
}
-TNode<Word32T> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
+TNode<BoolT> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
TNode<Word32T> kind) {
return Word32Or(Word32Equal(kind, Int32Constant(UINT8_ELEMENTS)),
Word32Equal(kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)));
}
-TNode<Word32T> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
+TNode<BoolT> TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
TNode<Word32T> kind) {
return Word32Or(Word32Equal(kind, Int32Constant(BIGINT64_ELEMENTS)),
Word32Equal(kind, Int32Constant(BIGUINT64_ELEMENTS)));
@@ -228,7 +197,12 @@ TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
TorqueStructTypedArrayElementsInfo
TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
TNode<JSTypedArray> typed_array) {
- TNode<Int32T> elements_kind = LoadElementsKind(typed_array);
+ return GetTypedArrayElementsInfo(LoadMap(typed_array));
+}
+
+TorqueStructTypedArrayElementsInfo
+TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(TNode<Map> map) {
+ TNode<Int32T> elements_kind = LoadMapElementsKind(map);
TVARIABLE(UintPtrT, var_size_log2);
TVARIABLE(Map, var_map);
ReadOnlyRoots roots(isolate());
@@ -294,10 +268,9 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::GetBuffer(
Label call_runtime(this), done(this);
TVARIABLE(Object, var_result);
- TNode<Object> buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
GotoIf(IsDetachedBuffer(buffer), &call_runtime);
- TNode<UintPtrT> backing_store = LoadObjectField<UintPtrT>(
- CAST(buffer), JSArrayBuffer::kBackingStoreOffset);
+ TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(buffer);
GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
var_result = buffer;
Goto(&done);
@@ -327,10 +300,10 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
TNode<Context> context, TNode<JSTypedArray> source,
TNode<JSTypedArray> target, TNode<IntPtrT> offset, Label* call_runtime,
Label* if_source_too_large) {
- CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(
- LoadObjectField(source, JSTypedArray::kBufferOffset))));
- CSA_ASSERT(this, Word32BinaryNot(IsDetachedBuffer(
- LoadObjectField(target, JSTypedArray::kBufferOffset))));
+ CSA_ASSERT(this, Word32BinaryNot(
+ IsDetachedBuffer(LoadJSArrayBufferViewBuffer(source))));
+ CSA_ASSERT(this, Word32BinaryNot(
+ IsDetachedBuffer(LoadJSArrayBufferViewBuffer(target))));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(offset, IntPtrConstant(0)));
CSA_ASSERT(this,
IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue)));
@@ -774,8 +747,8 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
// ToNumber/ToBigInt may execute JavaScript code, which could
// detach the array's buffer.
- Node* buffer =
- LoadObjectField(new_typed_array, JSTypedArray::kBufferOffset);
+ TNode<JSArrayBuffer> buffer =
+ LoadJSArrayBufferViewBuffer(new_typed_array);
GotoIf(IsDetachedBuffer(buffer), &if_detached);
// GC may move backing store in ToNumber, thus load backing
@@ -997,8 +970,8 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
// ToNumber/ToBigInt may execute JavaScript code, which could
// detach the array's buffer.
- Node* buffer = LoadObjectField(target_obj.value(),
- JSTypedArray::kBufferOffset);
+ TNode<JSArrayBuffer> buffer =
+ LoadJSArrayBufferViewBuffer(target_obj.value());
GotoIf(IsDetachedBuffer(buffer), &if_detached);
// GC may move backing store in map_fn, thus load backing
@@ -1027,7 +1000,5 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
"%TypedArray%.from");
}
-#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index 6fb02a657c..d637bc9c6b 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -27,15 +27,12 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
const char* method_name,
IterationKind iteration_kind);
- void SetupTypedArray(TNode<JSTypedArray> holder, TNode<UintPtrT> length,
- TNode<UintPtrT> byte_offset,
- TNode<UintPtrT> byte_length);
+ void SetupTypedArrayEmbedderFields(TNode<JSTypedArray> holder);
void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
TNode<Map> map, TNode<Smi> length,
TNode<UintPtrT> byte_offset);
TNode<JSArrayBuffer> AllocateEmptyOnHeapBuffer(TNode<Context> context,
- TNode<JSTypedArray> holder,
TNode<UintPtrT> byte_length);
TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
@@ -44,16 +41,17 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<UintPtrT> byte_offset);
// Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
- TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
+ TNode<BoolT> IsUint8ElementsKind(TNode<Word32T> kind);
// Returns true if kind is either BIGINT64_ELEMENTS or BIGUINT64_ELEMENTS.
- TNode<Word32T> IsBigInt64ElementsKind(TNode<Word32T> kind);
+ TNode<BoolT> IsBigInt64ElementsKind(TNode<Word32T> kind);
// Returns the byte size of an element for a TypedArray elements kind.
TNode<IntPtrT> GetTypedArrayElementSize(TNode<Word32T> elements_kind);
// Returns information (byte size and map) about a TypedArray's elements.
ElementsInfo GetTypedArrayElementsInfo(TNode<JSTypedArray> typed_array);
+ ElementsInfo GetTypedArrayElementsInfo(TNode<Map> map);
TNode<JSFunction> GetDefaultConstructor(TNode<Context> context,
TNode<JSTypedArray> exemplar);
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
index 78f37c0cf5..18738d2c48 100644
--- a/deps/v8/src/builtins/builtins-weak-refs.cc
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -48,14 +48,24 @@ BUILTIN(FinalizationGroupRegister) {
HandleScope scope(isolate);
const char* method_name = "FinalizationGroup.prototype.register";
+ // 1. Let finalizationGroup be the this value.
+ //
+ // 2. If Type(finalizationGroup) is not Object, throw a TypeError
+ // exception.
+ //
+ // 4. If finalizationGroup does not have a [[Cells]] internal slot,
+ // throw a TypeError exception.
CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
Handle<Object> target = args.atOrUndefined(isolate, 1);
+
+ // 3. If Type(target) is not Object, throw a TypeError exception.
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kWeakRefsRegisterTargetMustBeObject));
}
+
Handle<Object> holdings = args.atOrUndefined(isolate, 2);
if (target->SameValue(*holdings)) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -64,15 +74,21 @@ BUILTIN(FinalizationGroupRegister) {
MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame));
}
- Handle<Object> key = args.atOrUndefined(isolate, 3);
- // TODO(marja, gsathya): Restrictions on "key" (e.g., does it need to be an
- // object).
+ Handle<Object> unregister_token = args.atOrUndefined(isolate, 3);
+ // 5. If Type(unregisterToken) is not Object,
+ // a. If unregisterToken is not undefined, throw a TypeError exception.
+ if (!unregister_token->IsJSReceiver() && !unregister_token->IsUndefined()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject,
+ unregister_token));
+ }
// TODO(marja): Realms.
JSFinalizationGroup::Register(finalization_group,
- Handle<JSReceiver>::cast(target), holdings, key,
- isolate);
+ Handle<JSReceiver>::cast(target), holdings,
+ unregister_token, isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -80,25 +96,63 @@ BUILTIN(FinalizationGroupUnregister) {
HandleScope scope(isolate);
const char* method_name = "FinalizationGroup.prototype.unregister";
+ // 1. Let finalizationGroup be the this value.
+ //
+ // 2. If Type(finalizationGroup) is not Object, throw a TypeError
+ // exception.
+ //
+ // 3. If finalizationGroup does not have a [[Cells]] internal slot,
+ // throw a TypeError exception.
CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
- Handle<Object> key = args.atOrUndefined(isolate, 1);
- JSFinalizationGroup::Unregister(finalization_group, key, isolate);
- return ReadOnlyRoots(isolate).undefined_value();
+ Handle<Object> unregister_token = args.atOrUndefined(isolate, 1);
+
+ // 4. If Type(unregisterToken) is not Object, throw a TypeError exception.
+ if (!unregister_token->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject,
+ unregister_token));
+ }
+
+ bool success = JSFinalizationGroup::Unregister(
+ finalization_group, Handle<JSReceiver>::cast(unregister_token), isolate);
+
+ return *isolate->factory()->ToBoolean(success);
}
BUILTIN(FinalizationGroupCleanupSome) {
HandleScope scope(isolate);
const char* method_name = "FinalizationGroup.prototype.cleanupSome";
+ // 1. Let finalizationGroup be the this value.
+ //
+ // 2. If Type(finalizationGroup) is not Object, throw a TypeError
+ // exception.
+ //
+ // 3. If finalizationGroup does not have a [[Cells]] internal slot,
+ // throw a TypeError exception.
CHECK_RECEIVER(JSFinalizationGroup, finalization_group, method_name);
- // TODO(marja, gsathya): Add missing "cleanup" callback.
+ Handle<Object> callback(finalization_group->cleanup(), isolate);
+ Handle<Object> callback_obj = args.atOrUndefined(isolate, 1);
+
+ // 4. If callback is not undefined and IsCallable(callback) is
+ // false, throw a TypeError exception.
+ if (!callback_obj->IsUndefined(isolate)) {
+ if (!callback_obj->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kWeakRefsCleanupMustBeCallable));
+ }
+ callback = callback_obj;
+ }
// Don't do set_scheduled_for_cleanup(false); we still have the microtask
// scheduled and don't want to schedule another one in case the user never
// executes microtasks.
- JSFinalizationGroup::Cleanup(finalization_group, isolate);
+ JSFinalizationGroup::Cleanup(isolate, finalization_group, callback);
+
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -138,7 +192,7 @@ BUILTIN(WeakRefConstructor) {
}
Handle<JSReceiver> target_receiver =
handle(JSReceiver::cast(*target_object), isolate);
- isolate->heap()->AddKeepDuringJobTarget(target_receiver);
+ isolate->heap()->KeepDuringJob(target_receiver);
// TODO(marja): Realms.
@@ -158,9 +212,9 @@ BUILTIN(WeakRefDeref) {
if (weak_ref->target().IsJSReceiver()) {
Handle<JSReceiver> target =
handle(JSReceiver::cast(weak_ref->target()), isolate);
- // AddKeepDuringJobTarget might allocate and cause a GC, but it won't clear
+ // KeepDuringJob might allocate and cause a GC, but it won't clear
// weak_ref since we hold a Handle to its target.
- isolate->heap()->AddKeepDuringJobTarget(target);
+ isolate->heap()->KeepDuringJob(target);
} else {
DCHECK(weak_ref->target().IsUndefined(isolate));
}
diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq
index eb95a77023..b83906d109 100644
--- a/deps/v8/src/builtins/collections.tq
+++ b/deps/v8/src/builtins/collections.tq
@@ -33,7 +33,7 @@ namespace collections {
}
}
}
- case (receiver: JSReceiver): {
+ case (JSReceiver): {
goto MayHaveSideEffects;
}
case (o: Object): deferred {
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 842e9527ee..62a0cc31c3 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -74,16 +74,17 @@ namespace data_view {
// ES6 section 24.2.4.1 get DataView.prototype.buffer
javascript builtin DataViewPrototypeGetBuffer(
- context: Context, receiver: Object, ...arguments): JSArrayBuffer {
- let dataView: JSDataView =
+ js-implicit context: Context,
+ receiver: Object)(...arguments): JSArrayBuffer {
+ const dataView: JSDataView =
ValidateDataView(context, receiver, 'get DataView.prototype.buffer');
return dataView.buffer;
}
// ES6 section 24.2.4.2 get DataView.prototype.byteLength
javascript builtin DataViewPrototypeGetByteLength(
- context: Context, receiver: Object, ...arguments): Number {
- let dataView: JSDataView = ValidateDataView(
+ js-implicit context: Context, receiver: Object)(...arguments): Number {
+ const dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.byte_length');
if (WasNeutered(dataView)) {
// TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
@@ -95,8 +96,8 @@ namespace data_view {
// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
javascript builtin DataViewPrototypeGetByteOffset(
- context: Context, receiver: Object, ...arguments): Number {
- let dataView: JSDataView = ValidateDataView(
+ js-implicit context: Context, receiver: Object)(...arguments): Number {
+ const dataView: JSDataView = ValidateDataView(
context, receiver, 'get DataView.prototype.byte_offset');
if (WasNeutered(dataView)) {
// TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
@@ -128,7 +129,7 @@ namespace data_view {
macro LoadDataView16(
buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
signed: constexpr bool): Number {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
let b0: int32;
let b1: int32;
@@ -155,12 +156,12 @@ namespace data_view {
macro LoadDataView32(
buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
kind: constexpr ElementsKind): Number {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = LoadUint8(dataPointer, offset);
- let b1: uint32 = LoadUint8(dataPointer, offset + 1);
- let b2: uint32 = LoadUint8(dataPointer, offset + 2);
- let b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
let result: uint32;
if (requestedLittleEndian) {
@@ -174,7 +175,7 @@ namespace data_view {
} else if constexpr (kind == UINT32_ELEMENTS) {
return Convert<Number>(result);
} else if constexpr (kind == FLOAT32_ELEMENTS) {
- let floatRes: float64 = Convert<float64>(BitcastInt32ToFloat32(result));
+ const floatRes: float64 = Convert<float64>(BitcastInt32ToFloat32(result));
return Convert<Number>(floatRes);
} else {
unreachable;
@@ -184,16 +185,16 @@ namespace data_view {
macro LoadDataViewFloat64(
buffer: JSArrayBuffer, offset: uintptr,
requestedLittleEndian: bool): Number {
- let dataPointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = LoadUint8(dataPointer, offset);
- let b1: uint32 = LoadUint8(dataPointer, offset + 1);
- let b2: uint32 = LoadUint8(dataPointer, offset + 2);
- let b3: uint32 = LoadUint8(dataPointer, offset + 3);
- let b4: uint32 = LoadUint8(dataPointer, offset + 4);
- let b5: uint32 = LoadUint8(dataPointer, offset + 5);
- let b6: uint32 = LoadUint8(dataPointer, offset + 6);
- let b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ const dataPointer: RawPtr = buffer.backing_store;
+
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ const b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ const b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ const b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ const b7: uint32 = LoadUint8(dataPointer, offset + 7);
let lowWord: uint32;
let highWord: uint32;
@@ -212,74 +213,49 @@ namespace data_view {
return Convert<Number>(result);
}
- extern macro AllocateBigInt(intptr): BigInt;
- extern macro StoreBigIntBitfield(BigInt, uint32): void;
- extern macro StoreBigIntDigit(BigInt, constexpr int31, uintptr): void;
- extern macro DataViewBuiltinsAssembler::DataViewEncodeBigIntBits(
- constexpr bool, constexpr int31): uint32;
-
- const kPositiveBigInt: constexpr bool = false;
- const kNegativeBigInt: constexpr bool = true;
const kZeroDigitBigInt: constexpr int31 = 0;
const kOneDigitBigInt: constexpr int31 = 1;
const kTwoDigitBigInt: constexpr int31 = 2;
- macro CreateEmptyBigInt(isPositive: bool, length: constexpr int31): BigInt {
- // Allocate a BigInt with the desired length (number of digits).
- let result: BigInt = AllocateBigInt(length);
-
- // Write the desired sign and length to the BigInt bitfield.
- if (isPositive) {
- StoreBigIntBitfield(
- result, DataViewEncodeBigIntBits(kPositiveBigInt, length));
- } else {
- StoreBigIntBitfield(
- result, DataViewEncodeBigIntBits(kNegativeBigInt, length));
- }
-
- return result;
- }
-
// Create a BigInt on a 64-bit architecture from two 32-bit values.
- macro MakeBigIntOn64Bit(
+ macro MakeBigIntOn64Bit(implicit context: Context)(
lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// 0n is represented by a zero-length BigInt.
if (lowWord == 0 && highWord == 0) {
- return AllocateBigInt(kZeroDigitBigInt);
+ return Convert<BigInt>(bigint::AllocateBigInt(kZeroDigitBigInt));
}
- let isPositive: bool = true;
- let highPart: intptr = Signed(Convert<uintptr>(highWord));
- let lowPart: intptr = Signed(Convert<uintptr>(lowWord));
+ let sign: uint32 = bigint::kPositiveSign;
+ const highPart: intptr = Signed(Convert<uintptr>(highWord));
+ const lowPart: intptr = Signed(Convert<uintptr>(lowWord));
let rawValue: intptr = (highPart << 32) + lowPart;
if constexpr (signed) {
if (rawValue < 0) {
- isPositive = false;
+ sign = bigint::kNegativeSign;
// We have to store the absolute value of rawValue in the digit.
rawValue = 0 - rawValue;
}
}
// Allocate the BigInt and store the absolute value.
- let result: BigInt = CreateEmptyBigInt(isPositive, kOneDigitBigInt);
-
- StoreBigIntDigit(result, 0, Unsigned(rawValue));
-
- return result;
+ const result: MutableBigInt =
+ bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt);
+ bigint::StoreBigIntDigit(result, 0, Unsigned(rawValue));
+ return Convert<BigInt>(result);
}
// Create a BigInt on a 32-bit architecture from two 32-bit values.
- macro MakeBigIntOn32Bit(
+ macro MakeBigIntOn32Bit(implicit context: Context)(
lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// 0n is represented by a zero-length BigInt.
if (lowWord == 0 && highWord == 0) {
- return AllocateBigInt(kZeroDigitBigInt);
+ return Convert<BigInt>(bigint::AllocateBigInt(kZeroDigitBigInt));
}
// On a 32-bit platform, we might need 1 or 2 digits to store the number.
let needTwoDigits: bool = false;
- let isPositive: bool = true;
+ let sign: uint32 = bigint::kPositiveSign;
// We need to do some math on lowWord and highWord,
// so Convert them to int32.
@@ -293,7 +269,7 @@ namespace data_view {
if constexpr (signed) {
// If highPart < 0, the number is always negative.
if (highPart < 0) {
- isPositive = false;
+ sign = bigint::kNegativeSign;
// We have to compute the absolute value by hand.
// There will be a negative carry from the low word
@@ -322,25 +298,23 @@ namespace data_view {
}
// Allocate the BigInt with the right sign and length.
- let result: BigInt;
+ let result: MutableBigInt;
if (needTwoDigits) {
- result = CreateEmptyBigInt(isPositive, kTwoDigitBigInt);
+ result = bigint::AllocateEmptyBigInt(sign, kTwoDigitBigInt);
} else {
- result = CreateEmptyBigInt(isPositive, kOneDigitBigInt);
+ result = bigint::AllocateEmptyBigInt(sign, kOneDigitBigInt);
}
// Finally, write the digit(s) to the BigInt.
- StoreBigIntDigit(result, 0, Unsigned(Convert<intptr>(lowPart)));
-
+ bigint::StoreBigIntDigit(result, 0, Unsigned(Convert<intptr>(lowPart)));
if (needTwoDigits) {
- StoreBigIntDigit(result, 1, Unsigned(Convert<intptr>(highPart)));
+ bigint::StoreBigIntDigit(result, 1, Unsigned(Convert<intptr>(highPart)));
}
-
- return result;
+ return Convert<BigInt>(result);
}
- macro MakeBigInt(lowWord: uint32, highWord: uint32, signed: constexpr bool):
- BigInt {
+ macro MakeBigInt(implicit context: Context)(
+ lowWord: uint32, highWord: uint32, signed: constexpr bool): BigInt {
// A BigInt digit has the platform word size, so we only need one digit
// on 64-bit platforms but may need two on 32-bit.
if constexpr (Is64()) {
@@ -350,19 +324,19 @@ namespace data_view {
}
}
- macro LoadDataViewBigInt(
+ macro LoadDataViewBigInt(implicit context: Context)(
buffer: JSArrayBuffer, offset: uintptr, requestedLittleEndian: bool,
signed: constexpr bool): BigInt {
- let dataPointer: RawPtr = buffer.backing_store;
-
- let b0: uint32 = LoadUint8(dataPointer, offset);
- let b1: uint32 = LoadUint8(dataPointer, offset + 1);
- let b2: uint32 = LoadUint8(dataPointer, offset + 2);
- let b3: uint32 = LoadUint8(dataPointer, offset + 3);
- let b4: uint32 = LoadUint8(dataPointer, offset + 4);
- let b5: uint32 = LoadUint8(dataPointer, offset + 5);
- let b6: uint32 = LoadUint8(dataPointer, offset + 6);
- let b7: uint32 = LoadUint8(dataPointer, offset + 7);
+ const dataPointer: RawPtr = buffer.backing_store;
+
+ const b0: uint32 = LoadUint8(dataPointer, offset);
+ const b1: uint32 = LoadUint8(dataPointer, offset + 1);
+ const b2: uint32 = LoadUint8(dataPointer, offset + 2);
+ const b3: uint32 = LoadUint8(dataPointer, offset + 3);
+ const b4: uint32 = LoadUint8(dataPointer, offset + 4);
+ const b5: uint32 = LoadUint8(dataPointer, offset + 5);
+ const b6: uint32 = LoadUint8(dataPointer, offset + 6);
+ const b7: uint32 = LoadUint8(dataPointer, offset + 7);
let lowWord: uint32;
let highWord: uint32;
@@ -385,7 +359,7 @@ namespace data_view {
transitioning macro DataViewGet(
context: Context, receiver: Object, offset: Object,
requestedLittleEndian: Object, kind: constexpr ElementsKind): Numeric {
- let dataView: JSDataView =
+ const dataView: JSDataView =
ValidateDataView(context, receiver, MakeDataViewGetterNameString(kind));
let getIndex: Number;
@@ -396,25 +370,25 @@ namespace data_view {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let littleEndian: bool = ToBoolean(requestedLittleEndian);
- let buffer: JSArrayBuffer = dataView.buffer;
+ const littleEndian: bool = ToBoolean(requestedLittleEndian);
+ const buffer: JSArrayBuffer = dataView.buffer;
if (IsDetachedBuffer(buffer)) {
ThrowTypeError(kDetachedOperation, MakeDataViewGetterNameString(kind));
}
- let getIndexFloat: float64 = Convert<float64>(getIndex);
- let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+ const getIndexFloat: float64 = Convert<float64>(getIndex);
+ const getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
- let viewOffsetWord: uintptr = dataView.byte_offset;
- let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
- let elementSizeFloat: float64 = DataViewElementSize(kind);
+ const viewOffsetWord: uintptr = dataView.byte_offset;
+ const viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ const elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
+ const bufferIndex: uintptr = getIndexWord + viewOffsetWord;
if constexpr (kind == UINT8_ELEMENTS) {
return LoadDataView8(buffer, bufferIndex, false);
@@ -442,84 +416,84 @@ namespace data_view {
}
transitioning javascript builtin DataViewPrototypeGetUint8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
return DataViewGet(context, receiver, offset, Undefined, UINT8_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetInt8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
return DataViewGet(context, receiver, offset, Undefined, INT8_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetUint16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, UINT16_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetInt16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, INT16_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetUint32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, UINT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetInt32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, INT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetFloat32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, FLOAT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetFloat64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, FLOAT64_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetBigUint64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, BIGUINT64_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeGetBigInt64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 1 ? arguments[1] : Undefined;
return DataViewGet(
context, receiver, offset, isLittleEndian, BIGINT64_ELEMENTS);
@@ -539,10 +513,10 @@ namespace data_view {
macro StoreDataView16(
buffer: JSArrayBuffer, offset: uintptr, value: uint32,
requestedLittleEndian: bool) {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = value & 0xFF;
- let b1: uint32 = (value >>> 8) & 0xFF;
+ const b0: uint32 = value & 0xFF;
+ const b1: uint32 = (value >>> 8) & 0xFF;
if (requestedLittleEndian) {
StoreWord8(dataPointer, offset, b0);
@@ -556,12 +530,12 @@ namespace data_view {
macro StoreDataView32(
buffer: JSArrayBuffer, offset: uintptr, value: uint32,
requestedLittleEndian: bool) {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = value & 0xFF;
- let b1: uint32 = (value >>> 8) & 0xFF;
- let b2: uint32 = (value >>> 16) & 0xFF;
- let b3: uint32 = value >>> 24; // We don't need to mask here.
+ const b0: uint32 = value & 0xFF;
+ const b1: uint32 = (value >>> 8) & 0xFF;
+ const b2: uint32 = (value >>> 16) & 0xFF;
+ const b3: uint32 = value >>> 24; // We don't need to mask here.
if (requestedLittleEndian) {
StoreWord8(dataPointer, offset, b0);
@@ -579,17 +553,17 @@ namespace data_view {
macro StoreDataView64(
buffer: JSArrayBuffer, offset: uintptr, lowWord: uint32, highWord: uint32,
requestedLittleEndian: bool) {
- let dataPointer: RawPtr = buffer.backing_store;
+ const dataPointer: RawPtr = buffer.backing_store;
- let b0: uint32 = lowWord & 0xFF;
- let b1: uint32 = (lowWord >>> 8) & 0xFF;
- let b2: uint32 = (lowWord >>> 16) & 0xFF;
- let b3: uint32 = lowWord >>> 24;
+ const b0: uint32 = lowWord & 0xFF;
+ const b1: uint32 = (lowWord >>> 8) & 0xFF;
+ const b2: uint32 = (lowWord >>> 16) & 0xFF;
+ const b3: uint32 = lowWord >>> 24;
- let b4: uint32 = highWord & 0xFF;
- let b5: uint32 = (highWord >>> 8) & 0xFF;
- let b6: uint32 = (highWord >>> 16) & 0xFF;
- let b7: uint32 = highWord >>> 24;
+ const b4: uint32 = highWord & 0xFF;
+ const b5: uint32 = (highWord >>> 8) & 0xFF;
+ const b6: uint32 = (highWord >>> 16) & 0xFF;
+ const b7: uint32 = highWord >>> 24;
if (requestedLittleEndian) {
StoreWord8(dataPointer, offset, b0);
@@ -612,11 +586,10 @@ namespace data_view {
}
}
- extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(BigInt):
- uint32;
- extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigInt):
+ extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntLength(
+ BigIntBase): uint32;
+ extern macro DataViewBuiltinsAssembler::DataViewDecodeBigIntSign(BigIntBase):
uint32;
- extern macro LoadBigIntDigit(BigInt, constexpr int31): uintptr;
// We might get here a BigInt that is bigger than 64 bits, but we're only
// interested in the 64 lowest ones. This means the lowest BigInt digit
@@ -624,8 +597,8 @@ namespace data_view {
macro StoreDataViewBigInt(
buffer: JSArrayBuffer, offset: uintptr, bigIntValue: BigInt,
requestedLittleEndian: bool) {
- let length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
- let sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
+ const length: uint32 = DataViewDecodeBigIntLength(bigIntValue);
+ const sign: uint32 = DataViewDecodeBigIntSign(bigIntValue);
// The 32-bit words that will hold the BigInt's value in
// two's complement representation.
@@ -636,13 +609,13 @@ namespace data_view {
if (length != 0) {
if constexpr (Is64()) {
// There is always exactly 1 BigInt digit to load in this case.
- let value: uintptr = LoadBigIntDigit(bigIntValue, 0);
+ const value: uintptr = bigint::LoadBigIntDigit(bigIntValue, 0);
lowWord = Convert<uint32>(value); // Truncates value to 32 bits.
highWord = Convert<uint32>(value >>> 32);
} else { // There might be either 1 or 2 BigInt digits we need to load.
- lowWord = Convert<uint32>(LoadBigIntDigit(bigIntValue, 0));
+ lowWord = Convert<uint32>(bigint::LoadBigIntDigit(bigIntValue, 0));
if (length >= 2) { // Only load the second digit if there is one.
- highWord = Convert<uint32>(LoadBigIntDigit(bigIntValue, 1));
+ highWord = Convert<uint32>(bigint::LoadBigIntDigit(bigIntValue, 1));
}
}
}
@@ -661,7 +634,7 @@ namespace data_view {
transitioning macro DataViewSet(
context: Context, receiver: Object, offset: Object, value: Object,
requestedLittleEndian: Object, kind: constexpr ElementsKind): Object {
- let dataView: JSDataView =
+ const dataView: JSDataView =
ValidateDataView(context, receiver, MakeDataViewSetterNameString(kind));
let getIndex: Number;
@@ -672,52 +645,52 @@ namespace data_view {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let littleEndian: bool = ToBoolean(requestedLittleEndian);
- let buffer: JSArrayBuffer = dataView.buffer;
+ const littleEndian: bool = ToBoolean(requestedLittleEndian);
+ const buffer: JSArrayBuffer = dataView.buffer;
// According to ES6 section 24.2.1.2 SetViewValue, we must perform
// the conversion before doing the bounds check.
if constexpr (kind == BIGUINT64_ELEMENTS || kind == BIGINT64_ELEMENTS) {
- let bigIntValue: BigInt = ToBigInt(context, value);
+ const bigIntValue: BigInt = ToBigInt(context, value);
if (IsDetachedBuffer(buffer)) {
ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind));
}
- let getIndexFloat: float64 = Convert<float64>(getIndex);
- let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+ const getIndexFloat: float64 = Convert<float64>(getIndex);
+ const getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
- let viewOffsetWord: uintptr = dataView.byte_offset;
- let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
- let elementSizeFloat: float64 = DataViewElementSize(kind);
+ const viewOffsetWord: uintptr = dataView.byte_offset;
+ const viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ const elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
+ const bufferIndex: uintptr = getIndexWord + viewOffsetWord;
StoreDataViewBigInt(buffer, bufferIndex, bigIntValue, littleEndian);
} else {
- let numValue: Number = ToNumber(context, value);
+ const numValue: Number = ToNumber(context, value);
if (IsDetachedBuffer(buffer)) {
ThrowTypeError(kDetachedOperation, MakeDataViewSetterNameString(kind));
}
- let getIndexFloat: float64 = Convert<float64>(getIndex);
- let getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
+ const getIndexFloat: float64 = Convert<float64>(getIndex);
+ const getIndexWord: uintptr = Convert<uintptr>(getIndexFloat);
- let viewOffsetWord: uintptr = dataView.byte_offset;
- let viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
- let elementSizeFloat: float64 = DataViewElementSize(kind);
+ const viewOffsetWord: uintptr = dataView.byte_offset;
+ const viewSizeFloat: float64 = Convert<float64>(dataView.byte_length);
+ const elementSizeFloat: float64 = DataViewElementSize(kind);
if (getIndexFloat + elementSizeFloat > viewSizeFloat) {
ThrowRangeError(kInvalidDataViewAccessorOffset);
}
- let bufferIndex: uintptr = getIndexWord + viewOffsetWord;
+ const bufferIndex: uintptr = getIndexWord + viewOffsetWord;
- let doubleValue: float64 = ChangeNumberToFloat64(numValue);
+ const doubleValue: float64 = ChangeNumberToFloat64(numValue);
if constexpr (kind == UINT8_ELEMENTS || kind == INT8_ELEMENTS) {
StoreDataView8(
@@ -731,13 +704,13 @@ namespace data_view {
buffer, bufferIndex, TruncateFloat64ToWord32(doubleValue),
littleEndian);
} else if constexpr (kind == FLOAT32_ELEMENTS) {
- let floatValue: float32 = TruncateFloat64ToFloat32(doubleValue);
+ const floatValue: float32 = TruncateFloat64ToFloat32(doubleValue);
StoreDataView32(
buffer, bufferIndex, BitcastFloat32ToInt32(floatValue),
littleEndian);
} else if constexpr (kind == FLOAT64_ELEMENTS) {
- let lowWord: uint32 = Float64ExtractLowWord32(doubleValue);
- let highWord: uint32 = Float64ExtractHighWord32(doubleValue);
+ const lowWord: uint32 = Float64ExtractLowWord32(doubleValue);
+ const highWord: uint32 = Float64ExtractHighWord32(doubleValue);
StoreDataView64(buffer, bufferIndex, lowWord, highWord, littleEndian);
}
}
@@ -745,96 +718,96 @@ namespace data_view {
}
transitioning javascript builtin DataViewPrototypeSetUint8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
return DataViewSet(
context, receiver, offset, value, Undefined, UINT8_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetInt8(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
return DataViewSet(
context, receiver, offset, value, Undefined, INT8_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetUint16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, UINT16_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetInt16(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, INT16_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetUint32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, UINT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetInt32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, INT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetFloat32(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, FLOAT32_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetFloat64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, FLOAT64_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetBigUint64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, BIGUINT64_ELEMENTS);
}
transitioning javascript builtin DataViewPrototypeSetBigInt64(
- context: Context, receiver: Object, ...arguments): Object {
- let offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
- let value: Object = arguments.length > 1 ? arguments[1] : Undefined;
- let isLittleEndian: Object =
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
+ const offset: Object = arguments.length > 0 ? arguments[0] : Undefined;
+ const value: Object = arguments.length > 1 ? arguments[1] : Undefined;
+ const isLittleEndian: Object =
arguments.length > 2 ? arguments[2] : Undefined;
return DataViewSet(
context, receiver, offset, value, isLittleEndian, BIGINT64_ELEMENTS);
diff --git a/deps/v8/src/builtins/extras-utils.tq b/deps/v8/src/builtins/extras-utils.tq
index 2b9b79739e..3675fda191 100644
--- a/deps/v8/src/builtins/extras-utils.tq
+++ b/deps/v8/src/builtins/extras-utils.tq
@@ -8,17 +8,18 @@ namespace extras_utils {
extern runtime PromiseStatus(Context, Object): Smi;
javascript builtin ExtrasUtilsCreatePrivateSymbol(
- context: Context, receiver: Object, ...arguments): HeapObject {
+ js-implicit context: Context,
+ receiver: Object)(...arguments): HeapObject {
return CreatePrivateSymbol(context, arguments[0]);
}
javascript builtin ExtrasUtilsMarkPromiseAsHandled(
- context: Context, receiver: Object, ...arguments): Undefined {
+ js-implicit context: Context, receiver: Object)(...arguments): Undefined {
return PromiseMarkAsHandled(context, arguments[0]);
}
javascript builtin ExtrasUtilsPromiseState(
- context: Context, receiver: Object, ...arguments): Smi {
+ js-implicit context: Context, receiver: Object)(...arguments): Smi {
return PromiseStatus(context, arguments[0]);
}
}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 0d80c681fb..995be77f75 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -1023,10 +1023,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset),
+ BytecodeArray::kOsrNestingLevelOffset),
Immediate(0));
// Push bytecode array.
@@ -1534,6 +1534,15 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
BuiltinContinuationFrameConstants::kFixedFrameSize),
eax);
}
+
+ // Replace the builtin index Smi on the stack with the start address of the
+ // builtin loaded from the builtins table. The ret below will return to this
+ // address.
+ int offset_to_builtin_index = allocatable_register_count * kSystemPointerSize;
+ __ mov(eax, Operand(esp, offset_to_builtin_index));
+ __ LoadEntryFromBuiltinIndex(eax);
+ __ mov(Operand(esp, offset_to_builtin_index), eax);
+
for (int i = allocatable_register_count - 1; i >= 0; --i) {
int code = config->GetAllocatableGeneralCode(i);
__ pop(Register::from_code(code));
@@ -1549,7 +1558,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kSystemPointerSize;
__ pop(Operand(esp, offsetToPC));
__ Drop(offsetToPC / kSystemPointerSize);
- __ add(Operand(esp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ ret(0);
}
} // namespace
@@ -3012,23 +3020,28 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ mov(esi, __ ExternalReferenceAsOperand(next_address, esi));
__ mov(edi, __ ExternalReferenceAsOperand(limit_address, edi));
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ Move(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
__ cmpb(Operand(eax, 0), Immediate(0));
- __ j(zero, &profiler_disabled);
+ __ j(not_zero, &profiler_enabled);
+ __ Move(eax, Immediate(ExternalReference::address_of_runtime_stats_flag()));
+ __ cmp(Operand(eax, 0), Immediate(0));
+ __ j(not_zero, &profiler_enabled);
+ {
+ // Call the api function directly.
+ __ mov(eax, function_address);
+ __ jmp(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual getter function.
+ __ mov(thunk_last_arg, function_address);
+ __ Move(eax, Immediate(thunk_ref));
+ }
+ __ bind(&end_profiler_check);
- // Additional parameter is the address of the actual getter function.
- __ mov(thunk_last_arg, function_address);
// Call the api function.
- __ Move(eax, Immediate(thunk_ref));
__ call(eax);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- // Call the api function.
- __ call(function_address);
- __ bind(&end_profiler_check);
Label prologue;
// Load the value from ReturnValue
@@ -3080,6 +3093,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ CompareRoot(map, RootIndex::kHeapNumberMap);
__ j(equal, &ok, Label::kNear);
+ __ CompareRoot(map, RootIndex::kBigIntMap);
+ __ j(equal, &ok, Label::kNear);
+
__ CompareRoot(return_value, RootIndex::kUndefinedValue);
__ j(equal, &ok, Label::kNear);
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
index 4e75c6d837..d96fa924ab 100644
--- a/deps/v8/src/builtins/internal-coverage.tq
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -28,6 +28,8 @@ namespace internal_coverage {
return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
}
+ @export // Silence unused warning on release builds. SlotCount is only used
+ // in an assert. TODO(szuend): Remove once macros and asserts work.
macro SlotCount(coverageInfo: CoverageInfo): Smi {
assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask));
diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq
index 5c9439dfc7..b770f1b652 100644
--- a/deps/v8/src/builtins/iterator.tq
+++ b/deps/v8/src/builtins/iterator.tq
@@ -20,16 +20,16 @@ namespace iterator {
implicit context: Context)(Object): IteratorRecord;
extern macro IteratorBuiltinsAssembler::IteratorStep(
- implicit context: Context)(IteratorRecord): Object
+ implicit context: Context)(IteratorRecord): JSReceiver
labels Done;
extern macro IteratorBuiltinsAssembler::IteratorStep(
- implicit context: Context)(IteratorRecord, Map): Object
+ implicit context: Context)(IteratorRecord, Map): JSReceiver
labels Done;
extern macro IteratorBuiltinsAssembler::IteratorValue(
- implicit context: Context)(Object): Object;
+ implicit context: Context)(JSReceiver): Object;
extern macro IteratorBuiltinsAssembler::IteratorValue(
- implicit context: Context)(Object, Map): Object;
+ implicit context: Context)(JSReceiver, Map): Object;
extern macro IteratorBuiltinsAssembler::IteratorCloseOnException(
implicit context: Context)(IteratorRecord, Object): never;
diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq
index 84dd1261fa..df43b30efc 100644
--- a/deps/v8/src/builtins/math.tq
+++ b/deps/v8/src/builtins/math.tq
@@ -7,7 +7,7 @@ namespace math {
extern macro Float64Acos(float64): float64;
transitioning javascript builtin
- MathAcos(context: Context, receiver: Object, x: Object): Number {
+ MathAcos(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Acos(value));
}
@@ -16,7 +16,7 @@ namespace math {
extern macro Float64Acosh(float64): float64;
transitioning javascript builtin
- MathAcosh(context: Context, receiver: Object, x: Object): Number {
+ MathAcosh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Acosh(value));
}
@@ -25,7 +25,7 @@ namespace math {
extern macro Float64Asin(float64): float64;
transitioning javascript builtin
- MathAsin(context: Context, receiver: Object, x: Object): Number {
+ MathAsin(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Asin(value));
}
@@ -34,7 +34,7 @@ namespace math {
extern macro Float64Asinh(float64): float64;
transitioning javascript builtin
- MathAsinh(context: Context, receiver: Object, x: Object): Number {
+ MathAsinh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Asinh(value));
}
@@ -43,7 +43,7 @@ namespace math {
extern macro Float64Atan(float64): float64;
transitioning javascript builtin
- MathAtan(context: Context, receiver: Object, x: Object): Number {
+ MathAtan(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Atan(value));
}
@@ -52,7 +52,7 @@ namespace math {
extern macro Float64Atan2(float64, float64): float64;
transitioning javascript builtin
- MathAtan2(context: Context, receiver: Object, y: Object, x: Object): Number {
+ MathAtan2(context: Context, _receiver: Object, y: Object, x: Object): Number {
const yValue = Convert<float64>(ToNumber_Inline(context, y));
const xValue = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Atan2(yValue, xValue));
@@ -62,7 +62,7 @@ namespace math {
extern macro Float64Atanh(float64): float64;
transitioning javascript builtin
- MathAtanh(context: Context, receiver: Object, x: Object): Number {
+ MathAtanh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Atanh(value));
}
@@ -71,7 +71,7 @@ namespace math {
extern macro Float64Cbrt(float64): float64;
transitioning javascript builtin
- MathCbrt(context: Context, receiver: Object, x: Object): Number {
+ MathCbrt(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Cbrt(value));
}
@@ -80,7 +80,7 @@ namespace math {
extern macro Word32Clz(int32): int32;
transitioning javascript builtin
- MathClz32(context: Context, receiver: Object, x: Object): Number {
+ MathClz32(context: Context, _receiver: Object, x: Object): Number {
const num = ToNumber_Inline(context, x);
let value: int32;
@@ -100,7 +100,7 @@ namespace math {
extern macro Float64Cos(float64): float64;
transitioning javascript builtin
- MathCos(context: Context, receiver: Object, x: Object): Number {
+ MathCos(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Cos(value));
}
@@ -109,7 +109,7 @@ namespace math {
extern macro Float64Cosh(float64): float64;
transitioning javascript builtin
- MathCosh(context: Context, receiver: Object, x: Object): Number {
+ MathCosh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Cosh(value));
}
@@ -118,7 +118,7 @@ namespace math {
extern macro Float64Exp(float64): float64;
transitioning javascript builtin
- MathExp(context: Context, receiver: Object, x: Object): Number {
+ MathExp(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Exp(value));
}
@@ -127,14 +127,14 @@ namespace math {
extern macro Float64Expm1(float64): float64;
transitioning javascript builtin
- MathExpm1(context: Context, receiver: Object, x: Object): Number {
+ MathExpm1(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Expm1(value));
}
// ES6 #sec-math.fround
transitioning javascript builtin
- MathFround(context: Context, receiver: Object, x: Object): Number {
+ MathFround(context: Context, _receiver: Object, x: Object): Number {
const x32 = Convert<float32>(ToNumber_Inline(context, x));
const x64 = Convert<float64>(x32);
return Convert<Number>(x64);
@@ -144,7 +144,7 @@ namespace math {
extern macro Float64Log(float64): float64;
transitioning javascript builtin
- MathLog(context: Context, receiver: Object, x: Object): Number {
+ MathLog(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Log(value));
}
@@ -153,7 +153,7 @@ namespace math {
extern macro Float64Log1p(float64): float64;
transitioning javascript builtin
- MathLog1p(context: Context, receiver: Object, x: Object): Number {
+ MathLog1p(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Log1p(value));
}
@@ -162,7 +162,7 @@ namespace math {
extern macro Float64Log10(float64): float64;
transitioning javascript builtin
- MathLog10(context: Context, receiver: Object, x: Object): Number {
+ MathLog10(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Log10(value));
}
@@ -171,7 +171,7 @@ namespace math {
extern macro Float64Log2(float64): float64;
transitioning javascript builtin
- MathLog2(context: Context, receiver: Object, x: Object): Number {
+ MathLog2(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Log2(value));
}
@@ -180,14 +180,14 @@ namespace math {
extern macro Float64Sin(float64): float64;
transitioning javascript builtin
- MathSin(context: Context, receiver: Object, x: Object): Number {
+ MathSin(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Sin(value));
}
// ES6 #sec-math.sign
transitioning javascript builtin
- MathSign(context: Context, receiver: Object, x: Object): Number {
+ MathSign(context: Context, _receiver: Object, x: Object): Number {
const num = ToNumber_Inline(context, x);
const value = Convert<float64>(num);
@@ -204,7 +204,7 @@ namespace math {
extern macro Float64Sinh(float64): float64;
transitioning javascript builtin
- MathSinh(context: Context, receiver: Object, x: Object): Number {
+ MathSinh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Sinh(value));
}
@@ -213,7 +213,7 @@ namespace math {
extern macro Float64Sqrt(float64): float64;
transitioning javascript builtin
- MathSqrt(context: Context, receiver: Object, x: Object): Number {
+ MathSqrt(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Sqrt(value));
}
@@ -222,7 +222,7 @@ namespace math {
extern macro Float64Tan(float64): float64;
transitioning javascript builtin
- MathTan(context: Context, receiver: Object, x: Object): Number {
+ MathTan(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Tan(value));
}
@@ -231,7 +231,7 @@ namespace math {
extern macro Float64Tanh(float64): float64;
transitioning javascript builtin
- MathTanh(context: Context, receiver: Object, x: Object): Number {
+ MathTanh(context: Context, _receiver: Object, x: Object): Number {
const value = Convert<float64>(ToNumber_Inline(context, x));
return Convert<Number>(Float64Tanh(value));
}
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index ec65c78ee9..a359b2436f 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -62,7 +62,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
// -- a1 : target function (preserved for callee)
// -- a3 : new target (preserved for callee)
// -----------------------------------
@@ -70,14 +69,12 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ SmiTag(a0);
- __ Push(a0, a1, a3, a1);
+ __ Push(a1, a3, a1);
__ CallRuntime(function_id, 1);
// Restore target function and new target.
- __ Pop(a0, a1, a3);
- __ SmiUntag(a0);
+ __ Pop(a1, a3);
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
@@ -853,13 +850,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee if needed, and caller)
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1035,17 +1030,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+ // and update invocation count. Otherwise, setup the stack frame.
+ __ lw(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(closure);
-
-
// Increment invocation count for the function.
__ lw(t0, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@@ -1053,10 +1049,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(t0, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
- // Reset code age.
- DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
- __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kBytecodeAgeOffset));
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
+ // 8-bit fields next to each other, so we could just optimize by writing a
+ // 16-bit. These static asserts guard our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -1464,11 +1471,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
__ lw(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
__ Pop(t0);
__ Addu(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(ra);
- __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadEntryFromBuiltinIndex(t0);
__ Jump(t0);
}
} // namespace
@@ -2559,7 +2568,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ LoadRoot(t0, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ Branch(&okay, eq, t0, Operand(a2));
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -2825,18 +2834,23 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(function_address == a1 || function_address == a2);
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ li(t9, ExternalReference::is_profiling_address(isolate));
__ lb(t9, MemOperand(t9, 0));
- __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
-
- // Additional parameter is the address of the actual callback.
- __ li(t9, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- __ mov(t9, function_address);
+ __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
+ __ li(t9, ExternalReference::address_of_runtime_stats_flag());
+ __ lw(t9, MemOperand(t9, 0));
+ __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
+ {
+ // Call the api function directly.
+ __ mov(t9, function_address);
+ __ Branch(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ li(t9, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 34a5774d65..c5565b90de 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -62,7 +62,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
// -- a1 : target function (preserved for callee)
// -- a3 : new target (preserved for callee)
// -----------------------------------
@@ -70,13 +69,11 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push a copy of the target function and the new target.
- __ SmiTag(a0);
- __ Push(a0, a1, a3, a1);
+ __ Push(a1, a3, a1);
__ CallRuntime(function_id, 1);
// Restore target function and new target.
- __ Pop(a0, a1, a3);
- __ SmiUntag(a0);
+ __ Pop(a1, a3);
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
@@ -870,13 +867,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee if needed, and caller)
// -- a3 : new target (preserved for callee if needed, and caller)
// -- a1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, a1, a3, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1052,16 +1047,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+ // and update invocation count. Otherwise, setup the stack frame.
+ __ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
+ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(closure);
-
// Increment invocation count for the function.
__ Lw(a4, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@@ -1069,10 +1066,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sw(a4, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
- // Reset code age.
- DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
- __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kBytecodeAgeOffset));
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
+ // 8-bit fields next to each other, so we could just optimize by writing a
+ // 16-bit. These static asserts guard our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
@@ -1479,11 +1487,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
}
__ Ld(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
__ Pop(t0);
__ Daddu(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(ra);
- __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ LoadEntryFromBuiltinIndex(t0);
__ Jump(t0);
}
} // namespace
@@ -2595,7 +2605,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ LoadRoot(a4, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ Branch(&okay, eq, a4, Operand(a2));
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -2864,18 +2874,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK(function_address == a1 || function_address == a2);
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ li(t9, ExternalReference::is_profiling_address(isolate));
__ Lb(t9, MemOperand(t9, 0));
- __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
-
- // Additional parameter is the address of the actual callback.
- __ li(t9, thunk_ref);
- __ jmp(&end_profiler_check);
+ __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
+ __ li(t9, ExternalReference::address_of_runtime_stats_flag());
+ __ Lw(t9, MemOperand(t9, 0));
+ __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
+ {
+ // Call the api function directly.
+ __ mov(t9, function_address);
+ __ Branch(&end_profiler_check);
+ }
- __ bind(&profiler_disabled);
- __ mov(t9, function_address);
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ li(t9, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index 93851d4e11..32115e78ea 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -33,8 +33,8 @@ namespace object {
}
transitioning javascript builtin
- ObjectFromEntries(implicit context: Context)(receiver: Object, ...arguments):
- Object {
+ ObjectFromEntries(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
const iterable: Object = arguments[0];
try {
if (IsNullOrUndefined(iterable)) goto Throw;
@@ -47,7 +47,8 @@ namespace object {
try {
assert(!IsNullOrUndefined(i.object));
while (true) {
- const step: Object = iterator::IteratorStep(i, fastIteratorResultMap)
+ const step: JSReceiver =
+ iterator::IteratorStep(i, fastIteratorResultMap)
otherwise return result;
const iteratorValue: Object =
iterator::IteratorValue(step, fastIteratorResultMap);
diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq
new file mode 100644
index 0000000000..6706a8f943
--- /dev/null
+++ b/deps/v8/src/builtins/object.tq
@@ -0,0 +1,138 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace runtime {
+ extern transitioning runtime
+ ObjectIsExtensible(implicit context: Context)(Object): Object;
+
+ extern transitioning runtime
+ JSReceiverPreventExtensionsThrow(implicit context: Context)(JSReceiver):
+ Object;
+
+ extern transitioning runtime
+ JSReceiverPreventExtensionsDontThrow(implicit context: Context)(JSReceiver):
+ Object;
+
+ extern transitioning runtime
+ JSReceiverGetPrototypeOf(implicit context: Context)(JSReceiver): Object;
+
+ extern transitioning runtime
+ JSReceiverSetPrototypeOfThrow(implicit context: Context)(JSReceiver, Object):
+ Object;
+
+ extern transitioning runtime
+ JSReceiverSetPrototypeOfDontThrow(implicit context:
+ Context)(JSReceiver, Object): Object;
+} // namespace runtime
+
+namespace object {
+ transitioning macro
+ ObjectIsExtensible(implicit context: Context)(object: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::ObjectIsExtensible(objectJSReceiver);
+ return proxy::ProxyIsExtensible(objectJSProxy);
+ }
+
+ transitioning macro
+ ObjectPreventExtensionsThrow(implicit context: Context)(object: Object):
+ Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverPreventExtensionsThrow(
+ objectJSReceiver);
+ proxy::ProxyPreventExtensions(objectJSProxy, True);
+ return objectJSReceiver;
+ }
+
+ transitioning macro
+ ObjectPreventExtensionsDontThrow(implicit context: Context)(object: Object):
+ Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverPreventExtensionsDontThrow(
+ objectJSReceiver);
+ return proxy::ProxyPreventExtensions(objectJSProxy, False);
+ }
+
+ transitioning macro
+ ObjectGetPrototypeOf(implicit context: Context)(object: Object): Object {
+ const objectJSReceiver: JSReceiver = ToObject_Inline(context, object);
+ return object::JSReceiverGetPrototypeOf(objectJSReceiver);
+ }
+
+ transitioning macro
+ JSReceiverGetPrototypeOf(implicit context: Context)(object: JSReceiver):
+ Object {
+ const objectJSProxy = Cast<JSProxy>(object)
+ otherwise return runtime::JSReceiverGetPrototypeOf(object);
+ return proxy::ProxyGetPrototypeOf(objectJSProxy);
+ }
+
+ transitioning macro
+ ObjectSetPrototypeOfThrow(implicit context: Context)(
+ object: Object, proto: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return object;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverSetPrototypeOfThrow(
+ objectJSReceiver, proto);
+ proxy::ProxySetPrototypeOf(objectJSProxy, proto, True);
+ return objectJSReceiver;
+ }
+
+ transitioning macro
+ ObjectSetPrototypeOfDontThrow(implicit context: Context)(
+ object: Object, proto: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object) otherwise return False;
+ const objectJSProxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return runtime::JSReceiverSetPrototypeOfDontThrow(
+ objectJSReceiver, proto);
+ return proxy::ProxySetPrototypeOf(objectJSProxy, proto, False);
+ }
+} // namespace object
+
+namespace object_isextensible {
+ // ES6 section 19.1.2.11 Object.isExtensible ( O )
+ transitioning javascript builtin ObjectIsExtensible(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ return object::ObjectIsExtensible(object);
+ }
+} // namespace object_isextensible
+
+namespace object_preventextensions {
+ // ES6 section 19.1.2.11 Object.isExtensible ( O )
+ transitioning javascript builtin ObjectPreventExtensions(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ return object::ObjectPreventExtensionsThrow(object);
+ }
+} // namespace object_preventextensions
+
+namespace object_getprototypeof {
+ // ES6 section 19.1.2.9 Object.getPrototypeOf ( O )
+ transitioning javascript builtin ObjectGetPrototypeOf(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ return object::ObjectGetPrototypeOf(object);
+ }
+} // namespace object_getprototypeof
+
+namespace object_setprototypeof {
+ // ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
+ transitioning javascript builtin ObjectSetPrototypeOf(
+ js-implicit context:
+ Context)(_receiver: Object, object: Object, proto: Object): Object {
+ // 1. Set O to ? RequireObjectCoercible(O).
+ RequireObjectCoercible(object, 'Object.setPrototypeOf');
+
+ // 2. If Type(proto) is neither Object nor Null, throw a TypeError
+ // exception.
+ // 3. If Type(O) is not Object, return O.
+ // 4. Let status be ? O.[[SetPrototypeOf]](proto).
+ // 5. If status is false, throw a TypeError exception.
+ // 6. Return O.
+ if (proto == Null || Is<JSReceiver>(proto)) {
+ return object::ObjectSetPrototypeOfThrow(object, proto);
+ }
+ ThrowTypeError(kProtoObjectOrNull, proto);
+ }
+} // namespace object_setprototypeof
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index e3c6ce6407..a42cb9bebd 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -60,24 +60,20 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- r3 : argument count (preserved for callee)
// -- r4 : target function (preserved for callee)
// -- r6 : new target (preserved for callee)
// -----------------------------------
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push the number of arguments to the callee.
// Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ SmiTag(r3);
- __ Push(r3, r4, r6, r4);
+ __ Push(r4, r6, r4);
__ CallRuntime(function_id, 1);
__ mr(r5, r3);
// Restore target function and new target.
- __ Pop(r3, r4, r6);
- __ SmiUntag(r3);
+ __ Pop(r4, r6);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ JumpCodeObject(r5);
@@ -110,6 +106,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ Register scratch = r5;
+
Label stack_overflow;
Generate_StackOverflowCheck(masm, r3, r8, &stack_overflow);
@@ -141,13 +139,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[2*kPointerSize]: context
// -----------------------------------
__ beq(&no_args, cr0);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ sub(sp, sp, ip);
+ __ ShiftLeftImm(scratch, r3, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, scratch);
__ mtctr(r3);
__ bind(&loop);
- __ subi(ip, ip, Operand(kPointerSize));
- __ LoadPX(r0, MemOperand(r7, ip));
- __ StorePX(r0, MemOperand(sp, ip));
+ __ subi(scratch, scratch, Operand(kPointerSize));
+ __ LoadPX(r0, MemOperand(r7, scratch));
+ __ StorePX(r0, MemOperand(sp, scratch));
__ bdnz(&loop);
__ bind(&no_args);
@@ -300,13 +298,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------
__ cmpi(r3, Operand::Zero());
__ beq(&no_args);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ sub(sp, sp, ip);
+ __ ShiftLeftImm(r9, r3, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, r9);
__ mtctr(r3);
__ bind(&loop);
- __ subi(ip, ip, Operand(kPointerSize));
- __ LoadPX(r0, MemOperand(r7, ip));
- __ StorePX(r0, MemOperand(sp, ip));
+ __ subi(r9, r9, Operand(kPointerSize));
+ __ LoadPX(r0, MemOperand(r7, r9));
+ __ StorePX(r0, MemOperand(sp, r9));
__ bdnz(&loop);
__ bind(&no_args);
@@ -416,12 +414,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
+ Register scratch = r8;
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ Move(ip, debug_hook);
- __ LoadByte(ip, MemOperand(ip), r0);
- __ extsb(ip, ip);
- __ CmpSmiLiteral(ip, Smi::zero(), r0);
+ __ Move(scratch, debug_hook);
+ __ LoadByte(scratch, MemOperand(scratch), r0);
+ __ extsb(scratch, scratch);
+ __ CmpSmiLiteral(scratch, Smi::zero(), r0);
__ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -429,9 +428,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ Move(ip, debug_suspended_generator);
- __ LoadP(ip, MemOperand(ip));
- __ cmp(ip, r4);
+ __ Move(scratch, debug_suspended_generator);
+ __ LoadP(scratch, MemOperand(scratch));
+ __ cmp(scratch, r4);
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -442,8 +441,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&stack_overflow);
// Push receiver.
- __ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
- __ Push(ip);
+ __ LoadP(scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+ __ Push(scratch);
// ----------- S t a t e -------------
// -- r4 : the JSGeneratorObject to resume
@@ -470,8 +469,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mtctr(r6);
__ bind(&loop);
- __ LoadPU(ip, MemOperand(r9, kPointerSize));
- __ push(ip);
+ __ LoadPU(scratch, MemOperand(r9, kPointerSize));
+ __ push(scratch);
__ bdnz(&loop);
__ bind(&done_loop);
@@ -602,6 +601,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ LoadP(r0, MemOperand(r3));
__ push(r0);
+ Register scratch = r9;
// Set up frame pointer for the frame to be pushed.
__ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -611,17 +611,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
masm->isolate());
__ Move(r3, js_entry_sp);
- __ LoadP(r9, MemOperand(r3));
- __ cmpi(r9, Operand::Zero());
+ __ LoadP(scratch, MemOperand(r3));
+ __ cmpi(scratch, Operand::Zero());
__ bne(&non_outermost_js);
__ StoreP(fp, MemOperand(r3));
- __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
- __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
- __ push(ip); // frame-type
+ __ push(scratch); // frame-type
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -642,12 +642,12 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ Move(ip,
- ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
- masm->isolate()));
+ __ Move(scratch,
+ ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
}
- __ StoreP(r3, MemOperand(ip));
+ __ StoreP(r3, MemOperand(scratch));
__ LoadRoot(r3, RootIndex::kException);
__ b(&exit);
@@ -679,16 +679,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ pop(r8);
__ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ bne(&non_outermost_js_2);
- __ mov(r9, Operand::Zero());
+ __ mov(scratch, Operand::Zero());
__ Move(r8, js_entry_sp);
- __ StoreP(r9, MemOperand(r8));
+ __ StoreP(scratch, MemOperand(r8));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(r6);
- __ Move(ip, ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, masm->isolate()));
- __ StoreP(r6, MemOperand(ip));
+ __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ StoreP(r6, MemOperand(scratch));
// Reset the stack to the callee saved registers.
__ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -894,13 +894,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee if needed, and caller)
- // -- r3 : new target (preserved for callee if needed, and caller)
- // -- r1 : target function (preserved for callee if needed, and caller)
+ // -- r6 : new target (preserved for callee if needed, and caller)
+ // -- r4 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, r4, r6, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1084,6 +1082,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+ // and update invocation count. Otherwise, setup the stack frame.
+ __ LoadP(r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadHalfWord(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
+ __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
+ __ bne(&push_stack_frame);
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
@@ -1102,6 +1109,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
+
+ __ bind(&push_stack_frame);
+
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1109,12 +1119,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ li(r8, Operand(0));
__ StoreHalfWord(r8,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset),
+ BytecodeArray::kOsrNestingLevelOffset),
r0);
// Load initial bytecode offset.
@@ -1395,11 +1405,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
__ lbzx(ip, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftImm(ip, ip, Operand(kPointerSizeLog2));
+ __ ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2));
__ LoadPX(kJavaScriptCallCodeStartRegister,
- MemOperand(kInterpreterDispatchTableRegister, ip));
+ MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
}
@@ -1526,13 +1538,17 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ LoadP(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
- __ Pop(ip);
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
+ UseScratchRegisterScope temps(masm);
+ Register builtin = temps.Acquire();
+ __ Pop(builtin);
__ addi(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(r0);
__ mtlr(r0);
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ __ LoadEntryFromBuiltinIndex(builtin);
+ __ Jump(builtin);
}
} // namespace
@@ -1702,14 +1718,15 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r3: actual number of arguments
// r4: callable
{
+ Register scratch = r6;
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ add(r5, sp, r5);
__ mtctr(r3);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r5, -kPointerSize));
- __ StoreP(ip, MemOperand(r5));
+ __ LoadP(scratch, MemOperand(r5, -kPointerSize));
+ __ StoreP(scratch, MemOperand(r5));
__ subi(r5, r5, Operand(kPointerSize));
__ bdnz(&loop);
// Adjust the actual number of arguments and remove the top element
@@ -1891,7 +1908,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check for stack overflow.
Label stack_overflow;
- Generate_StackOverflowCheck(masm, r7, ip, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r7, scratch, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -1902,12 +1919,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ mtctr(r7);
__ bind(&loop);
- __ LoadPU(ip, MemOperand(r5, kPointerSize));
- __ CompareRoot(ip, RootIndex::kTheHoleValue);
+ __ LoadPU(scratch, MemOperand(r5, kPointerSize));
+ __ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip);
- __ LoadRoot(ip, RootIndex::kUndefinedValue);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
- __ push(ip);
+ __ push(scratch);
__ bdnz(&loop);
__ bind(&no_args);
__ add(r3, r3, r7);
@@ -1953,8 +1970,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ cmpi(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ LoadP(scratch,
+ MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ cmpi(scratch,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ beq(&arguments_adaptor);
{
__ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1988,9 +2007,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ add(r3, r3, r8);
__ bind(&loop);
{
- __ ShiftLeftImm(ip, r8, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(r7, ip));
- __ push(ip);
+ __ ShiftLeftImm(scratch, r8, Operand(kPointerSizeLog2));
+ __ LoadPX(scratch, MemOperand(r7, scratch));
+ __ push(scratch);
__ subi(r8, r8, Operand(1));
__ cmpi(r8, Operand::Zero());
__ bne(&loop);
@@ -2134,10 +2153,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- r7 : the number of [[BoundArguments]]
// -----------------------------------
+ Register scratch = r9;
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ mr(r9, sp); // preserve previous stack pointer
+ __ mr(scratch, sp); // preserve previous stack pointer
__ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2));
__ sub(sp, sp, r10);
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2146,7 +2166,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ CompareRoot(sp, RootIndex::kRealStackLimit);
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
- __ mr(sp, r9);
+ __ mr(sp, scratch);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2166,7 +2186,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ beq(&skip);
__ mtctr(r3);
__ bind(&loop);
- __ LoadPX(r0, MemOperand(r9, r8));
+ __ LoadPX(r0, MemOperand(scratch, r8));
__ StorePX(r0, MemOperand(sp, r8));
__ addi(r8, r8, Operand(kPointerSize));
__ bdnz(&loop);
@@ -2201,9 +2221,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r4);
// Patch the receiver to [[BoundThis]].
- __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
+ __ LoadP(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ StorePX(ip, MemOperand(sp, r0));
+ __ StorePX(r6, MemOperand(sp, r0));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2388,7 +2408,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ cmpli(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
r0);
__ bne(&skip_adapt_arguments, cr0);
@@ -2686,7 +2706,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CompareRoot(r6, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ beq(&okay);
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -2961,13 +2981,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Move(scratch, thunk_ref);
__ isel(eq, scratch, function_address, scratch);
} else {
- Label profiler_disabled;
- Label end_profiler_check;
- __ beq(&profiler_disabled);
- __ Move(scratch, thunk_ref);
- __ b(&end_profiler_check);
- __ bind(&profiler_disabled);
- __ mr(scratch, function_address);
+ Label profiler_enabled, end_profiler_check;
+ __ bne(&profiler_enabled);
+ __ Move(scratch, ExternalReference::address_of_runtime_stats_flag());
+ __ lwz(scratch, MemOperand(scratch, 0));
+ __ cmpi(scratch, Operand::Zero());
+ __ bne(&profiler_enabled);
+ {
+ // Call the api function directly.
+ __ mr(scratch, function_address);
+ __ b(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ Move(scratch, thunk_ref);
+ }
__ bind(&end_profiler_check);
}
@@ -3264,6 +3293,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
}
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ Register temp2 = temps.Acquire();
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
__ mflr(r0);
@@ -3271,11 +3302,11 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
if (ABI_USES_FUNCTION_DESCRIPTORS && FLAG_embedded_builtins) {
// AIX/PPC64BE Linux use a function descriptor;
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
- __ LoadP(ip, MemOperand(ip, 0)); // Instruction address
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(temp2, kPointerSize));
+ __ LoadP(temp2, MemOperand(temp2, 0)); // Instruction address
}
- __ Call(ip); // Call the C++ function.
+ __ Call(temp2); // Call the C++ function.
__ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ mtlr(r0);
__ blr();
diff --git a/deps/v8/src/builtins/proxy-constructor.tq b/deps/v8/src/builtins/proxy-constructor.tq
index 178759b595..ad60c20e2c 100644
--- a/deps/v8/src/builtins/proxy-constructor.tq
+++ b/deps/v8/src/builtins/proxy-constructor.tq
@@ -6,17 +6,14 @@
namespace proxy {
- extern macro ProxiesCodeStubAssembler::GetProxyConstructorJSNewTarget():
- Object;
-
// ES #sec-proxy-constructor
// https://tc39.github.io/ecma262/#sec-proxy-constructor
transitioning javascript builtin
- ProxyConstructor(implicit context: Context)(
- receiver: Object, target: Object, handler: Object): JSProxy {
+ ProxyConstructor(
+ js-implicit context: Context, receiver: Object,
+ newTarget: Object)(target: Object, handler: Object): JSProxy {
try {
// 1. If NewTarget is undefined, throw a TypeError exception.
- const newTarget: Object = GetProxyConstructorJSNewTarget();
if (newTarget == Undefined) {
ThrowTypeError(kConstructorNotFunction, 'Proxy');
}
diff --git a/deps/v8/src/builtins/proxy-delete-property.tq b/deps/v8/src/builtins/proxy-delete-property.tq
new file mode 100644
index 0000000000..759de766ef
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-delete-property.tq
@@ -0,0 +1,67 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-delete-p
+ // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-delete-p
+ transitioning builtin
+ ProxyDeleteProperty(implicit context: Context)(
+ proxy: JSProxy, name: Name, languageMode: LanguageMode): Object {
+ const kTrapName: constexpr string = 'deleteProperty';
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(TaggedIsNotSmi(name));
+ assert(IsName(name));
+ assert(!IsPrivateSymbol(name));
+
+ try {
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = UnsafeCast<JSReceiver>(proxy.target);
+
+ // 6. Let trap be ? GetMethod(handler, "deleteProperty").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
+ // « target, P »)).
+ const trapResult = Call(context, trap, handler, target, name);
+
+ // 9. If booleanTrapResult is false, return false.
+ if (BranchIfToBooleanIsFalse(trapResult)) {
+ if (languageMode == SmiConstant(kStrict)) {
+ ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName, name);
+ }
+ return False;
+ }
+
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ // 11. If targetDesc is undefined, return true.
+ // 12. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ // 13. Let extensibleTarget be ? IsExtensible(target).
+ // 14. If extensibleTarget is false, throw a TypeError exception.
+ CheckDeleteTrapResult(target, proxy, name);
+
+ // 15. Return true.
+ return True;
+ }
+ label TrapUndefined(target: Object) {
+ // 7.a. Return ? target.[[Delete]](P).
+ return DeleteProperty(target, name, languageMode);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq
index 0915a66d5f..bac07f550c 100644
--- a/deps/v8/src/builtins/proxy-get-property.tq
+++ b/deps/v8/src/builtins/proxy-get-property.tq
@@ -6,9 +6,8 @@
namespace proxy {
- extern transitioning runtime
- GetPropertyWithReceiver(implicit context: Context)(Object, Name, Object, Smi):
- Object;
+ extern transitioning builtin GetPropertyWithReceiver(
+ implicit context: Context)(Object, Name, Object, Smi): Object;
// ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
// https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
@@ -16,36 +15,38 @@ namespace proxy {
ProxyGetProperty(implicit context: Context)(
proxy: JSProxy, name: Name, receiverValue: Object,
onNonExistent: Smi): Object {
+ PerformStackCheck();
// 1. Assert: IsPropertyKey(P) is true.
assert(TaggedIsNotSmi(name));
assert(IsName(name));
assert(!IsPrivateSymbol(name));
// 2. Let handler be O.[[ProxyHandler]].
- const handler: Object = proxy.handler;
-
// 3. If handler is null, throw a TypeError exception.
- if (handler == Null) {
- ThrowTypeError(kProxyRevoked, 'get');
- }
-
// 4. Assert: Type(handler) is Object.
- const handlerJSReceiver = UnsafeCast<JSReceiver>(handler);
+ let handler: JSReceiver;
+ typeswitch (proxy.handler) {
+ case (Null): {
+ ThrowTypeError(kProxyRevoked, 'get');
+ }
+ case (h: JSReceiver): {
+ handler = h;
+ }
+ }
// 5. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;
// 6. Let trap be ? GetMethod(handler, "get").
// 7. If trap is undefined, then (see 7.a below).
// 7.a. Return ? target.[[Get]](P, Receiver).
- // TODO(mslekova): Introduce GetPropertyWithReceiver stub
- const trap: Callable = GetMethod(handlerJSReceiver, 'get')
+ const trap: Callable = GetMethod(handler, 'get')
otherwise return GetPropertyWithReceiver(
target, name, receiverValue, onNonExistent);
// 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
const trapResult =
- Call(context, trap, handlerJSReceiver, target, name, receiverValue);
+ Call(context, trap, handler, target, name, receiverValue);
// 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
// 10. If targetDesc is not undefined and targetDesc.[[Configurable]] is
@@ -58,6 +59,7 @@ namespace proxy {
// is undefined, then
// i. If trapResult is not undefined, throw a TypeError exception.
// 11. Return trapResult.
- return CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+ CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+ return trapResult;
}
}
diff --git a/deps/v8/src/builtins/proxy-get-prototype-of.tq b/deps/v8/src/builtins/proxy-get-prototype-of.tq
new file mode 100644
index 0000000000..2418eaf423
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-get-prototype-of.tq
@@ -0,0 +1,70 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
+ // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
+ transitioning builtin
+ ProxyGetPrototypeOf(implicit context: Context)(proxy: JSProxy): Object {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'getPrototypeOf';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 7. Let handlerProto be ? Call(trap, handler, « target »).
+ const handlerProto = Call(context, trap, handler, target);
+
+ // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError
+ // exception.
+ if (!Is<JSReceiver>(handlerProto)) {
+ goto ThrowProxyGetPrototypeOfInvalid;
+ }
+
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ // 10. If extensibleTarget is true, return handlerProto.
+ const extensibleTarget: Object = object::ObjectIsExtensible(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ return handlerProto;
+ }
+
+ // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ const targetProto = object::ObjectGetPrototypeOf(target);
+
+ // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError
+ // exception.
+ // 13. Return handlerProto.
+ if (BranchIfSameValue(targetProto, handlerProto)) {
+ return handlerProto;
+ }
+ ThrowTypeError(kProxyGetPrototypeOfNonExtensible);
+ }
+ label TrapUndefined(target: Object) {
+ // 6.a. Return ? target.[[GetPrototypeOf]]().
+ return object::ObjectGetPrototypeOf(target);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ label ThrowProxyGetPrototypeOfInvalid deferred {
+ ThrowTypeError(kProxyGetPrototypeOfInvalid);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq
index ab3898a9c7..ee394c5d84 100644
--- a/deps/v8/src/builtins/proxy-has-property.tq
+++ b/deps/v8/src/builtins/proxy-has-property.tq
@@ -22,11 +22,12 @@ namespace proxy {
// 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
const handler =
Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
// 5. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ const target = Cast<JSReceiver>(proxy.target) otherwise unreachable;
// 6. Let trap be ? GetMethod(handler, "has").
// 7. If trap is undefined, then (see 7.a below).
@@ -42,7 +43,8 @@ namespace proxy {
if (BranchIfToBooleanIsTrue(trapResult)) {
return True;
}
- return CheckHasTrapResult(target, proxy, name);
+ CheckHasTrapResult(target, proxy, name);
+ return False;
}
label TrapUndefined(target: Object) {
// 7.a. Return ? target.[[HasProperty]](P).
diff --git a/deps/v8/src/builtins/proxy-is-extensible.tq b/deps/v8/src/builtins/proxy-is-extensible.tq
new file mode 100644
index 0000000000..82f4a5b955
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-is-extensible.tq
@@ -0,0 +1,56 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-isextensible
+ // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-isextensible
+ transitioning builtin ProxyIsExtensible(implicit context:
+ Context)(proxy: JSProxy): Object {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'isExtensible';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 5. Let trap be ? GetMethod(handler, "isExtensible").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «
+ // target»)).
+ const trapResult = ToBoolean(Call(context, trap, handler, target));
+
+ // 8. Let targetResult be ? IsExtensible(target).
+ const targetResult: bool = ToBoolean(object::ObjectIsExtensible(target));
+
+ // 9. If SameValue(booleanTrapResult, targetResult) is false, throw a
+ // TypeError exception.
+ if (trapResult != targetResult) {
+ ThrowTypeError(
+ kProxyIsExtensibleInconsistent,
+ SelectBooleanConstant(targetResult));
+ }
+ // 10. Return booleanTrapResult.
+ return SelectBooleanConstant(trapResult);
+ }
+ label TrapUndefined(target: Object) {
+ // 6.a. Return ? IsExtensible(target).
+ return object::ObjectIsExtensible(target);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy-prevent-extensions.tq b/deps/v8/src/builtins/proxy-prevent-extensions.tq
new file mode 100644
index 0000000000..6d5d2569fb
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-prevent-extensions.tq
@@ -0,0 +1,66 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-preventextensions
+ // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-preventextensions
+ transitioning builtin
+ ProxyPreventExtensions(implicit context: Context)(
+ proxy: JSProxy, doThrow: Boolean): Object {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'preventExtensions';
+ try {
+ // 1. Let handler be O.[[ProxyHandler]].
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 4. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 5. Let trap be ? GetMethod(handler, "preventExtensions").
+ // 6. If trap is undefined, then (see 6.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target);
+
+ // 7. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «
+ // target»)).
+ const trapResult = Call(context, trap, handler, target);
+
+ // 8. If booleanTrapResult is true, then
+ // 8.a. Let extensibleTarget be ? IsExtensible(target).
+ // 8.b If extensibleTarget is true, throw a TypeError exception.
+ if (BranchIfToBooleanIsTrue(trapResult)) {
+ const extensibleTarget: Object = object::ObjectIsExtensible(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ ThrowTypeError(kProxyPreventExtensionsExtensible);
+ }
+ } else {
+ if (doThrow == True) {
+ ThrowTypeError(kProxyTrapReturnedFalsish, kTrapName);
+ }
+ return False;
+ }
+
+ // 9. Return booleanTrapResult.
+ return True;
+ }
+ label TrapUndefined(target: Object) {
+ // 6.a. Return ? target.[[PreventExtensions]]().
+ if (doThrow == True) {
+ return object::ObjectPreventExtensionsThrow(target);
+ }
+ return object::ObjectPreventExtensionsDontThrow(target);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ }
+} // namespace proxy
diff --git a/deps/v8/src/builtins/proxy-revocable.tq b/deps/v8/src/builtins/proxy-revocable.tq
index 695f005c9b..b09baab9cf 100644
--- a/deps/v8/src/builtins/proxy-revocable.tq
+++ b/deps/v8/src/builtins/proxy-revocable.tq
@@ -7,17 +7,13 @@
namespace proxy {
extern macro ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(
- Object, Object): JSFunction;
- macro AllocateProxyRevokeFunction(implicit context: Context)(proxy: JSProxy):
- JSFunction {
- return AllocateProxyRevokeFunction(proxy, context);
- }
+ implicit context: Context)(JSProxy): JSFunction;
// Proxy.revocable(target, handler)
// https://tc39.github.io/ecma262/#sec-proxy.revocable
transitioning javascript builtin
ProxyRevocable(
- context: Context, receiver: Object, target: Object,
+ context: Context, _receiver: Object, target: Object,
handler: Object): JSProxyRevocableResult {
try {
const targetJSReceiver =
diff --git a/deps/v8/src/builtins/proxy-revoke.tq b/deps/v8/src/builtins/proxy-revoke.tq
index 400f586b21..d89b54077a 100644
--- a/deps/v8/src/builtins/proxy-revoke.tq
+++ b/deps/v8/src/builtins/proxy-revoke.tq
@@ -9,7 +9,7 @@ namespace proxy {
// Proxy Revocation Functions
// https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
transitioning javascript builtin
- ProxyRevoke(implicit context: Context)(): Undefined {
+ ProxyRevoke(js-implicit context: Context)(): Undefined {
// 1. Let p be F.[[RevocableProxy]].
const proxyObject: Object = context[PROXY_SLOT];
diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq
index 72181e08a8..d0411a8e89 100644
--- a/deps/v8/src/builtins/proxy-set-property.tq
+++ b/deps/v8/src/builtins/proxy-set-property.tq
@@ -30,21 +30,20 @@ namespace proxy {
return Undefined;
}
- // 2. Let handler be O.[[ProxyHandler]].
- const handler: Object = proxy.handler;
-
try {
+ // 2. Let handler be O.[[ProxyHandler]].
// 3. If handler is null, throw a TypeError exception.
// 4. Assert: Type(handler) is Object.
- const handlerJSReceiver =
- Cast<JSReceiver>(handler) otherwise ThrowProxyHandlerRevoked;
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
// 5. Let target be O.[[ProxyTarget]].
- const target = proxy.target;
+ const target = UnsafeCast<JSReceiver>(proxy.target);
// 6. Let trap be ? GetMethod(handler, "set").
// 7. If trap is undefined, then (see 7.a below).
- const trap: Callable = GetMethod(handlerJSReceiver, 'set')
+ const trap: Callable = GetMethod(handler, 'set')
otherwise goto TrapUndefined(target);
// 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
@@ -61,11 +60,11 @@ namespace proxy {
// i. If targetDesc.[[Set]] is undefined, throw a TypeError
// exception.
// 12. Return true.
- const trapResult = Call(
- context, trap, handlerJSReceiver, target, name, value, receiverValue);
+ const trapResult =
+ Call(context, trap, handler, target, name, value, receiverValue);
if (BranchIfToBooleanIsTrue(trapResult)) {
- return CheckGetSetTrapResult(
- target, proxy, name, trapResult, kProxySet);
+ CheckGetSetTrapResult(target, proxy, name, value, kProxySet);
+ return value;
}
ThrowTypeErrorIfStrict(
SmiConstant(kProxyTrapReturnedFalsishFor), 'set', name);
@@ -77,7 +76,6 @@ namespace proxy {
return value;
}
label ThrowProxyHandlerRevoked deferred {
- assert(handler == Null);
ThrowTypeError(kProxyRevoked, 'set');
}
}
diff --git a/deps/v8/src/builtins/proxy-set-prototype-of.tq b/deps/v8/src/builtins/proxy-set-prototype-of.tq
new file mode 100644
index 0000000000..bbd99be411
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-set-prototype-of.tq
@@ -0,0 +1,77 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
+ // https://tc39.es/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-setprototypeof-v
+ transitioning builtin
+ ProxySetPrototypeOf(implicit context: Context)(
+ proxy: JSProxy, proto: Object, doThrow: Boolean): Object {
+ PerformStackCheck();
+ const kTrapName: constexpr string = 'setPrototypeOf';
+ try {
+ // 1. Assert: Either Type(V) is Object or Type(V) is Null.
+ assert(proto == Null || Is<JSReceiver>(proto));
+
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ assert(proxy.handler == Null || Is<JSReceiver>(proxy.handler));
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 6. Let trap be ? GetMethod(handler, "setPrototypeOf").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, kTrapName)
+ otherwise goto TrapUndefined(target, proto);
+
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, « target, V
+ // »)).
+ const trapResult = Call(context, trap, handler, target, proto);
+
+ // 9. If booleanTrapResult is false, return false.
+ if (BranchIfToBooleanIsFalse(trapResult)) {
+ if (doThrow == True) {
+ ThrowTypeError(kProxyTrapReturnedFalsishFor, kTrapName);
+ }
+ return False;
+ }
+
+ // 10. Let extensibleTarget be ? IsExtensible(target).
+ // 11. If extensibleTarget is true, return true.
+ const extensibleTarget: Object = object::ObjectIsExtensible(target);
+ assert(extensibleTarget == True || extensibleTarget == False);
+ if (extensibleTarget == True) {
+ return True;
+ }
+
+ // 12. Let targetProto be ? target.[[GetPrototypeOf]]().
+ const targetProto = object::ObjectGetPrototypeOf(target);
+
+ // 13. If SameValue(V, targetProto) is false, throw a TypeError
+ // exception.
+ // 14. Return true.
+ if (BranchIfSameValue(proto, targetProto)) {
+ return True;
+ }
+ ThrowTypeError(kProxySetPrototypeOfNonExtensible);
+ }
+ label TrapUndefined(target: Object, proto: Object) {
+ // 7.a. Return ? target.[[SetPrototypeOf]]().
+ if (doThrow == True) {
+ return object::ObjectSetPrototypeOfThrow(target, proto);
+ }
+ return object::ObjectSetPrototypeOfDontThrow(target, proto);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, kTrapName);
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy.tq b/deps/v8/src/builtins/proxy.tq
index 16bba85292..d95def5d0e 100644
--- a/deps/v8/src/builtins/proxy.tq
+++ b/deps/v8/src/builtins/proxy.tq
@@ -7,25 +7,23 @@
namespace proxy {
extern macro ProxiesCodeStubAssembler::AllocateProxy(
- JSReceiver, JSReceiver, Context): JSProxy;
- macro AllocateProxy(implicit context: Context)(
- target: JSReceiver, handler: JSReceiver): JSProxy {
- return AllocateProxy(target, handler, context);
- }
+ implicit context: Context)(JSReceiver, JSReceiver): JSProxy;
macro IsRevokedProxy(implicit context: Context)(o: JSReceiver): bool {
const proxy: JSProxy = Cast<JSProxy>(o) otherwise return false;
- const handler: JSReceiver =
- Cast<JSReceiver>(proxy.handler) otherwise return true;
+ Cast<JSReceiver>(proxy.handler) otherwise return true;
return false;
}
extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult(
implicit context:
- Context)(Object, JSProxy, Name, Object, constexpr int31): Object;
+ Context)(JSReceiver, JSProxy, Name, Object, constexpr int31);
+
+ extern transitioning macro ProxiesCodeStubAssembler::CheckDeleteTrapResult(
+ implicit context: Context)(JSReceiver, JSProxy, Name);
extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult(
- implicit context: Context)(Object, JSProxy, Name): Object;
+ implicit context: Context)(JSReceiver, JSProxy, Name);
const kProxyNonObject: constexpr MessageTemplate
generates 'MessageTemplate::kProxyNonObject';
@@ -37,6 +35,20 @@ namespace proxy {
generates 'MessageTemplate::kProxyTrapReturnedFalsishFor';
const kProxyPrivate: constexpr MessageTemplate
generates 'MessageTemplate::kProxyPrivate';
+ const kProxyIsExtensibleInconsistent: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyIsExtensibleInconsistent';
+ const kProxyPreventExtensionsExtensible: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyPreventExtensionsExtensible';
+ const kProxyTrapReturnedFalsish: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyTrapReturnedFalsish';
+ const kProxyGetPrototypeOfInvalid: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyGetPrototypeOfInvalid';
+ const kProxyGetPrototypeOfNonExtensible: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyGetPrototypeOfNonExtensible';
+ const kProxySetPrototypeOfNonExtensible: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxySetPrototypeOfNonExtensible';
+ const kProxyDeletePropertyNonExtensible: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyDeletePropertyNonExtensible';
const kProxyGet: constexpr int31
generates 'JSProxy::AccessKind::kGet';
diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq
new file mode 100644
index 0000000000..4c25e8338f
--- /dev/null
+++ b/deps/v8/src/builtins/reflect.tq
@@ -0,0 +1,82 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace reflect {
+
+ const kCalledOnNonObject: constexpr MessageTemplate
+ generates 'MessageTemplate::kCalledOnNonObject';
+
+ // ES6 section 26.1.10 Reflect.isExtensible
+ transitioning javascript builtin ReflectIsExtensible(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.isExtensible');
+ return object::ObjectIsExtensible(objectJSReceiver);
+ }
+
+ // ES6 section 26.1.12 Reflect.preventExtensions
+ transitioning javascript builtin ReflectPreventExtensions(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.preventExtensions');
+ return object::ObjectPreventExtensionsDontThrow(objectJSReceiver);
+ }
+
+ // ES6 section 26.1.8 Reflect.getPrototypeOf
+ transitioning javascript builtin ReflectGetPrototypeOf(
+ js-implicit context: Context)(_receiver: Object, object: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.getPrototypeOf');
+ return object::JSReceiverGetPrototypeOf(objectJSReceiver);
+ }
+
+ // ES6 section 26.1.14 Reflect.setPrototypeOf
+ transitioning javascript builtin ReflectSetPrototypeOf(
+ js-implicit context:
+ Context)(_receiver: Object, object: Object, proto: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.setPrototypeOf');
+ if (proto == Null || Is<JSReceiver>(proto)) {
+ return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto);
+ }
+ ThrowTypeError(kProtoObjectOrNull, proto);
+ }
+
+ extern transitioning builtin ToName(implicit context: Context)(Object): Name;
+ type OnNonExistent constexpr 'OnNonExistent';
+ const kReturnUndefined: constexpr OnNonExistent
+ generates 'OnNonExistent::kReturnUndefined';
+ extern macro SmiConstant(constexpr OnNonExistent): Smi;
+ extern transitioning builtin GetPropertyWithReceiver(
+ implicit context: Context)(Object, Name, Object, Smi): Object;
+
+ // ES6 section 26.1.6 Reflect.get
+ transitioning javascript builtin
+ ReflectGet(js-implicit context: Context)(...arguments): Object {
+ const length = arguments.length;
+ const object: Object = length > 0 ? arguments[0] : Undefined;
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.get');
+ const propertyKey: Object = length > 1 ? arguments[1] : Undefined;
+ const name: Name = ToName(propertyKey);
+ const receiver: Object = length > 2 ? arguments[2] : objectJSReceiver;
+ return GetPropertyWithReceiver(
+ objectJSReceiver, name, receiver, SmiConstant(kReturnUndefined));
+ }
+
+ // ES6 section 26.1.4 Reflect.deleteProperty
+ transitioning javascript builtin ReflectDeleteProperty(
+ js-implicit context:
+ Context)(_receiver: Object, object: Object, key: Object): Object {
+ const objectJSReceiver = Cast<JSReceiver>(object)
+ otherwise ThrowTypeError(kCalledOnNonObject, 'Reflect.deleteProperty');
+ const name: Name = ToName(key);
+ if (IsPrivateSymbol(name)) {
+ return DeleteProperty(objectJSReceiver, name, kSloppy);
+ }
+ const proxy = Cast<JSProxy>(objectJSReceiver)
+ otherwise return DeleteProperty(objectJSReceiver, name, kSloppy);
+ return proxy::ProxyDeleteProperty(proxy, name, kSloppy);
+ }
+} // namespace reflect
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index 9b95f99f41..cb0038c6b6 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -22,7 +22,7 @@ namespace regexp_replace {
String, JSRegExp, Callable): String;
extern macro
- RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Number, bool): Smi;
+ RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Smi, bool): Smi;
extern macro
RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast(
implicit context: Context)(JSReceiver, String):
@@ -72,8 +72,7 @@ namespace regexp_replace {
transitioning macro
RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
- matchesElements: FixedArray, matchesLength: intptr, string: String,
- replaceFn: Callable) {
+ matchesElements: FixedArray, matchesLength: intptr, replaceFn: Callable) {
for (let i: intptr = 0; i < matchesLength; i++) {
const elArray =
Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
@@ -124,7 +123,7 @@ namespace regexp_replace {
matchesElements, matchesLengthInt, string, replaceFn);
} else {
RegExpReplaceCallableWithExplicitCaptures(
- matchesElements, matchesLengthInt, string, replaceFn);
+ matchesElements, matchesLengthInt, replaceFn);
}
return StringBuilderConcat(matches, matchesLength, string);
@@ -138,7 +137,7 @@ namespace regexp_replace {
let result: String = kEmptyString;
let lastMatchEnd: Smi = 0;
let unicode: bool = false;
- let replaceLength: Smi = replaceString.length_smi;
+ const replaceLength: Smi = replaceString.length_smi;
const global: bool = regexp.global;
if (global) {
@@ -209,7 +208,7 @@ namespace regexp_replace {
}
transitioning javascript builtin RegExpPrototypeReplace(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
const methodName: constexpr string = 'RegExp.prototype.@@replace';
// RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index bf8c0cb68a..854f31cece 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -60,24 +60,20 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
- // -- r2 : argument count (preserved for callee)
// -- r3 : target function (preserved for callee)
// -- r5 : new target (preserved for callee)
// -----------------------------------
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push the number of arguments to the callee.
// Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ SmiTag(r2);
- __ Push(r2, r3, r5, r3);
+ __ Push(r3, r5, r3);
__ CallRuntime(function_id, 1);
__ LoadRR(r4, r2);
// Restore target function and new target.
- __ Pop(r2, r3, r5);
- __ SmiUntag(r2);
+ __ Pop(r3, r5);
}
static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
__ JumpCodeObject(r4);
@@ -110,6 +106,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ Register scratch = r4;
Label stack_overflow;
Generate_StackOverflowCheck(masm, r2, r7, &stack_overflow);
@@ -138,13 +135,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args);
- __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
- __ SubP(sp, sp, ip);
+ __ ShiftLeftP(scratch, r2, Operand(kPointerSizeLog2));
+ __ SubP(sp, sp, scratch);
__ LoadRR(r1, r2);
__ bind(&loop);
- __ lay(ip, MemOperand(ip, -kPointerSize));
- __ LoadP(r0, MemOperand(ip, r6));
- __ StoreP(r0, MemOperand(ip, sp));
+ __ lay(scratch, MemOperand(scratch, -kPointerSize));
+ __ LoadP(r0, MemOperand(scratch, r6));
+ __ StoreP(r0, MemOperand(scratch, sp));
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
@@ -159,15 +156,15 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore context from the frame.
__ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ LoadP(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ SmiToPtrArrayOffset(r3, r3);
- __ AddP(sp, sp, r3);
+ __ SmiToPtrArrayOffset(scratch, scratch);
+ __ AddP(sp, sp, scratch);
__ AddP(sp, sp, Operand(kPointerSize));
__ Ret();
@@ -296,13 +293,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ ltgr(r2, r2);
__ beq(&no_args);
- __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
- __ SubP(sp, sp, ip);
+ __ ShiftLeftP(r8, r2, Operand(kPointerSizeLog2));
+ __ SubP(sp, sp, r8);
__ LoadRR(r1, r2);
__ bind(&loop);
- __ lay(ip, MemOperand(ip, -kPointerSize));
- __ LoadP(r0, MemOperand(ip, r6));
- __ StoreP(r0, MemOperand(ip, sp));
+ __ lay(r8, MemOperand(r8, -kPointerSize));
+ __ LoadP(r0, MemOperand(r8, r6));
+ __ StoreP(r0, MemOperand(r8, sp));
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
@@ -409,11 +406,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
+ Register scratch = r7;
+
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
- __ Move(ip, debug_hook);
- __ LoadB(ip, MemOperand(ip));
- __ CmpSmiLiteral(ip, Smi::zero(), r0);
+ __ Move(scratch, debug_hook);
+ __ LoadB(scratch, MemOperand(scratch));
+ __ CmpSmiLiteral(scratch, Smi::zero(), r0);
__ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -421,9 +420,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
- __ Move(ip, debug_suspended_generator);
- __ LoadP(ip, MemOperand(ip));
- __ CmpP(ip, r3);
+ __ Move(scratch, debug_suspended_generator);
+ __ LoadP(scratch, MemOperand(scratch));
+ __ CmpP(scratch, r3);
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -434,8 +433,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&stack_overflow);
// Push receiver.
- __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
- __ Push(ip);
+ __ LoadP(scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ Push(scratch);
// ----------- S t a t e -------------
// -- r3 : the JSGeneratorObject to resume
@@ -626,6 +625,9 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
IsolateAddressId::kCEntryFPAddress, masm->isolate()));
__ LoadP(r6, MemOperand(r6));
__ StoreMultipleP(r6, r9, MemOperand(sp, kPointerSize));
+
+ Register scrach = r8;
+
// Set up frame pointer for the frame to be pushed.
// Need to add kPointerSize, because sp has one extra
// frame already for the frame type being pushed later.
@@ -642,17 +644,17 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
masm->isolate());
__ Move(r7, js_entry_sp);
- __ LoadAndTestP(r8, MemOperand(r7));
+ __ LoadAndTestP(scrach, MemOperand(r7));
__ bne(&non_outermost_js, Label::kNear);
__ StoreP(fp, MemOperand(r7));
- __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ Load(scrach, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont, Label::kNear);
__ bind(&non_outermost_js);
- __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ Load(scrach, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
- __ StoreP(ip, MemOperand(sp)); // frame-type
+ __ StoreP(scrach, MemOperand(sp)); // frame-type
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -668,10 +670,11 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
- __ Move(ip, ExternalReference::Create(
- IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ __ Move(scrach,
+ ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
+ masm->isolate()));
- __ StoreP(r2, MemOperand(ip));
+ __ StoreP(r2, MemOperand(scrach));
__ LoadRoot(r2, RootIndex::kException);
__ b(&exit, Label::kNear);
@@ -704,16 +707,16 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ pop(r7);
__ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ bne(&non_outermost_js_2, Label::kNear);
- __ mov(r8, Operand::Zero());
+ __ mov(scrach, Operand::Zero());
__ Move(r7, js_entry_sp);
- __ StoreP(r8, MemOperand(r7));
+ __ StoreP(scrach, MemOperand(r7));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(r5);
- __ Move(ip, ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, masm->isolate()));
- __ StoreP(r5, MemOperand(ip));
+ __ Move(scrach, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ StoreP(r5, MemOperand(scrach));
// Reset the stack to the callee saved registers.
__ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
@@ -949,13 +952,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register scratch1, Register scratch2,
Register scratch3) {
// ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee if needed, and caller)
- // -- r3 : new target (preserved for callee if needed, and caller)
- // -- r1 : target function (preserved for callee if needed, and caller)
+ // -- r5 : new target (preserved for callee if needed, and caller)
+ // -- r3 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(
- !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, r3, r5, scratch1, scratch2, scratch3));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1140,6 +1141,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+ // and update invocation count. Otherwise, setup the stack frame.
+ __ LoadP(r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ LoadLogicalHalfWordP(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
+ __ CmpP(r6, Operand(FEEDBACK_VECTOR_TYPE));
+ __ bne(&push_stack_frame);
+
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
@@ -1154,6 +1164,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
+ __ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -1161,12 +1172,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ lghi(r1, Operand(0));
__ StoreHalfWord(r1,
FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset),
+ BytecodeArray::kOsrNestingLevelOffset),
r0);
// Load the initial bytecode offset.
@@ -1447,11 +1458,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
- __ LoadlB(ip, MemOperand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister));
- __ ShiftLeftP(ip, ip, Operand(kPointerSizeLog2));
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ LoadlB(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ShiftLeftP(scratch, scratch, Operand(kPointerSizeLog2));
__ LoadP(kJavaScriptCallCodeStartRegister,
- MemOperand(kInterpreterDispatchTableRegister, ip));
+ MemOperand(kInterpreterDispatchTableRegister, scratch));
__ Jump(kJavaScriptCallCodeStartRegister);
}
@@ -1578,13 +1591,17 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ LoadP(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
- __ Pop(ip);
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
+ UseScratchRegisterScope temps(masm);
+ Register builtin = temps.Acquire();
+ __ Pop(builtin);
__ AddP(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(r0);
__ LoadRR(r14, r0);
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ __ LoadEntryFromBuiltinIndex(builtin);
+ __ Jump(builtin);
}
} // namespace
@@ -1745,13 +1762,14 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r2: actual number of arguments
// r3: callable
{
+ Register scratch = r5;
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ AddP(r4, sp, r4);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r4, -kPointerSize));
- __ StoreP(ip, MemOperand(r4));
+ __ LoadP(scratch, MemOperand(r4, -kPointerSize));
+ __ StoreP(scratch, MemOperand(r4));
__ SubP(r4, Operand(kPointerSize));
__ CmpP(r4, sp);
__ bne(&loop);
@@ -1944,7 +1962,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Check for stack overflow.
Label stack_overflow;
- Generate_StackOverflowCheck(masm, r6, ip, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r6, scratch, &stack_overflow);
// Push arguments onto the stack (thisArgument is already on the stack).
{
@@ -1955,13 +1973,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ LoadRR(r1, r6);
__ bind(&loop);
- __ LoadP(ip, MemOperand(r4, kPointerSize));
+ __ LoadP(scratch, MemOperand(r4, kPointerSize));
__ la(r4, MemOperand(r4, kPointerSize));
- __ CompareRoot(ip, RootIndex::kTheHoleValue);
+ __ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip, Label::kNear);
- __ LoadRoot(ip, RootIndex::kUndefinedValue);
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ bind(&skip);
- __ push(ip);
+ __ push(scratch);
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
__ AddP(r2, r2, r6);
@@ -2007,8 +2025,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpP(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ LoadP(scratch,
+ MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ CmpP(scratch,
+ Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ beq(&arguments_adaptor);
{
__ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -2042,9 +2062,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ AddP(r2, r2, r7);
__ bind(&loop);
{
- __ ShiftLeftP(ip, r7, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(r6, ip));
- __ push(ip);
+ __ ShiftLeftP(scratch, r7, Operand(kPointerSizeLog2));
+ __ LoadP(scratch, MemOperand(r6, scratch));
+ __ push(scratch);
__ SubP(r7, r7, Operand(1));
__ CmpP(r7, Operand::Zero());
__ bne(&loop);
@@ -2189,10 +2209,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- r6 : the number of [[BoundArguments]]
// -----------------------------------
+ Register scratch = r8;
// Reserve stack space for the [[BoundArguments]].
{
Label done;
- __ LoadRR(r8, sp); // preserve previous stack pointer
+ __ LoadRR(scratch, sp); // preserve previous stack pointer
__ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
__ SubP(sp, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
@@ -2201,7 +2222,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ CompareRoot(sp, RootIndex::kRealStackLimit);
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
- __ LoadRR(sp, r8);
+ __ LoadRR(sp, scratch);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2221,7 +2242,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ beq(&skip);
__ LoadRR(r1, r2);
__ bind(&loop);
- __ LoadP(r0, MemOperand(r8, r7));
+ __ LoadP(r0, MemOperand(scratch, r7));
__ StoreP(r0, MemOperand(sp, r7));
__ AddP(r7, r7, Operand(kPointerSize));
__ BranchOnCount(r1, &loop);
@@ -2257,9 +2278,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r3);
// Patch the receiver to [[BoundThis]].
- __ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+ __ LoadP(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
- __ StoreP(ip, MemOperand(sp, r1));
+ __ StoreP(r5, MemOperand(sp, r1));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
@@ -2749,7 +2770,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ CompareRoot(r1, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ beq(&okay, Label::kNear);
- __ stop("Unexpected pending exception");
+ __ stop();
__ bind(&okay);
}
@@ -3000,13 +3021,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ LoadlB(scratch, MemOperand(scratch, 0));
__ CmpP(scratch, Operand::Zero());
- Label profiler_disabled;
- Label end_profiler_check;
- __ beq(&profiler_disabled, Label::kNear);
- __ Move(scratch, thunk_ref);
- __ b(&end_profiler_check, Label::kNear);
- __ bind(&profiler_disabled);
- __ LoadRR(scratch, function_address);
+ Label profiler_enabled, end_profiler_check;
+ __ bne(&profiler_enabled, Label::kNear);
+ __ Move(scratch, ExternalReference::address_of_runtime_stats_flag());
+ __ LoadlW(scratch, MemOperand(scratch, 0));
+ __ CmpP(scratch, Operand::Zero());
+ __ bne(&profiler_enabled, Label::kNear);
+ {
+ // Call the api function directly.
+ __ LoadRR(scratch, function_address);
+ __ b(&end_profiler_check, Label::kNear);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ Move(scratch, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
@@ -3304,7 +3334,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// Unused.
- __ stop(0);
+ __ stop();
}
#undef __
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index e3403c601d..7188eb04a8 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -157,10 +157,7 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- SegmentSize segment_size = isolate->serializer_enabled()
- ? SegmentSize::kLarge
- : SegmentSize::kDefault;
- Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
+ Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(
@@ -181,10 +178,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
- SegmentSize segment_size = isolate->serializer_enabled()
- ? SegmentSize::kLarge
- : SegmentSize::kDefault;
- Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
+ Zone zone(isolate->allocator(), ZONE_NAME);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
CallInterfaceDescriptor descriptor(interface_descriptor);
@@ -232,9 +226,9 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
- HeapIterator iterator(isolate->heap());
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(isolate->heap());
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsCode()) continue;
Code code = Code::cast(obj);
bool flush_icache = false;
@@ -282,10 +276,6 @@ Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
} // namespace
-#ifdef _MSC_VER
-#pragma optimize( "", off )
-#endif
-
// static
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
@@ -363,10 +353,5 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
builtins->MarkInitialized();
}
-#ifdef _MSC_VER
-#pragma optimize( "", on )
-#endif
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/string-endswith.tq b/deps/v8/src/builtins/string-endswith.tq
index 16405d4c12..8b9fe84dfb 100644
--- a/deps/v8/src/builtins/string-endswith.tq
+++ b/deps/v8/src/builtins/string-endswith.tq
@@ -28,12 +28,13 @@ namespace string {
// https://tc39.github.io/ecma262/#sec-string.prototype.endswith
transitioning javascript builtin StringPrototypeEndsWith(
- context: Context, receiver: Object, ...arguments): Boolean {
+ js-implicit context: Context, receiver: Object)(...arguments): Boolean {
const searchString: Object = arguments[0];
const endPosition: Object = arguments[1];
+ const kBuiltinName: constexpr string = 'String.prototype.endsWith';
// 1. Let O be ? RequireObjectCoercible(this value).
- const object: Object = RequireObjectCoercible(receiver);
+ const object: Object = RequireObjectCoercible(receiver, kBuiltinName);
// 2. Let S be ? ToString(O).
const string: String = ToString_Inline(context, object);
@@ -41,7 +42,7 @@ namespace string {
// 3. Let isRegExp be ? IsRegExp(searchString).
// 4. If isRegExp is true, throw a TypeError exception.
if (IsRegExp(searchString)) {
- ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.endsWith');
+ ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName);
}
// 5. Let searchStr be ? ToString(searchString).
@@ -63,7 +64,7 @@ namespace string {
const searchLength: Smi = searchStr.length_smi;
// 10. Let start be end - searchLength.
- let start = end - searchLength;
+ const start = end - searchLength;
// 11. If start is less than 0, return false.
if (start < 0) return False;
diff --git a/deps/v8/src/builtins/string-html.tq b/deps/v8/src/builtins/string-html.tq
index a2b1625206..80b5f77887 100644
--- a/deps/v8/src/builtins/string-html.tq
+++ b/deps/v8/src/builtins/string-html.tq
@@ -22,22 +22,23 @@ namespace string_html {
// https://tc39.github.io/ecma262/#sec-string.prototype.anchor
transitioning javascript builtin StringPrototypeAnchor(
- context: Context, receiver: Object, ...arguments): String {
+ js-implicit context: Context, receiver: Object)(...arguments): String {
return CreateHTML(
receiver, 'String.prototype.anchor', 'a', 'name', arguments[0]);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.big
transitioning javascript builtin
- StringPrototypeBig(context: Context, receiver: Object, ...arguments): String {
+ StringPrototypeBig(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.big', 'big', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.blink
transitioning javascript builtin
- StringPrototypeBlink(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeBlink(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.blink', 'blink', kEmptyString,
kEmptyString);
@@ -45,56 +46,56 @@ namespace string_html {
// https://tc39.github.io/ecma262/#sec-string.prototype.bold
transitioning javascript builtin
- StringPrototypeBold(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeBold(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.bold', 'b', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.fontcolor
transitioning javascript builtin
- StringPrototypeFontcolor(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeFontcolor(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.fontcolor', 'font', 'color', arguments[0]);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.fontsize
transitioning javascript builtin
- StringPrototypeFontsize(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeFontsize(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.fontsize', 'font', 'size', arguments[0]);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.fixed
transitioning javascript builtin
- StringPrototypeFixed(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeFixed(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.fixed', 'tt', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.italics
transitioning javascript builtin
- StringPrototypeItalics(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeItalics(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.italics', 'i', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.link
transitioning javascript builtin
- StringPrototypeLink(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeLink(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.link', 'a', 'href', arguments[0]);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.small
transitioning javascript builtin
- StringPrototypeSmall(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeSmall(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.small', 'small', kEmptyString,
kEmptyString);
@@ -102,8 +103,8 @@ namespace string_html {
// https://tc39.github.io/ecma262/#sec-string.prototype.strike
transitioning javascript builtin
- StringPrototypeStrike(context: Context, receiver: Object, ...arguments):
- String {
+ StringPrototypeStrike(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.strike', 'strike', kEmptyString,
kEmptyString);
@@ -111,14 +112,16 @@ namespace string_html {
// https://tc39.github.io/ecma262/#sec-string.prototype.sub
transitioning javascript builtin
- StringPrototypeSub(context: Context, receiver: Object, ...arguments): String {
+ StringPrototypeSub(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.sub', 'sub', kEmptyString, kEmptyString);
}
// https://tc39.github.io/ecma262/#sec-string.prototype.sup
transitioning javascript builtin
- StringPrototypeSup(context: Context, receiver: Object, ...arguments): String {
+ StringPrototypeSup(js-implicit context: Context, receiver: Object)(
+ ...arguments): String {
return CreateHTML(
receiver, 'String.prototype.sup', 'sup', kEmptyString, kEmptyString);
}
diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq
index f5c6099c25..5b8f864661 100644
--- a/deps/v8/src/builtins/string-iterator.tq
+++ b/deps/v8/src/builtins/string-iterator.tq
@@ -17,7 +17,7 @@ namespace string_iterator {
// ES6 #sec-string.prototype-@@iterator
transitioning javascript builtin StringPrototypeIterator(
- implicit context: Context)(receiver: Object): JSStringIterator {
+ js-implicit context: Context)(receiver: Object): JSStringIterator {
const name: String =
ToThisString(receiver, 'String.prototype[Symbol.iterator]');
const index: Smi = 0;
@@ -26,7 +26,7 @@ namespace string_iterator {
// ES6 #sec-%stringiteratorprototype%.next
transitioning javascript builtin StringIteratorPrototypeNext(
- implicit context: Context)(receiver: Object): JSIteratorResult {
+ js-implicit context: Context)(receiver: Object): JSObject {
const iterator = Cast<JSStringIterator>(receiver) otherwise ThrowTypeError(
kIncompatibleMethodReceiver, 'String Iterator.prototype.next',
receiver);
@@ -34,13 +34,13 @@ namespace string_iterator {
const position: intptr = SmiUntag(iterator.next_index);
const length: intptr = string.length_intptr;
if (position >= length) {
- return NewJSIteratorResult(Undefined, True);
+ return AllocateJSIteratorResult(Undefined, True);
}
// Move to next codepoint.
const encoding = UTF16;
const ch = string::LoadSurrogatePairAt(string, length, position, encoding);
- const value: String = string::StringFromSingleCodePoint(ch, encoding);
+ const value: String = string::StringFromSingleUTF16EncodedCodePoint(ch);
iterator.next_index = SmiTag(position + value.length_intptr);
- return NewJSIteratorResult(value, False);
+ return AllocateJSIteratorResult(value, False);
}
}
diff --git a/deps/v8/src/builtins/string-repeat.tq b/deps/v8/src/builtins/string-repeat.tq
index f2590011ea..0d9d4ee498 100644
--- a/deps/v8/src/builtins/string-repeat.tq
+++ b/deps/v8/src/builtins/string-repeat.tq
@@ -28,7 +28,7 @@ namespace string_repeat {
// https://tc39.github.io/ecma262/#sec-string.prototype.repeat
transitioning javascript builtin StringPrototypeRepeat(
- context: Context, receiver: Object, count: Object): String {
+ js-implicit context: Context, receiver: Object)(count: Object): String {
// 1. Let O be ? RequireObjectCoercible(this value).
// 2. Let S be ? ToString(O).
const s: String = ToThisString(receiver, kBuiltinName);
diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq
index 41eb38b0ad..b066fb7669 100644
--- a/deps/v8/src/builtins/string-slice.tq
+++ b/deps/v8/src/builtins/string-slice.tq
@@ -9,7 +9,7 @@ namespace string_slice {
// ES6 #sec-string.prototype.slice ( start, end )
// https://tc39.github.io/ecma262/#sec-string.prototype.slice
transitioning javascript builtin StringPrototypeSlice(
- implicit context: Context)(receiver: Object, ...arguments): String {
+ js-implicit context: Context, receiver: Object)(...arguments): String {
// 1. Let O be ? RequireObjectCoercible(this value).
// 2. Let S be ? ToString(O).
const string: String = ToThisString(receiver, 'String.prototype.slice');
diff --git a/deps/v8/src/builtins/string-startswith.tq b/deps/v8/src/builtins/string-startswith.tq
index 1f885a2afd..b03e67ecf5 100644
--- a/deps/v8/src/builtins/string-startswith.tq
+++ b/deps/v8/src/builtins/string-startswith.tq
@@ -8,23 +8,15 @@ namespace string {
extern macro RegExpBuiltinsAssembler::IsRegExp(implicit context:
Context)(Object): bool;
- // TODO(ryzokuken): Add RequireObjectCoercible to base.tq and update callsites
- macro RequireObjectCoercible(implicit context: Context)(argument: Object):
- Object {
- if (IsNullOrUndefined(argument)) {
- ThrowTypeError(kCalledOnNullOrUndefined, 'String.prototype.startsWith');
- }
- return argument;
- }
-
// https://tc39.github.io/ecma262/#sec-string.prototype.startswith
transitioning javascript builtin StringPrototypeStartsWith(
- context: Context, receiver: Object, ...arguments): Boolean {
+ js-implicit context: Context, receiver: Object)(...arguments): Boolean {
const searchString: Object = arguments[0];
const position: Object = arguments[1];
+ const kBuiltinName: constexpr string = 'String.prototype.startsWith';
// 1. Let O be ? RequireObjectCoercible(this value).
- const object: Object = RequireObjectCoercible(receiver);
+ const object: Object = RequireObjectCoercible(receiver, kBuiltinName);
// 2. Let S be ? ToString(O).
const string: String = ToString_Inline(context, object);
@@ -32,7 +24,7 @@ namespace string {
// 3. Let isRegExp be ? IsRegExp(searchString).
// 4. If isRegExp is true, throw a TypeError exception.
if (IsRegExp(searchString)) {
- ThrowTypeError(kFirstArgumentNotRegExp, 'String.prototype.startsWith');
+ ThrowTypeError(kFirstArgumentNotRegExp, kBuiltinName);
}
// 5. Let searchStr be ? ToString(searchString).
diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq
index f322eeed06..1fafb8af43 100644
--- a/deps/v8/src/builtins/string-substring.tq
+++ b/deps/v8/src/builtins/string-substring.tq
@@ -28,7 +28,7 @@ namespace string_substring {
// ES6 #sec-string.prototype.substring
transitioning javascript builtin StringPrototypeSubstring(
- implicit context: Context)(receiver: Object, ...arguments): String {
+ js-implicit context: Context, receiver: Object)(...arguments): String {
// Check that {receiver} is coercible to Object and convert it to a String.
const string: String = ToThisString(receiver, 'String.prototype.substring');
const length = string.length_smi;
diff --git a/deps/v8/src/builtins/string.tq b/deps/v8/src/builtins/string.tq
index 1e5a74eb49..dbcc5799e1 100644
--- a/deps/v8/src/builtins/string.tq
+++ b/deps/v8/src/builtins/string.tq
@@ -7,20 +7,21 @@
namespace string {
// ES6 #sec-string.prototype.tostring
transitioning javascript builtin
- StringPrototypeToString(implicit context: Context)(receiver: Object): Object {
+ StringPrototypeToString(js-implicit context: Context)(receiver: Object):
+ Object {
return ToThisValue(receiver, kString, 'String.prototype.toString');
}
// ES6 #sec-string.prototype.valueof
transitioning javascript builtin
- StringPrototypeValueOf(implicit context: Context)(receiver: Object): Object {
+ StringPrototypeValueOf(js-implicit context: Context)(receiver: Object):
+ Object {
return ToThisValue(receiver, kString, 'String.prototype.valueOf');
}
extern macro StringBuiltinsAssembler::LoadSurrogatePairAt(
String, intptr, intptr, constexpr UnicodeEncoding): int32;
- extern macro StringFromSingleCodePoint(int32, constexpr UnicodeEncoding):
- String;
+ extern macro StringFromSingleUTF16EncodedCodePoint(int32): String;
// This function assumes StringPrimitiveWithNoCustomIteration is true.
transitioning builtin StringToList(implicit context: Context)(string: String):
@@ -38,7 +39,7 @@ namespace string {
let i: intptr = 0;
while (i < stringLength) {
const ch: int32 = LoadSurrogatePairAt(string, stringLength, i, encoding);
- const value: String = StringFromSingleCodePoint(ch, encoding);
+ const value: String = StringFromSingleUTF16EncodedCodePoint(ch);
elements[arrayLength] = value;
// Increment and continue the loop.
i = i + value.length_intptr;
@@ -52,9 +53,9 @@ namespace string {
}
transitioning macro GenerateStringAt(implicit context: Context)(
- receiver: Object, position: Object, methodName: constexpr string):
- never labels IfInBounds(String, intptr, intptr),
- IfOutOfBounds {
+ receiver: Object, position: Object,
+ methodName: constexpr string): never labels
+ IfInBounds(String, intptr, intptr), IfOutOfBounds {
// Check that {receiver} is coercible to Object and convert it to a String.
const string: String = ToThisString(receiver, methodName);
// Convert the {position} to a Smi and check that it's in bounds of
@@ -70,12 +71,13 @@ namespace string {
// ES6 #sec-string.prototype.charat
transitioning javascript builtin StringPrototypeCharAt(
- implicit context: Context)(receiver: Object, position: Object): Object {
+ js-implicit context: Context,
+ receiver: Object)(position: Object): Object {
try {
GenerateStringAt(receiver, position, 'String.prototype.charAt')
otherwise IfInBounds, IfOutOfBounds;
}
- label IfInBounds(string: String, index: intptr, length: intptr) {
+ label IfInBounds(string: String, index: intptr, _length: intptr) {
const code: int32 = StringCharCodeAt(string, index);
return StringFromSingleCharCode(code);
}
@@ -86,12 +88,13 @@ namespace string {
// ES6 #sec-string.prototype.charcodeat
transitioning javascript builtin StringPrototypeCharCodeAt(
- implicit context: Context)(receiver: Object, position: Object): Object {
+ js-implicit context: Context,
+ receiver: Object)(position: Object): Object {
try {
GenerateStringAt(receiver, position, 'String.prototype.charCodeAt')
otherwise IfInBounds, IfOutOfBounds;
}
- label IfInBounds(string: String, index: intptr, length: intptr) {
+ label IfInBounds(string: String, index: intptr, _length: intptr) {
const code: int32 = StringCharCodeAt(string, index);
return Convert<Smi>(code);
}
@@ -102,7 +105,8 @@ namespace string {
// ES6 #sec-string.prototype.codepointat
transitioning javascript builtin StringPrototypeCodePointAt(
- implicit context: Context)(receiver: Object, position: Object): Object {
+ js-implicit context: Context,
+ receiver: Object)(position: Object): Object {
try {
GenerateStringAt(receiver, position, 'String.prototype.codePointAt')
otherwise IfInBounds, IfOutOfBounds;
@@ -121,7 +125,7 @@ namespace string {
// ES6 String.prototype.concat(...args)
// ES6 #sec-string.prototype.concat
transitioning javascript builtin StringPrototypeConcat(
- implicit context: Context)(receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// Check that {receiver} is coercible to Object and convert it to a String.
let string: String = ToThisString(receiver, 'String.prototype.concat');
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index a0d745b2f4..f6ab289e12 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -8,30 +8,77 @@ namespace typed_array_createtypedarray {
extern builtin IterableToListMayPreserveHoles(Context, Object, Callable):
JSArray;
- extern macro ConstructorBuiltinsAssembler::EmitFastNewObject(
- implicit context: Context)(JSFunction, JSReceiver): JSTypedArray;
extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
- implicit context: Context)(JSTypedArray, uintptr): JSArrayBuffer;
+ implicit context: Context)(uintptr): JSArrayBuffer;
+ extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray;
extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
implicit context: Context)(JSTypedArray): JSFunction;
extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer):
bool;
- extern macro TypedArrayBuiltinsAssembler::SetupTypedArray(
- JSTypedArray, uintptr, uintptr, uintptr): void;
+ extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
+ JSTypedArray): void;
extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)(
Map, String): never;
extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number):
void;
+ transitioning macro AllocateTypedArray(implicit context: Context)(
+ isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer,
+ byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray {
+ let elements: ByteArray;
+ let externalPointer: RawPtr;
+ let basePointer: ByteArray | Smi;
+ if constexpr (isOnHeap) {
+ elements = AllocateByteArray(byteLength);
+ basePointer = elements;
+ externalPointer = PointerConstant(kExternalPointerForOnHeapArray);
+ } else {
+ basePointer = Convert<Smi>(0);
+
+ // The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit
+ // platforms are self-limiting, because we can't allocate an array bigger
+ // than our 32-bit arithmetic range anyway. 64 bit platforms could
+ // theoretically have an offset up to 2^35 - 1.
+ const backingStore: RawPtr = buffer.backing_store;
+ externalPointer = backingStore + Convert<intptr>(byteOffset);
+
+ // Assert no overflow has occurred. Only assert if the mock array buffer
+ // allocator is NOT used. When the mock array buffer is used, impossibly
+ // large allocations are allowed that would erroneously cause an overflow
+ // and this assertion to fail.
+ assert(
+ IsMockArrayBufferAllocatorFlag() ||
+ Convert<uintptr>(externalPointer) >= Convert<uintptr>(backingStore));
+
+ elements = kEmptyByteArray;
+ }
+
+ // We can't just build the new object with "new JSTypedArray" here because
+ // Torque doesn't know its full size including embedder fields, so use CSA
+ // for the allocation step.
+ const typedArray =
+ UnsafeCast<JSTypedArray>(AllocateFastOrSlowJSObjectFromMap(map));
+ typedArray.elements = elements;
+ typedArray.buffer = buffer;
+ typedArray.byte_offset = byteOffset;
+ typedArray.byte_length = byteLength;
+ typedArray.length = length;
+ typedArray.external_pointer = externalPointer;
+ typedArray.base_pointer = basePointer;
+ SetupTypedArrayEmbedderFields(typedArray);
+ return typedArray;
+ }
+
transitioning macro TypedArrayInitialize(implicit context: Context)(
- initialize: constexpr bool, typedArray: JSTypedArray, length: PositiveSmi,
+ initialize: constexpr bool, map: Map, length: PositiveSmi,
elementsInfo: typed_array::TypedArrayElementsInfo,
- bufferConstructor: JSReceiver): uintptr {
+ bufferConstructor: JSReceiver): JSTypedArray {
const byteLength = elementsInfo.CalculateByteLength(length)
otherwise ThrowRangeError(kInvalidArrayBufferLength);
const byteLengthNum = Convert<Number>(byteLength);
const defaultConstructor = GetArrayBufferFunction();
+ const byteOffset: uintptr = 0;
try {
if (bufferConstructor != defaultConstructor) {
@@ -39,14 +86,21 @@ namespace typed_array_createtypedarray {
defaultConstructor, bufferConstructor, byteLengthNum));
}
- if (byteLength > V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP) goto AllocateOffHeap;
+ if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap;
+
+ const buffer = AllocateEmptyOnHeapBuffer(byteLength);
- AllocateEmptyOnHeapBuffer(typedArray, byteLength);
+ const isOnHeap: constexpr bool = true;
+ const typedArray = AllocateTypedArray(
+ isOnHeap, map, buffer, byteOffset, byteLength,
+ Convert<uintptr>(length));
if constexpr (initialize) {
const backingStore = typedArray.data_ptr;
typed_array::CallCMemset(backingStore, 0, byteLength);
}
+
+ return typedArray;
}
label AllocateOffHeap {
if constexpr (initialize) {
@@ -58,22 +112,18 @@ namespace typed_array_createtypedarray {
}
label AttachOffHeapBuffer(bufferObj: Object) {
const buffer = Cast<JSArrayBuffer>(bufferObj) otherwise unreachable;
- const byteOffset: uintptr = 0;
- typedArray.AttachOffHeapBuffer(buffer, byteOffset);
+ const isOnHeap: constexpr bool = false;
+ return AllocateTypedArray(
+ isOnHeap, map, buffer, byteOffset, byteLength,
+ Convert<uintptr>(length));
}
-
- const byteOffset: uintptr = 0;
- SetupTypedArray(
- typedArray, Convert<uintptr>(length), byteOffset, byteLength);
-
- return byteLength;
}
// 22.2.4.2 TypedArray ( length )
// ES #sec-typedarray-length
transitioning macro ConstructByLength(implicit context: Context)(
- typedArray: JSTypedArray, length: Object,
- elementsInfo: typed_array::TypedArrayElementsInfo): void {
+ map: Map, length: Object,
+ elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
const convertedLength: Number =
ToInteger_Inline(context, length, kTruncateMinusZero);
// The maximum length of a TypedArray is MaxSmi().
@@ -84,23 +134,22 @@ namespace typed_array_createtypedarray {
otherwise ThrowRangeError(kInvalidTypedArrayLength, length);
const defaultConstructor: Constructor = GetArrayBufferFunction();
const initialize: constexpr bool = true;
- TypedArrayInitialize(
- initialize, typedArray, positiveLength, elementsInfo,
- defaultConstructor);
+ return TypedArrayInitialize(
+ initialize, map, positiveLength, elementsInfo, defaultConstructor);
}
// 22.2.4.4 TypedArray ( object )
// ES #sec-typedarray-object
transitioning macro ConstructByArrayLike(implicit context: Context)(
- typedArray: JSTypedArray, arrayLike: HeapObject, initialLength: Object,
+ map: Map, arrayLike: HeapObject, initialLength: Object,
elementsInfo: typed_array::TypedArrayElementsInfo,
- bufferConstructor: JSReceiver): void {
+ bufferConstructor: JSReceiver): JSTypedArray {
// The caller has looked up length on arrayLike, which is observable.
const length: PositiveSmi = ToSmiLength(initialLength)
otherwise ThrowRangeError(kInvalidTypedArrayLength, initialLength);
const initialize: constexpr bool = false;
- const byteLength = TypedArrayInitialize(
- initialize, typedArray, length, elementsInfo, bufferConstructor);
+ const typedArray = TypedArrayInitialize(
+ initialize, map, length, elementsInfo, bufferConstructor);
try {
const src: JSTypedArray = Cast<JSTypedArray>(arrayLike) otherwise IfSlow;
@@ -112,6 +161,7 @@ namespace typed_array_createtypedarray {
goto IfSlow;
} else if (length > 0) {
+ const byteLength = typedArray.byte_length;
assert(byteLength <= kArrayBufferMaxByteLength);
typed_array::CallCMemcpy(typedArray.data_ptr, src.data_ptr, byteLength);
}
@@ -121,13 +171,13 @@ namespace typed_array_createtypedarray {
TypedArrayCopyElements(context, typedArray, arrayLike, length);
}
}
+ return typedArray;
}
// 22.2.4.4 TypedArray ( object )
// ES #sec-typedarray-object
transitioning macro ConstructByIterable(implicit context: Context)(
- typedArray: JSTypedArray, iterable: JSReceiver, iteratorFn: Callable,
- elementsInfo: typed_array::TypedArrayElementsInfo): never
+ iterable: JSReceiver, iteratorFn: Callable): never
labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
const array: JSArray =
IterableToListMayPreserveHoles(context, iterable, iteratorFn);
@@ -137,8 +187,7 @@ namespace typed_array_createtypedarray {
// 22.2.4.3 TypedArray ( typedArray )
// ES #sec-typedarray-typedarray
transitioning macro ConstructByTypedArray(implicit context: Context)(
- typedArray: JSTypedArray, srcTypedArray: JSTypedArray,
- elementsInfo: typed_array::TypedArrayElementsInfo): never
+ srcTypedArray: JSTypedArray): never
labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
let bufferConstructor: JSReceiver = GetArrayBufferFunction();
const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
@@ -161,8 +210,8 @@ namespace typed_array_createtypedarray {
// 22.2.4.5 TypedArray ( buffer, byteOffset, length )
// ES #sec-typedarray-buffer-byteoffset-length
transitioning macro ConstructByArrayBuffer(implicit context: Context)(
- typedArray: JSTypedArray, buffer: JSArrayBuffer, byteOffset: Object,
- length: Object, elementsInfo: typed_array::TypedArrayElementsInfo): void {
+ map: Map, buffer: JSArrayBuffer, byteOffset: Object, length: Object,
+ elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
try {
let offset: uintptr = 0;
if (byteOffset != Undefined) {
@@ -224,12 +273,13 @@ namespace typed_array_createtypedarray {
goto IfInvalidLength;
}
- SetupTypedArray(
- typedArray, Convert<uintptr>(newLength), offset, newByteLength);
- typedArray.AttachOffHeapBuffer(buffer, offset);
+ const isOnHeap: constexpr bool = false;
+ return AllocateTypedArray(
+ isOnHeap, map, buffer, offset, newByteLength,
+ Convert<uintptr>(newLength));
}
label IfInvalidAlignment(problemString: String) deferred {
- ThrowInvalidTypedArrayAlignment(typedArray.map, problemString);
+ ThrowInvalidTypedArrayAlignment(map, problemString);
}
label IfInvalidByteLength deferred {
ThrowRangeError(kInvalidArrayBufferLength);
@@ -242,16 +292,15 @@ namespace typed_array_createtypedarray {
}
}
- transitioning macro ConstructByJSReceiver(implicit context: Context)(
- array: JSTypedArray, obj: JSReceiver,
- elementsInfo: typed_array::TypedArrayElementsInfo): never
+ transitioning macro ConstructByJSReceiver(implicit context:
+ Context)(obj: JSReceiver): never
labels IfConstructByArrayLike(HeapObject, Object, JSReceiver) {
try {
const iteratorMethod: Object =
GetIteratorMethod(obj) otherwise IfIteratorUndefined;
const iteratorFn: Callable = Cast<Callable>(iteratorMethod)
otherwise ThrowTypeError(kIteratorSymbolNonCallable);
- ConstructByIterable(array, obj, iteratorFn, elementsInfo)
+ ConstructByIterable(obj, iteratorFn)
otherwise IfConstructByArrayLike;
}
label IfIteratorUndefined {
@@ -273,22 +322,12 @@ namespace typed_array_createtypedarray {
assert(IsConstructor(target));
// 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
// "%TypedArrayPrototype%").
- const array: JSTypedArray = EmitFastNewObject(target, newTarget);
- // We need to set the byte_offset / byte_length to some sane values
- // to keep the heap verifier happy.
- // TODO(bmeurer, v8:4153): Fix this initialization to not use
- // EmitFastNewObject, which causes the problem, since it puts
- // Undefined into all slots of the object even though that
- // doesn't make any sense for these fields.
- array.byte_offset = 0;
- array.byte_length = 0;
- array.length = 0;
- array.base_pointer = Convert<Smi>(0);
+ const map = GetDerivedMap(target, newTarget);
// 5. Let elementSize be the Number value of the Element Size value in Table
// 56 for constructorName.
const elementsInfo: typed_array::TypedArrayElementsInfo =
- typed_array::GetTypedArrayElementsInfo(array);
+ typed_array::GetTypedArrayElementsInfo(map);
try {
typeswitch (arg1) {
@@ -296,15 +335,13 @@ namespace typed_array_createtypedarray {
goto IfConstructByLength(length);
}
case (buffer: JSArrayBuffer): {
- ConstructByArrayBuffer(array, buffer, arg2, arg3, elementsInfo);
+ return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo);
}
case (typedArray: JSTypedArray): {
- ConstructByTypedArray(array, typedArray, elementsInfo)
- otherwise IfConstructByArrayLike;
+ ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike;
}
case (obj: JSReceiver): {
- ConstructByJSReceiver(array, obj, elementsInfo)
- otherwise IfConstructByArrayLike;
+ ConstructByJSReceiver(obj) otherwise IfConstructByArrayLike;
}
// The first argument was a number or fell through and is treated as
// a number. https://tc39.github.io/ecma262/#sec-typedarray-length
@@ -314,14 +351,13 @@ namespace typed_array_createtypedarray {
}
}
label IfConstructByLength(length: Object) {
- ConstructByLength(array, length, elementsInfo);
+ return ConstructByLength(map, length, elementsInfo);
}
label IfConstructByArrayLike(
arrayLike: HeapObject, length: Object, bufferConstructor: JSReceiver) {
- ConstructByArrayLike(
- array, arrayLike, length, elementsInfo, bufferConstructor);
+ return ConstructByArrayLike(
+ map, arrayLike, length, elementsInfo, bufferConstructor);
}
- return array;
}
transitioning macro TypedArraySpeciesCreate(implicit context: Context)(
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index 4f8804880e..221814cb79 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -29,8 +29,8 @@ namespace typed_array_every {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
transitioning javascript builtin
- TypedArrayPrototypeEvery(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeEvery(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg
try {
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
index 9407c3a7af..3937699c73 100644
--- a/deps/v8/src/builtins/typed-array-filter.tq
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -10,7 +10,7 @@ namespace typed_array_filter {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.filter
transitioning javascript builtin TypedArrayPrototypeFilter(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg
try {
diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq
index 3c331eb3bb..be1943ccf4 100644
--- a/deps/v8/src/builtins/typed-array-find.tq
+++ b/deps/v8/src/builtins/typed-array-find.tq
@@ -29,8 +29,8 @@ namespace typed_array_find {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
transitioning javascript builtin
- TypedArrayPrototypeFind(implicit context:
- Context)(receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeFind(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg
try {
diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq
index 05f112d0d5..a5ee7897d3 100644
--- a/deps/v8/src/builtins/typed-array-findindex.tq
+++ b/deps/v8/src/builtins/typed-array-findindex.tq
@@ -29,8 +29,8 @@ namespace typed_array_findindex {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.findIndex
transitioning javascript builtin
- TypedArrayPrototypeFindIndex(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeFindIndex(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg.
try {
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
index dbf1a121da..656a22e07d 100644
--- a/deps/v8/src/builtins/typed-array-foreach.tq
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -25,8 +25,8 @@ namespace typed_array_foreach {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
transitioning javascript builtin
- TypedArrayPrototypeForEach(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeForEach(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = this_arg.
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
index 7af918a07b..d69dc9a98d 100644
--- a/deps/v8/src/builtins/typed-array-reduce.tq
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -19,7 +19,7 @@ namespace typed_array_reduce {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
const value: Object = witness.Load(k);
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
accumulator = value;
} else {
accumulator = Call(
@@ -27,7 +27,7 @@ namespace typed_array_reduce {
witness.GetStable());
}
}
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, kBuiltinName);
}
return accumulator;
@@ -35,8 +35,8 @@ namespace typed_array_reduce {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduce
transitioning javascript builtin
- TypedArrayPrototypeReduce(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeReduce(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = initialValue.
try {
@@ -45,7 +45,7 @@ namespace typed_array_reduce {
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const initialValue = arguments.length >= 2 ? arguments[1] : Hole;
+ const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
return ReduceAllElements(uarray, callbackfn, initialValue);
}
label NotCallable deferred {
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
index 59ce7ff55b..99a84401ed 100644
--- a/deps/v8/src/builtins/typed-array-reduceright.tq
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -19,7 +19,7 @@ namespace typed_array_reduceright {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
const value: Object = witness.Load(k);
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
accumulator = value;
} else {
accumulator = Call(
@@ -27,7 +27,7 @@ namespace typed_array_reduceright {
witness.GetStable());
}
}
- if (accumulator == Hole) {
+ if (accumulator == TheHole) {
ThrowTypeError(kReduceNoInitial, kBuiltinName);
}
return accumulator;
@@ -35,8 +35,8 @@ namespace typed_array_reduceright {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
transitioning javascript builtin
- TypedArrayPrototypeReduceRight(implicit context: Context)(
- receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeReduceRight(
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// arguments[0] = callback
// arguments[1] = initialValue.
try {
@@ -45,7 +45,7 @@ namespace typed_array_reduceright {
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
- const initialValue = arguments.length >= 2 ? arguments[1] : Hole;
+ const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
return ReduceRightAllElements(uarray, callbackfn, initialValue);
}
diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq
index f45654b71e..c0087ae1be 100644
--- a/deps/v8/src/builtins/typed-array-slice.tq
+++ b/deps/v8/src/builtins/typed-array-slice.tq
@@ -53,7 +53,7 @@ namespace typed_array_slice {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.slice
transitioning javascript builtin TypedArrayPrototypeSlice(
- context: Context, receiver: Object, ...arguments): Object {
+ js-implicit context: Context, receiver: Object)(...arguments): Object {
// arguments[0] = start
// arguments[1] = end
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index 991cad6b1b..7056650fba 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -29,8 +29,8 @@ namespace typed_array_some {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.some
transitioning javascript builtin
- TypedArrayPrototypeSome(implicit context:
- Context)(receiver: Object, ...arguments): Object {
+ TypedArrayPrototypeSome(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// arguments[0] = callback
// arguments[1] = thisArg.
try {
diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq
index 54b945f44e..4f98123f82 100644
--- a/deps/v8/src/builtins/typed-array-subarray.tq
+++ b/deps/v8/src/builtins/typed-array-subarray.tq
@@ -5,7 +5,8 @@
namespace typed_array_subarray {
// ES %TypedArray%.prototype.subarray
transitioning javascript builtin TypedArrayPrototypeSubArray(
- context: Context, receiver: Object, ...arguments): JSTypedArray {
+ js-implicit context: Context,
+ receiver: Object)(...arguments): JSTypedArray {
const methodName: constexpr string = '%TypedArray%.prototype.subarray';
// 1. Let O be the this value.
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 8f923947f1..d03c1a0be9 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -65,29 +65,18 @@ namespace typed_array {
implicit context: Context)(JSTypedArray): JSArrayBuffer;
extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
JSTypedArray): TypedArrayElementsInfo;
+ extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(Map):
+ TypedArrayElementsInfo;
extern macro TypedArrayBuiltinsAssembler::IsBigInt64ElementsKind(
ElementsKind): bool;
extern macro LoadFixedTypedArrayElementAsTagged(
- RawPtr, Smi, constexpr ElementsKind, constexpr ParameterMode): Object;
+ RawPtr, Smi, constexpr ElementsKind): Numeric;
extern macro StoreJSTypedArrayElementFromTagged(
- Context, JSTypedArray, Smi, Object, constexpr ElementsKind,
- constexpr ParameterMode);
+ Context, JSTypedArray, Smi, Object, constexpr ElementsKind);
type LoadFn = builtin(Context, JSTypedArray, Smi) => Object;
type StoreFn = builtin(Context, JSTypedArray, Smi, Object) => Object;
- // These UnsafeCast specializations are necessary becuase there is no
- // way to definitively test whether an Object is a Torque function
- // with a specific signature, and the default UnsafeCast implementation
- // would try to check this through an assert(Is<>), so the test
- // is bypassed in this specialization.
- UnsafeCast<LoadFn>(implicit context: Context)(o: Object): LoadFn {
- return %RawDownCast<LoadFn>(o);
- }
- UnsafeCast<StoreFn>(implicit context: Context)(o: Object): StoreFn {
- return %RawDownCast<StoreFn>(o);
- }
-
// AttachedJSTypedArray guards that the array's buffer is not detached.
transient type AttachedJSTypedArray extends JSTypedArray;
@@ -201,17 +190,16 @@ namespace typed_array {
}
builtin LoadFixedElement<T: type>(
- context: Context, array: JSTypedArray, index: Smi): Object {
+ _context: Context, array: JSTypedArray, index: Smi): Object {
return LoadFixedTypedArrayElementAsTagged(
- array.data_ptr, index, KindForArrayType<T>(), SMI_PARAMETERS);
+ array.data_ptr, index, KindForArrayType<T>());
}
builtin StoreFixedElement<T: type>(
context: Context, typedArray: JSTypedArray, index: Smi,
value: Object): Object {
StoreJSTypedArrayElementFromTagged(
- context, typedArray, index, value, KindForArrayType<T>(),
- SMI_PARAMETERS);
+ context, typedArray, index, value, KindForArrayType<T>());
return Undefined;
}
@@ -288,7 +276,8 @@ namespace typed_array {
// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort
transitioning javascript builtin TypedArrayPrototypeSort(
- context: Context, receiver: Object, ...arguments): JSTypedArray {
+ js-implicit context: Context,
+ receiver: Object)(...arguments): JSTypedArray {
// 1. If comparefn is not undefined and IsCallable(comparefn) is false,
// throw a TypeError exception.
const comparefnObj: Object =
@@ -322,7 +311,7 @@ namespace typed_array {
let loadfn: LoadFn;
let storefn: StoreFn;
- let elementsKind: ElementsKind = array.elements_kind;
+ const elementsKind: ElementsKind = array.elements_kind;
if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) {
if (elementsKind == INT32_ELEMENTS) {
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 5c09b3a8de..f15c8ba29f 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -1109,10 +1109,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- BytecodeArray::kOSRNestingLevelOffset + kCharSize);
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kOSRNestingLevelOffset),
+ BytecodeArray::kOsrNestingLevelOffset),
Immediate(0));
// Load initial bytecode offset.
@@ -1562,7 +1562,15 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
kSystemPointerSize;
__ popq(Operand(rsp, offsetToPC));
__ Drop(offsetToPC / kSystemPointerSize);
- __ addq(Operand(rsp, 0), Immediate(Code::kHeaderSize - kHeapObjectTag));
+
+ // Replace the builtin index Smi on the stack with the instruction start
+ // address of the builtin from the builtins table, and then Ret to this
+ // address
+ __ movq(kScratchRegister, Operand(rsp, 0));
+ __ movq(kScratchRegister,
+ __ EntryFromBuiltinIndexAsOperand(kScratchRegister));
+ __ movq(Operand(rsp, 0), kScratchRegister);
+
__ Ret();
}
} // namespace
@@ -3002,21 +3010,24 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
__ addl(Operand(base_reg, kLevelOffset), Immediate(1));
- Label profiler_disabled;
- Label end_profiler_check;
+ Label profiler_enabled, end_profiler_check;
__ Move(rax, ExternalReference::is_profiling_address(isolate));
__ cmpb(Operand(rax, 0), Immediate(0));
- __ j(zero, &profiler_disabled);
-
- // Third parameter is the address of the actual getter function.
- __ Move(thunk_last_arg, function_address);
- __ Move(rax, thunk_ref);
- __ jmp(&end_profiler_check);
-
- __ bind(&profiler_disabled);
- // Call the api function!
- __ Move(rax, function_address);
-
+ __ j(not_zero, &profiler_enabled);
+ __ Move(rax, ExternalReference::address_of_runtime_stats_flag());
+ __ cmpl(Operand(rax, 0), Immediate(0));
+ __ j(not_zero, &profiler_enabled);
+ {
+ // Call the api function directly.
+ __ Move(rax, function_address);
+ __ jmp(&end_profiler_check);
+ }
+ __ bind(&profiler_enabled);
+ {
+ // Third parameter is the address of the actual getter function.
+ __ Move(thunk_last_arg, function_address);
+ __ Move(rax, thunk_ref);
+ }
__ bind(&end_profiler_check);
// Call the api function!
@@ -3065,6 +3076,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ CompareRoot(map, RootIndex::kHeapNumberMap);
__ j(equal, &ok, Label::kNear);
+ __ CompareRoot(map, RootIndex::kBigIntMap);
+ __ j(equal, &ok, Label::kNear);
+
__ CompareRoot(return_value, RootIndex::kUndefinedValue);
__ j(equal, &ok, Label::kNear);
diff --git a/deps/v8/src/codegen/DEPS b/deps/v8/src/codegen/DEPS
new file mode 100644
index 0000000000..f3715e6ad0
--- /dev/null
+++ b/deps/v8/src/codegen/DEPS
@@ -0,0 +1,9 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+specific_include_rules = {
+ "external-reference.cc": [
+ "+src/regexp/regexp-macro-assembler-arch.h",
+ ],
+}
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 345e80a16e..feb2f62f78 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -1,9 +1,12 @@
-ahaas@chromium.org
+bbudge@chromium.org
bmeurer@chromium.org
clemensh@chromium.org
+gdeepti@chromium.org
+ishell@chromium.org
jarin@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
+leszeks@chromium.org
mslekova@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
@@ -11,3 +14,6 @@ neis@chromium.org
rmcilroy@chromium.org
sigurds@chromium.org
tebbi@chromium.org
+titzer@chromium.org
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index c8ef586fc1..7ca49a3f9f 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -2210,7 +2210,7 @@ void Assembler::stm(BlockAddrMode am, Register base, RegList src,
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-arm.h .
-void Assembler::stop(const char* msg, Condition cond, int32_t code) {
+void Assembler::stop(Condition cond, int32_t code) {
#ifndef __arm__
DCHECK_GE(code, kDefaultStopCode);
{
@@ -4827,12 +4827,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::CONST_POOL);
- // We can share CODE_TARGETs because we don't patch the code objects anymore,
- // and we make sure we emit only one reloc info for them (thus delta patching)
- // will apply the delta only once. At the moment, we do not dedup code targets
- // if they are wrapped in a heap object request (value == 0).
+ // We can share CODE_TARGETs and embedded objects, but we must make sure we
+ // only emit one reloc info for them (thus delta patching will apply the delta
+ // only once). At the moment, we do not deduplicate heap object request which
+ // are indicated by value == 0.
bool sharing_ok = RelocInfo::IsShareableRelocMode(rmode) ||
- (rmode == RelocInfo::CODE_TARGET && value != 0);
+ (rmode == RelocInfo::CODE_TARGET && value != 0) ||
+ (RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index 4db825fa97..f383632f73 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -625,8 +625,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Exception-generating instructions and debugging support
- void stop(const char* msg, Condition cond = al,
- int32_t code = kDefaultStopCode);
+ void stop(Condition cond = al, int32_t code = kDefaultStopCode);
void bkpt(uint32_t imm16); // v5 and above
void svc(uint32_t imm24, Condition cond = al);
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index bcda320f8b..ba334cd0b6 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -303,20 +303,24 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, mode);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
- mov(builtin_pointer,
- Operand(builtin_pointer, LSL, kSystemPointerSizeLog2 - kSmiTagSize));
- add(builtin_pointer, builtin_pointer,
+ mov(builtin_index,
+ Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiTagSize));
+ add(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
- ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
- Call(builtin_pointer);
+ ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -632,7 +636,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
add(scratch, object, Operand(offset - kHeapObjectTag));
tst(scratch, Operand(kPointerSize - 1));
b(eq, &ok);
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -1951,15 +1955,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -2402,7 +2406,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
- stop("Unexpected alignment");
+ stop();
bind(&alignment_as_expected);
}
}
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 4f497dcea4..e4ce734f52 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -300,7 +300,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool check_constant_pool = true);
void Call(Label* target);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index 5680d8b054..baae106c1c 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -5,6 +5,9 @@
#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
+#include <type_traits>
+
+#include "src/base/memory.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
@@ -22,8 +25,9 @@ void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references and immediate branches need extra work.
if (RelocInfo::IsInternalReference(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
- intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
- *p += delta; // Relocate entry.
+ intptr_t internal_ref = ReadUnalignedValue<intptr_t>(pc_);
+ internal_ref += delta; // Relocate entry.
+ WriteUnalignedValue<intptr_t>(pc_, internal_ref);
} else {
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
@@ -193,17 +197,16 @@ inline VRegister CPURegister::Q() const {
// Default initializer is for int types
template <typename T>
struct ImmediateInitializer {
- static const bool kIsIntType = true;
static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
static inline int64_t immediate_for(T t) {
STATIC_ASSERT(sizeof(T) <= 8);
+ STATIC_ASSERT(std::is_integral<T>::value || std::is_enum<T>::value);
return t;
}
};
template <>
struct ImmediateInitializer<Smi> {
- static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; }
static inline int64_t immediate_for(Smi t) {
return static_cast<int64_t>(t.ptr());
@@ -212,7 +215,6 @@ struct ImmediateInitializer<Smi> {
template <>
struct ImmediateInitializer<ExternalReference> {
- static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(ExternalReference t) {
return RelocInfo::EXTERNAL_REFERENCE;
}
@@ -222,8 +224,9 @@ struct ImmediateInitializer<ExternalReference> {
};
template <typename T>
-Immediate::Immediate(Handle<T> value) {
- InitializeHandle(value);
+Immediate::Immediate(Handle<T> handle, RelocInfo::Mode mode)
+ : value_(static_cast<intptr_t>(handle.address())), rmode_(mode) {
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(mode));
}
template <typename T>
@@ -234,13 +237,9 @@ Immediate::Immediate(T t)
template <typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
: value_(ImmediateInitializer<T>::immediate_for(t)), rmode_(rmode) {
- STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
+ STATIC_ASSERT(std::is_integral<T>::value);
}
-// Operand.
-template <typename T>
-Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
-
template <typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}
@@ -479,7 +478,7 @@ void Assembler::Unreachable() {
Address Assembler::target_pointer_address_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
- DCHECK(instr->IsLdrLiteralX());
+ DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}
@@ -494,6 +493,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
}
+Tagged_t Assembler::target_compressed_address_at(Address pc,
+ Address constant_pool) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ CHECK(instr->IsLdrLiteralW());
+ return Memory<Tagged_t>(target_pointer_address_at(pc));
+}
+
Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
@@ -502,14 +508,39 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
- return GetCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2);
+ return Handle<Code>::cast(
+ GetEmbeddedObject(instr->ImmPCOffset() >> kInstrSizeLog2));
}
}
-Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
+AssemblerBase::EmbeddedObjectIndex
+Assembler::embedded_object_index_referenced_from(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
- CHECK(!instr->IsLdrLiteralX());
- return GetCompressedEmbeddedObject(ReadUnalignedValue<int32_t>(pc));
+ if (instr->IsLdrLiteralX()) {
+ STATIC_ASSERT(sizeof(EmbeddedObjectIndex) == sizeof(intptr_t));
+ return Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc));
+ } else {
+ DCHECK(instr->IsLdrLiteralW());
+ return Memory<uint32_t>(target_pointer_address_at(pc));
+ }
+}
+
+void Assembler::set_embedded_object_index_referenced_from(
+ Address pc, EmbeddedObjectIndex data) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ if (instr->IsLdrLiteralX()) {
+ Memory<EmbeddedObjectIndex>(target_pointer_address_at(pc)) = data;
+ } else {
+ DCHECK(instr->IsLdrLiteralW());
+ DCHECK(is_uint32(data));
+ WriteUnalignedValue<uint32_t>(target_pointer_address_at(pc),
+ static_cast<uint32_t>(data));
+ }
+}
+
+Handle<HeapObject> Assembler::target_object_handle_at(Address pc) {
+ return GetEmbeddedObject(
+ Assembler::embedded_object_index_referenced_from(pc));
}
Address Assembler::runtime_entry_at(Address pc) {
@@ -557,7 +588,7 @@ void Assembler::deserialization_set_special_target_at(Address location,
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
- Memory<Address>(pc) = target;
+ WriteUnalignedValue<Address>(pc, target);
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
@@ -585,12 +616,21 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
}
+void Assembler::set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ CHECK(instr->IsLdrLiteralW());
+ Memory<Tagged_t>(target_pointer_address_at(pc)) = target;
+}
+
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
} else {
- DCHECK(reinterpret_cast<Instruction*>(pc_)->IsLdrLiteralX());
- return kSystemPointerSize;
+ Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+ DCHECK(instr->IsLdrLiteralX() || instr->IsLdrLiteralW());
+ return instr->IsLdrLiteralW() ? kTaggedSize : kSystemPointerSize;
}
}
@@ -629,19 +669,30 @@ Address RelocInfo::constant_pool_entry_address() {
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- return HeapObject::cast(
- Object(Assembler::target_address_at(pc_, constant_pool_)));
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ host_.address(),
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+ }
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
- return target_object();
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(DecompressTaggedAny(
+ isolate,
+ Assembler::target_compressed_address_at(pc_, constant_pool_))));
+ } else {
+ return target_object();
+ }
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- if (IsFullEmbeddedObject(rmode_)) {
- return Handle<HeapObject>(reinterpret_cast<Address*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ if (IsEmbeddedObjectMode(rmode_)) {
+ return origin->target_object_handle_at(pc_);
} else {
DCHECK(IsCodeTarget(rmode_));
return origin->code_target_object_handle_at(pc_);
@@ -651,9 +702,15 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
- icache_flush_mode);
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(
+ pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
+ } else {
+ DCHECK(IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
}
@@ -673,7 +730,7 @@ void RelocInfo::set_target_external_reference(
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
- return Memory<Address>(pc_);
+ return ReadUnalignedValue<Address>(pc_);
}
Address RelocInfo::target_internal_reference_address() {
@@ -701,11 +758,14 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
- Memory<Address>(pc_) = kNullAddress;
+ WriteUnalignedValue<Address>(pc_, kNullAddress);
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ Assembler::set_target_compressed_address_at(pc_, constant_pool_,
+ kNullAddress);
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
@@ -1025,9 +1085,7 @@ inline void Assembler::CheckBuffer() {
if (pc_offset() >= next_veneer_pool_check_) {
CheckVeneerPool(false, true);
}
- if (pc_offset() >= next_constant_pool_check_) {
- CheckConstPool(false, true);
- }
+ constpool_.MaybeCheck();
}
} // namespace internal
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 1806f82b46..159e763ba2 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -34,6 +34,7 @@
#include "src/base/cpu.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/execution/frame-constants.h"
@@ -283,11 +284,6 @@ bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
return true;
}
-void Immediate::InitializeHandle(Handle<HeapObject> handle) {
- value_ = static_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
-}
-
bool Operand::NeedsRelocation(const Assembler* assembler) const {
RelocInfo::Mode rmode = immediate_.rmode();
@@ -298,167 +294,6 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
return !RelocInfo::IsNone(rmode);
}
-bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
- int offset) {
- auto existing = entry_map.find(data);
- if (existing == entry_map.end()) {
- entry_map[data] = static_cast<int>(entries_.size());
- entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
- return true;
- }
- int index = existing->second;
- entries_[index].second.push_back(offset);
- return false;
-}
-
-// Constant Pool.
-bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
- DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL &&
- mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
- mode != RelocInfo::DEOPT_INLINING_ID &&
- mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
-
- bool write_reloc_info = true;
-
- uint64_t raw_data = static_cast<uint64_t>(data);
- int offset = assm_->pc_offset();
- if (IsEmpty()) {
- first_use_ = offset;
- }
-
- if (RelocInfo::IsShareableRelocMode(mode)) {
- write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
- } else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
- // A zero data value is a placeholder and must not be shared.
- write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
- } else {
- entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
- }
-
- if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
- // Request constant pool emission after the next instruction.
- assm_->SetNextConstPoolCheckIn(1);
- }
-
- return write_reloc_info;
-}
-
-int ConstPool::DistanceToFirstUse() {
- DCHECK_GE(first_use_, 0);
- return assm_->pc_offset() - first_use_;
-}
-
-int ConstPool::MaxPcOffset() {
- // There are no pending entries in the pool so we can never get out of
- // range.
- if (IsEmpty()) return kMaxInt;
-
- // Entries are not necessarily emitted in the order they are added so in the
- // worst case the first constant pool use will be accessing the last entry.
- return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
-}
-
-int ConstPool::WorstCaseSize() {
- if (IsEmpty()) return 0;
-
- // Max size prologue:
- // b over
- // ldr xzr, #pool_size
- // blr xzr
- // nop
- // All entries are 64-bit for now.
- return 4 * kInstrSize + EntryCount() * kSystemPointerSize;
-}
-
-int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
- if (IsEmpty()) return 0;
-
- // Prologue is:
- // b over ;; if require_jump
- // ldr xzr, #pool_size
- // blr xzr
- // nop ;; if not 64-bit aligned
- int prologue_size = require_jump ? kInstrSize : 0;
- prologue_size += 2 * kInstrSize;
- prologue_size +=
- IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
-
- // All entries are 64-bit for now.
- return prologue_size + EntryCount() * kSystemPointerSize;
-}
-
-void ConstPool::Emit(bool require_jump) {
- DCHECK(!assm_->is_const_pool_blocked());
- // Prevent recursive pool emission and protect from veneer pools.
- Assembler::BlockPoolsScope block_pools(assm_);
-
- int size = SizeIfEmittedAtCurrentPc(require_jump);
- Label size_check;
- assm_->bind(&size_check);
-
- assm_->RecordConstPool(size);
- // Emit the constant pool. It is preceded by an optional branch if
- // require_jump and a header which will:
- // 1) Encode the size of the constant pool, for use by the disassembler.
- // 2) Terminate the program, to try to prevent execution from accidentally
- // flowing into the constant pool.
- // 3) align the pool entries to 64-bit.
- // The header is therefore made of up to three arm64 instructions:
- // ldr xzr, #<size of the constant pool in 32-bit words>
- // blr xzr
- // nop
- //
- // If executed, the header will likely segfault and lr will point to the
- // instruction following the offending blr.
- // TODO(all): Make the alignment part less fragile. Currently code is
- // allocated as a byte array so there are no guarantees the alignment will
- // be preserved on compaction. Currently it works as allocation seems to be
- // 64-bit aligned.
-
- // Emit branch if required
- Label after_pool;
- if (require_jump) {
- assm_->b(&after_pool);
- }
-
- // Emit the header.
- assm_->RecordComment("[ Constant Pool");
- EmitMarker();
- EmitGuard();
- assm_->Align(8);
-
- // Emit constant pool entries.
- // TODO(all): currently each relocated constant is 64 bits, consider adding
- // support for 32-bit entries.
- EmitEntries();
- assm_->RecordComment("]");
-
- if (after_pool.is_linked()) {
- assm_->bind(&after_pool);
- }
-
- DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
- static_cast<unsigned>(size));
-}
-
-void ConstPool::Clear() {
- shared_entries_.clear();
- handle_to_index_map_.clear();
- entries_.clear();
- first_use_ = -1;
-}
-
-void ConstPool::EmitMarker() {
- // A constant pool size is expressed in number of 32-bits words.
- // Currently all entries are 64-bit.
- // + 1 is for the crash guard.
- // + 0/1 for alignment.
- int word_count =
- EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
- assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
- Assembler::Rt(xzr));
-}
-
MemOperand::PairResult MemOperand::AreConsistentForPair(
const MemOperand& operandA, const MemOperand& operandB,
int access_size_log2) {
@@ -484,47 +319,18 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
return kNotPair;
}
-void ConstPool::EmitGuard() {
-#ifdef DEBUG
- Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
- DCHECK(instr->preceding()->IsLdrLiteralX() &&
- instr->preceding()->Rt() == xzr.code());
-#endif
- assm_->EmitPoolGuard();
-}
-
-void ConstPool::EmitEntries() {
- DCHECK(IsAligned(assm_->pc_offset(), 8));
-
- // Emit entries.
- for (const auto& entry : entries_) {
- for (const auto& pc : entry.second) {
- Instruction* instr = assm_->InstructionAt(pc);
-
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
- }
-
- assm_->dc64(entry.first);
- }
- Clear();
-}
-
// Assembler
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- constpool_(this),
- unresolved_branches_() {
- const_pool_blocked_nesting_ = 0;
+ unresolved_branches_(),
+ constpool_(this) {
veneer_pool_blocked_nesting_ = 0;
Reset();
}
Assembler::~Assembler() {
DCHECK(constpool_.IsEmpty());
- DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
}
@@ -533,7 +339,6 @@ void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }
void Assembler::Reset() {
#ifdef DEBUG
DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size()));
- DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
DCHECK(unresolved_branches_.empty());
memset(buffer_start_, 0, pc_ - buffer_start_);
@@ -541,9 +346,7 @@ void Assembler::Reset() {
pc_ = buffer_start_;
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
constpool_.Clear();
- next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
- no_const_pool_before_ = 0;
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
@@ -554,14 +357,16 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
request.heap_number(), AllocationType::kOld);
- set_target_address_at(pc, 0 /* unused */, object.address());
+ EmbeddedObjectIndex index = AddEmbeddedObject(object);
+ set_embedded_object_index_referenced_from(pc, index);
break;
}
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
- set_target_address_at(pc, 0 /* unused */,
- str->AllocateStringConstant(isolate).address());
+ EmbeddedObjectIndex index =
+ AddEmbeddedObject(str->AllocateStringConstant(isolate));
+ set_embedded_object_index_referenced_from(pc, index);
break;
}
}
@@ -572,7 +377,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
int handler_table_offset) {
// Emit constant pool if necessary.
- CheckConstPool(true, false);
+ ForceConstantPoolEmissionWithoutJump();
DCHECK(constpool_.IsEmpty());
int code_comments_size = WriteCodeComments();
@@ -870,32 +675,6 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
}
}
-void Assembler::StartBlockConstPool() {
- if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_constant_pool_check_ = kMaxInt;
- }
-}
-
-void Assembler::EndBlockConstPool() {
- if (--const_pool_blocked_nesting_ == 0) {
- // Check the constant pool hasn't been blocked for too long.
- DCHECK(pc_offset() < constpool_.MaxPcOffset());
- // Two cases:
- // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
- // will trigger a check.
- next_constant_pool_check_ = no_const_pool_before_;
- }
-}
-
-bool Assembler::is_const_pool_blocked() const {
- return (const_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_const_pool_before_);
-}
-
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
@@ -1497,6 +1276,7 @@ Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
if (operand.IsHeapObjectRequest()) {
+ BlockPoolsScope no_pool_before_ldr_of_heap_object_request(this);
RequestHeapObject(operand.heap_object_request());
ldr(rt, operand.immediate_for_heap_object_request());
} else {
@@ -1505,11 +1285,8 @@ void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
}
void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
- // Currently we only support 64-bit literals.
- DCHECK(rt.Is64Bits());
-
+ BlockPoolsScope no_pool_before_ldr_pcrel_instr(this);
RecordRelocInfo(imm.rmode(), imm.value());
- BlockConstPoolFor(1);
// The load will be patched when the constpool is emitted, patching code
// expect a load literal with offset 0.
ldr_pcrel(rt, 0);
@@ -3679,6 +3456,7 @@ void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
}
void Assembler::dcptr(Label* label) {
+ BlockPoolsScope no_pool_inbetween(this);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
if (label->is_bound()) {
// The label is bound, so it does not need to be updated and the internal
@@ -4471,8 +4249,10 @@ void Assembler::GrowBuffer() {
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
- intptr_t* p = reinterpret_cast<intptr_t*>(buffer_start_ + pos);
- *p += pc_delta;
+ Address address = reinterpret_cast<intptr_t>(buffer_start_) + pos;
+ intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
+ internal_ref += pc_delta;
+ WriteUnalignedValue<intptr_t>(address, internal_ref);
}
// Pending relocation entries are also relative, no need to relocate.
@@ -4492,17 +4272,31 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
- bool new_constpool_entry = constpool_.RecordEntry(data, rmode);
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
- if (!new_constpool_entry) return;
+ if (RelocInfo::IsEmbeddedObjectMode(rmode)) {
+ Handle<HeapObject> handle(reinterpret_cast<Address*>(data));
+ data = AddEmbeddedObject(handle);
+ }
+ if (rmode == RelocInfo::COMPRESSED_EMBEDDED_OBJECT) {
+ if (constpool_.RecordEntry(static_cast<uint32_t>(data), rmode) ==
+ RelocInfoStatus::kMustOmitForDuplicate) {
+ return;
+ }
+ } else {
+ if (constpool_.RecordEntry(static_cast<uint64_t>(data), rmode) ==
+ RelocInfoStatus::kMustOmitForDuplicate) {
+ return;
+ }
+ }
}
// For modes that cannot use the constant pool, a different sequence of
// instructions will be emitted by this function's caller.
if (!ShouldRecordRelocInfo(rmode)) return;
+ // Callers should ensure that constant pool emission is blocked until the
+ // instruction the reloc info is associated with has been emitted.
+ DCHECK(constpool_.IsBlocked());
+
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
@@ -4511,103 +4305,127 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
+ BlockPoolsScope no_pool_before_b_instr(this);
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
b(offset);
}
void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
+ BlockPoolsScope no_pool_before_bl_instr(this);
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
bl(offset);
}
void Assembler::near_call(HeapObjectRequest request) {
+ BlockPoolsScope no_pool_before_bl_instr(this);
RequestHeapObject(request);
- int index = AddCodeTarget(Handle<Code>());
+ EmbeddedObjectIndex index = AddEmbeddedObject(Handle<Code>());
RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
- bl(index);
+ DCHECK(is_int32(index));
+ bl(static_cast<int>(index));
}
-void Assembler::BlockConstPoolFor(int instructions) {
- int pc_limit = pc_offset() + instructions * kInstrSize;
- if (no_const_pool_before_ < pc_limit) {
- no_const_pool_before_ = pc_limit;
- // Make sure the pool won't be blocked for too long.
- DCHECK(pc_limit < constpool_.MaxPcOffset());
- }
+// Constant Pool
- if (next_constant_pool_check_ < no_const_pool_before_) {
- next_constant_pool_check_ = no_const_pool_before_;
- }
+void ConstantPool::EmitPrologue(Alignment require_alignment) {
+ // Recorded constant pool size is expressed in number of 32-bits words,
+ // and includes prologue and alignment, but not the jump around the pool
+ // and the size of the marker itself.
+ const int marker_size = 1;
+ int word_count =
+ ComputeSize(Jump::kOmitted, require_alignment) / kInt32Size - marker_size;
+ assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
+ Assembler::Rt(xzr));
+ assm_->EmitPoolGuard();
}
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Some short sequence of instruction mustn't be broken up by constant pool
- // emission, such sequences are protected by calls to BlockConstPoolFor and
- // BlockConstPoolScope.
- if (is_const_pool_blocked()) {
- // Something is wrong if emission is forced and blocked at the same time.
- DCHECK(!force_emit);
- return;
- }
+int ConstantPool::PrologueSize(Jump require_jump) const {
+ // Prologue is:
+ // b over ;; if require_jump
+ // ldr xzr, #pool_size
+ // blr xzr
+ int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
+ prologue_size += 2 * kInstrSize;
+ return prologue_size;
+}
- // There is nothing to do if there are no pending constant pool entries.
- if (constpool_.IsEmpty()) {
- // Calculate the offset of the next check.
- SetNextConstPoolCheckIn(kCheckConstPoolInterval);
- return;
- }
+void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
+ Instruction* entry_offset,
+ const ConstantPoolKey& key) {
+ Instruction* instr = assm_->InstructionAt(load_offset);
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+ instr->SetImmPCOffsetTarget(assm_->options(), entry_offset);
+}
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance to the first instruction accessing the constant pool is
- // kApproxMaxDistToConstPool or more.
- // * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
- int dist = constpool_.DistanceToFirstUse();
- int count = constpool_.EntryCount();
- if (!force_emit && (dist < kApproxMaxDistToConstPool) &&
- (count < kApproxMaxPoolEntryCount)) {
+void ConstantPool::Check(Emission force_emit, Jump require_jump,
+ size_t margin) {
+ // Some short sequence of instruction must not be broken up by constant pool
+ // emission, such sequences are protected by a ConstPool::BlockScope.
+ if (IsBlocked()) {
+ // Something is wrong if emission is forced and blocked at the same time.
+ DCHECK_EQ(force_emit, Emission::kIfNeeded);
return;
}
- // Emit veneers for branches that would go out of range during emission of the
- // constant pool.
- int worst_case_size = constpool_.WorstCaseSize();
- CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + worst_case_size);
+ // We emit a constant pool only if :
+ // * it is not empty
+ // * emission is forced by parameter force_emit (e.g. at function end).
+ // * emission is mandatory or opportune according to {ShouldEmitNow}.
+ if (!IsEmpty() && (force_emit == Emission::kForced ||
+ ShouldEmitNow(require_jump, margin))) {
+ // Emit veneers for branches that would go out of range during emission of
+ // the constant pool.
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
+ assm_->CheckVeneerPool(false, require_jump == Jump::kRequired,
+ assm_->kVeneerDistanceMargin + worst_case_size +
+ static_cast<int>(margin));
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (this includes the gap to the relocation information).
+ int needed_space = worst_case_size + assm_->kGap;
+ while (assm_->buffer_space() <= needed_space) {
+ assm_->GrowBuffer();
+ }
- // Check that the code buffer is large enough before emitting the constant
- // pool (this includes the gap to the relocation information).
- int needed_space = worst_case_size + kGap + 1 * kInstrSize;
- while (buffer_space() <= needed_space) {
- GrowBuffer();
+ EmitAndClear(require_jump);
}
-
- Label size_check;
- bind(&size_check);
- constpool_.Emit(require_jump);
- DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
- static_cast<unsigned>(worst_case_size));
-
- // Since a constant pool was just emitted, move the check offset forward by
+ // Since a constant pool is (now) empty, move the check offset forward by
// the standard interval.
- SetNextConstPoolCheckIn(kCheckConstPoolInterval);
+ SetNextCheckIn(ConstantPool::kCheckInterval);
}
-bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+// Pool entries are accessed with pc relative load therefore this cannot be more
+// than 1 * MB. Since constant pool emission checks are interval based, and we
+// want to keep entries close to the code, we try to emit every 64KB.
+const size_t ConstantPool::kMaxDistToPool32 = 1 * MB;
+const size_t ConstantPool::kMaxDistToPool64 = 1 * MB;
+const size_t ConstantPool::kCheckInterval = 128 * kInstrSize;
+const size_t ConstantPool::kApproxDistToPool32 = 64 * KB;
+const size_t ConstantPool::kApproxDistToPool64 = kApproxDistToPool32;
+
+const size_t ConstantPool::kOpportunityDistToPool32 = 64 * KB;
+const size_t ConstantPool::kOpportunityDistToPool64 = 64 * KB;
+const size_t ConstantPool::kApproxMaxEntryCount = 512;
+
+bool Assembler::ShouldEmitVeneer(int max_reachable_pc, size_t margin) {
// Account for the branch around the veneers and the guard.
int protection_offset = 2 * kInstrSize;
- return pc_offset() >
- max_reachable_pc - margin - protection_offset -
- static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+ return static_cast<intptr_t>(pc_offset() + margin + protection_offset +
+ unresolved_branches_.size() *
+ kMaxVeneerCodeSize) >= max_reachable_pc;
}
void Assembler::RecordVeneerPool(int location_offset, int size) {
+ Assembler::BlockPoolsScope block_pools(this, PoolEmissionCheck::kSkip);
RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
reloc_info_writer.Write(&rinfo);
}
-void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
- BlockPoolsScope scope(this);
+void Assembler::EmitVeneers(bool force_emit, bool need_protection,
+ size_t margin) {
+ BlockPoolsScope scope(this, PoolEmissionCheck::kSkip);
RecordComment("[ Veneers");
// The exact size of the veneer pool must be recorded (see the comment at the
@@ -4677,7 +4495,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
}
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
- int margin) {
+ size_t margin) {
// There is nothing to do if there are no pending veneer pool entries.
if (unresolved_branches_.empty()) {
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
@@ -4713,6 +4531,7 @@ int Assembler::buffer_space() const {
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
+ Assembler::BlockPoolsScope block_pools(this);
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 04cd422241..6a6bf633c1 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -35,7 +35,8 @@ class SafepointTableBuilder;
class Immediate {
public:
template <typename T>
- inline explicit Immediate(Handle<T> handle);
+ inline explicit Immediate(
+ Handle<T> handle, RelocInfo::Mode mode = RelocInfo::FULL_EMBEDDED_OBJECT);
// This is allowed to be an implicit constructor because Immediate is
// a wrapper class that doesn't normally perform any type conversion.
@@ -49,8 +50,6 @@ class Immediate {
RelocInfo::Mode rmode() const { return rmode_; }
private:
- V8_EXPORT_PRIVATE void InitializeHandle(Handle<HeapObject> value);
-
int64_t value_;
RelocInfo::Mode rmode_;
};
@@ -85,9 +84,6 @@ class Operand {
inline HeapObjectRequest heap_object_request() const;
inline Immediate immediate_for_heap_object_request() const;
- template <typename T>
- inline explicit Operand(Handle<T> handle);
-
// Implicit constructor for all int types, ExternalReference, and Smi.
template <typename T>
inline Operand(T t); // NOLINT(runtime/explicit)
@@ -174,60 +170,6 @@ class MemOperand {
unsigned shift_amount_;
};
-class ConstPool {
- public:
- explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
- // Returns true when we need to write RelocInfo and false when we do not.
- bool RecordEntry(intptr_t data, RelocInfo::Mode mode);
- int EntryCount() const { return static_cast<int>(entries_.size()); }
- bool IsEmpty() const { return entries_.empty(); }
- // Distance in bytes between the current pc and the first instruction
- // using the pool. If there are no pending entries return kMaxInt.
- int DistanceToFirstUse();
- // Offset after which instructions using the pool will be out of range.
- int MaxPcOffset();
- // Maximum size the constant pool can be with current entries. It always
- // includes alignment padding and branch over.
- int WorstCaseSize();
- // Size in bytes of the literal pool *if* it is emitted at the current
- // pc. The size will include the branch over the pool if it was requested.
- int SizeIfEmittedAtCurrentPc(bool require_jump);
- // Emit the literal pool at the current pc with a branch over the pool if
- // requested.
- void Emit(bool require_jump);
- // Discard any pending pool entries.
- void Clear();
-
- private:
- void EmitMarker();
- void EmitGuard();
- void EmitEntries();
-
- using SharedEntryMap = std::map<uint64_t, int>;
- // Adds a shared entry to entries_, using 'entry_map' to determine whether we
- // already track this entry. Returns true if this is the first time we add
- // this entry, false otherwise.
- bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset);
-
- Assembler* assm_;
- // Keep track of the first instruction requiring a constant pool entry
- // since the previous constant pool was emitted.
- int first_use_;
-
- // Map of data to index in entries_ for shared entries.
- SharedEntryMap shared_entries_;
-
- // Map of address of handle to index in entries_. We need to keep track of
- // code targets separately from other shared entries, as they can be
- // relocated.
- SharedEntryMap handle_to_index_map_;
-
- // Values, pc offset(s) of entries. Use a vector to preserve the order of
- // insertion, as the serializer expects code target RelocInfo to point to
- // constant pool addresses in an ascending order.
- std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
-};
-
// -----------------------------------------------------------------------------
// Assembler.
@@ -312,15 +254,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ inline static Tagged_t target_compressed_address_at(Address pc,
+ Address constant_pool);
inline static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ inline static void set_target_compressed_address_at(
+ Address pc, Address constant_pool, Tagged_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
// Returns the handle for the code object called at 'pc'.
// This might need to be temporarily encoded as an offset into code_targets_.
inline Handle<Code> code_target_object_handle_at(Address pc);
-
- inline Handle<HeapObject> compressed_embedded_object_handle_at(Address pc);
+ inline EmbeddedObjectIndex embedded_object_index_referenced_from(Address pc);
+ inline void set_embedded_object_index_referenced_from(
+ Address p, EmbeddedObjectIndex index);
+ // Returns the handle for the heap object referenced at 'pc'.
+ inline Handle<HeapObject> target_object_handle_at(Address pc);
// Returns the target address for a runtime function for the call encoded
// at 'pc'.
@@ -371,16 +324,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
- // Prevent contant pool emission until EndBlockConstPool is called.
- // Call to this function can be nested but must be followed by an equal
- // number of calls to EndBlockConstpool.
- void StartBlockConstPool();
-
- // Resume constant pool emission. Need to be called as many time as
- // StartBlockConstPool to have an effect.
- void EndBlockConstPool();
-
- bool is_const_pool_blocked() const;
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
@@ -399,16 +342,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return veneer_pool_blocked_nesting_ > 0;
}
- // Block/resume emission of constant pools and veneer pools.
- void StartBlockPools() {
- StartBlockConstPool();
- StartBlockVeneerPool();
- }
- void EndBlockPools() {
- EndBlockConstPool();
- EndBlockVeneerPool();
- }
-
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
@@ -2120,8 +2053,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Code generation helpers --------------------------------------------------
- bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
-
Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(ptrdiff_t offset) const {
@@ -2405,31 +2336,26 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// FP register type.
inline static Instr FPType(VRegister fd);
- // Class for scoping postponing the constant pool generation.
- class BlockConstPoolScope {
- public:
- explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockConstPool();
- }
- ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
- };
-
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
- // Check if is time to emit a constant pool.
- void CheckConstPool(bool force_emit, bool require_jump);
+ void ForceConstantPoolEmissionWithoutJump() {
+ constpool_.Check(Emission::kForced, Jump::kOmitted);
+ }
+ void ForceConstantPoolEmissionWithJump() {
+ constpool_.Check(Emission::kForced, Jump::kRequired);
+ }
+ // Check if the const pool needs to be emitted while pretending that {margin}
+ // more bytes of instructions have already been emitted.
+ void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
+ }
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc.
bool ShouldEmitVeneer(int max_reachable_pc,
- int margin = kVeneerDistanceMargin);
- bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
+ size_t margin = kVeneerDistanceMargin);
+ bool ShouldEmitVeneers(size_t margin = kVeneerDistanceMargin) {
return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
}
@@ -2443,23 +2369,34 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// If need_protection is true, the veneers are protected by a branch jumping
// over the code.
void EmitVeneers(bool force_emit, bool need_protection,
- int margin = kVeneerDistanceMargin);
+ size_t margin = kVeneerDistanceMargin);
void EmitVeneersGuard() { EmitPoolGuard(); }
// Checks whether veneers need to be emitted at this point.
// If force_emit is set, a veneer is generated for *all* unresolved branches.
void CheckVeneerPool(bool force_emit, bool require_jump,
- int margin = kVeneerDistanceMargin);
+ size_t margin = kVeneerDistanceMargin);
+
+ using BlockConstPoolScope = ConstantPool::BlockScope;
class BlockPoolsScope {
public:
- explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockPools();
+ // Block veneer and constant pool. Emits pools if necessary to ensure that
+ // {margin} more bytes can be emitted without triggering pool emission.
+ explicit BlockPoolsScope(Assembler* assem, size_t margin = 0)
+ : assem_(assem), block_const_pool_(assem, margin) {
+ assem_->CheckVeneerPool(false, true, margin);
+ assem_->StartBlockVeneerPool();
+ }
+
+ BlockPoolsScope(Assembler* assem, PoolEmissionCheck check)
+ : assem_(assem), block_const_pool_(assem, check) {
+ assem_->StartBlockVeneerPool();
}
- ~BlockPoolsScope() { assem_->EndBlockPools(); }
+ ~BlockPoolsScope() { assem_->EndBlockVeneerPool(); }
private:
Assembler* assem_;
-
+ BlockConstPoolScope block_const_pool_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
@@ -2622,15 +2559,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Verify that a label's link chain is intact.
void CheckLabelLinkChain(Label const* label);
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
- // Set how far from current pc the next constant pool check will be.
- void SetNextConstPoolCheckIn(int instructions) {
- next_constant_pool_check_ = pc_offset() + instructions * kInstrSize;
- }
-
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
@@ -2658,40 +2586,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void CheckBufferSpace();
void CheckBuffer();
- // Pc offset of the next constant pool check.
- int next_constant_pool_check_;
-
- // Constant pool generation
- // Pools are emitted in the instruction stream. They are emitted when:
- // * the distance to the first use is above a pre-defined distance or
- // * the numbers of entries in the pool is above a pre-defined size or
- // * code generation is finished
- // If a pool needs to be emitted before code generation is finished a branch
- // over the emitted pool will be inserted.
-
- // Constants in the pool may be addresses of functions that gets relocated;
- // if so, a relocation info entry is associated to the constant pool entry.
-
- // Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static constexpr int kCheckConstPoolInterval = 128;
-
- // Distance to first use after a which a pool will be emitted. Pool entries
- // are accessed with pc relative load therefore this cannot be more than
- // 1 * MB. Since constant pool emission checks are interval based this value
- // is an approximation.
- static constexpr int kApproxMaxDistToConstPool = 64 * KB;
-
- // Number of pool entries after which a pool will be emitted. Since constant
- // pool emission checks are interval based this value is an approximation.
- static constexpr int kApproxMaxPoolEntryCount = 512;
-
- // Emission of the constant pool may be blocked in some code sequences.
- int const_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_const_pool_before_; // Block emission before this pc offset.
-
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -2705,16 +2599,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// are already bound.
std::deque<int> internal_reference_positions_;
- // Relocation info records are also used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
-
- // The pending constant pool.
- ConstPool constpool_;
-
protected:
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
@@ -2727,17 +2611,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
public:
#ifdef DEBUG
// Functions used for testing.
- int GetConstantPoolEntriesSizeForTesting() const {
+ size_t GetConstantPoolEntriesSizeForTesting() const {
// Do not include branch over the pool.
- return constpool_.EntryCount() * kSystemPointerSize;
+ return constpool_.Entry32Count() * kInt32Size +
+ constpool_.Entry64Count() * kInt64Size;
}
- static constexpr int GetCheckConstPoolIntervalForTesting() {
- return kCheckConstPoolInterval;
+ static size_t GetCheckConstPoolIntervalForTesting() {
+ return ConstantPool::kCheckInterval;
}
- static constexpr int GetApproxMaxDistToConstPoolForTesting() {
- return kApproxMaxDistToConstPool;
+ static size_t GetApproxMaxDistToConstPoolForTesting() {
+ return ConstantPool::kApproxDistToPool64;
}
#endif
@@ -2779,7 +2664,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DCHECK(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
}
- // This is similar to next_constant_pool_check_ and helps reduce the overhead
+ // This PC-offset of the next veneer pool check helps reduce the overhead
// of checking for veneer pools.
// It is maintained to the closest unresolved branch limit minus the maximum
// veneer margin (or kMaxInt if there are no unresolved branches).
@@ -2804,8 +2689,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
int WriteCodeComments();
+ // The pending constant pool.
+ ConstantPool constpool_;
+
friend class EnsureSpace;
- friend class ConstPool;
+ friend class ConstantPool;
};
class PatchingAssembler : public Assembler {
@@ -2822,19 +2710,12 @@ class PatchingAssembler : public Assembler {
PatchingAssembler(const AssemblerOptions& options, byte* start,
unsigned count)
: Assembler(options,
- ExternalAssemblerBuffer(start, count * kInstrSize + kGap)) {
- // Block constant pool emission.
- StartBlockPools();
- }
+ ExternalAssemblerBuffer(start, count * kInstrSize + kGap)),
+ block_constant_pool_emission_scope(this) {}
~PatchingAssembler() {
- // Const pool should still be blocked.
- DCHECK(is_const_pool_blocked());
- EndBlockPools();
// Verify we have generated the number of instruction we expected.
DCHECK_EQ(pc_offset() + kGap, buffer_->size());
- // Verify no relocation information has been emitted.
- DCHECK(IsConstPoolEmpty());
}
// See definition of PatchAdrFar() for details.
@@ -2842,11 +2723,19 @@ class PatchingAssembler : public Assembler {
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
void PatchSubSp(uint32_t immediate);
+
+ private:
+ BlockPoolsScope block_constant_pool_emission_scope;
};
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) { assembler->CheckBufferSpace(); }
+ explicit EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
+ assembler->CheckBufferSpace();
+ }
+
+ private:
+ Assembler::BlockPoolsScope block_pools_scope_;
};
} // namespace internal
diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index eb3fb3a6be..a1e962452b 100644
--- a/deps/v8/src/codegen/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -32,8 +32,8 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
-constexpr size_t kLoadLiteralScaleLog2 = 2;
-constexpr size_t kMaxLoadLiteralRange = 1 * MB;
+constexpr uint8_t kLoadLiteralScaleLog2 = 2;
+constexpr int kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
const int kNumberOfVRegisters = 32;
diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc
index e0ab589914..32bcc6f268 100644
--- a/deps/v8/src/codegen/arm64/cpu-arm64.cc
+++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc
@@ -15,7 +15,7 @@ namespace internal {
class CacheLineSizes {
public:
CacheLineSizes() {
-#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN)
+#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN) || defined(__APPLE__)
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
diff --git a/deps/v8/src/codegen/arm64/decoder-arm64.h b/deps/v8/src/codegen/arm64/decoder-arm64.h
index 3d113eb836..7621c516ce 100644
--- a/deps/v8/src/codegen/arm64/decoder-arm64.h
+++ b/deps/v8/src/codegen/arm64/decoder-arm64.h
@@ -95,7 +95,7 @@ class V8_EXPORT_PRIVATE DecoderVisitor {
};
// A visitor that dispatches to a list of visitors.
-class DispatchingDecoderVisitor : public DecoderVisitor {
+class V8_EXPORT_PRIVATE DispatchingDecoderVisitor : public DecoderVisitor {
public:
DispatchingDecoderVisitor() {}
virtual ~DispatchingDecoderVisitor() {}
diff --git a/deps/v8/src/codegen/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index 5c3cf687e7..a73c3feed7 100644
--- a/deps/v8/src/codegen/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -203,6 +203,7 @@ class Instruction {
}
bool IsLdrLiteralX() const { return Mask(LoadLiteralMask) == LDR_x_lit; }
+ bool IsLdrLiteralW() const { return Mask(LoadLiteralMask) == LDR_w_lit; }
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index aab9fc79a2..792a8637f6 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -291,8 +291,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
ExternalReference reference = bit_cast<ExternalReference>(addr);
IndirectLoadExternalReference(rd, reference);
return;
- } else if (operand.ImmediateRMode() ==
- RelocInfo::FULL_EMBEDDED_OBJECT) {
+ } else if (RelocInfo::IsEmbeddedObjectMode(operand.ImmediateRMode())) {
Handle<HeapObject> x(
reinterpret_cast<Address*>(operand.ImmediateValue()));
IndirectLoadConstant(rd, x);
@@ -1866,7 +1865,9 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
if (CanUseNearCallOrJump(rmode)) {
- JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
+ EmbeddedObjectIndex index = AddEmbeddedObject(code);
+ DCHECK(is_int32(index));
+ JumpHelper(static_cast<int64_t>(index), rmode, cond);
} else {
Jump(code.address(), rmode, cond);
}
@@ -1912,7 +1913,9 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
}
if (CanUseNearCallOrJump(rmode)) {
- near_call(AddCodeTarget(code), rmode);
+ EmbeddedObjectIndex index = AddEmbeddedObject(code);
+ DCHECK(is_int32(index));
+ near_call(static_cast<int32_t>(index), rmode);
} else {
IndirectCall(code.address(), rmode);
}
@@ -1925,24 +1928,27 @@ void TurboAssembler::Call(ExternalReference target) {
Call(temp);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiShiftSize == 0);
- Lsl(builtin_pointer, builtin_pointer, kSystemPointerSizeLog2 - kSmiShift);
+ Lsl(builtin_index, builtin_index, kSystemPointerSizeLog2 - kSmiShift);
#else
STATIC_ASSERT(kSmiShiftSize == 31);
- Asr(builtin_pointer, builtin_pointer, kSmiShift - kSystemPointerSizeLog2);
+ Asr(builtin_index, builtin_index, kSmiShift - kSystemPointerSizeLog2);
#endif
- Add(builtin_pointer, builtin_pointer,
- IsolateData::builtin_entry_table_offset());
- Ldr(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
- Call(builtin_pointer);
+ Add(builtin_index, builtin_index, IsolateData::builtin_entry_table_offset());
+ Ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -2723,7 +2729,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
Ldrsw(destination, field_operand);
- if (kUseBranchlessPtrDecompression) {
+ if (kUseBranchlessPtrDecompressionInGeneratedCode) {
UseScratchRegisterScope temps(this);
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
@@ -2747,7 +2753,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const Register& source) {
RecordComment("[ DecompressAnyTagged");
- if (kUseBranchlessPtrDecompression) {
+ if (kUseBranchlessPtrDecompressionInGeneratedCode) {
UseScratchRegisterScope temps(this);
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index f217c3c586..d4e9c3055b 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -852,7 +852,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Generate an indirect call (for when a direct call's range is not adequate).
void IndirectCall(Address target, RelocInfo::Mode rmode);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
@@ -1920,17 +1923,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
class InstructionAccurateScope {
public:
explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
- : tasm_(tasm)
+ : tasm_(tasm),
+ block_pool_(tasm, count * kInstrSize)
#ifdef DEBUG
,
size_(count * kInstrSize)
#endif
{
- // Before blocking the const pool, see if it needs to be emitted.
- tasm_->CheckConstPool(false, true);
- tasm_->CheckVeneerPool(false, true);
-
- tasm_->StartBlockPools();
+ tasm_->CheckVeneerPool(false, true, count * kInstrSize);
+ tasm_->StartBlockVeneerPool();
#ifdef DEBUG
if (count != 0) {
tasm_->bind(&start_);
@@ -1941,7 +1942,7 @@ class InstructionAccurateScope {
}
~InstructionAccurateScope() {
- tasm_->EndBlockPools();
+ tasm_->EndBlockVeneerPool();
#ifdef DEBUG
if (start_.is_bound()) {
DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
@@ -1952,6 +1953,7 @@ class InstructionAccurateScope {
private:
TurboAssembler* tasm_;
+ TurboAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
size_t size_;
Label start_;
@@ -1979,7 +1981,7 @@ class UseScratchRegisterScope {
DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
}
- ~UseScratchRegisterScope();
+ V8_EXPORT_PRIVATE ~UseScratchRegisterScope();
// Take a register from the appropriate temps list. It will be returned
// automatically when the scope ends.
@@ -1993,10 +1995,11 @@ class UseScratchRegisterScope {
}
Register AcquireSameSizeAs(const Register& reg);
- VRegister AcquireSameSizeAs(const VRegister& reg);
+ V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg);
private:
- static CPURegister AcquireNextAvailable(CPURegList* available);
+ V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
+ CPURegList* available);
// Available scratch registers.
CPURegList* available_; // kRegister
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index b429786aa9..741866dfd6 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -559,8 +559,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 687ae98bfe..498afb0320 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -64,8 +64,8 @@ AssemblerOptions AssemblerOptions::Default(
// might be run on real hardware.
options.enable_simulator_code = !serializer;
#endif
- options.inline_offheap_trampolines =
- FLAG_embedded_builtins && !serializer && !generating_embedded_builtin;
+ options.inline_offheap_trampolines &=
+ !serializer && !generating_embedded_builtin;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
const base::AddressRegion& code_range =
isolate->heap()->memory_allocator()->code_range();
@@ -226,23 +226,33 @@ int AssemblerBase::AddCodeTarget(Handle<Code> target) {
}
}
-int AssemblerBase::AddCompressedEmbeddedObject(Handle<HeapObject> object) {
- int current = static_cast<int>(compressed_embedded_objects_.size());
- compressed_embedded_objects_.push_back(object);
- return current;
+Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
+ DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
+ return code_targets_[code_target_index];
}
-Handle<HeapObject> AssemblerBase::GetCompressedEmbeddedObject(
- intptr_t index) const {
- DCHECK_LT(static_cast<size_t>(index), compressed_embedded_objects_.size());
- return compressed_embedded_objects_[index];
+AssemblerBase::EmbeddedObjectIndex AssemblerBase::AddEmbeddedObject(
+ Handle<HeapObject> object) {
+ EmbeddedObjectIndex current = embedded_objects_.size();
+  // Do not deduplicate invalid handles; they correspond to heap object
+ if (!object.is_null()) {
+ auto entry = embedded_objects_map_.find(object);
+ if (entry != embedded_objects_map_.end()) {
+ return entry->second;
+ }
+ embedded_objects_map_[object] = current;
+ }
+ embedded_objects_.push_back(object);
+ return current;
}
-Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
- DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
- return code_targets_[code_target_index];
+Handle<HeapObject> AssemblerBase::GetEmbeddedObject(
+ EmbeddedObjectIndex index) const {
+ DCHECK_LT(index, embedded_objects_.size());
+ return embedded_objects_[index];
}
+
int Assembler::WriteCodeComments() {
if (!FLAG_code_comments || code_comments_writer_.entry_count() == 0) return 0;
int offset = pc_offset();
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index eae5d53a4f..98639583d8 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -36,7 +36,9 @@
#define V8_CODEGEN_ASSEMBLER_H_
#include <forward_list>
+#include <unordered_map>
+#include "src/base/memory.h"
#include "src/codegen/code-comments.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
@@ -55,6 +57,10 @@ class ApiFunction;
namespace internal {
+using base::Memory;
+using base::ReadUnalignedValue;
+using base::WriteUnalignedValue;
+
// Forward declarations.
class EmbeddedData;
class InstructionStream;
@@ -155,7 +161,7 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
bool isolate_independent_code = false;
// Enables the use of isolate-independent builtins through an off-heap
// trampoline. (macro assembler feature).
- bool inline_offheap_trampolines = false;
+ bool inline_offheap_trampolines = FLAG_embedded_builtins;
// On some platforms, all code is within a given range in the process,
// and the start of this range is configured here.
Address code_range_start = 0;
@@ -272,8 +278,11 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int AddCodeTarget(Handle<Code> target);
Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
- int AddCompressedEmbeddedObject(Handle<HeapObject> object);
- Handle<HeapObject> GetCompressedEmbeddedObject(intptr_t index) const;
+ // Add 'object' to the {embedded_objects_} vector and return the index at
+ // which it is stored.
+ using EmbeddedObjectIndex = size_t;
+ EmbeddedObjectIndex AddEmbeddedObject(Handle<HeapObject> object);
+ Handle<HeapObject> GetEmbeddedObject(EmbeddedObjectIndex index) const;
// The buffer into which code and relocation info are generated.
std::unique_ptr<AssemblerBuffer> buffer_;
@@ -321,12 +330,18 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// the code handle in the vector instead.
std::vector<Handle<Code>> code_targets_;
- // When pointer compression is enabled, we need to store indexes to this
- // table in the code until we are ready to copy the code and embed the real
- // object pointers. We don't need to do the same thing for non-compressed
- // embedded objects, because we've got enough space (kPointerSize) in the
- // code stream to just embed the address of the object handle.
- std::vector<Handle<HeapObject>> compressed_embedded_objects_;
+ // If an assembler needs a small number to refer to a heap object handle
+ // (for example, because there are only 32bit available on a 64bit arch), the
+ // assembler adds the object into this vector using AddEmbeddedObject, and
+ // may then refer to the heap object using the handle's index in this vector.
+ std::vector<Handle<HeapObject>> embedded_objects_;
+
+  // Embedded objects are deduplicated based on handle location. This is a
+  // compromise that is almost as effective as deduplication based on actual
+  // heap object addresses while still maintaining GC safety.
+ std::unordered_map<Handle<HeapObject>, EmbeddedObjectIndex,
+ Handle<HeapObject>::hash, Handle<HeapObject>::equal_to>
+ embedded_objects_map_;
const AssemblerOptions options_;
uint64_t enabled_cpu_features_;
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index d967d84874..390746c27d 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -63,57 +63,27 @@ void CodeStubAssembler::HandleBreakOnNode() {
void CodeStubAssembler::Assert(const BranchGenerator& branch,
const char* message, const char* file, int line,
- Node* extra_node1, const char* extra_node1_name,
- Node* extra_node2, const char* extra_node2_name,
- Node* extra_node3, const char* extra_node3_name,
- Node* extra_node4, const char* extra_node4_name,
- Node* extra_node5,
- const char* extra_node5_name) {
+ std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
if (FLAG_debug_code) {
- Check(branch, message, file, line, extra_node1, extra_node1_name,
- extra_node2, extra_node2_name, extra_node3, extra_node3_name,
- extra_node4, extra_node4_name, extra_node5, extra_node5_name);
+ Check(branch, message, file, line, extra_nodes);
}
#endif
}
void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
- Node* extra_node1, const char* extra_node1_name,
- Node* extra_node2, const char* extra_node2_name,
- Node* extra_node3, const char* extra_node3_name,
- Node* extra_node4, const char* extra_node4_name,
- Node* extra_node5,
- const char* extra_node5_name) {
+ std::initializer_list<ExtraNode> extra_nodes) {
#if defined(DEBUG)
if (FLAG_debug_code) {
- Check(condition_body, message, file, line, extra_node1, extra_node1_name,
- extra_node2, extra_node2_name, extra_node3, extra_node3_name,
- extra_node4, extra_node4_name, extra_node5, extra_node5_name);
+ Check(condition_body, message, file, line, extra_nodes);
}
#endif
}
-#ifdef DEBUG
-namespace {
-void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node,
- const char* node_name) {
- if (node != nullptr) {
- csa->CallRuntime(Runtime::kPrintWithNameForAssert, csa->SmiConstant(0),
- csa->StringConstant(node_name), node);
- }
-}
-} // namespace
-#endif
-
void CodeStubAssembler::Check(const BranchGenerator& branch,
const char* message, const char* file, int line,
- Node* extra_node1, const char* extra_node1_name,
- Node* extra_node2, const char* extra_node2_name,
- Node* extra_node3, const char* extra_node3_name,
- Node* extra_node4, const char* extra_node4_name,
- Node* extra_node5, const char* extra_node5_name) {
+ std::initializer_list<ExtraNode> extra_nodes) {
Label ok(this);
Label not_ok(this, Label::kDeferred);
if (message != nullptr && FLAG_code_comments) {
@@ -124,9 +94,7 @@ void CodeStubAssembler::Check(const BranchGenerator& branch,
branch(&ok, &not_ok);
BIND(&not_ok);
- FailAssert(message, file, line, extra_node1, extra_node1_name, extra_node2,
- extra_node2_name, extra_node3, extra_node3_name, extra_node4,
- extra_node4_name, extra_node5, extra_node5_name);
+ FailAssert(message, file, line, extra_nodes);
BIND(&ok);
Comment("] Assert");
@@ -134,20 +102,14 @@ void CodeStubAssembler::Check(const BranchGenerator& branch,
void CodeStubAssembler::Check(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
- Node* extra_node1, const char* extra_node1_name,
- Node* extra_node2, const char* extra_node2_name,
- Node* extra_node3, const char* extra_node3_name,
- Node* extra_node4, const char* extra_node4_name,
- Node* extra_node5, const char* extra_node5_name) {
+ std::initializer_list<ExtraNode> extra_nodes) {
BranchGenerator branch = [=](Label* ok, Label* not_ok) {
Node* condition = condition_body();
DCHECK_NOT_NULL(condition);
Branch(condition, ok, not_ok);
};
- Check(branch, message, file, line, extra_node1, extra_node1_name, extra_node2,
- extra_node2_name, extra_node3, extra_node3_name, extra_node4,
- extra_node4_name, extra_node5, extra_node5_name);
+ Check(branch, message, file, line, extra_nodes);
}
void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
@@ -162,31 +124,25 @@ void CodeStubAssembler::FastCheck(TNode<BoolT> condition) {
}
void CodeStubAssembler::FailAssert(
- const char* message, const char* file, int line, Node* extra_node1,
- const char* extra_node1_name, Node* extra_node2,
- const char* extra_node2_name, Node* extra_node3,
- const char* extra_node3_name, Node* extra_node4,
- const char* extra_node4_name, Node* extra_node5,
- const char* extra_node5_name) {
+ const char* message, const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes) {
DCHECK_NOT_NULL(message);
EmbeddedVector<char, 1024> chars;
if (file != nullptr) {
- SNPrintF(chars, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
- } else {
- SNPrintF(chars, "CSA_ASSERT failed: %s\n", message);
+ SNPrintF(chars, "%s [%s:%d]", message, file, line);
+ message = chars.begin();
}
- Node* message_node = StringConstant(chars.begin());
+ Node* message_node = StringConstant(message);
#ifdef DEBUG
// Only print the extra nodes in debug builds.
- MaybePrintNodeWithName(this, extra_node1, extra_node1_name);
- MaybePrintNodeWithName(this, extra_node2, extra_node2_name);
- MaybePrintNodeWithName(this, extra_node3, extra_node3_name);
- MaybePrintNodeWithName(this, extra_node4, extra_node4_name);
- MaybePrintNodeWithName(this, extra_node5, extra_node5_name);
+ for (auto& node : extra_nodes) {
+ CallRuntime(Runtime::kPrintWithNameForAssert, SmiConstant(0),
+ StringConstant(node.second), node.first);
+ }
#endif
- DebugAbort(message_node);
+ AbortCSAAssert(message_node);
Unreachable();
}
@@ -567,7 +523,7 @@ TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
if (SmiValuesAre31Bits() && kSystemPointerSize == kInt64Size) {
// Check that the Smi value is properly sign-extended.
- TNode<IntPtrT> value = Signed(BitcastTaggedToWord(smi));
+ TNode<IntPtrT> value = Signed(BitcastTaggedSignedToWord(smi));
return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value)));
}
return Int32TrueConstant();
@@ -611,7 +567,8 @@ TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
if (ToIntPtrConstant(value, constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
- return Signed(WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
+ return Signed(
+ WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
@@ -660,13 +617,14 @@ TNode<Int32T> CodeStubAssembler::TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b,
TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
- return BitcastWordToTaggedSigned(TryIntPtrAdd(
- BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs), if_overflow));
+ return BitcastWordToTaggedSigned(
+ TryIntPtrAdd(BitcastTaggedSignedToWord(lhs),
+ BitcastTaggedSignedToWord(rhs), if_overflow));
} else {
DCHECK(SmiValuesAre31Bits());
- TNode<PairT<Int32T, BoolT>> pair =
- Int32AddWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)),
- TruncateIntPtrToInt32(BitcastTaggedToWord(rhs)));
+ TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow(
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
@@ -678,16 +636,16 @@ TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(
- BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
+ BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<IntPtrT> result = Projection<0>(pair);
return BitcastWordToTaggedSigned(result);
} else {
DCHECK(SmiValuesAre31Bits());
- TNode<PairT<Int32T, BoolT>> pair =
- Int32SubWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)),
- TruncateIntPtrToInt32(BitcastTaggedToWord(rhs)));
+ TNode<PairT<Int32T, BoolT>> pair = Int32SubWithOverflow(
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)),
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
@@ -933,7 +891,7 @@ TNode<Smi> CodeStubAssembler::TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor,
BIND(&divisor_is_not_minus_one);
TNode<Int32T> untagged_result = Int32Div(untagged_dividend, untagged_divisor);
- TNode<Int32T> truncated = Signed(Int32Mul(untagged_result, untagged_divisor));
+ TNode<Int32T> truncated = Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout);
@@ -973,9 +931,12 @@ TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
}
TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
- return WordNotEqual(
- WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
- IntPtrConstant(0));
+ // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we
+ // can nonetheless use it to inspect the Smi tag. The assumption here is that
+ // the GC will not exchange Smis for HeapObjects or vice-versa.
+ TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a));
+ return WordNotEqual(WordAnd(a_bitcast, IntPtrConstant(kSmiTagMask)),
+ IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
@@ -1031,7 +992,7 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
TNode<Int32T> prototype_instance_type = LoadMapInstanceType(prototype_map);
// Pessimistically assume elements if a Proxy, Special API Object,
- // or JSValue wrapper is found on the prototype chain. After this
+ // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this
// instance type check, it's not necessary to check for interceptors or
// access checks.
Label if_custom(this, Label::kDeferred), if_notcustom(this);
@@ -1040,11 +1001,12 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
BIND(&if_custom);
{
- // For string JSValue wrappers we still support the checks as long
- // as they wrap the empty string.
- GotoIfNot(InstanceTypeEqual(prototype_instance_type, JS_VALUE_TYPE),
- possibly_elements);
- Node* prototype_value = LoadJSValueValue(prototype);
+ // For string JSPrimitiveWrapper wrappers we still support the checks as
+ // long as they wrap the empty string.
+ GotoIfNot(
+ InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE),
+ possibly_elements);
+ Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype);
Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements);
}
@@ -1121,20 +1083,23 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this);
bool needs_double_alignment = flags & kDoubleAlignment;
+ bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation;
- if (flags & kAllowLargeObjectAllocation) {
+ if (allow_large_object_allocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
+ TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
+ AllocateDoubleAlignFlag::encode(needs_double_alignment) |
+ AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
if (FLAG_young_generation_large_objects) {
- result = CallRuntime(Runtime::kAllocateInYoungGeneration,
- NoContextConstant(), SmiTag(size_in_bytes));
+ result =
+ CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
} else {
- TNode<Smi> alignment_flag = SmiConstant(Smi::FromInt(
- AllocateDoubleAlignFlag::encode(needs_double_alignment)));
result =
CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
- SmiTag(size_in_bytes), alignment_flag);
+ SmiTag(size_in_bytes), runtime_flags);
}
Goto(&out);
@@ -1161,15 +1126,17 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
BIND(&runtime_call);
{
+ TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
+ AllocateDoubleAlignFlag::encode(needs_double_alignment) |
+ AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
if (flags & kPretenured) {
- TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
- AllocateDoubleAlignFlag::encode(needs_double_alignment)));
result =
CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
} else {
- result = CallRuntime(Runtime::kAllocateInYoungGeneration,
- NoContextConstant(), SmiTag(size_in_bytes));
+ result =
+ CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
}
Goto(&out);
}
@@ -1394,14 +1361,15 @@ Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
int offset, MachineType type) {
CSA_ASSERT(this, IsStrong(object));
- return Load(type, object, IntPtrConstant(offset - kHeapObjectTag));
+ return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset,
MachineType type) {
CSA_ASSERT(this, IsStrong(object));
- return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+ return LoadFromObject(type, object,
+ IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
@@ -1469,12 +1437,18 @@ TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
object, HeapNumber::kValueOffset, MachineType::Float64()));
}
+TNode<Map> CodeStubAssembler::GetStructMap(InstanceType instance_type) {
+ Handle<Map> map_handle(Map::GetStructMap(isolate(), instance_type),
+ isolate());
+ return HeapConstant(map_handle);
+}
+
TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) {
return UncheckedCast<Map>(LoadObjectField(object, HeapObject::kMapOffset,
MachineType::TaggedPointer()));
}
-TNode<Int32T> CodeStubAssembler::LoadInstanceType(
+TNode<Uint16T> CodeStubAssembler::LoadInstanceType(
SloppyTNode<HeapObject> object) {
return LoadMapInstanceType(LoadMap(object));
}
@@ -1591,8 +1565,8 @@ TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) {
LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32()));
}
-TNode<Int32T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
- return UncheckedCast<Int32T>(
+TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
+ return UncheckedCast<Uint16T>(
LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16()));
}
@@ -1700,12 +1674,10 @@ TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties(
TNode<Map> map, TNode<Int32T> instance_type, Label* bailout) {
- // This check can have false positives, since it applies to any JSValueType.
+ // This check can have false positives, since it applies to any
+ // JSPrimitiveWrapper type.
GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout);
- GotoIf(IsSetWord32(LoadMapBitField2(map), Map::HasHiddenPrototypeBit::kMask),
- bailout);
-
TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask), bailout);
@@ -1810,9 +1782,9 @@ Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
-Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
- CSA_ASSERT(this, IsJSValue(object));
- return LoadObjectField(object, JSValue::kValueOffset);
+Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) {
+ CSA_ASSERT(this, IsJSPrimitiveWrapper(object));
+ return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset);
}
void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
@@ -1941,11 +1913,13 @@ TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(
return LoadAndUntagWeakFixedArrayLength(array);
}
-template <typename Array>
-TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
- TNode<Array> array, int array_header_size, Node* index_node,
- int additional_offset, ParameterMode parameter_mode,
- LoadSensitivity needs_poisoning) {
+template <typename Array, typename T>
+TNode<T> CodeStubAssembler::LoadArrayElement(TNode<Array> array,
+ int array_header_size,
+ Node* index_node,
+ int additional_offset,
+ ParameterMode parameter_mode,
+ LoadSensitivity needs_poisoning) {
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
ParameterToIntPtr(index_node, parameter_mode),
IntPtrConstant(0)));
@@ -1955,8 +1929,13 @@ TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
parameter_mode, header_size);
CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array),
array_header_size));
- return UncheckedCast<MaybeObject>(
- Load(MachineType::AnyTagged(), array, offset, needs_poisoning));
+ constexpr MachineType machine_type = MachineTypeOf<T>::value;
+ // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning
+ if (needs_poisoning == LoadSensitivity::kSafe) {
+ return UncheckedCast<T>(LoadFromObject(machine_type, array, offset));
+ } else {
+ return UncheckedCast<T>(Load(machine_type, array, offset, needs_poisoning));
+ }
}
template TNode<MaybeObject>
@@ -2046,8 +2025,8 @@ TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayBackingStore(
IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
}
-Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
- Node* data_pointer, Node* offset) {
+TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
+ SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) {
if (Is64()) {
TNode<IntPtrT> value = UncheckedCast<IntPtrT>(
Load(MachineType::IntPtr(), data_pointer, offset));
@@ -2059,13 +2038,15 @@ Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<IntPtrT> low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kSystemPointerSize))));
+ Int32Add(TruncateIntPtrToInt32(offset),
+ Int32Constant(kSystemPointerSize))));
#else
TNode<IntPtrT> low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<IntPtrT> high = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kSystemPointerSize))));
+ Int32Add(TruncateIntPtrToInt32(offset),
+ Int32Constant(kSystemPointerSize))));
#endif
return BigIntFromInt32Pair(low, high);
}
@@ -2176,8 +2157,9 @@ TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) {
return var_result.value();
}
-Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
- Node* data_pointer, Node* offset) {
+compiler::TNode<BigInt>
+CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
+ SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) {
Label if_zero(this), done(this);
if (Is64()) {
TNode<UintPtrT> value = UncheckedCast<UintPtrT>(
@@ -2190,13 +2172,15 @@ Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kSystemPointerSize))));
+ Int32Add(TruncateIntPtrToInt32(offset),
+ Int32Constant(kSystemPointerSize))));
#else
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
- Int32Add(offset, Int32Constant(kSystemPointerSize))));
+ Int32Add(TruncateIntPtrToInt32(offset),
+ Int32Constant(kSystemPointerSize))));
#endif
return BigIntFromUint32Pair(low, high);
}
@@ -2244,10 +2228,10 @@ TNode<BigInt> CodeStubAssembler::BigIntFromUint64(TNode<UintPtrT> value) {
return var_result.value();
}
-Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
- Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
+ TNode<RawPtrT> data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
- Node* offset =
+ TNode<IntPtrT> offset =
ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
switch (elements_kind) {
case UINT8_ELEMENTS: /* fall through */
@@ -2281,7 +2265,8 @@ Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
}
TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
- TNode<WordT> data_pointer, TNode<Smi> index, TNode<Int32T> elements_kind) {
+ TNode<RawPtrT> data_pointer, TNode<Smi> index,
+ TNode<Int32T> elements_kind) {
TVARIABLE(Numeric, var_result);
Label done(this), if_unknown_type(this, Label::kDeferred);
int32_t elements_kinds[] = {
@@ -2307,12 +2292,12 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
BIND(&if_unknown_type);
Unreachable();
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- BIND(&if_##type##array); \
- { \
- var_result = CAST(LoadFixedTypedArrayElementAsTagged( \
- data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS)); \
- Goto(&done); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ BIND(&if_##type##array); \
+ { \
+ var_result = LoadFixedTypedArrayElementAsTagged( \
+ data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS); \
+ Goto(&done); \
}
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -2323,8 +2308,7 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
void CodeStubAssembler::StoreJSTypedArrayElementFromTagged(
TNode<Context> context, TNode<JSTypedArray> typed_array,
- TNode<Object> index_node, TNode<Object> value, ElementsKind elements_kind,
- ParameterMode parameter_mode) {
+ TNode<Smi> index_node, TNode<Object> value, ElementsKind elements_kind) {
TNode<RawPtrT> data_pointer = LoadJSTypedArrayBackingStore(typed_array);
switch (elements_kind) {
case UINT8_ELEMENTS:
@@ -2333,26 +2317,26 @@ void CodeStubAssembler::StoreJSTypedArrayElementFromTagged(
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
- SmiToInt32(CAST(value)), parameter_mode);
+ SmiToInt32(CAST(value)), SMI_PARAMETERS);
break;
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
- TruncateTaggedToWord32(context, value), parameter_mode);
+ TruncateTaggedToWord32(context, value), SMI_PARAMETERS);
break;
case FLOAT32_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))),
- parameter_mode);
+ SMI_PARAMETERS);
break;
case FLOAT64_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
- LoadHeapNumberValue(CAST(value)), parameter_mode);
+ LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS);
break;
case BIGUINT64_ELEMENTS:
case BIGINT64_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
- UncheckedCast<BigInt>(value), parameter_mode);
+ UncheckedCast<BigInt>(value), SMI_PARAMETERS);
break;
default:
UNREACHABLE();
@@ -2925,15 +2909,12 @@ TNode<Int32T> CodeStubAssembler::EnsureArrayPushable(TNode<Map> map,
// Disallow pushing onto prototypes. It might be the JSArray prototype.
// Disallow pushing onto non-extensible objects.
Comment("Disallow pushing onto prototypes");
- Node* bit_field2 = LoadMapBitField2(map);
- int mask = Map::IsPrototypeMapBit::kMask | Map::IsExtensibleBit::kMask;
- Node* test = Word32And(bit_field2, Int32Constant(mask));
- GotoIf(Word32NotEqual(test, Int32Constant(Map::IsExtensibleBit::kMask)),
- bailout);
+ GotoIfNot(IsExtensibleNonPrototypeMap(map), bailout);
EnsureArrayLengthWritable(map, bailout);
- TNode<Uint32T> kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ TNode<Uint32T> kind =
+ DecodeWord32<Map::ElementsKindBits>(LoadMapBitField2(map));
return Signed(kind);
}
@@ -3022,7 +3003,7 @@ void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
GotoIfNotNumber(value, bailout);
}
if (IsDoubleElementsKind(kind)) {
- value = ChangeNumberToFloat64(value);
+ value = ChangeNumberToFloat64(CAST(value));
}
StoreElement(elements, kind, index, value, mode);
}
@@ -3131,14 +3112,10 @@ TNode<BigInt> CodeStubAssembler::AllocateBigInt(TNode<IntPtrT> length) {
}
TNode<BigInt> CodeStubAssembler::AllocateRawBigInt(TNode<IntPtrT> length) {
- // This is currently used only for 64-bit wide BigInts. If more general
- // applicability is required, a large-object check must be added.
- CSA_ASSERT(this, UintPtrLessThan(length, IntPtrConstant(3)));
-
TNode<IntPtrT> size =
IntPtrAdd(IntPtrConstant(BigInt::kHeaderSize),
Signed(WordShl(length, kSystemPointerSizeLog2)));
- Node* raw_result = Allocate(size, kNone);
+ Node* raw_result = Allocate(size, kAllowLargeObjectAllocation);
StoreMapNoWriteBarrier(raw_result, RootIndex::kBigIntMap);
if (FIELD_SIZE(BigInt::kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(BigInt::kOptionalPaddingOffset));
@@ -3155,11 +3132,26 @@ void CodeStubAssembler::StoreBigIntBitfield(TNode<BigInt> bigint,
MachineRepresentation::kWord32);
}
-void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
+void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint,
+ intptr_t digit_index,
TNode<UintPtrT> digit) {
+ CHECK_LE(0, digit_index);
+ CHECK_LT(digit_index, BigInt::kMaxLength);
StoreObjectFieldNoWriteBarrier(
- bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize, digit,
- UintPtrT::kMachineRepresentation);
+ bigint,
+ BigInt::kDigitsOffset +
+ static_cast<int>(digit_index) * kSystemPointerSize,
+ digit, UintPtrT::kMachineRepresentation);
+}
+
+void CodeStubAssembler::StoreBigIntDigit(TNode<BigInt> bigint,
+ TNode<IntPtrT> digit_index,
+ TNode<UintPtrT> digit) {
+ TNode<IntPtrT> offset =
+ IntPtrAdd(IntPtrConstant(BigInt::kDigitsOffset),
+ IntPtrMul(digit_index, IntPtrConstant(kSystemPointerSize)));
+ StoreObjectFieldNoWriteBarrier(bigint, offset, digit,
+ UintPtrT::kMachineRepresentation);
}
TNode<Word32T> CodeStubAssembler::LoadBigIntBitfield(TNode<BigInt> bigint) {
@@ -3168,10 +3160,23 @@ TNode<Word32T> CodeStubAssembler::LoadBigIntBitfield(TNode<BigInt> bigint) {
}
TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
- int digit_index) {
- return UncheckedCast<UintPtrT>(LoadObjectField(
- bigint, BigInt::kDigitsOffset + digit_index * kSystemPointerSize,
- MachineType::UintPtr()));
+ intptr_t digit_index) {
+ CHECK_LE(0, digit_index);
+ CHECK_LT(digit_index, BigInt::kMaxLength);
+ return UncheckedCast<UintPtrT>(
+ LoadObjectField(bigint,
+ BigInt::kDigitsOffset +
+ static_cast<int>(digit_index) * kSystemPointerSize,
+ MachineType::UintPtr()));
+}
+
+TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
+ TNode<IntPtrT> digit_index) {
+ TNode<IntPtrT> offset =
+ IntPtrAdd(IntPtrConstant(BigInt::kDigitsOffset),
+ IntPtrMul(digit_index, IntPtrConstant(kSystemPointerSize)));
+ return UncheckedCast<UintPtrT>(
+ LoadObjectField(bigint, offset, MachineType::UintPtr()));
}
TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
@@ -3440,16 +3445,16 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
}
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionary(
- TNode<IntPtrT> at_least_space_for) {
+ TNode<IntPtrT> at_least_space_for, AllocationFlags flags) {
CSA_ASSERT(this, UintPtrLessThanOrEqual(
at_least_space_for,
IntPtrConstant(NameDictionary::kMaxCapacity)));
TNode<IntPtrT> capacity = HashTableComputeCapacity(at_least_space_for);
- return AllocateNameDictionaryWithCapacity(capacity);
+ return AllocateNameDictionaryWithCapacity(capacity, flags);
}
TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
- TNode<IntPtrT> capacity) {
+ TNode<IntPtrT> capacity, AllocationFlags flags) {
CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
TNode<IntPtrT> length = EntryToIndex<NameDictionary>(capacity);
@@ -3457,39 +3462,51 @@ TNode<NameDictionary> CodeStubAssembler::AllocateNameDictionaryWithCapacity(
TimesTaggedSize(length), IntPtrConstant(NameDictionary::kHeaderSize));
TNode<NameDictionary> result =
- UncheckedCast<NameDictionary>(AllocateInNewSpace(store_size));
- Comment("Initialize NameDictionary");
+ UncheckedCast<NameDictionary>(Allocate(store_size, flags));
+
// Initialize FixedArray fields.
- DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap));
- StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap);
- StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
- SmiFromIntPtr(length));
+ {
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kNameDictionaryMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kNameDictionaryMap);
+ StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
+ SmiFromIntPtr(length));
+ }
+
// Initialized HashTable fields.
- TNode<Smi> zero = SmiConstant(0);
- StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
- SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex,
- zero, SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
- SmiTag(capacity), SKIP_WRITE_BARRIER);
- // Initialize Dictionary fields.
- TNode<HeapObject> filler = UndefinedConstant();
- StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
- SmiConstant(PropertyDetails::kInitialIndex),
- SKIP_WRITE_BARRIER);
- StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex,
- SmiConstant(PropertyArray::kNoHashSentinel),
- SKIP_WRITE_BARRIER);
+ {
+ TNode<Smi> zero = SmiConstant(0);
+ StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result,
+ NameDictionary::kNumberOfDeletedElementsIndex, zero,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
+ SmiTag(capacity), SKIP_WRITE_BARRIER);
+ // Initialize Dictionary fields.
+ StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
+ SmiConstant(PropertyDetails::kInitialIndex),
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex,
+ SmiConstant(PropertyArray::kNoHashSentinel),
+ SKIP_WRITE_BARRIER);
+ }
// Initialize NameDictionary elements.
- TNode<WordT> result_word = BitcastTaggedToWord(result);
- TNode<WordT> start_address = IntPtrAdd(
- result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
- NameDictionary::kElementsStartIndex) -
- kHeapObjectTag));
- TNode<WordT> end_address = IntPtrAdd(
- result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
- StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+ {
+ TNode<WordT> result_word = BitcastTaggedToWord(result);
+ TNode<WordT> start_address = IntPtrAdd(
+ result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
+ NameDictionary::kElementsStartIndex) -
+ kHeapObjectTag));
+ TNode<WordT> end_address = IntPtrAdd(
+ result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
+
+ TNode<HeapObject> filler = UndefinedConstant();
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kUndefinedValue));
+
+ StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+ }
+
return result;
}
@@ -3605,6 +3622,17 @@ TNode<CollectionType> CodeStubAssembler::AllocateSmallOrderedHashTable(
StoreMapNoWriteBarrier(table_obj, small_ordered_hash_map);
TNode<CollectionType> table = UncheckedCast<CollectionType>(table_obj);
+ {
+ // This store overlaps with the header fields stored below.
+ // Since it happens first, it effectively still just zero-initializes the
+ // padding.
+ constexpr int offset =
+ RoundDown<kTaggedSize>(CollectionType::PaddingOffset());
+ STATIC_ASSERT(offset + kTaggedSize == CollectionType::PaddingOffset() +
+ CollectionType::PaddingSize());
+ StoreObjectFieldNoWriteBarrier(table, offset, SmiConstant(0));
+ }
+
// Initialize the SmallOrderedHashTable fields.
StoreObjectByteNoWriteBarrier(
table, CollectionType::NumberOfBucketsOffset(),
@@ -3748,8 +3776,9 @@ void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
}
-Node* CodeStubAssembler::AllocateJSObjectFromMap(
- Node* map, Node* properties, Node* elements, AllocationFlags flags,
+TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
+ SloppyTNode<Map> map, SloppyTNode<HeapObject> properties,
+ SloppyTNode<FixedArray> elements, AllocationFlags flags,
SlackTrackingMode slack_tracking_mode) {
CSA_ASSERT(this, IsMap(map));
CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
@@ -3761,7 +3790,7 @@ Node* CodeStubAssembler::AllocateJSObjectFromMap(
StoreMapNoWriteBarrier(object, map);
InitializeJSObjectFromMap(object, map, instance_size, properties, elements,
slack_tracking_mode);
- return object;
+ return CAST(object);
}
void CodeStubAssembler::InitializeJSObjectFromMap(
@@ -5508,7 +5537,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
}
BIND(&is_heap_number);
- var_word32->Bind(TruncateHeapNumberValueToWord32(value));
+ var_word32->Bind(TruncateHeapNumberValueToWord32(CAST(value)));
CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber);
Goto(if_number);
@@ -5521,9 +5550,10 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl(
}
}
-Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
+TNode<Int32T> CodeStubAssembler::TruncateHeapNumberValueToWord32(
+ TNode<HeapNumber> object) {
Node* value = LoadHeapNumberValue(object);
- return TruncateFloat64ToWord32(value);
+ return Signed(TruncateFloat64ToWord32(value));
}
void CodeStubAssembler::TryHeapNumberToSmi(TNode<HeapNumber> number,
@@ -5731,10 +5761,7 @@ TNode<Uint32T> CodeStubAssembler::ChangeNumberToUint32(TNode<Number> value) {
return var_result.value();
}
-TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(
- SloppyTNode<Number> value) {
- // TODO(tebbi): Remove assert once argument is TNode instead of SloppyTNode.
- CSA_SLOW_ASSERT(this, IsNumber(value));
+TNode<Float64T> CodeStubAssembler::ChangeNumberToFloat64(TNode<Number> value) {
TVARIABLE(Float64T, result);
Label smi(this);
Label done(this, &result);
@@ -5795,43 +5822,43 @@ TNode<WordT> CodeStubAssembler::TimesDoubleSize(SloppyTNode<WordT> value) {
return WordShl(value, kDoubleSizeLog2);
}
-Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
- PrimitiveType primitive_type,
- char const* method_name) {
- // We might need to loop once due to JSValue unboxing.
- VARIABLE(var_value, MachineRepresentation::kTagged, value);
+TNode<Object> CodeStubAssembler::ToThisValue(TNode<Context> context,
+ TNode<Object> value,
+ PrimitiveType primitive_type,
+ char const* method_name) {
+ // We might need to loop once due to JSPrimitiveWrapper unboxing.
+ TVARIABLE(Object, var_value, value);
Label loop(this, &var_value), done_loop(this),
done_throw(this, Label::kDeferred);
Goto(&loop);
BIND(&loop);
{
- // Load the current {value}.
- value = var_value.value();
-
// Check if the {value} is a Smi or a HeapObject.
- GotoIf(TaggedIsSmi(value), (primitive_type == PrimitiveType::kNumber)
- ? &done_loop
- : &done_throw);
+ GotoIf(
+ TaggedIsSmi(var_value.value()),
+ (primitive_type == PrimitiveType::kNumber) ? &done_loop : &done_throw);
+
+ TNode<HeapObject> value = CAST(var_value.value());
// Load the map of the {value}.
- Node* value_map = LoadMap(value);
+ TNode<Map> value_map = LoadMap(value);
// Load the instance type of the {value}.
- Node* value_instance_type = LoadMapInstanceType(value_map);
+ TNode<Uint16T> value_instance_type = LoadMapInstanceType(value_map);
- // Check if {value} is a JSValue.
- Label if_valueisvalue(this, Label::kDeferred), if_valueisnotvalue(this);
- Branch(InstanceTypeEqual(value_instance_type, JS_VALUE_TYPE),
- &if_valueisvalue, &if_valueisnotvalue);
+ // Check if {value} is a JSPrimitiveWrapper.
+ Label if_valueiswrapper(this, Label::kDeferred), if_valueisnotwrapper(this);
+ Branch(InstanceTypeEqual(value_instance_type, JS_PRIMITIVE_WRAPPER_TYPE),
+ &if_valueiswrapper, &if_valueisnotwrapper);
- BIND(&if_valueisvalue);
+ BIND(&if_valueiswrapper);
{
// Load the actual value from the {value}.
- var_value.Bind(LoadObjectField(value, JSValue::kValueOffset));
+ var_value = LoadObjectField(value, JSPrimitiveWrapper::kValueOffset);
Goto(&loop);
}
- BIND(&if_valueisnotvalue);
+ BIND(&if_valueisnotwrapper);
{
switch (primitive_type) {
case PrimitiveType::kBoolean:
@@ -5988,13 +6015,12 @@ TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(
TNode<BoolT> CodeStubAssembler::IsDictionaryMap(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- Node* bit_field3 = LoadMapBitField3(map);
- return IsSetWord32<Map::IsDictionaryMapBit>(bit_field3);
+ return IsSetWord32<Map::IsDictionaryMapBit>(LoadMapBitField3(map));
}
TNode<BoolT> CodeStubAssembler::IsExtensibleMap(SloppyTNode<Map> map) {
CSA_ASSERT(this, IsMap(map));
- return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField2(map));
+ return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField3(map));
}
TNode<BoolT> CodeStubAssembler::IsFrozenOrSealedElementsKindMap(
@@ -6007,7 +6033,7 @@ TNode<BoolT> CodeStubAssembler::IsFrozenOrSealedElementsKindMap(
TNode<BoolT> CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode<Map> map) {
int kMask = Map::IsExtensibleBit::kMask | Map::IsPrototypeMapBit::kMask;
int kExpected = Map::IsExtensibleBit::kMask;
- return Word32Equal(Word32And(LoadMapBitField2(map), Int32Constant(kMask)),
+ return Word32Equal(Word32And(LoadMapBitField3(map), Int32Constant(kMask)),
Int32Constant(kExpected));
}
@@ -6072,10 +6098,13 @@ TNode<BoolT> CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
-TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid() {
- Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(RootIndex::kRegExpSpeciesProtector);
- Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+TNode<BoolT> CodeStubAssembler::IsRegExpSpeciesProtectorCellInvalid(
+ TNode<Context> native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ TNode<PropertyCell> cell = CAST(LoadContextElement(
+ native_context, Context::REGEXP_SPECIES_PROTECTOR_INDEX));
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ TNode<Smi> invalid = SmiConstant(Isolate::kProtectorInvalid);
return WordEqual(cell_value, invalid);
}
@@ -6270,6 +6299,15 @@ TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyInstanceType(
return InstanceTypeEqual(instance_type, JS_GLOBAL_PROXY_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsJSGlobalProxyMap(SloppyTNode<Map> map) {
+ return IsJSGlobalProxyInstanceType(LoadMapInstanceType(map));
+}
+
+TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
+ SloppyTNode<HeapObject> object) {
+ return IsJSGlobalProxyMap(LoadMap(object));
+}
+
TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
SloppyTNode<Int32T> instance_type) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
@@ -6304,26 +6342,22 @@ TNode<BoolT> CodeStubAssembler::IsJSStringIterator(
return HasInstanceType(object, JS_STRING_ITERATOR_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
- SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, JS_GLOBAL_PROXY_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsMap(SloppyTNode<HeapObject> map) {
return IsMetaMap(LoadMap(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSValueInstanceType(
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperInstanceType(
SloppyTNode<Int32T> instance_type) {
- return InstanceTypeEqual(instance_type, JS_VALUE_TYPE);
+ return InstanceTypeEqual(instance_type, JS_PRIMITIVE_WRAPPER_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsJSValue(SloppyTNode<HeapObject> object) {
- return IsJSValueMap(LoadMap(object));
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapper(
+ SloppyTNode<HeapObject> object) {
+ return IsJSPrimitiveWrapperMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsJSValueMap(SloppyTNode<Map> map) {
- return IsJSValueInstanceType(LoadMapInstanceType(map));
+TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode<Map> map) {
+ return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
@@ -6420,7 +6454,7 @@ TNode<BoolT> CodeStubAssembler::IsFixedArrayWithKind(
if (IsDoubleElementsKind(kind)) {
return IsFixedDoubleArray(object);
} else {
- DCHECK(IsSmiOrObjectElementsKind(kind));
+ DCHECK(IsSmiOrObjectElementsKind(kind) || IsSealedElementsKind(kind));
return IsFixedArraySubclass(object);
}
}
@@ -6562,6 +6596,11 @@ TNode<BoolT> CodeStubAssembler::IsPrivateSymbol(
[=] { return Int32FalseConstant(); });
}
+TNode<BoolT> CodeStubAssembler::IsPrivateName(SloppyTNode<Symbol> symbol) {
+ TNode<Uint32T> flags = LoadObjectField<Uint32T>(symbol, Symbol::kFlagsOffset);
+ return IsSetWord32<Symbol::IsPrivateNameBit>(flags);
+}
+
TNode<BoolT> CodeStubAssembler::IsNativeContext(
SloppyTNode<HeapObject> object) {
return WordEqual(LoadMap(object), LoadRoot(RootIndex::kNativeContextMap));
@@ -6769,7 +6808,7 @@ TNode<BoolT> CodeStubAssembler::IsHeapNumberUint32(TNode<HeapNumber> number) {
IsHeapNumberPositive(number),
[=] {
TNode<Float64T> value = LoadHeapNumberValue(number);
- TNode<Uint32T> int_value = Unsigned(TruncateFloat64ToWord32(value));
+ TNode<Uint32T> int_value = TruncateFloat64ToWord32(value);
return Float64Equal(value, ChangeUint32ToFloat64(int_value));
},
[=] { return Int32FalseConstant(); });
@@ -7423,8 +7462,8 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
return result.value();
}
-TNode<String> CodeStubAssembler::StringFromSingleCodePoint(
- TNode<Int32T> codepoint, UnicodeEncoding encoding) {
+TNode<String> CodeStubAssembler::StringFromSingleUTF16EncodedCodePoint(
+ TNode<Int32T> codepoint) {
VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
Label if_isword16(this), if_isword32(this), return_result(this);
@@ -7440,27 +7479,6 @@ TNode<String> CodeStubAssembler::StringFromSingleCodePoint(
BIND(&if_isword32);
{
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- break;
- case UnicodeEncoding::UTF32: {
- // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
- Node* lead_offset = Int32Constant(0xD800 - (0x10000 >> 10));
-
- // lead = (codepoint >> 10) + LEAD_OFFSET
- Node* lead =
- Int32Add(Word32Shr(codepoint, Int32Constant(10)), lead_offset);
-
- // trail = (codepoint & 0x3FF) + 0xDC00;
- Node* trail = Int32Add(Word32And(codepoint, Int32Constant(0x3FF)),
- Int32Constant(0xDC00));
-
- // codpoint = (trail << 16) | lead;
- codepoint = Signed(Word32Or(Word32Shl(trail, Int32Constant(16)), lead));
- break;
- }
- }
-
Node* value = AllocateSeqTwoByteString(2);
StoreNoWriteBarrier(
MachineRepresentation::kWord32, value,
@@ -7513,7 +7531,7 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
// contains two elements (number and string) for each cache entry.
// TODO(ishell): cleanup mask handling.
Node* mask =
- BitcastTaggedToWord(LoadFixedArrayBaseLength(number_string_cache));
+ BitcastTaggedSignedToWord(LoadFixedArrayBaseLength(number_string_cache));
TNode<IntPtrT> one = IntPtrConstant(1);
mask = IntPtrSub(mask, one);
@@ -7560,8 +7578,8 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
BIND(&if_smi);
{
// Load the smi key, make sure it matches the smi we're looking for.
- Node* smi_index = BitcastWordToTagged(
- WordAnd(WordShl(BitcastTaggedToWord(smi_input.value()), one), mask));
+ Node* smi_index = BitcastWordToTagged(WordAnd(
+ WordShl(BitcastTaggedSignedToWord(smi_input.value()), one), mask));
Node* smi_key = UnsafeLoadFixedArrayElement(CAST(number_string_cache),
smi_index, 0, SMI_PARAMETERS);
GotoIf(WordNotEqual(smi_key, smi_input.value()), &runtime);
@@ -8333,40 +8351,41 @@ TNode<IntPtrT> CodeStubAssembler::EntryToIndex(TNode<IntPtrT> entry,
field_index));
}
-TNode<MaybeObject> CodeStubAssembler::LoadDescriptorArrayElement(
- TNode<DescriptorArray> object, Node* index, int additional_offset) {
- return LoadArrayElement(object, DescriptorArray::kHeaderSize, index,
- additional_offset);
+template <typename T>
+TNode<T> CodeStubAssembler::LoadDescriptorArrayElement(
+ TNode<DescriptorArray> object, TNode<IntPtrT> index,
+ int additional_offset) {
+ return LoadArrayElement<DescriptorArray, T>(
+ object, DescriptorArray::kHeaderSize, index, additional_offset);
}
TNode<Name> CodeStubAssembler::LoadKeyByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- return CAST(LoadDescriptorArrayElement(container, key_index, 0));
+ return CAST(LoadDescriptorArrayElement<HeapObject>(container, key_index, 0));
}
TNode<Uint32T> CodeStubAssembler::LoadDetailsByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToDetails =
- DescriptorArray::ToDetailsIndex(0) - DescriptorArray::ToKeyIndex(0);
- return Unsigned(
- LoadAndUntagToWord32ArrayElement(container, DescriptorArray::kHeaderSize,
- key_index, kKeyToDetails * kTaggedSize));
+ const int kKeyToDetailsOffset =
+ DescriptorArray::kEntryDetailsOffset - DescriptorArray::kEntryKeyOffset;
+ return Unsigned(LoadAndUntagToWord32ArrayElement(
+ container, DescriptorArray::kHeaderSize, key_index, kKeyToDetailsOffset));
}
TNode<Object> CodeStubAssembler::LoadValueByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToValue =
- DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0);
- return CAST(LoadDescriptorArrayElement(container, key_index,
- kKeyToValue * kTaggedSize));
+ const int kKeyToValueOffset =
+ DescriptorArray::kEntryValueOffset - DescriptorArray::kEntryKeyOffset;
+ return LoadDescriptorArrayElement<Object>(container, key_index,
+ kKeyToValueOffset);
}
TNode<MaybeObject> CodeStubAssembler::LoadFieldTypeByKeyIndex(
TNode<DescriptorArray> container, TNode<IntPtrT> key_index) {
- const int kKeyToValue =
- DescriptorArray::ToValueIndex(0) - DescriptorArray::ToKeyIndex(0);
- return LoadDescriptorArrayElement(container, key_index,
- kKeyToValue * kTaggedSize);
+ const int kKeyToValueOffset =
+ DescriptorArray::kEntryValueOffset - DescriptorArray::kEntryKeyOffset;
+ return LoadDescriptorArrayElement<MaybeObject>(container, key_index,
+ kKeyToValueOffset);
}
TNode<IntPtrT> CodeStubAssembler::DescriptorEntryToIndex(
@@ -8377,14 +8396,14 @@ TNode<IntPtrT> CodeStubAssembler::DescriptorEntryToIndex(
TNode<Name> CodeStubAssembler::LoadKeyByDescriptorEntry(
TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
- return CAST(LoadDescriptorArrayElement(
+ return CAST(LoadDescriptorArrayElement<HeapObject>(
container, DescriptorEntryToIndex(descriptor_entry),
DescriptorArray::ToKeyIndex(0) * kTaggedSize));
}
TNode<Name> CodeStubAssembler::LoadKeyByDescriptorEntry(
TNode<DescriptorArray> container, int descriptor_entry) {
- return CAST(LoadDescriptorArrayElement(
+ return CAST(LoadDescriptorArrayElement<HeapObject>(
container, IntPtrConstant(0),
DescriptorArray::ToKeyIndex(descriptor_entry) * kTaggedSize));
}
@@ -8406,14 +8425,14 @@ TNode<Uint32T> CodeStubAssembler::LoadDetailsByDescriptorEntry(
TNode<Object> CodeStubAssembler::LoadValueByDescriptorEntry(
TNode<DescriptorArray> container, int descriptor_entry) {
- return CAST(LoadDescriptorArrayElement(
+ return LoadDescriptorArrayElement<Object>(
container, IntPtrConstant(0),
- DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize));
+ DescriptorArray::ToValueIndex(descriptor_entry) * kTaggedSize);
}
TNode<MaybeObject> CodeStubAssembler::LoadFieldTypeByDescriptorEntry(
TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
- return LoadDescriptorArrayElement(
+ return LoadDescriptorArrayElement<MaybeObject>(
container, DescriptorEntryToIndex(descriptor_entry),
DescriptorArray::ToValueIndex(0) * kTaggedSize);
}
@@ -9503,15 +9522,15 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
Node* accessor_info = value;
CSA_ASSERT(this, IsAccessorInfo(value));
CSA_ASSERT(this, TaggedIsNotSmi(receiver));
- Label if_array(this), if_function(this), if_value(this);
+ Label if_array(this), if_function(this), if_wrapper(this);
// Dispatch based on {receiver} instance type.
Node* receiver_map = LoadMap(receiver);
Node* receiver_instance_type = LoadMapInstanceType(receiver_map);
GotoIf(IsJSArrayInstanceType(receiver_instance_type), &if_array);
GotoIf(IsJSFunctionInstanceType(receiver_instance_type), &if_function);
- Branch(IsJSValueInstanceType(receiver_instance_type), &if_value,
- if_bailout);
+ Branch(IsJSPrimitiveWrapperInstanceType(receiver_instance_type),
+ &if_wrapper, if_bailout);
// JSArray AccessorInfo case.
BIND(&if_array);
@@ -9538,14 +9557,15 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
Goto(&done);
}
- // JSValue AccessorInfo case.
- BIND(&if_value);
+ // JSPrimitiveWrapper AccessorInfo case.
+ BIND(&if_wrapper);
{
- // We only deal with the "length" accessor on JSValue string wrappers.
+ // We only deal with the "length" accessor on JSPrimitiveWrapper string
+ // wrappers.
GotoIfNot(IsLengthString(
LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
if_bailout);
- Node* receiver_value = LoadJSValueValue(receiver);
+ Node* receiver_value = LoadJSPrimitiveWrapperValue(receiver);
GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout);
GotoIfNot(IsString(receiver_value), if_bailout);
var_value.Bind(LoadStringLengthAsSmi(receiver_value));
@@ -9646,8 +9666,9 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
// clang-format off
int32_t values[] = {
// Handled by {if_isobjectorsmi}.
- PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS,
- HOLEY_ELEMENTS,
+ PACKED_SMI_ELEMENTS, HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, HOLEY_ELEMENTS,
+ PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
+ HOLEY_FROZEN_ELEMENTS,
// Handled by {if_isdouble}.
PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS,
// Handled by {if_isdictionary}.
@@ -9673,7 +9694,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
};
Label* labels[] = {
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
- &if_isobjectorsmi,
+ &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
+ &if_isobjectorsmi, &if_isobjectorsmi,
&if_isdouble, &if_isdouble,
&if_isdictionary,
&if_isfaststringwrapper,
@@ -9731,8 +9753,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
BIND(&if_isfaststringwrapper);
{
- CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
- Node* string = LoadJSValueValue(object);
+ CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
+ Node* string = LoadJSPrimitiveWrapperValue(object);
CSA_ASSERT(this, IsString(string));
Node* length = LoadStringLengthAsWord(string);
GotoIf(UintPtrLessThan(intptr_index, length), if_found);
@@ -9740,8 +9762,8 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
BIND(&if_isslowstringwrapper);
{
- CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
- Node* string = LoadJSValueValue(object);
+ CSA_ASSERT(this, HasInstanceType(object, JS_PRIMITIVE_WRAPPER_TYPE));
+ Node* string = LoadJSPrimitiveWrapperValue(object);
CSA_ASSERT(this, IsString(string));
Node* length = LoadStringLengthAsWord(string);
GotoIf(UintPtrLessThan(intptr_index, length), if_found);
@@ -9749,7 +9771,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
BIND(&if_typedarray);
{
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
GotoIf(IsDetachedBuffer(buffer), if_absent);
TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
@@ -9794,15 +9816,15 @@ void CodeStubAssembler::BranchIfMaybeSpecialIndex(TNode<String> name_string,
}
void CodeStubAssembler::TryPrototypeChainLookup(
- Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder,
+ Node* receiver, Node* object, Node* key,
+ const LookupInHolder& lookup_property_in_holder,
const LookupInHolder& lookup_element_in_holder, Label* if_end,
Label* if_bailout, Label* if_proxy) {
// Ensure receiver is JSReceiver, otherwise bailout.
- Label if_objectisnotsmi(this);
- Branch(TaggedIsSmi(receiver), if_bailout, &if_objectisnotsmi);
- BIND(&if_objectisnotsmi);
+ GotoIf(TaggedIsSmi(receiver), if_bailout);
+ CSA_ASSERT(this, TaggedIsNotSmi(object));
- Node* map = LoadMap(receiver);
+ Node* map = LoadMap(object);
Node* instance_type = LoadMapInstanceType(map);
{
Label if_objectisreceiver(this);
@@ -9812,9 +9834,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
if_bailout);
BIND(&if_objectisreceiver);
- if (if_proxy) {
- GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy);
- }
+ GotoIf(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), if_proxy);
}
VARIABLE(var_index, MachineType::PointerRepresentation());
@@ -9826,7 +9846,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
BIND(&if_iskeyunique);
{
- VARIABLE(var_holder, MachineRepresentation::kTagged, receiver);
+ VARIABLE(var_holder, MachineRepresentation::kTagged, object);
VARIABLE(var_holder_map, MachineRepresentation::kTagged, map);
VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32,
instance_type);
@@ -9872,7 +9892,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
}
BIND(&if_keyisindex);
{
- VARIABLE(var_holder, MachineRepresentation::kTagged, receiver);
+ VARIABLE(var_holder, MachineRepresentation::kTagged, object);
VARIABLE(var_holder_map, MachineRepresentation::kTagged, map);
VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32,
instance_type);
@@ -10049,7 +10069,7 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
Smi smi_index;
constant_index = ToSmiConstant(index_node, &smi_index);
if (constant_index) index = smi_index.value();
- index_node = BitcastTaggedToWord(index_node);
+ index_node = BitcastTaggedSignedToWord(index_node);
} else {
DCHECK(mode == INTPTR_PARAMETERS);
constant_index = ToIntPtrConstant(index_node, index);
@@ -10594,7 +10614,8 @@ void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode,
- Label* bailout, Node* context) {
+ Label* bailout, Node* context,
+ Variable* maybe_converted_value) {
CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
Node* elements = LoadElements(object);
@@ -10610,12 +10631,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
TNode<IntPtrT> intptr_key = TryToIntptr(key, bailout);
if (IsTypedArrayElementsKind(elements_kind)) {
- Label done(this);
+ Label done(this), update_value_and_bailout(this, Label::kDeferred);
// IntegerIndexedElementSet converts value to a Number/BigInt prior to the
// bounds check.
- value = PrepareValueForWriteToTypedArray(CAST(value), elements_kind,
- CAST(context));
+ Node* converted_value = PrepareValueForWriteToTypedArray(
+ CAST(value), elements_kind, CAST(context));
// There must be no allocations between the buffer load and
// and the actual store to backing store, because GC may decide that
@@ -10623,8 +10644,12 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// TODO(ishell): introduce DisallowHeapAllocationCode scope here.
// Check if buffer has been detached.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), bailout);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
+ if (maybe_converted_value) {
+ GotoIf(IsDetachedBuffer(buffer), &update_value_and_bailout);
+ } else {
+ GotoIf(IsDetachedBuffer(buffer), bailout);
+ }
// Bounds check.
TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
@@ -10633,27 +10658,88 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// Skip the store if we write beyond the length or
// to a property with a negative integer index.
GotoIfNot(UintPtrLessThan(intptr_key, length), &done);
- } else if (store_mode == STANDARD_STORE) {
- GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
} else {
- // This case is produced due to the dispatched call in
- // ElementsTransitionAndStore and StoreFastElement.
- // TODO(jgruber): Avoid generating unsupported combinations to save code
- // size.
- DebugBreak();
+ DCHECK_EQ(store_mode, STANDARD_STORE);
+ GotoIfNot(UintPtrLessThan(intptr_key, length), &update_value_and_bailout);
}
TNode<RawPtrT> backing_store = LoadJSTypedArrayBackingStore(CAST(object));
- StoreElement(backing_store, elements_kind, intptr_key, value,
+ StoreElement(backing_store, elements_kind, intptr_key, converted_value,
parameter_mode);
Goto(&done);
+ BIND(&update_value_and_bailout);
+ // We already prepared the incoming value for storing into a typed array.
+ // This might involve calling ToNumber in some cases. We shouldn't call
+ // ToNumber again in the runtime so pass the converted value to the runtime.
+ // The prepared value is an untagged value. Convert it to a tagged value
+ // to pass it to runtime. It is not possible to do the detached buffer check
+ // before we prepare the value, since ToNumber can detach the ArrayBuffer.
+ // The spec specifies the order of these operations.
+ if (maybe_converted_value != nullptr) {
+ switch (elements_kind) {
+ case UINT8_ELEMENTS:
+ case INT8_ELEMENTS:
+ case UINT16_ELEMENTS:
+ case INT16_ELEMENTS:
+ case UINT8_CLAMPED_ELEMENTS:
+ maybe_converted_value->Bind(SmiFromInt32(converted_value));
+ break;
+ case UINT32_ELEMENTS:
+ maybe_converted_value->Bind(ChangeUint32ToTagged(converted_value));
+ break;
+ case INT32_ELEMENTS:
+ maybe_converted_value->Bind(ChangeInt32ToTagged(converted_value));
+ break;
+ case FLOAT32_ELEMENTS: {
+ Label dont_allocate_heap_number(this), end(this);
+ GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number);
+ GotoIf(IsHeapNumber(value), &dont_allocate_heap_number);
+ {
+ maybe_converted_value->Bind(AllocateHeapNumberWithValue(
+ ChangeFloat32ToFloat64(converted_value)));
+ Goto(&end);
+ }
+ BIND(&dont_allocate_heap_number);
+ {
+ maybe_converted_value->Bind(value);
+ Goto(&end);
+ }
+ BIND(&end);
+ break;
+ }
+ case FLOAT64_ELEMENTS: {
+ Label dont_allocate_heap_number(this), end(this);
+ GotoIf(TaggedIsSmi(value), &dont_allocate_heap_number);
+ GotoIf(IsHeapNumber(value), &dont_allocate_heap_number);
+ {
+ maybe_converted_value->Bind(
+ AllocateHeapNumberWithValue(converted_value));
+ Goto(&end);
+ }
+ BIND(&dont_allocate_heap_number);
+ {
+ maybe_converted_value->Bind(value);
+ Goto(&end);
+ }
+ BIND(&end);
+ break;
+ }
+ case BIGINT64_ELEMENTS:
+ case BIGUINT64_ELEMENTS:
+ maybe_converted_value->Bind(converted_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ Goto(bailout);
+
BIND(&done);
return;
}
- DCHECK(
- IsFastElementsKind(elements_kind) ||
- IsInRange(elements_kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS));
+ DCHECK(IsFastElementsKind(elements_kind) ||
+ IsSealedElementsKind(elements_kind));
Node* length = SelectImpl(
IsJSArray(object), [=]() { return LoadJSArrayLength(object); },
@@ -10670,18 +10756,24 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
value = TryTaggedToFloat64(value, bailout);
}
- if (IsGrowStoreMode(store_mode) &&
- !(IsInRange(elements_kind, PACKED_SEALED_ELEMENTS,
- HOLEY_SEALED_ELEMENTS))) {
+ if (IsGrowStoreMode(store_mode) && !IsSealedElementsKind(elements_kind)) {
elements = CheckForCapacityGrow(object, elements, elements_kind, length,
intptr_key, parameter_mode, bailout);
} else {
GotoIfNot(UintPtrLessThan(intptr_key, length), bailout);
}
+ // Cannot store to a hole in holey sealed elements so bailout.
+ if (elements_kind == HOLEY_SEALED_ELEMENTS) {
+ TNode<Object> target_value =
+ LoadFixedArrayElement(CAST(elements), intptr_key);
+ GotoIf(IsTheHole(target_value), bailout);
+ }
+
// If we didn't grow {elements}, it might still be COW, in which case we
// copy it now.
- if (!IsSmiOrObjectElementsKind(elements_kind)) {
+ if (!(IsSmiOrObjectElementsKind(elements_kind) ||
+ IsSealedElementsKind(elements_kind))) {
CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements))));
} else if (IsCOWHandlingStoreMode(store_mode)) {
elements = CopyElementsOnWrite(object, elements, elements_kind, length,
@@ -10925,7 +11017,8 @@ TNode<AllocationSite> CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
// Link the object to the allocation site list
TNode<ExternalReference> site_list = ExternalConstant(
ExternalReference::allocation_sites_list_address(isolate()));
- TNode<Object> next_site = CAST(LoadBufferObject(site_list, 0));
+ TNode<Object> next_site =
+ LoadBufferObject(ReinterpretCast<RawPtrT>(site_list), 0);
// TODO(mvstanton): This is a store to a weak pointer, which we may want to
// mark as such in order to skip the write barrier, once we have a unified
@@ -12155,8 +12248,9 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
return result.value();
}
-Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
- Variable* var_type_feedback) {
+TNode<Oddball> CodeStubAssembler::StrictEqual(SloppyTNode<Object> lhs,
+ SloppyTNode<Object> rhs,
+ Variable* var_type_feedback) {
// Pseudo-code for the algorithm below:
//
// if (lhs == rhs) {
@@ -12208,7 +12302,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Label if_equal(this), if_notequal(this), if_not_equivalent_types(this),
end(this);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Oddball, result);
OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kNone);
@@ -12235,7 +12329,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisnotsmi);
{
// Load the map of {lhs}.
- Node* lhs_map = LoadMap(lhs);
+ TNode<Map> lhs_map = LoadMap(CAST(lhs));
// Check if {lhs} is a HeapNumber.
Label if_lhsisnumber(this), if_lhsisnotnumber(this);
@@ -12250,8 +12344,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsissmi);
{
// Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = LoadHeapNumberValue(lhs);
- Node* rhs_value = SmiToFloat64(rhs);
+ Node* lhs_value = LoadHeapNumberValue(CAST(lhs));
+ Node* rhs_value = SmiToFloat64(CAST(rhs));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
@@ -12261,8 +12355,9 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisnotsmi);
{
+ TNode<HeapObject> rhs_ho = CAST(rhs);
// Load the map of {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ TNode<Map> rhs_map = LoadMap(rhs_ho);
// Check if {rhs} is also a HeapNumber.
Label if_rhsisnumber(this), if_rhsisnotnumber(this);
@@ -12271,8 +12366,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisnumber);
{
// Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = LoadHeapNumberValue(lhs);
- Node* rhs_value = LoadHeapNumberValue(rhs);
+ Node* lhs_value = LoadHeapNumberValue(CAST(lhs));
+ Node* rhs_value = LoadHeapNumberValue(CAST(rhs));
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kNumber);
@@ -12308,7 +12403,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisstring);
{
// Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
+ Node* rhs_instance_type = LoadInstanceType(CAST(rhs));
// Check if {rhs} is also a String.
Label if_rhsisstring(this, Label::kDeferred),
@@ -12325,8 +12420,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
CollectFeedbackForString(rhs_instance_type);
var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback));
}
- result.Bind(CallBuiltin(Builtins::kStringEqual,
- NoContextConstant(), lhs, rhs));
+ result = CAST(CallBuiltin(Builtins::kStringEqual,
+ NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -12344,7 +12439,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisbigint);
{
// Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
+ TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
// Check if {rhs} is also a BigInt.
Label if_rhsisbigint(this, Label::kDeferred),
@@ -12356,8 +12451,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
{
CombineFeedback(var_type_feedback,
CompareOperationFeedback::kBigInt);
- result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
- NoContextConstant(), lhs, rhs));
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -12368,8 +12463,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_lhsisnotbigint);
if (var_type_feedback != nullptr) {
// Load the instance type of {rhs}.
- Node* rhs_map = LoadMap(rhs);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ TNode<Map> rhs_map = LoadMap(CAST(rhs));
+ TNode<Uint16T> rhs_instance_type = LoadMapInstanceType(rhs_map);
Label if_lhsissymbol(this), if_lhsisreceiver(this),
if_lhsisoddball(this);
@@ -12442,7 +12537,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisnotsmi);
{
// Load the map of the {rhs}.
- Node* rhs_map = LoadMap(rhs);
+ TNode<Map> rhs_map = LoadMap(CAST(rhs));
// The {rhs} could be a HeapNumber with the same value as {lhs}.
Label if_rhsisnumber(this), if_rhsisnotnumber(this);
@@ -12451,8 +12546,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_rhsisnumber);
{
// Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = SmiToFloat64(lhs);
- Node* rhs_value = LoadHeapNumberValue(rhs);
+ TNode<Float64T> lhs_value = SmiToFloat64(CAST(lhs));
+ TNode<Float64T> rhs_value = LoadHeapNumberValue(CAST(rhs));
CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
@@ -12468,7 +12563,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_equal);
{
- result.Bind(TrueConstant());
+ result = TrueConstant();
Goto(&end);
}
@@ -12480,7 +12575,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
BIND(&if_notequal);
{
- result.Bind(FalseConstant());
+ result = FalseConstant();
Goto(&end);
}
@@ -12636,7 +12731,7 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<Context> context,
&return_true, &return_false, next_holder, if_bailout);
};
- TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ TryPrototypeChainLookup(object, object, key, lookup_property_in_holder,
lookup_element_in_holder, &return_false,
&call_runtime, &if_proxy);
@@ -13114,8 +13209,9 @@ TNode<JSArrayIterator> CodeStubAssembler::CreateArrayIterator(
return CAST(iterator);
}
-Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value,
- Node* done) {
+TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
+ SloppyTNode<Context> context, SloppyTNode<Object> value,
+ SloppyTNode<Oddball> done) {
CSA_ASSERT(this, IsBoolean(done));
Node* native_context = LoadNativeContext(context);
Node* map =
@@ -13128,7 +13224,7 @@ Node* CodeStubAssembler::AllocateJSIteratorResult(Node* context, Node* value,
RootIndex::kEmptyFixedArray);
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value);
StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done);
- return result;
+ return CAST(result);
}
Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
@@ -13174,9 +13270,8 @@ TNode<JSReceiver> CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
return Construct(context, constructor, len);
}
-Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
- CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
- TNode<Uint32T> buffer_bit_field = LoadJSArrayBufferBitField(CAST(buffer));
+TNode<BoolT> CodeStubAssembler::IsDetachedBuffer(TNode<JSArrayBuffer> buffer) {
+ TNode<Uint32T> buffer_bit_field = LoadJSArrayBufferBitField(buffer);
return IsSetWord32<JSArrayBuffer::WasDetachedBit>(buffer_bit_field);
}
@@ -13367,7 +13462,8 @@ void CodeStubArguments::PopAndReturn(Node* value) {
value);
}
-Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsFastElementsKind(
+ TNode<Int32T> elements_kind) {
STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
return Uint32LessThanOrEqual(elements_kind,
Int32Constant(LAST_FAST_ELEMENTS_KIND));
@@ -13382,7 +13478,8 @@ TNode<BoolT> CodeStubAssembler::IsDoubleElementsKind(
Int32Constant(PACKED_DOUBLE_ELEMENTS / 2));
}
-Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsFastSmiOrTaggedElementsKind(
+ TNode<Int32T> elements_kind) {
STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND);
STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS > TERMINAL_FAST_ELEMENTS_KIND);
@@ -13390,12 +13487,14 @@ Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) {
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND));
}
-Node* CodeStubAssembler::IsFastSmiElementsKind(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsFastSmiElementsKind(
+ SloppyTNode<Int32T> elements_kind) {
return Uint32LessThanOrEqual(elements_kind,
Int32Constant(HOLEY_SMI_ELEMENTS));
}
-Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKind(
+ TNode<Int32T> elements_kind) {
CSA_ASSERT(this, IsFastElementsKind(elements_kind));
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1));
@@ -13404,7 +13503,8 @@ Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
return IsSetWord32(elements_kind, 1);
}
-Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) {
+TNode<BoolT> CodeStubAssembler::IsHoleyFastElementsKindForRead(
+ TNode<Int32T> elements_kind) {
CSA_ASSERT(this,
Uint32LessThanOrEqual(elements_kind,
Int32Constant(LAST_FROZEN_ELEMENTS_KIND)));
@@ -13417,8 +13517,8 @@ Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) {
return IsSetWord32(elements_kind, 1);
}
-Node* CodeStubAssembler::IsElementsKindGreaterThan(
- Node* target_kind, ElementsKind reference_kind) {
+TNode<BoolT> CodeStubAssembler::IsElementsKindGreaterThan(
+ TNode<Int32T> target_kind, ElementsKind reference_kind) {
return Int32GreaterThan(target_kind, Int32Constant(reference_kind));
}
@@ -13442,14 +13542,6 @@ Node* CodeStubAssembler::IsDebugActive() {
return Word32NotEqual(is_debug_active, Int32Constant(0));
}
-TNode<BoolT> CodeStubAssembler::IsRuntimeCallStatsEnabled() {
- STATIC_ASSERT(sizeof(TracingFlags::runtime_stats) == kInt32Size);
- TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
- MachineType::Int32(),
- ExternalConstant(ExternalReference::address_of_runtime_stats_flag())));
- return Word32NotEqual(flag_value, Int32Constant(0));
-}
-
Node* CodeStubAssembler::IsPromiseHookEnabled() {
Node* const promise_hook = Load(
MachineType::Pointer(),
@@ -13494,8 +13586,9 @@ TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
int index_shift = kSystemPointerSizeLog2 - kSmiShiftBits;
TNode<WordT> table_index =
- index_shift >= 0 ? WordShl(BitcastTaggedToWord(builtin_id), index_shift)
- : WordSar(BitcastTaggedToWord(builtin_id), -index_shift);
+ index_shift >= 0
+ ? WordShl(BitcastTaggedSignedToWord(builtin_id), index_shift)
+ : WordSar(BitcastTaggedSignedToWord(builtin_id), -index_shift);
return CAST(
Load(MachineType::TaggedPointer(),
@@ -13637,18 +13730,6 @@ Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
return fun;
}
-Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function,
- StackFrame::Type frame_type) {
- return WordEqual(marker_or_function,
- IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
-}
-
-Node* CodeStubAssembler::MarkerIsNotFrameType(Node* marker_or_function,
- StackFrame::Type frame_type) {
- return WordNotEqual(marker_or_function,
- IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
-}
-
void CodeStubAssembler::CheckPrototypeEnumCache(Node* receiver,
Node* receiver_map,
Label* if_fast,
@@ -13923,7 +14004,7 @@ void CodeStubAssembler::GotoIfInitialPrototypePropertiesModified(
if (i == 0) {
combined_details = details;
} else {
- combined_details = Unsigned(Word32And(combined_details, details));
+ combined_details = Word32And(combined_details, details);
}
}
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 207eb509e1..00a84c3926 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -10,9 +10,8 @@
#include "src/base/macros.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
+#include "src/common/message-template.h"
#include "src/compiler/code-assembler.h"
-#include "src/execution/frames.h"
-#include "src/execution/message-template.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
#include "src/objects/objects.h"
@@ -39,7 +38,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
PromiseSpeciesProtector) \
V(TypedArraySpeciesProtector, typed_array_species_protector, \
TypedArraySpeciesProtector) \
- V(RegExpSpeciesProtector, regexp_species_protector, RegExpSpeciesProtector)
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
@@ -111,59 +109,45 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#endif
#ifdef DEBUG
-// Add stringified versions to the given values, except the first. That is,
-// transform
-// x, a, b, c, d, e, f
-// to
-// a, "a", b, "b", c, "c", d, "d", e, "e", f, "f"
-//
-// __VA_ARGS__ is ignored to allow the caller to pass through too many
-// parameters, and the first element is ignored to support having no extra
-// values without empty __VA_ARGS__ (which cause all sorts of problems with
-// extra commas).
-#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(_, v1, v2, v3, v4, v5, ...) \
- v1, #v1, v2, #v2, v3, #v3, v4, #v4, v5, #v5
-
-// Stringify the given variable number of arguments. The arguments are trimmed
-// to 5 if there are too many, and padded with nullptr if there are not enough.
-#define CSA_ASSERT_STRINGIFY_EXTRA_VALUES(...) \
- CSA_ASSERT_STRINGIFY_EXTRA_VALUES_5(__VA_ARGS__, nullptr, nullptr, nullptr, \
- nullptr, nullptr)
-
-#define CSA_ASSERT_GET_FIRST(x, ...) (x)
-#define CSA_ASSERT_GET_FIRST_STR(x, ...) #x
+// CSA_ASSERT_ARGS generates an
+// std::initializer_list<CodeStubAssembler::ExtraNode> from __VA_ARGS__. It
+// currently supports between 0 and 2 arguments.
+
+// clang-format off
+#define CSA_ASSERT_0_ARGS(...) {}
+#define CSA_ASSERT_1_ARG(a, ...) {{a, #a}}
+#define CSA_ASSERT_2_ARGS(a, b, ...) {{a, #a}, {b, #b}}
+// clang-format on
+#define SWITCH_CSA_ASSERT_ARGS(dummy, a, b, FUNC, ...) FUNC(a, b)
+#define CSA_ASSERT_ARGS(...) \
+ SWITCH_CSA_ASSERT_ARGS(dummy, ##__VA_ARGS__, CSA_ASSERT_2_ARGS, \
+ CSA_ASSERT_1_ARG, CSA_ASSERT_0_ARGS)
// CSA_ASSERT(csa, <condition>, <extra values to print...>)
-// We have to jump through some hoops to allow <extra values to print...> to be
-// empty.
-#define CSA_ASSERT(csa, ...) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- return implicit_cast<compiler::SloppyTNode<Word32T>>( \
- EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__))); \
- }, \
- EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, __LINE__, \
- CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
+#define CSA_ASSERT(csa, condition_node, ...) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ return implicit_cast<compiler::SloppyTNode<Word32T>>(condition_node); \
+ }, \
+ #condition_node, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
// CSA_ASSERT_BRANCH(csa, [](Label* ok, Label* not_ok) {...},
// <extra values to print...>)
-#define CSA_ASSERT_BRANCH(csa, ...) \
- (csa)->Assert(EXPAND(CSA_ASSERT_GET_FIRST(__VA_ARGS__)), \
- EXPAND(CSA_ASSERT_GET_FIRST_STR(__VA_ARGS__)), __FILE__, \
- __LINE__, CSA_ASSERT_STRINGIFY_EXTRA_VALUES(__VA_ARGS__))
-
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> compiler::Node* { \
- compiler::Node* const argc = \
- (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
- "argc")
+#define CSA_ASSERT_BRANCH(csa, gen, ...) \
+ (csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
+
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> compiler::Node* { \
+ compiler::Node* const argc = \
+ (csa)->Parameter(Descriptor::kJSActualArgumentsCount); \
+ return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ {{SmiFromInt32((csa)->Parameter(Descriptor::kJSActualArgumentsCount)), \
+ "argc"}})
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
@@ -490,21 +474,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> SmiToInt32(SloppyTNode<Smi> value);
// Smi operations.
-#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
- TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- if (SmiValuesAre32Bits()) { \
- return BitcastWordToTaggedSigned( \
- IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
- } else { \
- DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
- return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \
- Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \
- TruncateIntPtrToInt32(BitcastTaggedToWord(b))))); \
- } \
+#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<Smi> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (SmiValuesAre32Bits()) { \
+ return BitcastWordToTaggedSigned(IntPtrOpName( \
+ BitcastTaggedSignedToWord(a), BitcastTaggedSignedToWord(b))); \
+ } else { \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kSystemPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr( \
+ Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))))); \
+ } \
}
SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd, Int32Add)
SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub, Int32Sub)
@@ -523,19 +507,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
TNode<Smi> SmiShl(TNode<Smi> a, int shift) {
- return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift));
+ return BitcastWordToTaggedSigned(
+ WordShl(BitcastTaggedSignedToWord(a), shift));
}
TNode<Smi> SmiShr(TNode<Smi> a, int shift) {
return BitcastWordToTaggedSigned(
- WordAnd(WordShr(BitcastTaggedToWord(a), shift),
- BitcastTaggedToWord(SmiConstant(-1))));
+ WordAnd(WordShr(BitcastTaggedSignedToWord(a), shift),
+ BitcastTaggedSignedToWord(SmiConstant(-1))));
}
TNode<Smi> SmiSar(TNode<Smi> a, int shift) {
return BitcastWordToTaggedSigned(
- WordAnd(WordSar(BitcastTaggedToWord(a), shift),
- BitcastTaggedToWord(SmiConstant(-1))));
+ WordAnd(WordSar(BitcastTaggedSignedToWord(a), shift),
+ BitcastTaggedSignedToWord(SmiConstant(-1))));
}
Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
@@ -556,19 +541,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
}
-#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
- TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
- if (SmiValuesAre32Bits()) { \
- return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
- } else { \
- DCHECK(SmiValuesAre31Bits()); \
- if (kSystemPointerSize == kInt64Size) { \
- CSA_ASSERT(this, IsValidSmi(a)); \
- CSA_ASSERT(this, IsValidSmi(b)); \
- } \
- return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedToWord(a)), \
- TruncateIntPtrToInt32(BitcastTaggedToWord(b))); \
- } \
+#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \
+ TNode<BoolT> SmiOpName(TNode<Smi> a, TNode<Smi> b) { \
+ if (SmiValuesAre32Bits()) { \
+ return IntPtrOpName(BitcastTaggedSignedToWord(a), \
+ BitcastTaggedSignedToWord(b)); \
+ } else { \
+ DCHECK(SmiValuesAre31Bits()); \
+ if (kSystemPointerSize == kInt64Size) { \
+ CSA_ASSERT(this, IsValidSmi(a)); \
+ CSA_ASSERT(this, IsValidSmi(b)); \
+ } \
+ return Int32OpName(TruncateIntPtrToInt32(BitcastTaggedSignedToWord(a)), \
+ TruncateIntPtrToInt32(BitcastTaggedSignedToWord(b))); \
+ } \
}
SMI_COMPARISON_OP(SmiEqual, WordEqual, Word32Equal)
SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual, Word32NotEqual)
@@ -626,43 +612,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
using BranchGenerator = std::function<void(Label*, Label*)>;
using NodeGenerator = std::function<Node*()>;
-
- void Assert(const BranchGenerator& branch, const char* message = nullptr,
- const char* file = nullptr, int line = 0,
- Node* extra_node1 = nullptr, const char* extra_node1_name = "",
- Node* extra_node2 = nullptr, const char* extra_node2_name = "",
- Node* extra_node3 = nullptr, const char* extra_node3_name = "",
- Node* extra_node4 = nullptr, const char* extra_node4_name = "",
- Node* extra_node5 = nullptr, const char* extra_node5_name = "");
- void Assert(const NodeGenerator& condition_body,
- const char* message = nullptr, const char* file = nullptr,
- int line = 0, Node* extra_node1 = nullptr,
- const char* extra_node1_name = "", Node* extra_node2 = nullptr,
- const char* extra_node2_name = "", Node* extra_node3 = nullptr,
- const char* extra_node3_name = "", Node* extra_node4 = nullptr,
- const char* extra_node4_name = "", Node* extra_node5 = nullptr,
- const char* extra_node5_name = "");
- void Check(const BranchGenerator& branch, const char* message = nullptr,
- const char* file = nullptr, int line = 0,
- Node* extra_node1 = nullptr, const char* extra_node1_name = "",
- Node* extra_node2 = nullptr, const char* extra_node2_name = "",
- Node* extra_node3 = nullptr, const char* extra_node3_name = "",
- Node* extra_node4 = nullptr, const char* extra_node4_name = "",
- Node* extra_node5 = nullptr, const char* extra_node5_name = "");
- void Check(const NodeGenerator& condition_body, const char* message = nullptr,
- const char* file = nullptr, int line = 0,
- Node* extra_node1 = nullptr, const char* extra_node1_name = "",
- Node* extra_node2 = nullptr, const char* extra_node2_name = "",
- Node* extra_node3 = nullptr, const char* extra_node3_name = "",
- Node* extra_node4 = nullptr, const char* extra_node4_name = "",
- Node* extra_node5 = nullptr, const char* extra_node5_name = "");
- void FailAssert(
- const char* message = nullptr, const char* file = nullptr, int line = 0,
- Node* extra_node1 = nullptr, const char* extra_node1_name = "",
- Node* extra_node2 = nullptr, const char* extra_node2_name = "",
- Node* extra_node3 = nullptr, const char* extra_node3_name = "",
- Node* extra_node4 = nullptr, const char* extra_node4_name = "",
- Node* extra_node5 = nullptr, const char* extra_node5_name = "");
+ using ExtraNode = std::pair<Node*, const char*>;
+
+ void Assert(const BranchGenerator& branch, const char* message,
+ const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
+ void Assert(const NodeGenerator& condition_body, const char* message,
+ const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
+ void Check(const BranchGenerator& branch, const char* message,
+ const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
+ void Check(const NodeGenerator& condition_body, const char* message,
+ const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
+ void FailAssert(const char* message, const char* file, int line,
+ std::initializer_list<ExtraNode> extra_nodes = {});
void FastCheck(TNode<BoolT> condition);
@@ -794,6 +759,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// otherwise goes to {if_false}.
void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);
+ // Branches to {if_false} if ToBoolean applied to {value} yields false,
+ // otherwise goes to {if_true}.
+ void BranchIfToBooleanIsFalse(Node* value, Label* if_false, Label* if_true) {
+ BranchIfToBooleanIsTrue(value, if_true, if_false);
+ }
+
void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
// Branches to {if_true} when --force-slow-path flag has been passed.
@@ -811,8 +782,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
MachineType type = MachineType::AnyTagged());
// Load an object pointer from a buffer that isn't in the heap.
- Node* LoadBufferObject(Node* buffer, int offset,
- MachineType type = MachineType::AnyTagged());
+ Node* LoadBufferObject(Node* buffer, int offset, MachineType type);
+ TNode<Object> LoadBufferObject(TNode<RawPtrT> buffer, int offset) {
+ return CAST(LoadBufferObject(buffer, offset, MachineType::AnyTagged()));
+ }
TNode<RawPtrT> LoadBufferPointer(TNode<RawPtrT> buffer, int offset) {
return UncheckedCast<RawPtrT>(
LoadBufferObject(buffer, offset, MachineType::Pointer()));
@@ -887,15 +860,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
- return CAST(LoadFromObject(MachineTypeOf<T>::value, reference.object,
- reference.offset));
+ TNode<IntPtrT> offset =
+ IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
+ return CAST(
+ LoadFromObject(MachineTypeOf<T>::value, reference.object, offset));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
- return UncheckedCast<T>(LoadFromObject(MachineTypeOf<T>::value,
- reference.object, reference.offset));
+ TNode<IntPtrT> offset =
+ IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
+ return UncheckedCast<T>(
+ LoadFromObject(MachineTypeOf<T>::value, reference.object, offset));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value,
@@ -908,15 +885,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
} else if (std::is_same<T, Map>::value) {
write_barrier = StoreToObjectWriteBarrier::kMap;
}
- StoreToObject(rep, reference.object, reference.offset, value,
- write_barrier);
+ TNode<IntPtrT> offset =
+ IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
+ StoreToObject(rep, reference.object, offset, value, write_barrier);
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
- StoreToObject(MachineRepresentationOf<T>::value, reference.object,
- reference.offset, value, StoreToObjectWriteBarrier::kNone);
+ TNode<IntPtrT> offset =
+ IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
+ StoreToObject(MachineRepresentationOf<T>::value, reference.object, offset,
+ value, StoreToObjectWriteBarrier::kNone);
}
// Tag a smi and store it.
@@ -927,7 +907,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Load the Map of an HeapObject.
TNode<Map> LoadMap(SloppyTNode<HeapObject> object);
// Load the instance type of an HeapObject.
- TNode<Int32T> LoadInstanceType(SloppyTNode<HeapObject> object);
+ TNode<Uint16T> LoadInstanceType(SloppyTNode<HeapObject> object);
// Compare the instance the type of the object against the provided one.
TNode<BoolT> HasInstanceType(SloppyTNode<HeapObject> object,
InstanceType type);
@@ -967,7 +947,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Load bit field 3 of a map.
TNode<Uint32T> LoadMapBitField3(SloppyTNode<Map> map);
// Load the instance type of a map.
- TNode<Int32T> LoadMapInstanceType(SloppyTNode<Map> map);
+ TNode<Uint16T> LoadMapInstanceType(SloppyTNode<Map> map);
// Load the ElementsKind of a map.
TNode<Int32T> LoadMapElementsKind(SloppyTNode<Map> map);
TNode<Int32T> LoadElementsKind(SloppyTNode<HeapObject> object);
@@ -1023,8 +1003,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint32T> LoadStringLengthAsWord32(SloppyTNode<String> string);
// Loads a pointer to the sequential String char array.
Node* PointerToSeqStringData(Node* seq_string);
- // Load value field of a JSValue object.
- Node* LoadJSValueValue(Node* object);
+ // Load value field of a JSPrimitiveWrapper object.
+ Node* LoadJSPrimitiveWrapperValue(Node* object);
// Figures out whether the value of maybe_object is:
// - a SMI (jump to "if_smi", "extracted" will be the SMI value)
@@ -1076,8 +1056,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Array is any array-like type that has a fixed header followed by
// tagged elements.
- template <typename Array>
- TNode<MaybeObject> LoadArrayElement(
+ template <typename Array, typename T = MaybeObject>
+ TNode<T> LoadArrayElement(
TNode<Array> array, int array_header_size, Node* index,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS,
@@ -1232,15 +1212,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> LoadDoubleWithHoleCheck(
SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
- Node* LoadFixedTypedArrayElementAsTagged(
- Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+ TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
+ TNode<RawPtrT> data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
- TNode<WordT> data_pointer, TNode<Smi> index, TNode<Int32T> elements_kind);
+ TNode<RawPtrT> data_pointer, TNode<Smi> index_node,
+ ElementsKind elements_kind) {
+ return LoadFixedTypedArrayElementAsTagged(data_pointer, index_node,
+ elements_kind, SMI_PARAMETERS);
+ }
+ TNode<Numeric> LoadFixedTypedArrayElementAsTagged(
+ TNode<RawPtrT> data_pointer, TNode<Smi> index,
+ TNode<Int32T> elements_kind);
// Parts of the above, factored out for readability:
- Node* LoadFixedBigInt64ArrayElementAsTagged(Node* data_pointer, Node* offset);
- Node* LoadFixedBigUint64ArrayElementAsTagged(Node* data_pointer,
- Node* offset);
+ TNode<BigInt> LoadFixedBigInt64ArrayElementAsTagged(
+ SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset);
+ TNode<BigInt> LoadFixedBigUint64ArrayElementAsTagged(
+ SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset);
// 64-bit platforms only:
TNode<BigInt> BigIntFromInt64(TNode<IntPtrT> value);
TNode<BigInt> BigIntFromUint64(TNode<UintPtrT> value);
@@ -1250,10 +1238,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void StoreJSTypedArrayElementFromTagged(TNode<Context> context,
TNode<JSTypedArray> typed_array,
- TNode<Object> index_node,
+ TNode<Smi> index_node,
TNode<Object> value,
- ElementsKind elements_kind,
- ParameterMode parameter_mode);
+ ElementsKind elements_kind);
// Context manipulation
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
@@ -1534,10 +1521,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Like above, but allowing custom bitfield initialization.
TNode<BigInt> AllocateRawBigInt(TNode<IntPtrT> length);
void StoreBigIntBitfield(TNode<BigInt> bigint, TNode<Word32T> bitfield);
- void StoreBigIntDigit(TNode<BigInt> bigint, int digit_index,
+ void StoreBigIntDigit(TNode<BigInt> bigint, intptr_t digit_index,
+ TNode<UintPtrT> digit);
+ void StoreBigIntDigit(TNode<BigInt> bigint, TNode<IntPtrT> digit_index,
TNode<UintPtrT> digit);
+
TNode<Word32T> LoadBigIntBitfield(TNode<BigInt> bigint);
- TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
+ TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, intptr_t digit_index);
+ TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint,
+ TNode<IntPtrT> digit_index);
// Allocate a ByteArray with the given length.
TNode<ByteArray> AllocateByteArray(TNode<UintPtrT> length,
@@ -1573,9 +1565,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<NameDictionary> AllocateNameDictionary(int at_least_space_for);
TNode<NameDictionary> AllocateNameDictionary(
- TNode<IntPtrT> at_least_space_for);
+ TNode<IntPtrT> at_least_space_for, AllocationFlags = kNone);
TNode<NameDictionary> AllocateNameDictionaryWithCapacity(
- TNode<IntPtrT> capacity);
+ TNode<IntPtrT> capacity, AllocationFlags = kNone);
TNode<NameDictionary> CopyNameDictionary(TNode<NameDictionary> dictionary,
Label* large_object_fallback);
@@ -1604,9 +1596,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void InitializeStructBody(Node* object, Node* map, Node* size,
int start_offset = Struct::kHeaderSize);
- Node* AllocateJSObjectFromMap(
- Node* map, Node* properties = nullptr, Node* elements = nullptr,
- AllocationFlags flags = kNone,
+ TNode<JSObject> AllocateJSObjectFromMap(
+ SloppyTNode<Map> map, SloppyTNode<HeapObject> properties = nullptr,
+ SloppyTNode<FixedArray> elements = nullptr, AllocationFlags flags = kNone,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
@@ -1696,6 +1688,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
fixed_array_map);
}
+ TNode<Map> GetStructMap(InstanceType instance_type);
+
TNode<FixedArray> AllocateUninitializedFixedArray(intptr_t capacity) {
return UncheckedCast<FixedArray>(AllocateFixedArray(
PACKED_ELEMENTS, IntPtrConstant(capacity), AllocationFlag::kNone));
@@ -1745,7 +1739,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> object,
IterationKind mode);
- Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
+ TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
+ SloppyTNode<Object> value,
+ SloppyTNode<Oddball> done);
Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
TNode<JSReceiver> ArraySpeciesCreate(TNode<Context> context,
@@ -1934,6 +1930,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SMI_PARAMETERS);
}
+ TNode<FixedArray> ExtractFixedArray(
+ TNode<FixedArray> source, TNode<IntPtrT> first, TNode<IntPtrT> count,
+ TNode<IntPtrT> capacity,
+ ExtractFixedArrayFlags extract_flags =
+ ExtractFixedArrayFlag::kAllFixedArrays) {
+ return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags,
+ INTPTR_PARAMETERS));
+ }
+
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
// FixedArray, including special appropriate handling for COW arrays.
// * |source| is either a FixedArray or FixedDoubleArray from which to copy
@@ -2043,6 +2048,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> CalculateNewElementsCapacity(TNode<Smi> old_capacity) {
return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS));
}
+ TNode<IntPtrT> CalculateNewElementsCapacity(TNode<IntPtrT> old_capacity) {
+ return UncheckedCast<IntPtrT>(
+ CalculateNewElementsCapacity(old_capacity, INTPTR_PARAMETERS));
+ }
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
@@ -2086,19 +2095,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_bigint, Variable* var_bigint, Variable* var_feedback);
// Truncate the floating point value of a HeapNumber to an Int32.
- Node* TruncateHeapNumberValueToWord32(Node* object);
+ TNode<Int32T> TruncateHeapNumberValueToWord32(TNode<HeapNumber> object);
// Conversions.
- void TryHeapNumberToSmi(TNode<HeapNumber> number, TVariable<Smi>& output,
+ void TryHeapNumberToSmi(TNode<HeapNumber> number,
+ TVariable<Smi>& output, // NOLINT(runtime/references)
Label* if_smi);
- void TryFloat64ToSmi(TNode<Float64T> number, TVariable<Smi>& output,
+ void TryFloat64ToSmi(TNode<Float64T> number,
+ TVariable<Smi>& output, // NOLINT(runtime/references)
Label* if_smi);
TNode<Number> ChangeFloat64ToTagged(SloppyTNode<Float64T> value);
TNode<Number> ChangeInt32ToTagged(SloppyTNode<Int32T> value);
TNode<Number> ChangeUint32ToTagged(SloppyTNode<Uint32T> value);
TNode<Number> ChangeUintPtrToTagged(TNode<UintPtrT> value);
TNode<Uint32T> ChangeNumberToUint32(TNode<Number> value);
- TNode<Float64T> ChangeNumberToFloat64(SloppyTNode<Number> value);
+ TNode<Float64T> ChangeNumberToFloat64(TNode<Number> value);
TNode<UintPtrT> TryNumberToUintPtr(TNode<Number> value, Label* if_negative);
TNode<UintPtrT> ChangeNonnegativeNumberToUintPtr(TNode<Number> value) {
return TryNumberToUintPtr(value, nullptr);
@@ -2145,10 +2156,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Throws a TypeError for {method_name} if {value} is neither of the given
- // {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or
- // returns the {value} (or wrapped value) otherwise.
- Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type,
- char const* method_name);
+ // {primitive_type} nor a JSPrimitiveWrapper wrapping a value of
+ // {primitive_type}, or returns the {value} (or wrapped value) otherwise.
+ TNode<Object> ToThisValue(TNode<Context> context, TNode<Object> value,
+ PrimitiveType primitive_type,
+ char const* method_name);
// Throws a TypeError for {method_name} if {value} is not of the given
// instance type. Returns {value}'s map.
@@ -2231,6 +2243,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSGeneratorObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSGlobalProxyInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSGlobalProxyMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSGlobalProxy(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSObjectInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSObjectMap(SloppyTNode<Map> map);
@@ -2246,9 +2259,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSTypedArrayMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsJSValueInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSValueMap(SloppyTNode<Map> map);
- TNode<BoolT> IsJSValue(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSPrimitiveWrapperInstanceType(
+ SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSPrimitiveWrapperMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsJSPrimitiveWrapper(SloppyTNode<HeapObject> object);
TNode<BoolT> IsMap(SloppyTNode<HeapObject> object);
TNode<BoolT> IsMutableHeapNumber(SloppyTNode<HeapObject> object);
TNode<BoolT> IsName(SloppyTNode<HeapObject> object);
@@ -2260,6 +2274,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsOneByteStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrimitiveInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsPrivateSymbol(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsPrivateName(SloppyTNode<Symbol> symbol);
TNode<BoolT> IsPromiseCapability(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPropertyArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsPropertyCell(SloppyTNode<HeapObject> object);
@@ -2305,7 +2320,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsPromiseThenProtectorCellInvalid();
TNode<BoolT> IsArraySpeciesProtectorCellInvalid();
TNode<BoolT> IsTypedArraySpeciesProtectorCellInvalid();
- TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid();
+ TNode<BoolT> IsRegExpSpeciesProtectorCellInvalid(
+ TNode<Context> native_context);
TNode<BoolT> IsPromiseSpeciesProtectorCellInvalid();
TNode<BoolT> IsMockArrayBufferAllocatorFlag() {
@@ -2355,7 +2371,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return Word32Equal(a, b);
}
bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; }
- Node* IsFastElementsKind(Node* elements_kind);
+ TNode<BoolT> IsFastElementsKind(TNode<Int32T> elements_kind);
bool IsFastElementsKind(ElementsKind kind) {
return v8::internal::IsFastElementsKind(kind);
}
@@ -2366,12 +2382,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool IsDoubleElementsKind(ElementsKind kind) {
return v8::internal::IsDoubleElementsKind(kind);
}
- Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind);
- Node* IsFastSmiElementsKind(Node* elements_kind);
- Node* IsHoleyFastElementsKind(Node* elements_kind);
- Node* IsHoleyFastElementsKindForRead(Node* elements_kind);
- Node* IsElementsKindGreaterThan(Node* target_kind,
- ElementsKind reference_kind);
+ TNode<BoolT> IsFastSmiOrTaggedElementsKind(TNode<Int32T> elements_kind);
+ TNode<BoolT> IsFastSmiElementsKind(SloppyTNode<Int32T> elements_kind);
+ TNode<BoolT> IsHoleyFastElementsKind(TNode<Int32T> elements_kind);
+ TNode<BoolT> IsHoleyFastElementsKindForRead(TNode<Int32T> elements_kind);
+ TNode<BoolT> IsElementsKindGreaterThan(TNode<Int32T> target_kind,
+ ElementsKind reference_kind);
TNode<BoolT> IsElementsKindLessThanOrEqual(TNode<Int32T> target_kind,
ElementsKind reference_kind);
// Check if reference_kind_a <= target_kind <= reference_kind_b
@@ -2413,8 +2429,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* DerefIndirectString(TNode<String> string, TNode<Int32T> instance_type,
Label* cannot_deref);
- TNode<String> StringFromSingleCodePoint(TNode<Int32T> codepoint,
- UnicodeEncoding encoding);
+ TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint);
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
@@ -2578,7 +2593,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsSetSmi(SloppyTNode<Smi> smi, int untagged_mask) {
intptr_t mask_word = bit_cast<intptr_t>(Smi::FromInt(untagged_mask));
return WordNotEqual(
- WordAnd(BitcastTaggedToWord(smi), IntPtrConstant(mask_word)),
+ WordAnd(BitcastTaggedSignedToWord(smi), IntPtrConstant(mask_word)),
IntPtrConstant(0));
}
@@ -2950,11 +2965,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// If it can't handle the case {receiver}/{key} case then the control goes
// to {if_bailout}.
// If {if_proxy} is nullptr, proxies go to if_bailout.
- void TryPrototypeChainLookup(Node* receiver, Node* key,
+ void TryPrototypeChainLookup(Node* receiver, Node* object, Node* key,
const LookupInHolder& lookup_property_in_holder,
const LookupInHolder& lookup_element_in_holder,
Label* if_end, Label* if_bailout,
- Label* if_proxy = nullptr);
+ Label* if_proxy);
// Instanceof helpers.
// Returns true if {object} has {prototype} somewhere in it's prototype
@@ -3055,7 +3070,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void EmitElementStore(Node* object, Node* key, Node* value,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode, Label* bailout,
- Node* context);
+ Node* context,
+ Variable* maybe_converted_value = nullptr);
Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
Node* length, Node* key, ParameterMode mode,
@@ -3204,8 +3220,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* Equal(Node* lhs, Node* rhs, Node* context,
Variable* var_type_feedback = nullptr);
- Node* StrictEqual(Node* lhs, Node* rhs,
- Variable* var_type_feedback = nullptr);
+ TNode<Oddball> StrictEqual(SloppyTNode<Object> lhs, SloppyTNode<Object> rhs,
+ Variable* var_type_feedback = nullptr);
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
@@ -3248,13 +3264,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Debug helpers
Node* IsDebugActive();
- TNode<BoolT> IsRuntimeCallStatsEnabled();
-
// JSArrayBuffer helpers
TNode<Uint32T> LoadJSArrayBufferBitField(TNode<JSArrayBuffer> array_buffer);
TNode<RawPtrT> LoadJSArrayBufferBackingStore(
TNode<JSArrayBuffer> array_buffer);
- Node* IsDetachedBuffer(Node* buffer);
+ TNode<BoolT> IsDetachedBuffer(TNode<JSArrayBuffer> buffer);
void ThrowIfArrayBufferIsDetached(SloppyTNode<Context> context,
TNode<JSArrayBuffer> array_buffer,
const char* method_name);
@@ -3301,12 +3315,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* IsPromiseHookEnabledOrHasAsyncEventDelegate();
Node* IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate();
- // Helpers for StackFrame markers.
- Node* MarkerIsFrameType(Node* marker_or_function,
- StackFrame::Type frame_type);
- Node* MarkerIsNotFrameType(Node* marker_or_function,
- StackFrame::Type frame_type);
-
// for..in helpers
void CheckPrototypeEnumCache(Node* receiver, Node* receiver_map,
Label* if_fast, Label* if_slow);
@@ -3589,9 +3597,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
private:
// Low-level accessors for Descriptor arrays.
- TNode<MaybeObject> LoadDescriptorArrayElement(TNode<DescriptorArray> object,
- Node* index,
- int additional_offset = 0);
+ template <typename T>
+ TNode<T> LoadDescriptorArrayElement(TNode<DescriptorArray> object,
+ TNode<IntPtrT> index,
+ int additional_offset);
};
class V8_EXPORT_PRIVATE CodeStubArguments {
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 5197dd3a2f..906eb0f0ca 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -15,8 +15,10 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/pending-optimization-table.h"
#include "src/codegen/unoptimized-compilation-info.h"
#include "src/common/globals.h"
+#include "src/common/message-template.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
@@ -24,7 +26,6 @@
#include "src/debug/liveedit.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/heap-inl.h"
@@ -319,6 +320,8 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
counters->turbofan_optimize_total_foreground()->AddSample(
static_cast<int>(time_foreground.InMicroseconds()));
}
+ counters->turbofan_ticks()->AddSample(static_cast<int>(
+ compilation_info()->tick_counter().CurrentTicks() / 1000));
}
}
@@ -593,6 +596,12 @@ MaybeHandle<SharedFunctionInfo> GenerateUnoptimizedCodeForToplevel(
return MaybeHandle<SharedFunctionInfo>();
}
+ if (FLAG_stress_lazy_source_positions) {
+ // Collect source positions immediately to try and flush out bytecode
+ // mismatches.
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+ }
+
if (shared_info.is_identical_to(top_level)) {
// Ensure that the top level function is retained.
*is_compiled_scope = shared_info->is_compiled_scope();
@@ -797,18 +806,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
- // If code was pending optimization for testing, delete remove the strong root
- // that was preventing the bytecode from being flushed between marking and
- // optimization.
- if (!isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
- Handle<ObjectHashTable> table =
- handle(ObjectHashTable::cast(
- isolate->heap()->pending_optimize_for_test_bytecode()),
- isolate);
- bool was_present;
- table = table->Remove(isolate, table, handle(function->shared(), isolate),
- &was_present);
- isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+ // If code was pending optimization for testing, delete remove the entry
+ // from the table that was preventing the bytecode from being flushed
+ if (V8_UNLIKELY(FLAG_testing_d8_test_runner)) {
+ PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
Handle<Code> cached_code;
@@ -1346,6 +1347,13 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
DCHECK(!isolate->has_pending_exception());
*is_compiled_scope = shared_info->is_compiled_scope();
DCHECK(is_compiled_scope->is_compiled());
+
+ if (FLAG_stress_lazy_source_positions) {
+ // Collect source positions immediately to try and flush out bytecode
+ // mismatches.
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+ }
+
return true;
}
@@ -1599,33 +1607,103 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
return result;
}
-bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context,
- Handle<String> source) {
+// Check whether embedder allows code generation in this context.
+// (via v8::Isolate::SetAllowCodeGenerationFromStringsCallback)
+bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context,
+ Handle<String> source) {
DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate));
- // Check with callback if set.
+ DCHECK(isolate->allow_code_gen_callback());
+
+ // Callback set. Let it decide if code generation is allowed.
+ VMState<EXTERNAL> state(isolate);
+ RuntimeCallTimerScope timer(
+ isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
AllowCodeGenerationFromStringsCallback callback =
isolate->allow_code_gen_callback();
- if (callback == nullptr) {
- // No callback set and code generation disallowed.
- return false;
- } else {
- // Callback set. Let it decide if code generation is allowed.
- VMState<EXTERNAL> state(isolate);
- return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source));
+ return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source));
+}
+
+// Check whether embedder allows code generation in this context.
+// (via v8::Isolate::SetModifyCodeGenerationFromStringsCallback)
+bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
+ Handle<i::Object>* source) {
+ DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate));
+ DCHECK(isolate->modify_code_gen_callback());
+ DCHECK(source);
+
+ // Callback set. Run it, and use the return value as source, or block
+ // execution if it's not set.
+ VMState<EXTERNAL> state(isolate);
+ ModifyCodeGenerationFromStringsCallback modify_callback =
+ isolate->modify_code_gen_callback();
+ RuntimeCallTimerScope timer(
+ isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
+ MaybeLocal<v8::String> modified_source =
+ modify_callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(*source));
+ if (modified_source.IsEmpty()) return false;
+
+ // Use the new source (which might be the same as the old source) and return.
+ *source = Utils::OpenHandle(*modified_source.ToLocalChecked(), false);
+ return true;
+}
+
+// Run Embedder-mandated checks before generating code from a string.
+//
+// Returns a string to be used for compilation, or a flag that an object type
+// was encountered that is neither a string, nor something the embedder knows
+// how to handle.
+//
+// Returns: (assuming: std::tie(source, unknown_object))
+// - !source.is_null(): compilation allowed, source contains the source string.
+// - unknown_object is true: compilation allowed, but we don't know how to
+// deal with source_object.
+// - source.is_null() && !unknown_object: compilation should be blocked.
+//
+// - !source_is_null() and unknown_object can't be true at the same time.
+std::pair<MaybeHandle<String>, bool> Compiler::ValidateDynamicCompilationSource(
+ Isolate* isolate, Handle<Context> context,
+ Handle<i::Object> source_object) {
+ Handle<String> source;
+ if (source_object->IsString()) source = Handle<String>::cast(source_object);
+
+ // Check if the context unconditionally allows code gen from strings.
+ // allow_code_gen_from_strings can be many things, so we'll always check
+ // against the 'false' literal, so that e.g. undefined and 'true' are treated
+ // the same.
+ if (!context->allow_code_gen_from_strings().IsFalse(isolate)) {
+ return {source, !source_object->IsString()};
+ }
+
+ // Check if the context allows code generation for this string.
+ // allow_code_gen_callback only allows proper strings.
+ // (I.e., let allow_code_gen_callback decide, if it has been set.)
+ if (isolate->allow_code_gen_callback()) {
+ if (source_object->IsString() &&
+ CodeGenerationFromStringsAllowed(isolate, context, source)) {
+ return {source, !source_object->IsString()};
+ }
+ }
+
+ // Check if the context wants to block or modify this source object.
+ // Double-check that we really have a string now.
+ // (Let modify_code_gen_callback decide, if it's been set.)
+ if (isolate->modify_code_gen_callback()) {
+ if (ModifyCodeGenerationFromStrings(isolate, context, &source_object) &&
+ source_object->IsString())
+ return {Handle<String>::cast(source_object), false};
}
+
+ return {MaybeHandle<String>(), !source_object->IsString()};
}
-MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
- Handle<Context> context, Handle<String> source,
+MaybeHandle<JSFunction> Compiler::GetFunctionFromValidatedString(
+ Handle<Context> context, MaybeHandle<String> source,
ParseRestriction restriction, int parameters_end_pos) {
Isolate* const isolate = context->GetIsolate();
Handle<Context> native_context(context->native_context(), isolate);
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings().IsFalse(isolate) &&
- !CodeGenerationFromStringsAllowed(isolate, native_context, source)) {
+ // Raise an EvalError if we did not receive a string.
+ if (source.is_null()) {
Handle<Object> error_message =
native_context->ErrorMessageForCodeGenerationFromStrings();
THROW_NEW_ERROR(
@@ -1639,9 +1717,20 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
int eval_position = kNoSourcePosition;
Handle<SharedFunctionInfo> outer_info(
native_context->empty_function().shared(), isolate);
- return Compiler::GetFunctionFromEval(
- source, outer_info, native_context, LanguageMode::kSloppy, restriction,
- parameters_end_pos, eval_scope_position, eval_position);
+ return Compiler::GetFunctionFromEval(source.ToHandleChecked(), outer_info,
+ native_context, LanguageMode::kSloppy,
+ restriction, parameters_end_pos,
+ eval_scope_position, eval_position);
+}
+
+MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
+ Handle<Context> context, Handle<Object> source,
+ ParseRestriction restriction, int parameters_end_pos) {
+ Isolate* const isolate = context->GetIsolate();
+ Handle<Context> native_context(context->native_context(), isolate);
+ return GetFunctionFromValidatedString(
+ context, ValidateDynamicCompilationSource(isolate, context, source).first,
+ restriction, parameters_end_pos);
}
namespace {
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index a598706373..836f738123 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -132,17 +132,22 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
v8::ScriptCompiler::CompileOptions compile_options,
v8::ScriptCompiler::NoCacheReason no_cache_reason);
- // Returns true if the embedder permits compiling the given source string in
- // the given context.
- static bool CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context,
- Handle<String> source);
-
// Create a (bound) function for a String source within a context for eval.
V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
- Handle<Context> context, Handle<String> source,
+ Handle<Context> context, Handle<i::Object> source,
ParseRestriction restriction, int parameters_end_pos);
+ // Decompose GetFunctionFromString into two functions, to allow callers to
+ // deal seperately with a case of object not handled by the embedder.
+ V8_WARN_UNUSED_RESULT static std::pair<MaybeHandle<String>, bool>
+ ValidateDynamicCompilationSource(Isolate* isolate, Handle<Context> context,
+ Handle<i::Object> source_object);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction>
+ GetFunctionFromValidatedString(Handle<Context> context,
+ MaybeHandle<String> source,
+ ParseRestriction restriction,
+ int parameters_end_pos);
+
// Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Isolate* isolate, Handle<String> source,
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 613a142f24..6816c5b7ad 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/codegen/constant-pool.h"
+#include "src/codegen/assembler-arch.h"
#include "src/codegen/assembler-inl.h"
namespace v8 {
@@ -210,5 +211,253 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
#endif // defined(V8_TARGET_ARCH_PPC)
+#if defined(V8_TARGET_ARCH_ARM64)
+
+// Constant Pool.
+
+ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
+ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }
+
+RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
+ RelocInfo::Mode rmode) {
+ ConstantPoolKey key(data, rmode);
+ CHECK(key.is_value32());
+ return RecordKey(std::move(key), assm_->pc_offset());
+}
+
+RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
+ RelocInfo::Mode rmode) {
+ ConstantPoolKey key(data, rmode);
+ CHECK(!key.is_value32());
+ return RecordKey(std::move(key), assm_->pc_offset());
+}
+
+RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
+ RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
+ if (write_reloc_info == RelocInfoStatus::kMustRecord) {
+ if (key.is_value32()) {
+ if (entry32_count_ == 0) first_use_32_ = offset;
+ ++entry32_count_;
+ } else {
+ if (entry64_count_ == 0) first_use_64_ = offset;
+ ++entry64_count_;
+ }
+ }
+ entries_.insert(std::make_pair(key, offset));
+
+ if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
+ // Request constant pool emission after the next instruction.
+ SetNextCheckIn(1);
+ }
+
+ return write_reloc_info;
+}
+
+RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
+ const ConstantPoolKey& key) {
+ if (key.AllowsDeduplication()) {
+ auto existing = entries_.find(key);
+ if (existing != entries_.end()) {
+ return RelocInfoStatus::kMustOmitForDuplicate;
+ }
+ }
+ return RelocInfoStatus::kMustRecord;
+}
+
+void ConstantPool::EmitAndClear(Jump require_jump) {
+ DCHECK(!IsBlocked());
+ // Prevent recursive pool emission.
+ Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
+ Alignment require_alignment =
+ IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
+ int size = ComputeSize(require_jump, require_alignment);
+ Label size_check;
+ assm_->bind(&size_check);
+ assm_->RecordConstPool(size);
+
+ // Emit the constant pool. It is preceded by an optional branch if
+ // {require_jump} and a header which will:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ // 3) align the 64bit pool entries to 64-bit.
+ // TODO(all): Make the alignment part less fragile. Currently code is
+ // allocated as a byte array so there are no guarantees the alignment will
+ // be preserved on compaction. Currently it works as allocation seems to be
+ // 64-bit aligned.
+
+ Label after_pool;
+ if (require_jump == Jump::kRequired) assm_->b(&after_pool);
+
+ assm_->RecordComment("[ Constant Pool");
+ EmitPrologue(require_alignment);
+ if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
+ EmitEntries();
+ assm_->RecordComment("]");
+
+ if (after_pool.is_linked()) assm_->bind(&after_pool);
+
+ DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
+ Clear();
+}
+
+void ConstantPool::Clear() {
+ entries_.clear();
+ first_use_32_ = -1;
+ first_use_64_ = -1;
+ entry32_count_ = 0;
+ entry64_count_ = 0;
+ next_check_ = 0;
+}
+
+void ConstantPool::StartBlock() {
+ if (blocked_nesting_ == 0) {
+ // Prevent constant pool checks from happening by setting the next check to
+ // the biggest possible offset.
+ next_check_ = kMaxInt;
+ }
+ ++blocked_nesting_;
+}
+
+void ConstantPool::EndBlock() {
+ --blocked_nesting_;
+ if (blocked_nesting_ == 0) {
+ DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
+ // Make sure a check happens quickly after getting unblocked.
+ next_check_ = 0;
+ }
+}
+
+bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }
+
+void ConstantPool::SetNextCheckIn(size_t instructions) {
+ next_check_ =
+ assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
+}
+
+void ConstantPool::EmitEntries() {
+ for (auto iter = entries_.begin(); iter != entries_.end();) {
+ DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
+ auto range = entries_.equal_range(iter->first);
+ bool shared = iter->first.AllowsDeduplication();
+ for (auto it = range.first; it != range.second; ++it) {
+ SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
+ if (!shared) Emit(it->first);
+ }
+ if (shared) Emit(iter->first);
+ iter = range.second;
+ }
+}
+
+void ConstantPool::Emit(const ConstantPoolKey& key) {
+ if (key.is_value32()) {
+ assm_->dd(key.value32());
+ } else {
+ assm_->dq(key.value64());
+ }
+}
+
+bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
+ if (IsEmpty()) return false;
+ if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
+ return true;
+ }
+ // We compute {dist32/64}, i.e. the distance from the first instruction
+ // accessing a 32bit/64bit entry in the constant pool to any of the
+ // 32bit/64bit constant pool entries, respectively. This is required because
+ // we do not guarantee that entries are emitted in order of reference, i.e. it
+ // is possible that the entry with the earliest reference is emitted last.
+ // The constant pool should be emitted if either of the following is true:
+ // (A) {dist32/64} will be out of range at the next check in.
+ // (B) Emission can be done behind an unconditional branch and {dist32/64}
+ // exceeds {kOpportunityDist*}.
+ // (C) {dist32/64} exceeds the desired approximate distance to the pool.
+ int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
+ size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
+ size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
+ if (Entry64Count() != 0) {
+ // The 64-bit constants are always emitted before the 32-bit constants, so
+ // we subtract the size of the 32-bit constants from {size}.
+ size_t dist64 = pool_end_64 - first_use_64_;
+ bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
+ bool opportune_emission_without_jump =
+ require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
+ bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
+ if (next_check_too_late || opportune_emission_without_jump ||
+ approximate_distance_exceeded) {
+ return true;
+ }
+ }
+ if (Entry32Count() != 0) {
+ size_t dist32 = pool_end_32 - first_use_32_;
+ bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
+ bool opportune_emission_without_jump =
+ require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
+ bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
+ if (next_check_too_late || opportune_emission_without_jump ||
+ approximate_distance_exceeded) {
+ return true;
+ }
+ }
+ return false;
+}
+
+int ConstantPool::ComputeSize(Jump require_jump,
+ Alignment require_alignment) const {
+ int size_up_to_marker = PrologueSize(require_jump);
+ int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
+ size_t size_after_marker =
+ Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
+ return size_up_to_marker + static_cast<int>(size_after_marker);
+}
+
+Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
+ int pc_offset) const {
+ int size_up_to_marker = PrologueSize(require_jump);
+ if (Entry64Count() != 0 &&
+ !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
+ return Alignment::kRequired;
+ }
+ return Alignment::kOmitted;
+}
+
+bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
+ // Check that all entries are in range if the pool is emitted at {pc_offset}.
+ // This ignores kPcLoadDelta (conservatively, since all offsets are positive),
+ // and over-estimates the last entry's address with the pool's end.
+ Alignment require_alignment =
+ IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
+ size_t pool_end_32 =
+ pc_offset + ComputeSize(Jump::kRequired, require_alignment);
+ size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
+ bool entries_in_range_32 =
+ Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
+ bool entries_in_range_64 =
+ Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
+ return entries_in_range_32 && entries_in_range_64;
+}
+
+ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
+ : pool_(&assm->constpool_) {
+ pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
+ pool_->StartBlock();
+}
+
+ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
+ : pool_(&assm->constpool_) {
+ DCHECK_EQ(check, PoolEmissionCheck::kSkip);
+ pool_->StartBlock();
+}
+
+ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
+
+void ConstantPool::MaybeCheck() {
+ if (assm_->pc_offset() >= next_check_) {
+ Check(Emission::kIfNeeded, Jump::kRequired);
+ }
+}
+
+#endif // defined(V8_TARGET_ARCH_ARM64)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index 4399f6fc1f..d07452336b 100644
--- a/deps/v8/src/codegen/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
+class Instruction;
+
// -----------------------------------------------------------------------------
// Constant pool support
@@ -136,8 +138,9 @@ class ConstantPoolBuilder {
inline Label* EmittedPosition() { return &emitted_label_; }
private:
- ConstantPoolEntry::Access AddEntry(ConstantPoolEntry& entry,
- ConstantPoolEntry::Type type);
+ ConstantPoolEntry::Access AddEntry(
+ ConstantPoolEntry& entry, // NOLINT(runtime/references)
+ ConstantPoolEntry::Type type);
void EmitSharedEntries(Assembler* assm, ConstantPoolEntry::Type type);
void EmitGroup(Assembler* assm, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type);
@@ -161,6 +164,189 @@ class ConstantPoolBuilder {
#endif // defined(V8_TARGET_ARCH_PPC)
+#if defined(V8_TARGET_ARCH_ARM64)
+
+class ConstantPoolKey {
+ public:
+ explicit ConstantPoolKey(uint64_t value,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : is_value32_(false), value64_(value), rmode_(rmode) {}
+
+ explicit ConstantPoolKey(uint32_t value,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : is_value32_(true), value32_(value), rmode_(rmode) {}
+
+ uint64_t value64() const {
+ CHECK(!is_value32_);
+ return value64_;
+ }
+ uint32_t value32() const {
+ CHECK(is_value32_);
+ return value32_;
+ }
+
+ bool is_value32() const { return is_value32_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ bool AllowsDeduplication() const {
+ DCHECK(rmode_ != RelocInfo::CONST_POOL &&
+ rmode_ != RelocInfo::VENEER_POOL &&
+ rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
+ rmode_ != RelocInfo::DEOPT_INLINING_ID &&
+ rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID);
+ // CODE_TARGETs can be shared because they aren't patched anymore,
+ // and we make sure we emit only one reloc info for them (thus delta
+ // patching) will apply the delta only once. At the moment, we do not dedup
+ // code targets if they are wrapped in a heap object request (value == 0).
+ bool is_sharable_code_target =
+ rmode_ == RelocInfo::CODE_TARGET &&
+ (is_value32() ? (value32() != 0) : (value64() != 0));
+ bool is_sharable_embedded_object = RelocInfo::IsEmbeddedObjectMode(rmode_);
+ return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target ||
+ is_sharable_embedded_object;
+ }
+
+ private:
+ bool is_value32_;
+ union {
+ uint64_t value64_;
+ uint32_t value32_;
+ };
+ RelocInfo::Mode rmode_;
+};
+
+// Order for pool entries. 64bit entries go first.
+inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
+ if (a.is_value32() < b.is_value32()) return true;
+ if (a.is_value32() > b.is_value32()) return false;
+ if (a.rmode() < b.rmode()) return true;
+ if (a.rmode() > b.rmode()) return false;
+ if (a.is_value32()) return a.value32() < b.value32();
+ return a.value64() < b.value64();
+}
+
+inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
+ if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
+ return false;
+ }
+ if (a.is_value32()) return a.value32() == b.value32();
+ return a.value64() == b.value64();
+}
+
+// Constant pool generation
+enum class Jump { kOmitted, kRequired };
+enum class Emission { kIfNeeded, kForced };
+enum class Alignment { kOmitted, kRequired };
+enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
+enum class PoolEmissionCheck { kSkip };
+
+// Pools are emitted in the instruction stream, preferably after unconditional
+// jumps or after returns from functions (in dead code locations).
+// If a long code sequence does not contain unconditional jumps, it is
+// necessary to emit the constant pool before the pool gets too far from the
+// location it is accessed from. In this case, we emit a jump over the emitted
+// constant pool.
+// Constants in the pool may be addresses of functions that gets relocated;
+// if so, a relocation info entry is associated to the constant pool entry.
+class ConstantPool {
+ public:
+ explicit ConstantPool(Assembler* assm);
+ ~ConstantPool();
+
+ // Returns true when we need to write RelocInfo and false when we do not.
+ RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
+ RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
+
+ size_t Entry32Count() const { return entry32_count_; }
+ size_t Entry64Count() const { return entry64_count_; }
+ bool IsEmpty() const { return entries_.empty(); }
+ // Check if pool will be out of range at {pc_offset}.
+ bool IsInImmRangeIfEmittedAt(int pc_offset);
+ // Size in bytes of the constant pool. Depending on parameters, the size will
+ // include the branch over the pool and alignment padding.
+ int ComputeSize(Jump require_jump, Alignment require_alignment) const;
+
+ // Emit the pool at the current pc with a branch over the pool if requested.
+ void EmitAndClear(Jump require);
+ bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
+ V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
+ size_t margin = 0);
+
+ V8_EXPORT_PRIVATE void MaybeCheck();
+ void Clear();
+
+ // Constant pool emisssion can be blocked temporarily.
+ bool IsBlocked() const;
+
+ // Repeated checking whether the constant pool should be emitted is expensive;
+ // only check once a number of instructions have been generated.
+ void SetNextCheckIn(size_t instructions);
+
+ // Class for scoping postponing the constant pool generation.
+ class V8_EXPORT_PRIVATE BlockScope {
+ public:
+ // BlockScope immediatelly emits the pool if necessary to ensure that
+ // during the block scope at least {margin} bytes can be emitted without
+ // pool emission becomming necessary.
+ explicit BlockScope(Assembler* pool, size_t margin = 0);
+ BlockScope(Assembler* pool, PoolEmissionCheck);
+ ~BlockScope();
+
+ private:
+ ConstantPool* pool_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
+ };
+
+ // Hard limit to the const pool which must not be exceeded.
+ static const size_t kMaxDistToPool32;
+ static const size_t kMaxDistToPool64;
+ // Approximate distance where the pool should be emitted.
+ static const size_t kApproxDistToPool32;
+ V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
+ // Approximate distance where the pool may be emitted if
+ // no jump is required (due to a recent unconditional jump).
+ static const size_t kOpportunityDistToPool32;
+ static const size_t kOpportunityDistToPool64;
+ // PC distance between constant pool checks.
+ V8_EXPORT_PRIVATE static const size_t kCheckInterval;
+ // Number of entries in the pool which trigger a check.
+ static const size_t kApproxMaxEntryCount;
+
+ private:
+ void StartBlock();
+ void EndBlock();
+
+ void EmitEntries();
+ void EmitPrologue(Alignment require_alignment);
+ int PrologueSize(Jump require_jump) const;
+ RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
+ RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
+ void Emit(const ConstantPoolKey& key);
+ void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
+ const ConstantPoolKey& key);
+ Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
+ int pc_offset) const;
+
+ Assembler* assm_;
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_use_32_ = -1;
+ int first_use_64_ = -1;
+ // We sort not according to insertion order, but since we do not insert
+ // addresses (for heap objects we insert an index which is created in
+ // increasing order), the order is deterministic. We map each entry to the
+ // pc offset of the load. We use a multimap because we need to record the
+ // pc offset of each load of the same constant so that the immediate of the
+ // loads can be back-patched when the pool is emitted.
+ std::multimap<ConstantPoolKey, int> entries_;
+ size_t entry32_count_ = 0;
+ size_t entry64_count_ = 0;
+ int next_check_ = 0;
+ int blocked_nesting_ = 0;
+};
+
+#endif // defined(V8_TARGET_ARCH_ARM64)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index b2f792e339..dae9992c57 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -14,6 +14,7 @@ namespace internal {
// CPU feature flags.
enum CpuFeature {
// x86
+ SSE4_2,
SSE4_1,
SSSE3,
SSE3,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index 5538f361f0..c077407931 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -26,31 +26,11 @@
#include "src/logging/log.h"
#include "src/numbers/math-random.h"
#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-macro-assembler-arch.h"
#include "src/regexp/regexp-stack.h"
#include "src/strings/string-search.h"
#include "src/wasm/wasm-external-refs.h"
-// Include native regexp-macro-assembler.
-#if V8_TARGET_ARCH_IA32
-#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/regexp/x64/regexp-macro-assembler-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/regexp/arm64/regexp-macro-assembler-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/regexp/arm/regexp-macro-assembler-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/regexp/ppc/regexp-macro-assembler-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
-#else // Unknown architecture.
-#error "Unknown architecture."
-#endif // Target architecture.
-
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif // V8_INTL_SUPPORT
@@ -671,6 +651,15 @@ static Address LexicographicCompareWrapper(Isolate* isolate, Address smi_x,
FUNCTION_REFERENCE(smi_lexicographic_compare_function,
LexicographicCompareWrapper)
+FUNCTION_REFERENCE(mutable_big_int_absolute_add_and_canonicalize_function,
+ MutableBigInt_AbsoluteAddAndCanonicalize)
+
+FUNCTION_REFERENCE(mutable_big_int_absolute_compare_function,
+ MutableBigInt_AbsoluteCompare)
+
+FUNCTION_REFERENCE(mutable_big_int_absolute_sub_and_canonicalize_function,
+ MutableBigInt_AbsoluteSubAndCanonicalize)
+
FUNCTION_REFERENCE(check_object_type, CheckObjectType)
#ifdef V8_INTL_SUPPORT
@@ -786,6 +775,12 @@ ExternalReference ExternalReference::fast_c_call_caller_pc_address(
isolate->isolate_data()->fast_c_call_caller_pc_address());
}
+ExternalReference ExternalReference::stack_is_iterable_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->isolate_data()->stack_is_iterable_address());
+}
+
FUNCTION_REFERENCE(call_enqueue_microtask_function,
MicrotaskQueue::CallEnqueueMicrotask)
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 4c83a9b33a..b663ae1621 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -72,6 +72,7 @@ class StatsCounter;
"IsolateData::fast_c_call_caller_fp_address") \
V(fast_c_call_caller_pc_address, \
"IsolateData::fast_c_call_caller_pc_address") \
+ V(stack_is_iterable_address, "IsolateData::stack_is_iterable_address") \
V(address_of_regexp_stack_limit, "RegExpStack::limit_address()") \
V(address_of_regexp_stack_memory_address, "RegExpStack::memory_address()") \
V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size()") \
@@ -149,6 +150,12 @@ class StatsCounter;
V(libc_memmove_function, "libc_memmove") \
V(libc_memset_function, "libc_memset") \
V(mod_two_doubles_operation, "mod_two_doubles") \
+ V(mutable_big_int_absolute_add_and_canonicalize_function, \
+ "MutableBigInt_AbsoluteAddAndCanonicalize") \
+ V(mutable_big_int_absolute_compare_function, \
+ "MutableBigInt_AbsoluteCompare") \
+ V(mutable_big_int_absolute_sub_and_canonicalize_function, \
+ "MutableBigInt_AbsoluteSubAndCanonicalize") \
V(new_deoptimizer_function, "Deoptimizer::New()") \
V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
V(printf_function, "printf") \
diff --git a/deps/v8/src/codegen/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index 12a05e1fba..4f94746ea5 100644
--- a/deps/v8/src/codegen/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -15,31 +15,41 @@ namespace internal {
HandlerTable::HandlerTable(Code code)
: HandlerTable(code.InstructionStart() + code.handler_table_offset(),
- code.handler_table_size()) {}
+ code.handler_table_size(), kReturnAddressBasedEncoding) {}
HandlerTable::HandlerTable(BytecodeArray bytecode_array)
: HandlerTable(bytecode_array.handler_table()) {}
HandlerTable::HandlerTable(ByteArray byte_array)
- : number_of_entries_(byte_array.length() / kRangeEntrySize /
- sizeof(int32_t)),
-#ifdef DEBUG
- mode_(kRangeBasedEncoding),
-#endif
- raw_encoded_data_(
- reinterpret_cast<Address>(byte_array.GetDataStartAddress())) {
- DCHECK_EQ(0, byte_array.length() % (kRangeEntrySize * sizeof(int32_t)));
-}
+ : HandlerTable(reinterpret_cast<Address>(byte_array.GetDataStartAddress()),
+ byte_array.length(), kRangeBasedEncoding) {}
-HandlerTable::HandlerTable(Address handler_table, int handler_table_size)
- : number_of_entries_(handler_table_size / kReturnEntrySize /
+HandlerTable::HandlerTable(Address handler_table, int handler_table_size,
+ EncodingMode encoding_mode)
+ : number_of_entries_(handler_table_size / EntrySizeFromMode(encoding_mode) /
sizeof(int32_t)),
#ifdef DEBUG
- mode_(kReturnAddressBasedEncoding),
+ mode_(encoding_mode),
#endif
raw_encoded_data_(handler_table) {
+ // Check padding.
static_assert(4 < kReturnEntrySize * sizeof(int32_t), "allowed padding");
- DCHECK_GE(4, handler_table_size % (kReturnEntrySize * sizeof(int32_t)));
+ // For return address encoding, maximum padding is 4; otherwise, there should
+ // be no padding.
+ DCHECK_GE(kReturnAddressBasedEncoding == encoding_mode ? 4 : 0,
+ handler_table_size %
+ (EntrySizeFromMode(encoding_mode) * sizeof(int32_t)));
+}
+
+// static
+int HandlerTable::EntrySizeFromMode(EncodingMode mode) {
+ switch (mode) {
+ case kReturnAddressBasedEncoding:
+ return kReturnEntrySize;
+ case kRangeBasedEncoding:
+ return kRangeEntrySize;
+ }
+ UNREACHABLE();
}
int HandlerTable::GetRangeStart(int index) const {
diff --git a/deps/v8/src/codegen/handler-table.h b/deps/v8/src/codegen/handler-table.h
index eaa062873b..362412525d 100644
--- a/deps/v8/src/codegen/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -45,11 +45,14 @@ class V8_EXPORT_PRIVATE HandlerTable {
// async/await handling in the debugger can take place.
};
+ enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
+
// Constructors for the various encodings.
explicit HandlerTable(Code code);
explicit HandlerTable(ByteArray byte_array);
explicit HandlerTable(BytecodeArray bytecode_array);
- explicit HandlerTable(Address handler_table, int handler_table_size);
+ HandlerTable(Address handler_table, int handler_table_size,
+ EncodingMode encoding_mode);
// Getters for handler table based on ranges.
int GetRangeStart(int index) const;
@@ -88,11 +91,12 @@ class V8_EXPORT_PRIVATE HandlerTable {
#endif
private:
- enum EncodingMode { kRangeBasedEncoding, kReturnAddressBasedEncoding };
-
// Getters for handler table based on ranges.
CatchPrediction GetRangePrediction(int index) const;
+ // Gets entry size based on mode.
+ static int EntrySizeFromMode(EncodingMode mode);
+
// Getters for handler table based on return addresses.
int GetReturnOffset(int index) const;
int GetReturnHandler(int index) const;
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 99d38890e3..aefcab7299 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -756,6 +756,13 @@ void Assembler::cmpxchg8b(Operand dst) {
emit_operand(ecx, dst);
}
+void Assembler::mfence() {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xAE);
+ EMIT(0xF0);
+}
+
void Assembler::lfence() {
EnsureSpace ensure_space(this);
EMIT(0x0F);
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index d2dcb0f348..2423f73bdb 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -542,6 +542,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cmpxchg8b(Operand dst);
// Memory Fence
+ void mfence();
void lfence();
void pause();
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 6a0be9386e..f6f0153e54 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -1887,20 +1887,24 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below (we use
// times_half_system_pointer_size instead of times_system_pointer_size since
// smis are already shifted by one).
- mov(builtin_pointer,
- Operand(kRootRegister, builtin_pointer, times_half_system_pointer_size,
+ mov(builtin_index,
+ Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
IsolateData::builtin_entry_table_offset()));
- call(builtin_pointer);
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 345ae815af..9b13e87447 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -87,7 +87,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target) { call(target); }
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
diff --git a/deps/v8/src/codegen/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index f8f874359b..5934c80a7d 100644
--- a/deps/v8/src/codegen/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -252,6 +252,11 @@ void StringAtDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, kParameterCount);
}
+void StringAtAsStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void StringSubstringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index d166b477d8..f6c1adfe47 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -74,6 +74,7 @@ namespace internal {
V(StoreTransition) \
V(StoreWithVector) \
V(StringAt) \
+ V(StringAtAsString) \
V(StringSubstring) \
V(TypeConversion) \
V(TypeConversionStackParameter) \
@@ -969,6 +970,17 @@ class StringAtDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(StringAtDescriptor, CallInterfaceDescriptor)
};
+class StringAtAsStringDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kPosition)
+ // TODO(turbofan): Return untagged value here.
+ DEFINE_RESULT_AND_PARAMETER_TYPES(
+ MachineType::TaggedPointer(), // result string
+ MachineType::AnyTagged(), // kReceiver
+ MachineType::IntPtr()) // kPosition
+ DECLARE_DESCRIPTOR(StringAtAsStringDescriptor, CallInterfaceDescriptor)
+};
+
class StringSubstringDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kString, kFrom, kTo)
diff --git a/deps/v8/src/codegen/label.h b/deps/v8/src/codegen/label.h
index 430958d190..f45f1e62d7 100644
--- a/deps/v8/src/codegen/label.h
+++ b/deps/v8/src/codegen/label.h
@@ -99,7 +99,7 @@ class Label {
friend class Assembler;
friend class Displacement;
- friend class RegExpMacroAssemblerIrregexp;
+ friend class RegExpBytecodeGenerator;
// Disallow copy construction and assignment, but allow move construction and
// move assignment on selected platforms (see above).
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index d6337aefb6..423da2fb65 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -39,6 +39,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/codegen/mips/assembler-mips-inl.h"
+#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/heap-number-inl.h"
@@ -2211,7 +2212,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
emit(break_instr);
}
-void Assembler::stop(const char* msg, uint32_t code) {
+void Assembler::stop(uint32_t code) {
DCHECK_GT(code, kMaxWatchpointCode);
DCHECK_LE(code, kMaxStopCode);
#if V8_HOST_ARCH_MIPS
diff --git a/deps/v8/src/codegen/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 640e11cf1a..86a07ab06e 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -558,7 +558,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Break / Trap instructions.
void break_(uint32_t code, bool break_as_stop = false);
- void stop(const char* msg, uint32_t code = kMaxStopCode);
+ void stop(uint32_t code = kMaxStopCode);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
@@ -1478,11 +1478,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
- static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
- int16_t& jic_offset);
- static void UnpackTargetAddressUnsigned(uint32_t address,
- uint32_t& lui_offset,
- uint32_t& jic_offset);
+ static void UnpackTargetAddress(
+ uint32_t address, int16_t& lui_offset, // NOLINT(runtime/references)
+ int16_t& jic_offset); // NOLINT(runtime/references)
+ static void UnpackTargetAddressUnsigned(
+ uint32_t address,
+ uint32_t& lui_offset, // NOLINT(runtime/references)
+ uint32_t& jic_offset); // NOLINT(runtime/references)
static bool IsAndImmediate(Instr instr);
static bool IsEmittedConstant(Instr instr);
@@ -1513,7 +1515,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src,
+ MemOperand& src, // NOLINT(runtime/references)
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 483b7e895b..79373c1b5b 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -189,7 +189,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -3974,18 +3974,22 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
- SmiUntag(builtin_pointer, builtin_pointer);
- Lsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
- lw(builtin_pointer,
- MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
- Call(builtin_pointer);
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ Lsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2);
+ lw(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
@@ -4111,6 +4115,11 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+void TurboAssembler::LoadAddress(Register dst, Label* target) {
+ uint32_t address = jump_address(target);
+ li(dst, address);
+}
+
void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -4694,15 +4703,15 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -4938,7 +4947,7 @@ void MacroAssembler::AssertStackIsAligned() {
andi(scratch, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort re-entering here.
- stop("Unexpected stack alignment");
+ stop();
bind(&alignment_as_expected);
}
}
@@ -5352,7 +5361,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
- stop("Unexpected alignment in CallCFunction");
+ stop();
bind(&alignment_as_expected);
}
}
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index f394e01769..3dfc7bfbad 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -212,8 +212,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Label* target);
+ void LoadAddress(Register dst, Label* target);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination,
Register code_object) override {
@@ -841,9 +845,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
- bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt);
+ bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
+ OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
+ OffsetSize bits,
+ Register& scratch, // NOLINT(runtime/references)
+ const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index cb8e3dd7d1..801faf6306 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -38,6 +38,7 @@
#include "src/base/cpu.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
+#include "src/codegen/safepoint-table.h"
#include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/heap-number-inl.h"
@@ -2344,7 +2345,7 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
emit(break_instr);
}
-void Assembler::stop(const char* msg, uint32_t code) {
+void Assembler::stop(uint32_t code) {
DCHECK_GT(code, kMaxWatchpointCode);
DCHECK_LE(code, kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
diff --git a/deps/v8/src/codegen/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index c7c027eef7..a22ddf0e7d 100644
--- a/deps/v8/src/codegen/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -601,7 +601,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Break / Trap instructions.
void break_(uint32_t code, bool break_as_stop = false);
- void stop(const char* msg, uint32_t code = kMaxStopCode);
+ void stop(uint32_t code = kMaxStopCode);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
@@ -1560,7 +1560,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Helper function for memory load/store using base register and offset.
void AdjustBaseAndOffset(
- MemOperand& src,
+ MemOperand& src, // NOLINT(runtime/references)
OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
int second_access_add_to_offset = 4);
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 65c0b592eb..97e5af1fa8 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -187,7 +187,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -4274,18 +4274,22 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
- SmiUntag(builtin_pointer, builtin_pointer);
- Dlsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
- Ld(builtin_pointer,
- MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
- Call(builtin_pointer);
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ Dlsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2);
+ Ld(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::StoreReturnAddressAndCall(Register target) {
@@ -4433,6 +4437,11 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
+void TurboAssembler::LoadAddress(Register dst, Label* target) {
+ uint64_t address = jump_address(target);
+ li(dst, address);
+}
+
void TurboAssembler::Push(Smi smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5026,15 +5035,15 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -5273,7 +5282,7 @@ void MacroAssembler::AssertStackIsAligned() {
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
// Don't use Check here, as it will call Runtime_Abort re-entering here.
- stop("Unexpected stack alignment");
+ stop();
bind(&alignment_as_expected);
}
}
@@ -5698,7 +5707,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
- stop("Unexpected alignment in CallCFunction");
+ stop();
bind(&alignment_as_expected);
}
}
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index d0f9b7f5bc..eb62bec0e8 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -234,8 +234,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Label* target);
+ void LoadAddress(Register dst, Label* target);
- void CallBuiltinPointer(Register builtin_pointer) override;
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination,
Register code_object) override {
@@ -845,9 +849,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
- bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
- Register& scratch, const Operand& rt);
+ bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
+ OffsetSize bits);
+ bool CalculateOffset(Label* L, int32_t& offset, // NOLINT(runtime/references)
+ OffsetSize bits,
+ Register& scratch, // NOLINT(runtime/references)
+ const Operand& rt);
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 596d5c261e..f3582d868a 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -75,9 +75,15 @@ void OptimizedCompilationInfo::ConfigureFlags() {
break;
case Code::BYTECODE_HANDLER:
SetFlag(kCalledWithCodeStartRegister);
+ if (FLAG_turbo_splitting) {
+ MarkAsSplittingEnabled();
+ }
break;
case Code::BUILTIN:
case Code::STUB:
+ if (FLAG_turbo_splitting) {
+ MarkAsSplittingEnabled();
+ }
#if ENABLE_GDB_JIT_INTERFACE && DEBUG
MarkAsSourcePositionsEnabled();
#endif // ENABLE_GDB_JIT_INTERFACE && DEBUG
@@ -177,6 +183,8 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
return StackFrame::WASM_TO_JS;
case Code::WASM_INTERPRETER_ENTRY:
return StackFrame::WASM_INTERPRETER_ENTRY;
+ case Code::C_WASM_ENTRY:
+ return StackFrame::C_WASM_ENTRY;
default:
UNIMPLEMENTED();
return StackFrame::NONE;
@@ -206,7 +214,7 @@ bool OptimizedCompilationInfo::has_native_context() const {
return !closure().is_null() && !closure()->native_context().is_null();
}
-Context OptimizedCompilationInfo::native_context() const {
+NativeContext OptimizedCompilationInfo::native_context() const {
DCHECK(has_native_context());
return closure()->native_context();
}
@@ -234,6 +242,8 @@ void OptimizedCompilationInfo::SetTracingFlags(bool passes_filter) {
if (FLAG_trace_turbo) SetFlag(kTraceTurboJson);
if (FLAG_trace_turbo_graph) SetFlag(kTraceTurboGraph);
if (FLAG_trace_turbo_scheduled) SetFlag(kTraceTurboScheduled);
+ if (FLAG_trace_turbo_alloc) SetFlag(kTraceTurboAllocation);
+ if (FLAG_trace_heap_broker) SetFlag(kTraceHeapBroker);
}
OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder(
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index eca3a8fa32..624517283e 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -9,6 +9,7 @@
#include "src/codegen/bailout-reason.h"
#include "src/codegen/source-position-table.h"
+#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
#include "src/execution/frames.h"
#include "src/handles/handles.h"
@@ -60,9 +61,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
kTraceTurboJson = 1 << 14,
kTraceTurboGraph = 1 << 15,
kTraceTurboScheduled = 1 << 16,
- kWasmRuntimeExceptionSupport = 1 << 17,
- kTurboControlFlowAwareAllocation = 1 << 18,
- kTurboPreprocessRanges = 1 << 19
+ kTraceTurboAllocation = 1 << 17,
+ kTraceHeapBroker = 1 << 18,
+ kWasmRuntimeExceptionSupport = 1 << 19,
+ kTurboControlFlowAwareAllocation = 1 << 20,
+ kTurboPreprocessRanges = 1 << 21
};
// Construct a compilation info for optimized compilation.
@@ -189,10 +192,16 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool trace_turbo_graph_enabled() const { return GetFlag(kTraceTurboGraph); }
+ bool trace_turbo_allocation_enabled() const {
+ return GetFlag(kTraceTurboAllocation);
+ }
+
bool trace_turbo_scheduled_enabled() const {
return GetFlag(kTraceTurboScheduled);
}
+ bool trace_heap_broker_enabled() const { return GetFlag(kTraceHeapBroker); }
+
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }
@@ -204,7 +213,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Context context() const;
bool has_native_context() const;
- Context native_context() const;
+ NativeContext native_context() const;
bool has_global_object() const;
JSGlobalObject global_object() const;
@@ -281,6 +290,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
std::unique_ptr<v8::tracing::TracedValue> ToTracedValue();
+ TickCounter& tick_counter() { return tick_counter_; }
+
private:
OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone);
void ConfigureFlags();
@@ -333,6 +344,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
Vector<const char> debug_name_;
std::unique_ptr<char[]> trace_turbo_filename_;
+ TickCounter tick_counter_;
+
DISALLOW_COPY_AND_ASSIGN(OptimizedCompilationInfo);
};
diff --git a/deps/v8/src/codegen/pending-optimization-table.cc b/deps/v8/src/codegen/pending-optimization-table.cc
new file mode 100644
index 0000000000..9e33de7918
--- /dev/null
+++ b/deps/v8/src/codegen/pending-optimization-table.cc
@@ -0,0 +1,97 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/pending-optimization-table.h"
+
+#include "src/execution/isolate-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/hash-table.h"
+#include "src/objects/js-objects.h"
+
+namespace v8 {
+namespace internal {
+
+enum class FunctionStatus { kPrepareForOptimize, kMarkForOptimize };
+
+void PendingOptimizationTable::PreparedForOptimization(
+ Isolate* isolate, Handle<JSFunction> function) {
+ DCHECK(FLAG_testing_d8_test_runner);
+
+ Handle<ObjectHashTable> table =
+ isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
+ ? ObjectHashTable::New(isolate, 1)
+ : handle(ObjectHashTable::cast(
+ isolate->heap()->pending_optimize_for_test_bytecode()),
+ isolate);
+ Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
+ handle(function->shared().GetBytecodeArray(), isolate),
+ handle(
+ Smi::FromInt(static_cast<int>(FunctionStatus::kPrepareForOptimize)),
+ isolate),
+ AllocationType::kYoung);
+ table =
+ ObjectHashTable::Put(table, handle(function->shared(), isolate), tuple);
+ isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+}
+
+void PendingOptimizationTable::MarkedForOptimization(
+ Isolate* isolate, Handle<JSFunction> function) {
+ DCHECK(FLAG_testing_d8_test_runner);
+
+ Handle<Object> table =
+ handle(isolate->heap()->pending_optimize_for_test_bytecode(), isolate);
+ Handle<Object> entry =
+ table->IsUndefined()
+ ? handle(ReadOnlyRoots(isolate).the_hole_value(), isolate)
+ : handle(Handle<ObjectHashTable>::cast(table)->Lookup(
+ handle(function->shared(), isolate)),
+ isolate);
+ if (entry->IsTheHole()) {
+ PrintF("Error: Function ");
+ function->ShortPrint();
+ PrintF(
+ " should be prepared for optimization with "
+ "%%PrepareFunctionForOptimize before "
+ "%%OptimizeFunctionOnNextCall / %%OptimizeOSR ");
+ UNREACHABLE();
+ }
+
+ DCHECK(entry->IsTuple2());
+ Handle<Tuple2>::cast(entry)->set_value2(
+ Smi::FromInt(static_cast<int>(FunctionStatus::kMarkForOptimize)));
+ table = ObjectHashTable::Put(Handle<ObjectHashTable>::cast(table),
+ handle(function->shared(), isolate), entry);
+ isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+}
+
+void PendingOptimizationTable::FunctionWasOptimized(
+ Isolate* isolate, Handle<JSFunction> function) {
+ DCHECK(FLAG_testing_d8_test_runner);
+
+ if (isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
+ return;
+ }
+
+ Handle<ObjectHashTable> table =
+ handle(ObjectHashTable::cast(
+ isolate->heap()->pending_optimize_for_test_bytecode()),
+ isolate);
+ Handle<Object> value(table->Lookup(handle(function->shared(), isolate)),
+ isolate);
+ // Remove only if we have already seen %OptimizeFunctionOnNextCall. If it is
+ // optimized for other reasons, still keep holding the bytecode since we may
+ // optimize it later.
+ if (!value->IsTheHole() &&
+ Smi::cast(Handle<Tuple2>::cast(value)->value2()).value() ==
+ static_cast<int>(FunctionStatus::kMarkForOptimize)) {
+ bool was_present;
+ table = table->Remove(isolate, table, handle(function->shared(), isolate),
+ &was_present);
+ DCHECK(was_present);
+ isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/codegen/pending-optimization-table.h b/deps/v8/src/codegen/pending-optimization-table.h
new file mode 100644
index 0000000000..2a2782d17a
--- /dev/null
+++ b/deps/v8/src/codegen/pending-optimization-table.h
@@ -0,0 +1,44 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
+#define V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// This class adds the functionality to properly test the optimized code. This
+// is only for use in tests. All these functions should only be called when
+// testing_d8_flag_for_tests is set.
+class PendingOptimizationTable {
+ public:
+ // This function should be called before we mark the function for
+ // optimization. Calling this function ensures that |function| is compiled and
+ // has a feedback vector allocated. This also holds on to the bytecode
+ // strongly in pending optimization table preventing the bytecode to be
+ // flushed.
+ static void PreparedForOptimization(Isolate* isolate,
+ Handle<JSFunction> function);
+
+ // This function should be called when the function is marked for optimization
+ // via the intrinsics. This will update the state of the bytecode array in the
+ // pending optimization table, so that the entry can be removed once the
+ // function is optimized. If the function is already optimized it removes the
+ // entry from the table.
+ static void MarkedForOptimization(Isolate* isolate,
+ Handle<JSFunction> function);
+
+ // This function should be called once the function is optimized. If there is
+ // an entry in the pending optimization table and it is marked for removal
+ // then this function removes the entry from pending optimization table.
+ static void FunctionWasOptimized(Isolate* isolate,
+ Handle<JSFunction> function);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_PENDING_OPTIMIZATION_TABLE_H_
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 3241f821f9..2a638af070 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -224,6 +224,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
+ scratch_register_list_(ip.bit()),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -1490,8 +1491,7 @@ void Assembler::mtfprwa(DoubleRegister dst, Register src) {
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h .
-void Assembler::stop(const char* msg, Condition cond, int32_t code,
- CRegister cr) {
+void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
if (cond != al) {
Label skip;
b(NegateCondition(cond), &skip, cr);
@@ -1948,6 +1948,24 @@ PatchingAssembler::~PatchingAssembler() {
DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : assembler_(assembler),
+ old_available_(*assembler->GetScratchRegisterList()) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *assembler_->GetScratchRegisterList() = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ RegList* available = assembler_->GetScratchRegisterList();
+ DCHECK_NOT_NULL(available);
+ DCHECK_NE(*available, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
+ Register reg = Register::from_code(index);
+ *available &= ~reg.bit();
+ return reg;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 2c4225849f..dee264a75c 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -437,6 +437,7 @@ class Assembler : public AssemblerBase {
PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS)
#undef DECLARE_PPC_XX3_INSTRUCTIONS
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
// Code generation
@@ -841,8 +842,8 @@ class Assembler : public AssemblerBase {
void function_descriptor();
// Exception-generating instructions and debugging support
- void stop(const char* msg, Condition cond = al,
- int32_t code = kDefaultStopCode, CRegister cr = cr7);
+ void stop(Condition cond = al, int32_t code = kDefaultStopCode,
+ CRegister cr = cr7);
void bkpt(uint32_t imm16); // v5 and above
@@ -1182,6 +1183,9 @@ class Assembler : public AssemblerBase {
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
std::vector<DeferredRelocInfo> relocations_;
+ // Scratch registers available for use by the Assembler.
+ RegList scratch_register_list_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Optimizable cmpi information.
@@ -1297,6 +1301,7 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
+ friend class UseScratchRegisterScope;
};
class EnsureSpace {
@@ -1311,6 +1316,24 @@ class PatchingAssembler : public Assembler {
~PatchingAssembler();
};
+class V8_EXPORT_PRIVATE UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+
+ // Check if we have registers available to acquire.
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+
+ private:
+ friend class Assembler;
+ friend class TurboAssembler;
+
+ Assembler* assembler_;
+ RegList old_available_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/ppc/code-stubs-ppc.cc b/deps/v8/src/codegen/ppc/code-stubs-ppc.cc
deleted file mode 100644
index 937c745662..0000000000
--- a/deps/v8/src/codegen/ppc/code-stubs-ppc.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/api/api-arguments-inl.h"
-#include "src/base/bits.h"
-#include "src/code-stubs.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/frame-constants.h"
-#include "src/execution/frames.h"
-#include "src/execution/isolate.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/init/bootstrapper.h"
-#include "src/numbers/double.h"
-#include "src/objects/api-callbacks.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 62f0fde3b8..8ab3e5b83b 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -419,7 +419,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
andi(r0, dst, Operand(kPointerSize - 1));
beq(&ok, cr0);
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -1721,15 +1721,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -2454,27 +2454,24 @@ void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
DCHECK_EQ(mem.rb(), no_reg);
int offset = mem.offset();
+ int misaligned = (offset & 3);
+ int adj = (offset & 3) - 4;
+ int alignedOffset = (offset & ~3) + 4;
- if (!is_int16(offset)) {
+ if (!is_int16(offset) || (misaligned && !is_int16(alignedOffset))) {
/* cannot use d-form */
- DCHECK_NE(scratch, no_reg);
mov(scratch, Operand(offset));
LoadPX(dst, MemOperand(mem.ra(), scratch));
} else {
-#if V8_TARGET_ARCH_PPC64
- int misaligned = (offset & 3);
if (misaligned) {
// adjust base to conform to offset alignment requirements
// Todo: enhance to use scratch if dst is unsuitable
- DCHECK(dst != r0);
- addi(dst, mem.ra(), Operand((offset & 3) - 4));
- ld(dst, MemOperand(dst, (offset & ~3) + 4));
+ DCHECK_NE(dst, r0);
+ addi(dst, mem.ra(), Operand(adj));
+ ld(dst, MemOperand(dst, alignedOffset));
} else {
ld(dst, mem);
}
-#else
- lwz(dst, mem);
-#endif
}
}
@@ -2934,20 +2931,24 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
blt(dest);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
- ShiftRightArithImm(builtin_pointer, builtin_pointer,
+ ShiftRightArithImm(builtin_index, builtin_index,
kSmiShift - kSystemPointerSizeLog2);
- addi(builtin_pointer, builtin_pointer,
+ addi(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
- LoadPX(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
- Call(builtin_pointer);
+ LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index ae24ef9a55..6249c405e3 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -408,11 +408,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Condition cond = al);
void Call(Label* target);
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
- void CallBuiltinPointer(Register builtin_pointer) override;
+ void CallBuiltinByIndex(Register builtin_index) override;
void CallForDeoptimization(Address target, int deopt_id);
// Emit code to discard a non-negative number of pointer-sized elements
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index dbfdc9a32a..6776626a23 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -351,7 +351,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
- : AssemblerBase(options, std::move(buffer)) {
+ : AssemblerBase(options, std::move(buffer)),
+ scratch_register_list_(ip.bit()) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
last_bound_pos_ = 0;
relocations_.reserve(128);
@@ -636,8 +637,7 @@ void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-s390.h .
-void Assembler::stop(const char* msg, Condition cond, int32_t code,
- CRegister cr) {
+void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
if (cond != al) {
Label skip;
b(NegateCondition(cond), &skip, Label::kNear);
@@ -831,6 +831,23 @@ void Assembler::EmitRelocations() {
}
}
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : assembler_(assembler),
+ old_available_(*assembler->GetScratchRegisterList()) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *assembler_->GetScratchRegisterList() = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ RegList* available = assembler_->GetScratchRegisterList();
+ DCHECK_NOT_NULL(available);
+ DCHECK_NE(*available, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
+ Register reg = Register::from_code(index);
+ *available &= ~reg.bit();
+ return reg;
+}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/codegen/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index e22c037a31..0653e79b67 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -307,7 +307,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
static constexpr int kSpecialTargetSize = 0;
-
// Number of bytes for instructions used to store pointer sized constant.
#if V8_TARGET_ARCH_S390X
static constexpr int kBytesForPtrConstant = 12; // IIHF + IILF
@@ -315,6 +314,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kBytesForPtrConstant = 6; // IILF
#endif
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+
// ---------------------------------------------------------------------------
// Code generation
@@ -1261,8 +1262,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void larl(Register r, Label* l);
// Exception-generating instructions and debugging support
- void stop(const char* msg, Condition cond = al,
- int32_t code = kDefaultStopCode, CRegister cr = cr7);
+ void stop(Condition cond = al, int32_t code = kDefaultStopCode,
+ CRegister cr = cr7);
void bkpt(uint32_t imm16); // v5 and above
@@ -1376,6 +1377,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RelocInfoWriter reloc_info_writer;
std::vector<DeferredRelocInfo> relocations_;
+ // Scratch registers available for use by the Assembler.
+ RegList scratch_register_list_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@@ -1455,6 +1459,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
friend class RegExpMacroAssemblerS390;
friend class RelocInfo;
friend class EnsureSpace;
+ friend class UseScratchRegisterScope;
};
class EnsureSpace {
@@ -1462,6 +1467,24 @@ class EnsureSpace {
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
+class V8_EXPORT_PRIVATE UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+
+ // Check if we have registers available to acquire.
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+
+ private:
+ friend class Assembler;
+ friend class TurboAssembler;
+
+ Assembler* assembler_;
+ RegList old_available_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen/s390/code-stubs-s390.cc b/deps/v8/src/codegen/s390/code-stubs-s390.cc
deleted file mode 100644
index f85c309943..0000000000
--- a/deps/v8/src/codegen/s390/code-stubs-s390.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/api/api-arguments-inl.h"
-#include "src/base/bits.h"
-#include "src/code-stubs.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/execution/frame-constants.h"
-#include "src/execution/frames.h"
-#include "src/execution/isolate.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/init/bootstrapper.h"
-#include "src/objects/api-callbacks.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index ff94fa839e..f6c2314a84 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -440,7 +440,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
AndP(r0, dst, Operand(kPointerSize - 1));
beq(&ok, Label::kNear);
- stop("Unaligned cell in write barrier");
+ stop();
bind(&ok);
}
@@ -1670,15 +1670,15 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
- const char* msg = GetAbortReason(reason);
#ifdef DEBUG
+ const char* msg = GetAbortReason(reason);
RecordComment("Abort message: ");
RecordComment(msg);
#endif
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
- stop(msg);
+ stop();
return;
}
@@ -4332,20 +4332,24 @@ void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
blt(dest);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
- ShiftRightArithP(builtin_pointer, builtin_pointer,
+ ShiftRightArithP(builtin_index, builtin_index,
Operand(kSmiShift - kSystemPointerSizeLog2));
- AddP(builtin_pointer, builtin_pointer,
+ AddP(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
- LoadP(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
- Call(builtin_pointer);
+ LoadP(builtin_index, MemOperand(kRootRegister, builtin_index));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index ba870874c8..52f668d175 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -166,11 +166,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Label* target);
+ // Load the builtin given by the Smi in |builtin_index| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
- void CallBuiltinPointer(Register builtin_pointer) override;
+ void CallBuiltinByIndex(Register builtin_index) override;
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
diff --git a/deps/v8/src/codegen/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 066f0123fc..fccce1a7a6 100644
--- a/deps/v8/src/codegen/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -5,8 +5,8 @@
#ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_
#define V8_CODEGEN_SAFEPOINT_TABLE_H_
+#include "src/base/memory.h"
#include "src/common/assert-scope.h"
-#include "src/common/v8memory.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "src/zone/zone-chunk-list.h"
@@ -76,22 +76,23 @@ class SafepointTable {
unsigned GetPcOffset(unsigned index) const {
DCHECK(index < length_);
- return Memory<uint32_t>(GetPcOffsetLocation(index));
+ return base::Memory<uint32_t>(GetPcOffsetLocation(index));
}
int GetTrampolinePcOffset(unsigned index) const {
DCHECK(index < length_);
- return Memory<int>(GetTrampolineLocation(index));
+ return base::Memory<int>(GetTrampolineLocation(index));
}
unsigned find_return_pc(unsigned pc_offset);
SafepointEntry GetEntry(unsigned index) const {
DCHECK(index < length_);
- unsigned deopt_index = Memory<uint32_t>(GetEncodedInfoLocation(index));
- uint8_t* bits = &Memory<uint8_t>(entries_ + (index * entry_size_));
+ unsigned deopt_index =
+ base::Memory<uint32_t>(GetEncodedInfoLocation(index));
+ uint8_t* bits = &base::Memory<uint8_t>(entries_ + (index * entry_size_));
int trampoline_pc =
- has_deopt_ ? Memory<int>(GetTrampolineLocation(index)) : -1;
+ has_deopt_ ? base::Memory<int>(GetTrampolineLocation(index)) : -1;
return SafepointEntry(deopt_index, bits, trampoline_pc);
}
diff --git a/deps/v8/src/codegen/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index 6c0aa36b27..e10cc07571 100644
--- a/deps/v8/src/codegen/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -31,7 +31,7 @@ class MoreBit : public BitField8<bool, 7, 1> {};
class ValueBits : public BitField8<unsigned, 0, 7> {};
// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
-void AddAndSetEntry(PositionTableEntry& value,
+void AddAndSetEntry(PositionTableEntry& value, // NOLINT(runtime/references)
const PositionTableEntry& other) {
value.code_offset += other.code_offset;
value.source_position += other.source_position;
@@ -39,7 +39,7 @@ void AddAndSetEntry(PositionTableEntry& value,
}
// Helper: Subtract the offsets from 'other' from 'value'.
-void SubtractFromEntry(PositionTableEntry& value,
+void SubtractFromEntry(PositionTableEntry& value, // NOLINT(runtime/references)
const PositionTableEntry& other) {
value.code_offset -= other.code_offset;
value.source_position -= other.source_position;
@@ -47,7 +47,8 @@ void SubtractFromEntry(PositionTableEntry& value,
// Helper: Encode an integer.
template <typename T>
-void EncodeInt(std::vector<byte>& bytes, T value) {
+void EncodeInt(std::vector<byte>& bytes, // NOLINT(runtime/references)
+ T value) {
using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
static const int kShift = sizeof(T) * kBitsPerByte - 1;
@@ -65,7 +66,8 @@ void EncodeInt(std::vector<byte>& bytes, T value) {
}
// Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>& bytes, const PositionTableEntry& entry) {
+void EncodeEntry(std::vector<byte>& bytes, // NOLINT(runtime/references)
+ const PositionTableEntry& entry) {
// We only accept ascending code offsets.
DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
@@ -113,8 +115,9 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
}
#ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(std::vector<PositionTableEntry>& raw_entries,
- SourcePositionTableIterator& encoded) {
+void CheckTableEquals(
+ std::vector<PositionTableEntry>& raw_entries, // NOLINT(runtime/references)
+ SourcePositionTableIterator& encoded) { // NOLINT(runtime/references)
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
auto raw = raw_entries.begin();
diff --git a/deps/v8/src/codegen/tick-counter.cc b/deps/v8/src/codegen/tick-counter.cc
new file mode 100644
index 0000000000..2e72ae0e86
--- /dev/null
+++ b/deps/v8/src/codegen/tick-counter.cc
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/tick-counter.h"
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+void TickCounter::DoTick() {
+ ++ticks_;
+ // Magical number to detect performance bugs or compiler divergence.
+ // Selected as being roughly 10x of what's needed frequently.
+ constexpr size_t kMaxTicks = 100000000;
+ USE(kMaxTicks);
+ DCHECK_LT(ticks_, kMaxTicks);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/codegen/tick-counter.h b/deps/v8/src/codegen/tick-counter.h
new file mode 100644
index 0000000000..8d6c966bb0
--- /dev/null
+++ b/deps/v8/src/codegen/tick-counter.h
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_TICK_COUNTER_H_
+#define V8_CODEGEN_TICK_COUNTER_H_
+
+#include <cstddef>
+
+namespace v8 {
+namespace internal {
+
+// A deterministic correlate of time, used to detect performance or
+// divergence bugs in Turbofan. DoTick() should be called frequently
+// throughout the compilation.
+class TickCounter {
+ public:
+ void DoTick();
+ size_t CurrentTicks() const { return ticks_; }
+
+ private:
+ size_t ticks_ = 0;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_TICK_COUNTER_H_
diff --git a/deps/v8/src/codegen/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index afdef22fe7..2f058eda19 100644
--- a/deps/v8/src/codegen/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -50,9 +50,9 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
void set_has_frame(bool v) { has_frame_ = v; }
bool has_frame() const { return has_frame_; }
- // Calls the given builtin. If builtins are embedded, the trampoline Code
- // object on the heap is not used.
- virtual void CallBuiltinPointer(Register builtin_pointer) = 0;
+ // Calls the builtin given by the Smi in |builtin_index|. If builtins are
+ // embedded,
+ // the trampoline Code object on the heap is not used.
+ virtual void CallBuiltinByIndex(Register builtin_index) = 0;
// Calls/jumps to the given Code object. If builtins are embedded, the
// trampoline Code object on the heap is not used.
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 67cf648c04..f5d0c0ffcf 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -8,7 +8,7 @@
#include "src/codegen/x64/assembler-x64.h"
#include "src/base/cpu.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
@@ -246,7 +246,7 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
}
Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
- return GetCompressedEmbeddedObject(ReadUnalignedValue<int32_t>(pc));
+ return GetEmbeddedObject(ReadUnalignedValue<uint32_t>(pc));
}
Address Assembler::runtime_entry_at(Address pc) {
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 3236b0f52c..1d28f1d45d 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -78,6 +78,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
+ if (cpu.has_sse42() && FLAG_enable_sse4_2) supported_ |= 1u << SSE4_2;
if (cpu.has_sse41() && FLAG_enable_sse4_1) {
supported_ |= 1u << SSE4_1;
supported_ |= 1u << SSSE3;
@@ -1257,6 +1258,13 @@ void Assembler::emit_cmpxchg(Operand dst, Register src, int size) {
emit_operand(src, dst);
}
+void Assembler::mfence() {
+ EnsureSpace ensure_space(this);
+ emit(0x0F);
+ emit(0xAE);
+ emit(0xF0);
+}
+
void Assembler::lfence() {
EnsureSpace ensure_space(this);
emit(0x0F);
@@ -1512,19 +1520,20 @@ void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
emitl(code_target_index);
}
-void Assembler::jmp_rel(int offset) {
+void Assembler::jmp_rel(int32_t offset) {
EnsureSpace ensure_space(this);
- const int short_size = sizeof(int8_t);
- const int long_size = sizeof(int32_t);
- --offset; // This is how jumps are specified on x64.
- if (is_int8(offset - short_size) && !predictable_code_size()) {
- // 1110 1011 #8-bit disp.
+ // The offset is encoded relative to the next instruction.
+ constexpr int32_t kShortJmpDisplacement = 1 + sizeof(int8_t);
+ constexpr int32_t kNearJmpDisplacement = 1 + sizeof(int32_t);
+ DCHECK_LE(std::numeric_limits<int32_t>::min() + kNearJmpDisplacement, offset);
+ if (is_int8(offset - kShortJmpDisplacement) && !predictable_code_size()) {
+ // 0xEB #8-bit disp.
emit(0xEB);
- emit((offset - short_size) & 0xFF);
+ emit(offset - kShortJmpDisplacement);
} else {
- // 1110 1001 #32-bit disp.
+ // 0xE9 #32-bit disp.
emit(0xE9);
- emitl(offset - long_size);
+ emitl(offset - kNearJmpDisplacement);
}
}
@@ -2005,84 +2014,37 @@ void Assembler::emit_not(Operand dst, int size) {
}
void Assembler::Nop(int n) {
+ DCHECK_LE(0, n);
// The recommended muti-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
//
- // Length Assembly Byte Sequence
- // 2 bytes 66 NOP 66 90H
- // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H
- // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
- // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
- // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
- // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
- // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
- // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
- // 00000000H] 00H
-
- EnsureSpace ensure_space(this);
- while (n > 0) {
- switch (n) {
- case 2:
- emit(0x66);
- V8_FALLTHROUGH;
- case 1:
- emit(0x90);
- return;
- case 3:
- emit(0x0F);
- emit(0x1F);
- emit(0x00);
- return;
- case 4:
- emit(0x0F);
- emit(0x1F);
- emit(0x40);
- emit(0x00);
- return;
- case 6:
- emit(0x66);
- V8_FALLTHROUGH;
- case 5:
- emit(0x0F);
- emit(0x1F);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 7:
- emit(0x0F);
- emit(0x1F);
- emit(0x80);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- default:
- case 11:
- emit(0x66);
- n--;
- V8_FALLTHROUGH;
- case 10:
- emit(0x66);
- n--;
- V8_FALLTHROUGH;
- case 9:
- emit(0x66);
- n--;
- V8_FALLTHROUGH;
- case 8:
- emit(0x0F);
- emit(0x1F);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- n -= 8;
- }
- }
+ // Len Assembly Byte Sequence
+ // 2 66 NOP 66 90H
+ // 3 NOP DWORD ptr [EAX] 0F 1F 00H
+ // 4 NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
+ // 5 NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
+ // 6 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
+ // 7 NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
+ // 8 NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
+ // 9 66 NOP DWORD ptr [EAX + EAX*1 + 00000000H] 66 0F 1F 84 00 00 00 00 00H
+
+ constexpr const char* kNopSequences =
+ "\x66\x90" // length 1 (@1) / 2 (@0)
+ "\x0F\x1F\x00" // length 3 (@2)
+ "\x0F\x1F\x40\x00" // length 4 (@5)
+ "\x66\x0F\x1F\x44\x00\x00" // length 5 (@10) / 6 (@9)
+ "\x0F\x1F\x80\x00\x00\x00\x00" // length 7 (@15)
+ "\x66\x0F\x1F\x84\x00\x00\x00\x00\x00"; // length 8 (@23) / 9 (@22)
+ constexpr int8_t kNopOffsets[10] = {0, 1, 0, 2, 5, 10, 9, 15, 23, 22};
+
+ do {
+ EnsureSpace ensure_space(this);
+ int nop_bytes = std::min(n, 9);
+ const char* sequence = kNopSequences + kNopOffsets[nop_bytes];
+ memcpy(pc_, sequence, nop_bytes);
+ pc_ += nop_bytes;
+ n -= nop_bytes;
+ } while (n);
}
void Assembler::popq(Register dst) {
@@ -2883,6 +2845,18 @@ void Assembler::movd(Register dst, XMMRegister src) {
}
void Assembler::movq(XMMRegister dst, Register src) {
+ // Mixing AVX and non-AVX is expensive, catch those cases
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6E);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movq(XMMRegister dst, Operand src) {
+ // Mixing AVX and non-AVX is expensive, catch those cases
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2893,6 +2867,7 @@ void Assembler::movq(XMMRegister dst, Register src) {
}
void Assembler::movq(Register dst, XMMRegister src) {
+ // Mixing AVX and non-AVX is expensive, catch those cases
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2903,6 +2878,7 @@ void Assembler::movq(Register dst, XMMRegister src) {
}
void Assembler::movq(XMMRegister dst, XMMRegister src) {
+ // Mixing AVX and non-AVX is expensive, catch those cases
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
if (dst.low_bits() == 4) {
@@ -3068,6 +3044,42 @@ void Assembler::pextrd(Operand dst, XMMRegister src, int8_t imm8) {
emit(imm8);
}
+void Assembler::pextrq(Register dst, XMMRegister src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x16);
+ emit_sse_operand(src, dst);
+ emit(imm8);
+}
+
+void Assembler::pinsrq(XMMRegister dst, Register src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x22);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+void Assembler::pinsrq(XMMRegister dst, Operand src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x22);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -4135,6 +4147,22 @@ void Assembler::vmovq(Register dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovdqu(XMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::vmovdqu(Operand src, XMMRegister dst) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL128, kF3, k0F, kWIG);
+ emit(0x7F);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, SIMDPrefix pp, LeadingOpcode m,
VexW w) {
@@ -4654,6 +4682,30 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
emit_sse_operand(dst, src);
}
+void Assembler::sse4_2_instr(XMMRegister dst, XMMRegister src, byte prefix,
+ byte escape1, byte escape2, byte opcode) {
+ DCHECK(IsEnabled(SSE4_2));
+ EnsureSpace ensure_space(this);
+ emit(prefix);
+ emit_optional_rex_32(dst, src);
+ emit(escape1);
+ emit(escape2);
+ emit(opcode);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::sse4_2_instr(XMMRegister dst, Operand src, byte prefix,
+ byte escape1, byte escape2, byte opcode) {
+ DCHECK(IsEnabled(SSE4_2));
+ EnsureSpace ensure_space(this);
+ emit(prefix);
+ emit_optional_rex_32(dst, src);
+ emit(escape1);
+ emit(escape2);
+ emit(opcode);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::lddqu(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index dc6acb67f4..acb4fce82c 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -952,6 +952,23 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
#undef DECLARE_SSE4_INSTRUCTION
+ // SSE4.2
+ void sse4_2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+ void sse4_2_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
+ byte escape2, byte opcode);
+#define DECLARE_SSE4_2_INSTRUCTION(instruction, prefix, escape1, escape2, \
+ opcode) \
+ void instruction(XMMRegister dst, XMMRegister src) { \
+ sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+ } \
+ void instruction(XMMRegister dst, Operand src) { \
+ sse4_2_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
+ }
+
+ SSE4_2_INSTRUCTION_LIST(DECLARE_SSE4_2_INSTRUCTION)
+#undef DECLARE_SSE4_2_INSTRUCTION
+
#define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2, \
opcode) \
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
@@ -969,6 +986,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movd(XMMRegister dst, Operand src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
+ void movq(XMMRegister dst, Operand src);
void movq(Register dst, XMMRegister src);
void movq(XMMRegister dst, XMMRegister src);
@@ -1068,12 +1086,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pextrw(Operand dst, XMMRegister src, int8_t imm8);
void pextrd(Register dst, XMMRegister src, int8_t imm8);
void pextrd(Operand dst, XMMRegister src, int8_t imm8);
+ void pextrq(Register dst, XMMRegister src, int8_t imm8);
void pinsrb(XMMRegister dst, Register src, int8_t imm8);
void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
void pinsrw(XMMRegister dst, Register src, int8_t imm8);
void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
+ void pinsrq(XMMRegister dst, Register src, int8_t imm8);
+ void pinsrq(XMMRegister dst, Operand src, int8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1284,6 +1305,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
+ void vmovdqu(XMMRegister dst, Operand src);
+ void vmovdqu(Operand dst, XMMRegister src);
#define AVX_SP_3(instr, opcode) \
AVX_S_3(instr, opcode) \
@@ -1723,6 +1746,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void rorxl(Register dst, Register src, byte imm8);
void rorxl(Register dst, Operand src, byte imm8);
+ void mfence();
void lfence();
void pause();
diff --git a/deps/v8/src/codegen/x64/constants-x64.h b/deps/v8/src/codegen/x64/constants-x64.h
index 0e43b05034..775abecd9f 100644
--- a/deps/v8/src/codegen/x64/constants-x64.h
+++ b/deps/v8/src/codegen/x64/constants-x64.h
@@ -12,7 +12,8 @@ namespace internal {
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
-constexpr int kRootRegisterBias = 128;
+// TODO(ishell): Choose best value for ptr-compr.
+constexpr int kRootRegisterBias = kSystemPointerSize == kTaggedSize ? 128 : 0;
constexpr size_t kMaxPCRelativeCodeRangeInMB = 2048;
} // namespace internal
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 493c711009..f13811b1ae 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -317,15 +317,14 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressRegisterAnyTagged(Register destination,
Register scratch) {
- if (kUseBranchlessPtrDecompression) {
+ if (kUseBranchlessPtrDecompressionInGeneratedCode) {
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
Register masked_root = scratch;
- movl(masked_root, destination);
- andl(masked_root, Immediate(kSmiTagMask));
- negq(masked_root);
- andq(masked_root, kRootRegister);
+ xorq(masked_root, masked_root);
+ Condition smi = CheckSmi(destination);
+ cmovq(NegateCondition(smi), masked_root, kRootRegister);
// Now this add operation will either leave the value unchanged if it is
// a smi or add the isolate root if it is a heap object.
addq(destination, masked_root);
@@ -917,7 +916,7 @@ void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
orq(kScratchRegister, Immediate(1));
bind(&msb_not_set);
Cvtqsi2ss(dst, kScratchRegister);
- addss(dst, dst);
+ Addss(dst, dst);
bind(&done);
}
@@ -941,7 +940,7 @@ void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
orq(kScratchRegister, Immediate(1));
bind(&msb_not_set);
Cvtqsi2sd(dst, kScratchRegister);
- addsd(dst, dst);
+ Addsd(dst, dst);
bind(&done);
}
@@ -1042,11 +1041,11 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
// and convert it again to see if it is within the uint64 range.
if (is_double) {
tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
- tasm->addsd(kScratchDoubleReg, src);
+ tasm->Addsd(kScratchDoubleReg, src);
tasm->Cvttsd2siq(dst, kScratchDoubleReg);
} else {
tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
- tasm->addss(kScratchDoubleReg, src);
+ tasm->Addss(kScratchDoubleReg, src);
tasm->Cvttss2siq(dst, kScratchDoubleReg);
}
tasm->testq(dst, dst);
@@ -1468,8 +1467,9 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
}
}
if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- int compressed_embedded_object_index = AddCompressedEmbeddedObject(object);
- movl(result, Immediate(compressed_embedded_object_index, rmode));
+ EmbeddedObjectIndex index = AddEmbeddedObject(object);
+ DCHECK(is_uint32(index));
+ movl(result, Immediate(static_cast<int>(index), rmode));
} else {
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
movq(result, Immediate64(object.address(), rmode));
@@ -1607,29 +1607,33 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
call(code_object, rmode);
}
-void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
+Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
+ // The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below (we use times_4 instead
// of times_8 since smis are already shifted by one).
- Call(Operand(kRootRegister, builtin_pointer, times_4,
- IsolateData::builtin_entry_table_offset()));
+ return Operand(kRootRegister, builtin_index, times_4,
+ IsolateData::builtin_entry_table_offset());
#else // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- // The builtin_pointer register contains the builtin index as a Smi.
- SmiUntag(builtin_pointer, builtin_pointer);
- Call(Operand(kRootRegister, builtin_pointer, times_8,
- IsolateData::builtin_entry_table_offset()));
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ return Operand(kRootRegister, builtin_index, times_8,
+ IsolateData::builtin_entry_table_offset());
#endif // defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
}
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ Call(EntryFromBuiltinIndexAsOperand(builtin_index));
+}
+
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@@ -1767,6 +1771,46 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
}
}
+void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsllq(dst, dst, imm8);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ psllq(dst, imm8);
+ }
+}
+
+void TurboAssembler::Psrlq(XMMRegister dst, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsrlq(dst, dst, imm8);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ psrlq(dst, imm8);
+ }
+}
+
+void TurboAssembler::Pslld(XMMRegister dst, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpslld(dst, dst, imm8);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ pslld(dst, imm8);
+ }
+}
+
+void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsrld(dst, dst, imm8);
+ } else {
+ DCHECK(!IsEnabled(AVX));
+ psrld(dst, imm8);
+ }
+}
+
void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index a5b8e60ec5..139690bb8d 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
template <typename Dst, typename... Args>
struct AvxHelper {
Assembler* assm;
- // Call an method where the AVX version expects the dst argument to be
+ // Call a method where the AVX version expects the dst argument to be
// duplicated.
template <void (Assembler::*avx)(Dst, Dst, Args...),
void (Assembler::*no_avx)(Dst, Args...)>
@@ -93,7 +93,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
}
- // Call an method where the AVX version expects no duplicated dst argument.
+ // Call a method where the AVX version expects no duplicated dst argument.
template <void (Assembler::*avx)(Dst, Args...),
void (Assembler::*no_avx)(Dst, Args...)>
void emit(Dst dst, Args... args) {
@@ -127,11 +127,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Movmskpd, movmskpd)
AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
+ AVX_OP(Movdqu, movdqu)
AVX_OP(Pcmpeqd, pcmpeqd)
- AVX_OP(Pslld, pslld)
- AVX_OP(Psllq, psllq)
- AVX_OP(Psrld, psrld)
- AVX_OP(Psrlq, psrlq)
+ AVX_OP(Addss, addss)
AVX_OP(Addsd, addsd)
AVX_OP(Mulsd, mulsd)
AVX_OP(Andps, andps)
@@ -344,7 +342,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(ExternalReference ext);
void Call(Label* target) { call(target); }
- void CallBuiltinPointer(Register builtin_pointer) override;
+ Operand EntryFromBuiltinIndexAsOperand(Register builtin_index);
+ void CallBuiltinByIndex(Register builtin_index) override;
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
@@ -368,6 +367,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, Operand src, int8_t imm8);
+ void Psllq(XMMRegister dst, byte imm8);
+ void Psrlq(XMMRegister dst, byte imm8);
+ void Pslld(XMMRegister dst, byte imm8);
+ void Psrld(XMMRegister dst, byte imm8);
+
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index ee20483cfe..56618d20e0 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -21,6 +21,7 @@
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \
+ V(paddq, 66, 0F, D4) \
V(paddsb, 66, 0F, EC) \
V(paddsw, 66, 0F, ED) \
V(paddusb, 66, 0F, DC) \
@@ -46,6 +47,7 @@
V(psubb, 66, 0F, F8) \
V(psubw, 66, 0F, F9) \
V(psubd, 66, 0F, FA) \
+ V(psubq, 66, 0F, FB) \
V(psubsb, 66, 0F, E8) \
V(psubsw, 66, 0F, E9) \
V(psubusb, 66, 0F, D8) \
@@ -66,6 +68,7 @@
V(psignd, 66, 0F, 38, 0A)
#define SSE4_INSTRUCTION_LIST(V) \
+ V(pcmpeqq, 66, 0F, 38, 29) \
V(ptest, 66, 0F, 38, 17) \
V(pmovsxbw, 66, 0F, 38, 20) \
V(pmovsxwd, 66, 0F, 38, 23) \
@@ -82,4 +85,6 @@
V(pmaxud, 66, 0F, 38, 3F) \
V(pmulld, 66, 0F, 38, 40)
+#define SSE4_2_INSTRUCTION_LIST(V) V(pcmpgtq, 66, 0F, 38, 37)
+
#endif // V8_CODEGEN_X64_SSE_INSTR_H_
diff --git a/deps/v8/src/common/OWNERS b/deps/v8/src/common/OWNERS
new file mode 100644
index 0000000000..3f9de7e204
--- /dev/null
+++ b/deps/v8/src/common/OWNERS
@@ -0,0 +1,3 @@
+file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 5d4b957e84..8d1bf5dfcc 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -212,15 +212,6 @@ constexpr size_t kReservedCodeRangePages = 0;
STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));
-// This macro is used for declaring and defining HeapObject getter methods that
-// are a bit more efficient for the pointer compression case than the default
-// parameterless getters because isolate root doesn't have to be computed from
-// arbitrary field address but it comes "for free" instead.
-// These alternatives are always defined (in order to avoid #ifdef mess but
-// are not supposed to be used when pointer compression is not enabled.
-#define ROOT_VALUE isolate_for_root
-#define ROOT_PARAM Isolate* const ROOT_VALUE
-
#ifdef V8_COMPRESS_POINTERS
static_assert(
kSystemPointerSize == kInt64Size,
@@ -234,11 +225,6 @@ constexpr int kTaggedSizeLog2 = 2;
using Tagged_t = int32_t;
using AtomicTagged_t = base::Atomic32;
-#define DEFINE_ROOT_VALUE(isolate) ROOT_PARAM = isolate
-#define WITH_ROOT_PARAM(...) ROOT_PARAM, ##__VA_ARGS__
-#define WITH_ROOT_VALUE(...) ROOT_VALUE, ##__VA_ARGS__
-#define WITH_ROOT(isolate_for_root, ...) isolate_for_root, ##__VA_ARGS__
-
#else
constexpr int kTaggedSize = kSystemPointerSize;
@@ -249,16 +235,12 @@ constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
using Tagged_t = Address;
using AtomicTagged_t = base::AtomicWord;
-#define DEFINE_ROOT_VALUE(isolate)
-#define WITH_ROOT_PARAM(...) __VA_ARGS__
-#define WITH_ROOT_VALUE(...) __VA_ARGS__
-#define WITH_ROOT(isolate_for_root, ...) __VA_ARGS__
-
#endif // V8_COMPRESS_POINTERS
// Defines whether the branchless or branchful implementation of pointer
// decompression should be used.
-constexpr bool kUseBranchlessPtrDecompression = true;
+constexpr bool kUseBranchlessPtrDecompressionInRuntime = false;
+constexpr bool kUseBranchlessPtrDecompressionInGeneratedCode = false;
STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES);
@@ -667,7 +649,6 @@ struct SlotTraits;
template <>
struct SlotTraits<SlotLocation::kOffHeap> {
using TObjectSlot = FullObjectSlot;
- using TMapWordSlot = FullObjectSlot;
using TMaybeObjectSlot = FullMaybeObjectSlot;
using THeapObjectSlot = FullHeapObjectSlot;
};
@@ -678,12 +659,10 @@ template <>
struct SlotTraits<SlotLocation::kOnHeap> {
#ifdef V8_COMPRESS_POINTERS
using TObjectSlot = CompressedObjectSlot;
- using TMapWordSlot = CompressedMapWordSlot;
using TMaybeObjectSlot = CompressedMaybeObjectSlot;
using THeapObjectSlot = CompressedHeapObjectSlot;
#else
using TObjectSlot = FullObjectSlot;
- using TMapWordSlot = FullObjectSlot;
using TMaybeObjectSlot = FullMaybeObjectSlot;
using THeapObjectSlot = FullHeapObjectSlot;
#endif
@@ -693,10 +672,6 @@ struct SlotTraits<SlotLocation::kOnHeap> {
// holding Object value (smi or strong heap object).
using ObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TObjectSlot;
-// An MapWordSlot instance describes a kTaggedSize-sized on-heap field ("slot")
-// holding HeapObject (strong heap object) value or a forwarding pointer.
-using MapWordSlot = SlotTraits<SlotLocation::kOnHeap>::TMapWordSlot;
-
// A MaybeObjectSlot instance describes a kTaggedSize-sized on-heap field
// ("slot") holding MaybeObject (smi or weak heap object or strong heap object).
using MaybeObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TMaybeObjectSlot;
@@ -1193,7 +1168,7 @@ enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
-enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
+enum RequiresBrandCheckFlag : uint8_t { kNoBrandCheck, kRequiresBrandCheck };
enum class InterpreterPushArgsMode : unsigned {
kArrayFunction,
@@ -1554,6 +1529,12 @@ constexpr int kFunctionLiteralIdTopLevel = 0;
constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;
+// Opaque data type for identifying stack frames. Used extensively
+// by the debugger.
+// ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
+// has correct value range (see Issue 830 for more details).
+enum StackFrameId { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 };
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/message-template.h b/deps/v8/src/common/message-template.h
index ae88aa4411..fedbfa5a10 100644
--- a/deps/v8/src/execution/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EXECUTION_MESSAGE_TEMPLATE_H_
-#define V8_EXECUTION_MESSAGE_TEMPLATE_H_
+#ifndef V8_COMMON_MESSAGE_TEMPLATE_H_
+#define V8_COMMON_MESSAGE_TEMPLATE_H_
#include "src/base/logging.h"
@@ -90,6 +90,7 @@ namespace internal {
T(ImmutablePrototypeSet, \
"Immutable prototype object '%' cannot have their prototype set") \
T(ImportCallNotNewExpression, "Cannot use new with import") \
+ T(ImportOutsideModule, "Cannot use import statement outside a module") \
T(ImportMetaOutsideModule, "Cannot use 'import.meta' outside a module") \
T(ImportMissingSpecifier, "import() requires a specifier") \
T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
@@ -415,6 +416,7 @@ namespace internal {
"Read of private field % from an object which did not contain the field") \
T(InvalidPrivateFieldWrite, \
"Write of private field % to an object which did not contain the field") \
+ T(InvalidPrivateMethodWrite, "Private method '%' is not writable") \
T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
@@ -495,7 +497,7 @@ namespace internal {
T(UnexpectedSuper, "'super' keyword unexpected here") \
T(UnexpectedNewTarget, "new.target expression is not allowed here") \
T(UnexpectedTemplateString, "Unexpected template string") \
- T(UnexpectedToken, "Unexpected token %") \
+ T(UnexpectedToken, "Unexpected token '%'") \
T(UnexpectedTokenUnaryExponentiation, \
"Unary operator used immediately before exponentiation expression. " \
"Parenthesis must be used to disambiguate operator precedence") \
@@ -562,6 +564,8 @@ namespace internal {
T(TraceEventPhaseError, "Trace event phase must be a number.") \
T(TraceEventIDError, "Trace event id must be a number.") \
/* Weak refs */ \
+ T(WeakRefsUnregisterTokenMustBeObject, \
+ "unregisterToken ('%') must be an object") \
T(WeakRefsCleanupMustBeCallable, \
"FinalizationGroup: cleanup must be callable") \
T(WeakRefsRegisterTargetMustBeObject, \
@@ -576,16 +580,16 @@ enum class MessageTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
MESSAGE_TEMPLATES(TEMPLATE)
#undef TEMPLATE
- kLastMessage
+ kMessageCount
};
inline MessageTemplate MessageTemplateFromInt(int message_id) {
- DCHECK_LE(0, message_id);
- DCHECK_LT(message_id, static_cast<int>(MessageTemplate::kLastMessage));
+ DCHECK_LT(static_cast<unsigned>(message_id),
+ static_cast<unsigned>(MessageTemplate::kMessageCount));
return static_cast<MessageTemplate>(message_id);
}
} // namespace internal
} // namespace v8
-#endif // V8_EXECUTION_MESSAGE_TEMPLATE_H_
+#endif // V8_COMMON_MESSAGE_TEMPLATE_H_
diff --git a/deps/v8/src/common/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index fd0f97e904..00a79bb291 100644
--- a/deps/v8/src/common/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -25,8 +25,12 @@ V8_INLINE Address GetIsolateRoot(TOnHeapAddress on_heap_addr);
template <>
V8_INLINE Address GetIsolateRoot<Address>(Address on_heap_addr) {
+ // We subtract 1 here in order to let the compiler generate addition of 32-bit
+ // signed constant instead of 64-bit constant (the problem is that 2Gb looks
+ // like a negative 32-bit value). It's correct because we will never use
+ // leftmost address of V8 heap as |on_heap_addr|.
return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr +
- kPtrComprIsolateRootBias);
+ kPtrComprIsolateRootBias - 1);
}
template <>
@@ -34,17 +38,10 @@ V8_INLINE Address GetIsolateRoot<Isolate*>(Isolate* isolate) {
return isolate->isolate_root();
}
-template <>
-V8_INLINE Address GetIsolateRoot<const Isolate*>(const Isolate* isolate) {
- return isolate->isolate_root();
-}
-
// Decompresses smi value.
V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
- // Current compression scheme requires |raw_value| to be sign-extended
- // from int32_t to intptr_t.
- intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
- return static_cast<Address>(value);
+ // For runtime code the upper 32-bits of the Smi value do not matter.
+ return static_cast<Address>(raw_value);
}
// Decompresses weak or strong heap object pointer or forwarding pointer,
@@ -63,18 +60,18 @@ V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
- // Current compression scheme requires |raw_value| to be sign-extended
- // from int32_t to intptr_t.
- intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
- if (kUseBranchlessPtrDecompression) {
+ if (kUseBranchlessPtrDecompressionInRuntime) {
+ // Current compression scheme requires |raw_value| to be sign-extended
+ // from int32_t to intptr_t.
+ intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
// |root_mask| is 0 if the |value| was a smi or -1 otherwise.
Address root_mask = static_cast<Address>(-(value & kSmiTagMask));
Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr);
return root_or_zero + static_cast<Address>(value);
} else {
- return HAS_SMI_TAG(value)
- ? static_cast<Address>(value)
- : (GetIsolateRoot(on_heap_addr) + static_cast<Address>(value));
+ return HAS_SMI_TAG(raw_value)
+ ? DecompressTaggedSigned(raw_value)
+ : DecompressTaggedPointer(on_heap_addr, raw_value);
}
}
diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS
new file mode 100644
index 0000000000..9664a4857c
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/OWNERS
@@ -0,0 +1,7 @@
+ahaas@chromium.org
+jkummerow@chromium.org
+leszeks@chromium.org
+mstarzinger@chromium.org
+rmcilroy@chromium.org
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 39beced3f3..50e2af7129 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
@@ -19,6 +17,7 @@ per-file wasm-*=gdeepti@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
-per-file simd-scalar-lowering.*=aseemgarg@chromium.org
+per-file simd-scalar-lowering.*=bbudge@chromium.org
+per-file simd-scalar-lowering.*=gdeepti@chromium.org
# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/STYLE b/deps/v8/src/compiler/STYLE
deleted file mode 100644
index ae41e3f989..0000000000
--- a/deps/v8/src/compiler/STYLE
+++ /dev/null
@@ -1,29 +0,0 @@
-Compiler Coding Style
-=====================
-
-Coding style for the TurboFan compiler generally follows the Google C++ Style
-Guide and the Chromium Coding Style. The notes below are usually just extensions
-beyond what the Google style guide already says. If this document doesn't
-mention a rule, follow the Google C++ style.
-
-
-TODOs
------
-We use the following convention for putting TODOs into the code:
-
- * A TODO(turbofan) implies a performance improvement opportunity.
- * A TODO(name) implies an incomplete implementation.
-
-
-Use of C++11 auto keyword
--------------------------
-Use auto to avoid type names that are just clutter. Continue to use manifest
-type declarations when it helps readability, and never use auto for anything
-but local variables, in particular auto should only be used where it is obvious
-from context what the type is:
-
- for (auto block : x->blocks()) // clearly a Block of some kind
- for (auto instr : x->instructions()) // clearly an Instruction of some kind
-
- for (auto b : x->predecessors()) // less clear, better to make it explicit
- for (BasicBlock* b : x->predecessors()) // now clear
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 726a81a465..a369de4885 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -14,9 +14,9 @@
#include "src/objects/heap-number.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-generator.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/source-text-module.h"
namespace v8 {
namespace internal {
@@ -72,6 +72,26 @@ FieldAccess AccessBuilder::ForBigIntBitfield() {
}
// static
+FieldAccess AccessBuilder::ForBigIntOptionalPadding() {
+ DCHECK_EQ(FIELD_SIZE(BigInt::kOptionalPaddingOffset), 4);
+ FieldAccess access = {
+ kTaggedBase, BigInt::kOptionalPaddingOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kInt32, MachineType::Uint32(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForBigIntLeastSignificantDigit64() {
+ DCHECK_EQ(BigInt::SizeFor(1) - BigInt::SizeFor(0), 8);
+ FieldAccess access = {
+ kTaggedBase, BigInt::kDigitsOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kBigUint64, MachineType::Uint64(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
FieldAccess access = {
kTaggedBase, JSObject::kPropertiesOrHashOffset,
@@ -626,7 +646,7 @@ FieldAccess AccessBuilder::ForMapPrototype() {
// static
FieldAccess AccessBuilder::ForModuleRegularExports() {
FieldAccess access = {
- kTaggedBase, Module::kRegularExportsOffset,
+ kTaggedBase, SourceTextModule::kRegularExportsOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
@@ -636,7 +656,7 @@ FieldAccess AccessBuilder::ForModuleRegularExports() {
// static
FieldAccess AccessBuilder::ForModuleRegularImports() {
FieldAccess access = {
- kTaggedBase, Module::kRegularImportsOffset,
+ kTaggedBase, SourceTextModule::kRegularImportsOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
@@ -847,7 +867,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
// static
FieldAccess AccessBuilder::ForValue() {
FieldAccess access = {
- kTaggedBase, JSValue::kValueOffset,
+ kTaggedBase, JSPrimitiveWrapper::kValueOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::NonInternal(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index e38c487b1a..e3a17fe257 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -42,6 +42,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to BigInt's bit field.
static FieldAccess ForBigIntBitfield();
+ // Provides access to BigInt's 32 bit padding that is placed after the
+ // bitfield on 64 bit architectures without pointer compression. Do not use
+ // this on 32 bit architectures.
+ static FieldAccess ForBigIntOptionalPadding();
+
+ // Provides access to BigInt's least significant digit on 64 bit
+ // architectures. Do not use this on 32 bit architectures.
+ static FieldAccess ForBigIntLeastSignificantDigit64();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectPropertiesOrHash();
@@ -263,7 +272,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSStringIterator::index() field.
static FieldAccess ForJSStringIteratorIndex();
- // Provides access to JSValue::value() field.
+ // Provides access to JSPrimitiveWrapper::value() field.
static FieldAccess ForValue();
// Provides access to Cell::value() field.
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 713484f734..6fc9e8214e 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -8,6 +8,7 @@
#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/compilation-dependency.h"
#include "src/compiler/type-cache.h"
#include "src/ic/call-optimization.h"
#include "src/logging/counters.h"
@@ -78,7 +79,7 @@ PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone,
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies,
+ ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
@@ -90,7 +91,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
// static
PropertyAccessInfo PropertyAccessInfo::DataConstant(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies,
+ ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
@@ -156,8 +157,7 @@ PropertyAccessInfo::PropertyAccessInfo(
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map,
ZoneVector<Handle<Map>>&& receiver_maps,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies)
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
: kind_(kind),
receiver_maps_(receiver_maps),
unrecorded_dependencies_(std::move(unrecorded_dependencies)),
@@ -258,11 +258,6 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
}
-Handle<Cell> PropertyAccessInfo::export_cell() const {
- DCHECK_EQ(kModuleExport, kind_);
- return Handle<Cell>::cast(constant_);
-}
-
AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
CompilationDependencies* dependencies,
Zone* zone)
@@ -336,11 +331,10 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
MapRef map_ref(broker(), map);
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies(zone());
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
+ map_ref.SerializeOwnDescriptor(descriptor);
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
- map_ref.SerializeOwnDescriptor(descriptor);
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
descriptor));
@@ -360,19 +354,23 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
}
- map_ref.SerializeOwnDescriptor(descriptor);
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
descriptor));
if (descriptors_field_type->IsClass()) {
- unrecorded_dependencies.push_back(
- dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor));
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
field_type = Type::For(MapRef(broker(), map));
field_map = MaybeHandle<Map>(map);
}
+ } else {
+ CHECK(details_representation.IsTagged());
}
+ // TODO(turbofan): We may want to do this only depending on the use
+ // of the access info.
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor));
+
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
@@ -445,9 +443,6 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
holder.is_null());
DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null());
- if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) {
- return PropertyAccessInfo::Invalid(zone());
- }
}
if (access_mode == AccessMode::kLoad) {
Handle<Name> cached_property_name;
@@ -569,7 +564,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
if (map_prototype->map().is_deprecated()) {
// Try to migrate the prototype object so we don't embed the deprecated
// map into the optimized code.
- JSObject::TryMigrateInstance(map_prototype);
+ JSObject::TryMigrateInstance(isolate(), map_prototype);
}
map = handle(map_prototype->map(), isolate());
holder = map_prototype;
@@ -611,8 +606,7 @@ void AccessInfoFactory::ComputePropertyAccessInfos(
void PropertyAccessInfo::RecordDependencies(
CompilationDependencies* dependencies) {
- for (CompilationDependencies::Dependency const* d :
- unrecorded_dependencies_) {
+ for (CompilationDependency const* d : unrecorded_dependencies_) {
dependencies->RecordDependency(d);
}
unrecorded_dependencies_.clear();
@@ -648,6 +642,8 @@ void AccessInfoFactory::MergePropertyAccessInfos(
CHECK(!result->empty());
}
+Isolate* AccessInfoFactory::isolate() const { return broker()->isolate(); }
+
namespace {
Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
@@ -760,8 +756,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
MapRef transition_map_ref(broker(), transition_map);
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies(zone());
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
transition_map_ref.SerializeOwnDescriptor(number);
@@ -796,6 +791,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
unrecorded_dependencies.push_back(
dependencies()->TransitionDependencyOffTheRecord(
MapRef(broker(), transition_map)));
+ transition_map_ref.SerializeBackPointer(); // For BuildPropertyStore.
// Transitioning stores *may* store to const fields. The resulting
// DataConstant access infos can be distinguished from later, i.e. redundant,
// stores to the same constant field by the presence of a transition map.
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 3499069fc4..4c7c3611df 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -8,7 +8,6 @@
#include <iosfwd>
#include "src/codegen/machine-type.h"
-#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/types.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/field-index.h"
@@ -25,8 +24,10 @@ class Factory;
namespace compiler {
// Forward declarations.
+class CompilationDependencies;
+class CompilationDependency;
class ElementAccessFeedback;
-class Type;
+class JSHeapBroker;
class TypeCache;
std::ostream& operator<<(std::ostream&, AccessMode);
@@ -74,16 +75,14 @@ class PropertyAccessInfo final {
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies,
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
static PropertyAccessInfo DataConstant(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies,
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
@@ -113,9 +112,9 @@ class PropertyAccessInfo final {
Kind kind() const { return kind_; }
MaybeHandle<JSObject> holder() const {
- // This CHECK tries to protect against using the access info without
- // recording its dependencies first.
- CHECK(unrecorded_dependencies_.empty());
+ // TODO(neis): There was a CHECK here that tries to protect against
+ // using the access info without recording its dependencies first.
+ // Find a more suitable place for it.
return holder_;
}
MaybeHandle<Map> transition_map() const { return transition_map_; }
@@ -127,7 +126,6 @@ class PropertyAccessInfo final {
ZoneVector<Handle<Map>> const& receiver_maps() const {
return receiver_maps_;
}
- Handle<Cell> export_cell() const;
private:
explicit PropertyAccessInfo(Zone* zone);
@@ -136,17 +134,16 @@ class PropertyAccessInfo final {
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant,
ZoneVector<Handle<Map>>&& receiver_maps);
- PropertyAccessInfo(
- Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
- FieldIndex field_index, Representation field_representation,
- Type field_type, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& receiver_maps,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies);
+ PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map, FieldIndex field_index,
+ Representation field_representation, Type field_type,
+ MaybeHandle<Map> field_map,
+ ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<CompilationDependency const*>&& dependencies);
Kind kind_;
ZoneVector<Handle<Map>> receiver_maps_;
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies_;
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
@@ -215,7 +212,7 @@ class AccessInfoFactory final {
CompilationDependencies* dependencies() const { return dependencies_; }
JSHeapBroker* broker() const { return broker_; }
- Isolate* isolate() const { return broker()->isolate(); }
+ Isolate* isolate() const;
Zone* zone() const { return zone_; }
JSHeapBroker* const broker_;
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.cc b/deps/v8/src/compiler/add-type-assertions-reducer.cc
new file mode 100644
index 0000000000..59d2fe6820
--- /dev/null
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.cc
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/add-type-assertions-reducer.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AddTypeAssertionsReducer::AddTypeAssertionsReducer(Editor* editor,
+ JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ visited_(jsgraph->graph()->NodeCount(), zone) {}
+
+AddTypeAssertionsReducer::~AddTypeAssertionsReducer() = default;
+
+Reduction AddTypeAssertionsReducer::Reduce(Node* node) {
+ if (node->opcode() == IrOpcode::kAssertType ||
+ node->opcode() == IrOpcode::kPhi || !NodeProperties::IsTyped(node) ||
+ visited_.Get(node)) {
+ return NoChange();
+ }
+ visited_.Set(node, true);
+
+ Type type = NodeProperties::GetType(node);
+ if (!type.IsRange()) {
+ return NoChange();
+ }
+
+ Node* assertion = graph()->NewNode(simplified()->AssertType(type), node);
+ NodeProperties::SetType(assertion, type);
+
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ DCHECK(!user->IsDead());
+ if (NodeProperties::IsValueEdge(edge) && user != assertion) {
+ edge.UpdateTo(assertion);
+ Revisit(user);
+ }
+ }
+
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.h b/deps/v8/src/compiler/add-type-assertions-reducer.h
new file mode 100644
index 0000000000..36add040e1
--- /dev/null
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.h
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
+#define V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
+
+#include "src/common/globals.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ AddTypeAssertionsReducer(Editor* editor, JSGraph* jsgraph, Zone* zone);
+ ~AddTypeAssertionsReducer() final;
+
+ const char* reducer_name() const override {
+ return "AddTypeAssertionsReducer";
+ }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ JSGraph* const jsgraph_;
+ NodeAuxData<bool> visited_;
+
+ Graph* graph() { return jsgraph_->graph(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+ DISALLOW_COPY_AND_ASSIGN(AddTypeAssertionsReducer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index d93053c64b..88a9c52a33 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -130,6 +130,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
case Constant::kInt64:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(dcarney): loading RPO constants on arm.
case Constant::kRpoNumber:
@@ -308,9 +309,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode,
+ ArmOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -319,9 +320,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
}
}
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter& i, Register address) {
+void ComputePoisonedAddressForLoad(
+ CodeGenerator* codegen, InstructionCode opcode,
+ ArmOperandConverter& i, // NOLINT(runtime/references)
+ Register address) {
DCHECK_EQ(kMemoryAccessPoisoned,
static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
switch (AddressingModeField::decode(opcode)) {
@@ -711,8 +713,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -879,23 +881,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r1);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -1752,6 +1752,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArmDmbIsh: {
+ __ dmb(ISH);
+ break;
+ }
case kArmDsbIsb: {
__ dsb(SY);
__ isb(SY);
@@ -2588,6 +2592,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmax(NeonU32, scratch, src.low(), src.high());
__ vpmax(NeonU32, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x4AllTrue: {
@@ -2597,6 +2603,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU32, scratch, src.low(), src.high());
__ vpmin(NeonU32, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x8AnyTrue: {
@@ -2607,6 +2615,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmax(NeonU16, scratch, scratch, scratch);
__ vpmax(NeonU16, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x8AllTrue: {
@@ -2617,6 +2627,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU16, scratch, scratch, scratch);
__ vpmin(NeonU16, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x16AnyTrue: {
@@ -2631,6 +2643,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kDoubleRegZero is not changed, since it is 0.
__ vtst(Neon32, q_scratch, q_scratch, q_scratch);
__ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x16AllTrue: {
@@ -2642,6 +2656,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU8, scratch, scratch, scratch);
__ vpmin(NeonU8, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS8, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kWord32AtomicLoadInt8:
@@ -2901,7 +2917,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -2993,8 +3009,14 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(lr, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ } else {
+ __ Push(lr, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3025,8 +3047,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3074,7 +3096,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 722502edc7..165ca39f9d 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -126,6 +126,7 @@ namespace compiler {
V(ArmPush) \
V(ArmPoke) \
V(ArmPeek) \
+ V(ArmDmbIsh) \
V(ArmDsbIsb) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 211abd85b8..41d7b4055f 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -275,6 +275,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmStr:
case kArmPush:
case kArmPoke:
+ case kArmDmbIsh:
case kArmDsbIsb:
case kArmWord32AtomicPairStore:
case kArmWord32AtomicPairAdd:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 678d75ae5e..06aba4491a 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -441,9 +441,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -2020,6 +2020,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmDmbIsh, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 53864ad2e9..c71a63cc3d 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -224,6 +224,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
+ case Constant::kCompressedHeapObject: // Fall through.
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
case Constant::kDelayedStringConstant:
@@ -375,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -621,8 +622,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -793,19 +794,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0).is(x1));
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ Debug("kArchDebugAbort", 0, BREAK);
+ __ Debug("kArchAbortCSAAssert", 0, BREAK);
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
@@ -867,9 +866,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
__ StoreTaggedField(value, MemOperand(object, offset));
- if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(object, object);
- }
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
eq, ool->entry());
__ Bind(ool->exit());
@@ -1629,6 +1625,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64DmbIsh:
+ __ Dmb(InnerShareable, BarrierAll);
+ break;
case kArm64DsbIsb:
__ Dsb(FullSystem, BarrierAll);
__ Isb();
@@ -2200,6 +2199,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister temp = scope.AcquireV(format); \
__ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
__ Umov(i.OutputRegister32(), temp, 0); \
+ __ Cmp(i.OutputRegister32(), 0); \
+ __ Cset(i.OutputRegister32(), ne); \
break; \
}
SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
@@ -2399,12 +2400,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Adr(temp, &table);
__ Add(temp, temp, Operand(input, UXTW, 2));
__ Br(temp);
- __ StartBlockPools();
- __ Bind(&table);
- for (size_t index = 0; index < case_count; ++index) {
- __ B(GetLabel(i.InputRpo(index + 2)));
+ {
+ TurboAssembler::BlockPoolsScope block_pools(tasm(),
+ case_count * kInstrSize);
+ __ Bind(&table);
+ for (size_t index = 0; index < case_count; ++index) {
+ __ B(GetLabel(i.InputRpo(index + 2)));
+ }
}
- __ EndBlockPools();
}
void CodeGenerator::FinishFrame(Frame* frame) {
@@ -2437,8 +2440,8 @@ void CodeGenerator::AssembleConstructFrame() {
// The frame has been previously padded in CodeGenerator::FinishFrame().
DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
call_descriptor->CalleeSavedRegisters());
@@ -2577,7 +2580,17 @@ void CodeGenerator::AssembleConstructFrame() {
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
} break;
case CallDescriptor::kCallAddress:
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ required_slots += 2; // marker + saved c_entry_fp.
+ }
__ Claim(required_slots);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
+ __ Str(scratch,
+ MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ }
break;
default:
UNREACHABLE();
@@ -2654,7 +2667,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2669,6 +2682,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
__ Mov(dst, src_object);
}
+ } else if (src.type() == Constant::kCompressedHeapObject) {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ // TODO(v8:8977): Even though this mov happens on 32 bits (Note the
+ // .W()) and we are passing along the RelocInfo, we still haven't made
+ // the address embedded in the code-stream actually be compressed.
+ __ Mov(dst.W(),
+ Immediate(src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
+ }
} else {
__ Mov(dst, g.ToImmediate(source));
}
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 4b7b017111..1c4c0e3335 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -171,6 +171,7 @@ namespace compiler {
V(Arm64CompressSigned) \
V(Arm64CompressPointer) \
V(Arm64CompressAny) \
+ V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 502b9d7d82..8344887ec2 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -319,6 +319,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrW:
case kArm64Str:
case kArm64StrCompressTagged:
+ case kArm64DmbIsh:
case kArm64DsbIsb:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 69d82b4993..a953e35a66 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -535,9 +535,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -676,10 +676,11 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
- // must check kArithmeticImm as well as kLoadStoreImm64.
- if (g.CanBeImmediate(index, kArithmeticImm) &&
- g.CanBeImmediate(index, kLoadStoreImm64)) {
+ // OutOfLineRecordWrite uses the index in an add or sub instruction, but we
+ // can trust the assembler to generate extra instructions if the index does
+ // not fit into add or sub. So here only check the immediate for a store.
+ if (g.CanBeImmediate(index, COMPRESS_POINTERS_BOOL ? kLoadStoreImm32
+ : kLoadStoreImm64)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
@@ -1599,7 +1600,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// 32-bit operations will write their result in a W register (implicitly
// clearing the top 32-bit of the corresponding X register) so the
// zero-extension is a no-op.
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
}
case IrOpcode::kLoad: {
@@ -1610,7 +1611,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
default:
break;
@@ -1646,29 +1647,75 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressed);
+ InstructionCode opcode = kArm64LdrDecompressAnyTagged;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressed;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+ }
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressPointer, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedPointer);
+ InstructionCode opcode = kArm64LdrDecompressTaggedPointer;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressedPointer;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressPointer, g.DefineAsRegister(node),
+ g.UseRegister(value));
+ }
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressSigned, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedSigned);
+ InstructionCode opcode = kArm64LdrDecompressTaggedSigned;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressedSigned;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressSigned, g.DefineAsRegister(node),
+ g.UseRegister(value));
+ }
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
- Node* value = node->InputAt(0);
// The top 32 bits in the 64-bit register will be undefined, and
// must not be used by a dependent node.
- Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
+ EmitIdentity(node);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
@@ -2451,7 +2498,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count();
size_t lookup_time_cost = sw.case_count();
- if (sw.case_count() > 0 &&
+ if (sw.case_count() > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value() > std::numeric_limits<int32_t>::min() &&
@@ -2755,6 +2802,11 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
return VisitRRR(this, kArm64Float64Mul, node);
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64DmbIsh, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index bb83a8497b..9ce92dadaa 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -1210,6 +1210,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
literal = DeoptimizationLiteral(constant.ToHeapObject());
break;
+ case Constant::kCompressedHeapObject:
+ DCHECK_EQ(MachineRepresentation::kCompressed, type.representation());
+ literal = DeoptimizationLiteral(constant.ToHeapObject());
+ break;
case Constant::kDelayedStringConstant:
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
literal = DeoptimizationLiteral(constant.ToDelayedStringConstant());
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 0e61c22cbb..ed4be7a47c 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -81,6 +81,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Immediate(constant.ToExternalReference());
case Constant::kHeapObject:
return Immediate(constant.ToHeapObject());
+ case Constant::kCompressedHeapObject:
+ break;
case Constant::kDelayedStringConstant:
return Immediate::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
@@ -462,6 +464,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
}
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ Register dst = i.OutputRegister(); \
+ Operand src = i.InputOperand(0); \
+ Register tmp = i.TempRegister(0); \
+ __ mov(tmp, Immediate(1)); \
+ __ xor_(dst, dst); \
+ __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \
+ __ opcode(kScratchDoubleReg, src); \
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \
+ __ cmov(zero, dst, tmp); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -674,8 +689,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!HasImmediateInput(instr, 0));
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -870,17 +885,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == edx);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
__ int3();
break;
@@ -1204,7 +1217,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchWordPoisonOnSpeculation:
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
- case kLFence:
+ case kIA32MFence:
+ __ mfence();
+ break;
+ case kIA32LFence:
__ lfence();
break;
case kSSEFloat32Cmp:
@@ -3663,18 +3679,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmov(zero, dst, tmp);
break;
}
+ // Need to split up all the different lane structures because the
+ // comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
+ // 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
+ // respectively.
case kIA32S1x4AllTrue:
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
+ break;
case kIA32S1x8AllTrue:
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+ break;
case kIA32S1x16AllTrue: {
- Register dst = i.OutputRegister();
- Operand src = i.InputOperand(0);
- Register tmp = i.TempRegister(0);
- __ mov(tmp, Immediate(1));
- __ xor_(dst, dst);
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(kScratchDoubleReg, src);
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
- __ cmov(zero, dst, tmp);
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
case kIA32StackCheck: {
@@ -4224,6 +4240,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsCFunctionCall()) {
__ push(ebp);
__ mov(ebp, esp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -4254,8 +4275,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -4629,6 +4650,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
+#undef ASSEMBLE_SIMD_ALL_TRUE
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 60ed1cc29c..56dea82fe2 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -44,7 +44,8 @@ namespace compiler {
V(IA32Tzcnt) \
V(IA32Popcnt) \
V(IA32Bswap) \
- V(LFence) \
+ V(IA32MFence) \
+ V(IA32LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index f2d5cc0d17..15f69b991c 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -365,7 +365,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32PushFloat64:
case kIA32PushSimd128:
case kIA32Poke:
- case kLFence:
+ case kIA32MFence:
+ case kIA32LFence:
return kHasSideEffect;
case kIA32Word32AtomicPairLoad:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index f81b88823e..e1fc66b4ba 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -272,9 +272,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
IA32OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1593,6 +1593,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32MFence, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 068164b57e..1085de2196 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -82,7 +82,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
- V(ArchDebugAbort) \
+ V(ArchAbortCSAAssert) \
V(ArchDebugBreak) \
V(ArchComment) \
V(ArchThrowTerminator) \
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index b0637c175d..538af71bb4 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -298,7 +298,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchTailCallCodeObject:
case kArchTailCallAddress:
case kArchTailCallWasm:
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
case kArchDebugBreak:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index 21edc2f503..a3f62e7ba4 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) {
// Helper struct containing data about a table or lookup switch.
class SwitchInfo {
public:
- SwitchInfo(ZoneVector<CaseInfo>& cases, int32_t min_value, int32_t max_value,
- BasicBlock* default_branch)
+ SwitchInfo(ZoneVector<CaseInfo>& cases, // NOLINT(runtime/references)
+ int32_t min_value, int32_t max_value, BasicBlock* default_branch)
: cases_(cases),
min_value_(min_value),
max_value_(max_value),
@@ -109,13 +109,9 @@ class OperandGenerator {
}
InstructionOperand DefineAsConstant(Node* node) {
- return DefineAsConstant(node, ToConstant(node));
- }
-
- InstructionOperand DefineAsConstant(Node* node, Constant constant) {
selector()->MarkAsDefined(node);
int virtual_register = GetVReg(node);
- sequence()->AddConstant(virtual_register, constant);
+ sequence()->AddConstant(virtual_register, ToConstant(node));
return ConstantOperand(virtual_register);
}
@@ -326,6 +322,8 @@ class OperandGenerator {
}
case IrOpcode::kHeapConstant:
return Constant(HeapConstantOf(node->op()));
+ case IrOpcode::kCompressedHeapConstant:
+ return Constant(HeapConstantOf(node->op()), true);
case IrOpcode::kDelayedStringConstant:
return Constant(StringConstantBaseOf(node->op()));
case IrOpcode::kDeadValue: {
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 2b748a188b..11ba910405 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -8,6 +8,7 @@
#include "src/base/adapters.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
@@ -24,7 +25,7 @@ InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
- EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
@@ -54,7 +55,8 @@ InstructionSelector::InstructionSelector(
frame_(frame),
instruction_selection_failed_(false),
instr_origins_(sequence->zone()),
- trace_turbo_(trace_turbo) {
+ trace_turbo_(trace_turbo),
+ tick_counter_(tick_counter) {
instructions_.reserve(node_count);
continuation_inputs_.reserve(5);
continuation_outputs_.reserve(2);
@@ -1078,7 +1080,8 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kProtectedLoad ||
- node->opcode() == IrOpcode::kProtectedStore) {
+ node->opcode() == IrOpcode::kProtectedStore ||
+ node->opcode() == IrOpcode::kMemoryBarrier) {
++effect_level;
}
}
@@ -1251,6 +1254,7 @@ void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
}
void InstructionSelector::VisitNode(Node* node) {
+ tick_counter_->DoTick();
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
switch (node->opcode()) {
case IrOpcode::kStart:
@@ -1301,6 +1305,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
return MarkAsReference(node), VisitConstant(node);
+ case IrOpcode::kCompressedHeapConstant:
+ return MarkAsCompressed(node), VisitConstant(node);
case IrOpcode::kNumberConstant: {
double value = OpParameter<double>(node->op());
if (!IsSmiDouble(value)) MarkAsReference(node);
@@ -1324,8 +1330,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
- case IrOpcode::kDebugAbort:
- VisitDebugAbort(node);
+ case IrOpcode::kAbortCSAAssert:
+ VisitAbortCSAAssert(node);
return;
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
@@ -1474,6 +1480,7 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
VisitBitcastTaggedToWord(node);
case IrOpcode::kBitcastWordToTagged:
@@ -1734,6 +1741,8 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairSar(node);
+ case IrOpcode::kMemoryBarrier:
+ return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1808,6 +1817,24 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
+ case IrOpcode::kF64x2Splat:
+ return MarkAsSimd128(node), VisitF64x2Splat(node);
+ case IrOpcode::kF64x2ExtractLane:
+ return MarkAsFloat64(node), VisitF64x2ExtractLane(node);
+ case IrOpcode::kF64x2ReplaceLane:
+ return MarkAsSimd128(node), VisitF64x2ReplaceLane(node);
+ case IrOpcode::kF64x2Abs:
+ return MarkAsSimd128(node), VisitF64x2Abs(node);
+ case IrOpcode::kF64x2Neg:
+ return MarkAsSimd128(node), VisitF64x2Neg(node);
+ case IrOpcode::kF64x2Eq:
+ return MarkAsSimd128(node), VisitF64x2Eq(node);
+ case IrOpcode::kF64x2Ne:
+ return MarkAsSimd128(node), VisitF64x2Ne(node);
+ case IrOpcode::kF64x2Lt:
+ return MarkAsSimd128(node), VisitF64x2Lt(node);
+ case IrOpcode::kF64x2Le:
+ return MarkAsSimd128(node), VisitF64x2Le(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -1846,6 +1873,38 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
return MarkAsSimd128(node), VisitF32x4Le(node);
+ case IrOpcode::kI64x2Splat:
+ return MarkAsSimd128(node), VisitI64x2Splat(node);
+ case IrOpcode::kI64x2ExtractLane:
+ return MarkAsWord64(node), VisitI64x2ExtractLane(node);
+ case IrOpcode::kI64x2ReplaceLane:
+ return MarkAsSimd128(node), VisitI64x2ReplaceLane(node);
+ case IrOpcode::kI64x2Neg:
+ return MarkAsSimd128(node), VisitI64x2Neg(node);
+ case IrOpcode::kI64x2Shl:
+ return MarkAsSimd128(node), VisitI64x2Shl(node);
+ case IrOpcode::kI64x2ShrS:
+ return MarkAsSimd128(node), VisitI64x2ShrS(node);
+ case IrOpcode::kI64x2Add:
+ return MarkAsSimd128(node), VisitI64x2Add(node);
+ case IrOpcode::kI64x2Sub:
+ return MarkAsSimd128(node), VisitI64x2Sub(node);
+ case IrOpcode::kI64x2Mul:
+ return MarkAsSimd128(node), VisitI64x2Mul(node);
+ case IrOpcode::kI64x2Eq:
+ return MarkAsSimd128(node), VisitI64x2Eq(node);
+ case IrOpcode::kI64x2Ne:
+ return MarkAsSimd128(node), VisitI64x2Ne(node);
+ case IrOpcode::kI64x2GtS:
+ return MarkAsSimd128(node), VisitI64x2GtS(node);
+ case IrOpcode::kI64x2GeS:
+ return MarkAsSimd128(node), VisitI64x2GeS(node);
+ case IrOpcode::kI64x2ShrU:
+ return MarkAsSimd128(node), VisitI64x2ShrU(node);
+ case IrOpcode::kI64x2GtU:
+ return MarkAsSimd128(node), VisitI64x2GtU(node);
+ case IrOpcode::kI64x2GeU:
+ return MarkAsSimd128(node), VisitI64x2GeU(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
@@ -2028,6 +2087,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Select(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
+ case IrOpcode::kS1x2AnyTrue:
+ return MarkAsWord32(node), VisitS1x2AnyTrue(node);
+ case IrOpcode::kS1x2AllTrue:
+ return MarkAsWord32(node), VisitS1x2AllTrue(node);
case IrOpcode::kS1x4AnyTrue:
return MarkAsWord32(node), VisitS1x4AnyTrue(node);
case IrOpcode::kS1x4AllTrue:
@@ -2489,6 +2552,36 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
+#if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64
+
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
@@ -2962,7 +3055,7 @@ void InstructionSelector::CanonicalizeShuffle(bool inputs_equal,
void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
bool* is_swizzle) {
// Get raw shuffle indices.
- memcpy(shuffle, OpParameter<uint8_t*>(node->op()), kSimd128Size);
+ memcpy(shuffle, S8x16ShuffleOf(node->op()), kSimd128Size);
bool needs_swap;
bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
GetVirtualRegister(node->InputAt(1));
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 4f6b1c5971..16f88bb516 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -19,6 +19,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -266,7 +269,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
- EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@@ -496,11 +499,15 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
VectorSlotPair const& feedback,
Node* frame_state);
- void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
- void EmitLookupSwitch(const SwitchInfo& sw,
- InstructionOperand& value_operand);
- void EmitBinarySearchSwitch(const SwitchInfo& sw,
- InstructionOperand& value_operand);
+ void EmitTableSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& index_operand); // NOLINT(runtime/references)
+ void EmitLookupSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& value_operand); // NOLINT(runtime/references)
+ void EmitBinarySearchSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& value_operand); // NOLINT(runtime/references)
void TryRename(InstructionOperand* op);
int GetRename(int virtual_register);
@@ -604,6 +611,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
+ // Visit the load node with a value and opcode to replace with.
+ void VisitLoad(Node* node, Node* value, InstructionCode opcode);
void VisitFinishRegion(Node* node);
void VisitParameter(Node* node);
void VisitIfException(Node* node);
@@ -772,6 +781,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;
EnableTraceTurboJson trace_turbo_;
+ TickCounter* const tick_counter_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index c52dca61a1..09c7fe22c5 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -530,7 +530,7 @@ Constant::Constant(RelocatablePtrConstantInfo info) {
}
Handle<HeapObject> Constant::ToHeapObject() const {
- DCHECK_EQ(kHeapObject, type());
+ DCHECK(kHeapObject == type() || kCompressedHeapObject == type());
Handle<HeapObject> value(
reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
return value;
@@ -561,7 +561,8 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
return os << constant.ToFloat64().value();
case Constant::kExternalReference:
return os << constant.ToExternalReference().address();
- case Constant::kHeapObject:
+ case Constant::kHeapObject: // Fall through.
+ case Constant::kCompressedHeapObject:
return os << Brief(*constant.ToHeapObject());
case Constant::kRpoNumber:
return os << "RPO" << constant.ToRpoNumber().ToInt();
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 61875a1a17..9b32204055 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1007,6 +1007,7 @@ class V8_EXPORT_PRIVATE Constant final {
kFloat32,
kFloat64,
kExternalReference,
+ kCompressedHeapObject,
kHeapObject,
kRpoNumber,
kDelayedStringConstant
@@ -1018,8 +1019,9 @@ class V8_EXPORT_PRIVATE Constant final {
explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
explicit Constant(ExternalReference ref)
: type_(kExternalReference), value_(bit_cast<intptr_t>(ref.address())) {}
- explicit Constant(Handle<HeapObject> obj)
- : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
+ explicit Constant(Handle<HeapObject> obj, bool is_compressed = false)
+ : type_(is_compressed ? kCompressedHeapObject : kHeapObject),
+ value_(bit_cast<intptr_t>(obj)) {}
explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
explicit Constant(const StringConstantBase* str)
: type_(kDelayedStringConstant), value_(bit_cast<intptr_t>(str)) {}
diff --git a/deps/v8/src/compiler/backend/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h
index e23dd45359..ce60ebcb2e 100644
--- a/deps/v8/src/compiler/backend/jump-threading.h
+++ b/deps/v8/src/compiler/backend/jump-threading.h
@@ -17,14 +17,17 @@ class V8_EXPORT_PRIVATE JumpThreading {
public:
// Compute the forwarding map of basic blocks to their ultimate destination.
// Returns {true} if there is at least one block that is forwarded.
- static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
- InstructionSequence* code, bool frame_at_start);
+ static bool ComputeForwarding(
+ Zone* local_zone,
+ ZoneVector<RpoNumber>& result, // NOLINT(runtime/references)
+ InstructionSequence* code, bool frame_at_start);
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
- static void ApplyForwarding(Zone* local_zone,
- ZoneVector<RpoNumber>& forwarding,
- InstructionSequence* code);
+ static void ApplyForwarding(
+ Zone* local_zone,
+ ZoneVector<RpoNumber>& forwarding, // NOLINT(runtime/references)
+ InstructionSequence* code);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/live-range-separator.cc b/deps/v8/src/compiler/backend/live-range-separator.cc
index 6ed0416045..0a0aadfad1 100644
--- a/deps/v8/src/compiler/backend/live-range-separator.cc
+++ b/deps/v8/src/compiler/backend/live-range-separator.cc
@@ -9,15 +9,16 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+#define TRACE_COND(cond, ...) \
+ do { \
+ if (cond) PrintF(__VA_ARGS__); \
} while (false)
namespace {
void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
- LifetimePosition first_cut, LifetimePosition last_cut) {
+ LifetimePosition first_cut, LifetimePosition last_cut,
+ bool trace_alloc) {
DCHECK(!range->IsSplinter());
// We can ignore ranges that live solely in deferred blocks.
// If a range ends right at the end of a deferred block, it is marked by
@@ -49,9 +50,10 @@ void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
range->SetSplinter(splinter);
}
Zone* zone = data->allocation_zone();
- TRACE("creating splinter %d for range %d between %d and %d\n",
- range->splinter()->vreg(), range->vreg(), start.ToInstructionIndex(),
- end.ToInstructionIndex());
+ TRACE_COND(trace_alloc,
+ "creating splinter %d for range %d between %d and %d\n",
+ range->splinter()->vreg(), range->vreg(),
+ start.ToInstructionIndex(), end.ToInstructionIndex());
range->Splinter(start, end, zone);
}
}
@@ -102,7 +104,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
current_block->last_instruction_index());
} else {
if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, last_cut);
+ CreateSplinter(range, data, first_cut, last_cut,
+ data->is_trace_alloc());
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
@@ -116,7 +119,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
// have to connect blocks anyway, so we can also splinter to the end of the
// block, too.
if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, interval_end);
+ CreateSplinter(range, data, first_cut, interval_end,
+ data->is_trace_alloc());
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
@@ -186,7 +190,7 @@ void LiveRangeMerger::Merge() {
}
}
-#undef TRACE
+#undef TRACE_COND
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 1f79386821..5cec4a8a16 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -80,6 +80,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// maybe not done on arm due to const pool ??
@@ -264,8 +265,9 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+ bool& predicate, // NOLINT(runtime/references)
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -301,9 +303,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -662,8 +664,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -778,6 +780,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &return_location);
+ __ sw(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -785,6 +794,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -816,22 +827,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == a0);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -1611,6 +1620,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
}
+ case kMipsSync: {
+ __ sync();
+ break;
+ }
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
@@ -3157,7 +3170,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3376,8 +3389,14 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Subu(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3387,7 +3406,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3397,12 +3417,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ lw(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Subu(sp, sp, Operand(kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3564,6 +3588,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
break;
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index ba64e59429..44e53ac044 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -134,6 +134,7 @@ namespace compiler {
V(MipsStackClaim) \
V(MipsSeb) \
V(MipsSeh) \
+ V(MipsSync) \
V(MipsS128Zero) \
V(MipsI32x4Splat) \
V(MipsI32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 26a3e808cc..92ab3f9344 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -284,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsUsh:
case kMipsUsw:
case kMipsUswc1:
+ case kMipsSync:
case kMipsWord32AtomicPairStore:
case kMipsWord32AtomicPairAdd:
case kMipsWord32AtomicPairSub:
@@ -1352,7 +1353,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchLookupSwitchLatency((instr->InputCount() - 2) / 2);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
return CallLatency() + 1;
case kArchComment:
case kArchDeoptimize:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 0c7299d451..452e92a174 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -274,9 +274,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
MipsOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1775,6 +1775,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
arraysize(temps), temps);
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 5cd9bc54eb..f746b52df6 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -82,6 +82,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kFloat64:
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// maybe not done on arm due to const pool ??
@@ -277,8 +278,9 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+ bool& predicate, // NOLINT(runtime/references)
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -309,9 +311,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -634,8 +636,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -756,6 +758,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &return_location);
+ __ sd(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -763,6 +772,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -794,22 +805,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == a0);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
@@ -1786,6 +1795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
}
+ case kMips64Sync: {
+ __ sync();
+ break;
+ }
case kMips64Push:
if (instr->InputAt(0)->IsFPRegister()) {
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -3304,7 +3317,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3535,8 +3548,14 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Dsubu(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3546,7 +3565,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3556,12 +3576,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ ld(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Dsubu(sp, sp, Operand(kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3723,6 +3747,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
break;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 24f01b1af1..e375ee8d07 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -163,6 +163,7 @@ namespace compiler {
V(Mips64StackClaim) \
V(Mips64Seb) \
V(Mips64Seh) \
+ V(Mips64Sync) \
V(Mips64AssertEqual) \
V(Mips64S128Zero) \
V(Mips64I32x4Splat) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 499a3da05a..4dcafe4197 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -318,6 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ush:
case kMips64Usw:
case kMips64Uswc1:
+ case kMips64Sync:
case kMips64Word64AtomicStoreWord8:
case kMips64Word64AtomicStoreWord16:
case kMips64Word64AtomicStoreWord32:
@@ -1263,7 +1264,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchLookupSwitchLatency(instr);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 9768a7da9b..95f11ebed1 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -334,9 +334,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -1946,7 +1946,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
// in those cases. Unfortunately, the solution is not complete because
// it might skip cases where Word32 full compare is needed, so
// basically it is a hack.
+ // When call to a host function in simulator, if the function return a
+ // int32 value, the simulator do not sign-extended to int64 because in
+ // simulator we do not know the function whether return a int32 or int64.
+ // so we need do a full word32 compare in this case.
+#ifndef USE_SIMULATOR
if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
+#else
+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
+ node->InputAt(0)->opcode() == IrOpcode::kCall ||
+ node->InputAt(1)->opcode() == IrOpcode::kCall ) {
+#endif
VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
} else {
VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
@@ -2398,6 +2408,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Sync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 30605df270..5289812cb5 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -79,6 +79,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case Constant::kDelayedStringConstant:
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -262,8 +263,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, Instruction* instr,
+ PPCOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
@@ -877,8 +879,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1019,6 +1021,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ constexpr int offset = 12;
+ if (isWasmCapiFunction) {
+ __ mflr(kScratchReg);
+ __ bind(&start_call);
+ __ LoadPC(r0);
+ __ addi(r0, r0, Operand(offset));
+ __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mtlr(r0);
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -1026,6 +1040,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ // TODO(miladfar): In the above block, r0 must be populated with the
+ // strictly-correct PC, which is the return address at this spot. The
+ // offset is set to 12 right now, which is counted from where we are
+ // binding to the label and ends at this spot. If failed, replace it it
+ // with the correct offset suggested. More info on f5ab7d3.
+ if (isWasmCapiFunction)
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1060,22 +1083,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r4);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -1174,6 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kPPC_Sync: {
+ __ sync();
+ break;
+ }
case kPPC_And:
if (HasRegisterInput(instr, 1)) {
__ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -2150,7 +2175,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -2304,14 +2329,20 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ mflr(r0);
- if (FLAG_enable_embedded_constant_pool) {
- __ Push(r0, fp, kConstantPoolRegister);
- // Adjust FP to point to saved FP.
- __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ addi(sp, sp, Operand(-kSystemPointerSize));
} else {
- __ Push(r0, fp);
- __ mr(fp, sp);
+ __ mflr(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ Push(r0, fp, kConstantPoolRegister);
+ // Adjust FP to point to saved FP.
+ __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ } else {
+ __ Push(r0, fp);
+ __ mr(fp, sp);
+ }
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
@@ -2325,7 +2356,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(type);
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -2335,12 +2367,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadP(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ addi(sp, sp, Operand(-kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -2389,7 +2425,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
@@ -2554,6 +2590,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
break;
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index a34a09b796..f37529bd88 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -13,6 +13,7 @@ namespace compiler {
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(PPC_Peek) \
+ V(PPC_Sync) \
V(PPC_And) \
V(PPC_AndComplement) \
V(PPC_Or) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index e5f7d7e45a..61c2d2be3b 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -143,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_Push:
case kPPC_PushFrame:
case kPPC_StoreToStackSlot:
+ case kPPC_Sync:
return kHasSideEffect;
case kPPC_AtomicStoreUint8:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index bb503763c2..bfc77b9412 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -173,9 +173,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
PPCOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1853,6 +1853,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Sync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 57ea2c1a26..44701f8159 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -9,6 +9,7 @@
#include "src/base/adapters.h"
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/linkage.h"
#include "src/strings/string-stream.h"
#include "src/utils/vector.h"
@@ -17,11 +18,13 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+#define TRACE_COND(cond, ...) \
+ do { \
+ if (cond) PrintF(__VA_ARGS__); \
} while (false)
+#define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__)
+
namespace {
static constexpr int kFloat32Bit =
@@ -1119,8 +1122,9 @@ void TopLevelLiveRange::Verify() const {
}
}
-void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
- TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
+void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(),
+ start.value());
DCHECK_NOT_NULL(first_interval_);
DCHECK(first_interval_->start() <= start);
DCHECK(start < first_interval_->end());
@@ -1128,9 +1132,10 @@ void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
}
void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
- LifetimePosition end, Zone* zone) {
- TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
- end.value());
+ LifetimePosition end, Zone* zone,
+ bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(),
+ start.value(), end.value());
LifetimePosition new_end = end;
while (first_interval_ != nullptr && first_interval_->start() <= end) {
if (first_interval_->end() > end) {
@@ -1148,9 +1153,10 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
}
void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
- LifetimePosition end, Zone* zone) {
- TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
- end.value());
+ LifetimePosition end, Zone* zone,
+ bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(),
+ start.value(), end.value());
if (first_interval_ == nullptr) {
UseInterval* interval = new (zone) UseInterval(start, end);
first_interval_ = interval;
@@ -1173,9 +1179,10 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
}
}
-void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
+void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) {
LifetimePosition pos = use_pos->pos();
- TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
+ TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(),
+ pos.value());
UsePosition* prev_hint = nullptr;
UsePosition* prev = nullptr;
UsePosition* current = first_pos_;
@@ -1309,13 +1316,8 @@ void LinearScanAllocator::PrintRangeRow(std::ostream& os,
if (range->spilled()) {
prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
} else {
- const char* reg_name;
- if (range->assigned_register() == kUnassignedRegister) {
- reg_name = "???";
- } else {
- reg_name = RegisterName(range->assigned_register());
- }
- prefix = snprintf(buffer, max_prefix_length, "|%s", reg_name);
+ prefix = snprintf(buffer, max_prefix_length, "|%s",
+ RegisterName(range->assigned_register()));
}
os << buffer;
position += std::min(prefix, max_prefix_length - 1);
@@ -1469,7 +1471,7 @@ void RegisterAllocationData::PhiMapValue::CommitAssignment(
RegisterAllocationData::RegisterAllocationData(
const RegisterConfiguration* config, Zone* zone, Frame* frame,
InstructionSequence* code, RegisterAllocationFlags flags,
- const char* debug_name)
+ TickCounter* tick_counter, const char* debug_name)
: allocation_zone_(zone),
frame_(frame),
code_(code),
@@ -1496,7 +1498,8 @@ RegisterAllocationData::RegisterAllocationData(
preassigned_slot_ranges_(zone),
spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
zone),
- flags_(flags) {
+ flags_(flags),
+ tick_counter_(tick_counter) {
if (!kSimpleFPAliasing) {
fixed_float_live_ranges_.resize(
kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
@@ -1815,6 +1818,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
void ConstraintBuilder::MeetRegisterConstraints() {
for (InstructionBlock* block : code()->instruction_blocks()) {
+ data_->tick_counter()->DoTick();
MeetRegisterConstraints(block);
}
}
@@ -1973,14 +1977,6 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
second->reference_map(), &gap_move->source()};
data()->delayed_references().push_back(delayed_reference);
}
- } else if (!code()->IsReference(input_vreg) &&
- code()->IsReference(output_vreg)) {
- // The input is assumed to immediately have a tagged representation,
- // before the pointer map can be used. I.e. the pointer map at the
- // instruction will include the output operand (whose value at the
- // beginning of the instruction is equal to the input operand). If
- // this is not desired, then the pointer map at this instruction needs
- // to be adjusted manually.
}
}
}
@@ -1988,6 +1984,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
void ConstraintBuilder::ResolvePhis() {
// Process the blocks in reverse order.
for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
+ data_->tick_counter()->DoTick();
ResolvePhis(block);
}
}
@@ -2071,7 +2068,8 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
- range->AddUseInterval(start, end, allocation_zone());
+ range->AddUseInterval(start, end, allocation_zone(),
+ data()->is_trace_alloc());
iterator.Advance();
}
}
@@ -2192,16 +2190,18 @@ UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
if (range->IsEmpty() || range->Start() > position) {
// Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextStart(), allocation_zone());
- range->AddUsePosition(NewUsePosition(position.NextStart()));
+ range->AddUseInterval(position, position.NextStart(), allocation_zone(),
+ data()->is_trace_alloc());
+ range->AddUsePosition(NewUsePosition(position.NextStart()),
+ data()->is_trace_alloc());
} else {
- range->ShortenTo(position);
+ range->ShortenTo(position, data()->is_trace_alloc());
}
if (!operand->IsUnallocated()) return nullptr;
UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
UsePosition* use_pos =
NewUsePosition(position, unalloc_operand, hint, hint_type);
- range->AddUsePosition(use_pos);
+ range->AddUsePosition(use_pos, data()->is_trace_alloc());
return use_pos;
}
@@ -2216,9 +2216,10 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
if (operand->IsUnallocated()) {
UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
- range->AddUsePosition(use_pos);
+ range->AddUsePosition(use_pos, data()->is_trace_alloc());
}
- range->AddUseInterval(block_start, position, allocation_zone());
+ range->AddUseInterval(block_start, position, allocation_zone(),
+ data()->is_trace_alloc());
return use_pos;
}
@@ -2279,7 +2280,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int code = config()->GetAllocatableGeneralCode(i);
TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
@@ -2291,7 +2292,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kFloat64, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
// Clobber fixed float registers on archs with non-simple aliasing.
if (!kSimpleFPAliasing) {
@@ -2304,7 +2305,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kFloat32, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
if (fixed_simd128_live_ranges) {
@@ -2314,7 +2315,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kSimd128, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
}
@@ -2574,7 +2575,8 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
- range->EnsureInterval(start, end, allocation_zone());
+ range->EnsureInterval(start, end, allocation_zone(),
+ data()->is_trace_alloc());
iterator.Advance();
}
// Insert all values into the live in sets of all blocks in the loop.
@@ -2588,6 +2590,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
+ data_->tick_counter()->DoTick();
InstructionBlock* block =
code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
BitVector* live = ComputeLiveOut(block, data());
@@ -2607,6 +2610,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Postprocess the ranges.
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* range : data()->live_ranges()) {
+ data_->tick_counter()->DoTick();
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (range == nullptr) continue;
@@ -2773,7 +2777,7 @@ void BundleBuilder::BuildBundles() {
LiveRangeBundle* input_bundle = input_range->get_bundle();
if (input_bundle != nullptr) {
TRACE("Merge\n");
- if (out->TryMerge(input_bundle))
+ if (out->TryMerge(input_bundle, data()->is_trace_alloc()))
TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
out->id());
} else {
@@ -2798,7 +2802,7 @@ bool LiveRangeBundle::TryAddRange(LiveRange* range) {
InsertUses(range->first_interval());
return true;
}
-bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
+bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) {
if (other == this) return true;
auto iter1 = uses_.begin();
@@ -2810,8 +2814,8 @@ bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
} else if (iter2->start > iter1->end) {
++iter1;
} else {
- TRACE("No merge %d:%d %d:%d\n", iter1->start, iter1->end, iter2->start,
- iter2->end);
+ TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start,
+ iter1->end, iter2->start, iter2->end);
return false;
}
}
@@ -3042,6 +3046,7 @@ void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
}
const char* RegisterAllocator::RegisterName(int register_code) const {
+ if (register_code == kUnassignedRegister) return "unassigned";
return mode() == GENERAL_REGISTERS
? i::RegisterName(Register::from_code(register_code))
: i::RegisterName(DoubleRegister::from_code(register_code));
@@ -3408,7 +3413,7 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors(
to_be_live->emplace(val.first, reg);
TRACE("Reset %d as live due vote %zu in %s\n",
val.first->TopLevel()->vreg(), val.second.count,
- reg == kUnassignedRegister ? "unassigned" : RegisterName(reg));
+ RegisterName(reg));
}
}
};
@@ -3477,6 +3482,8 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
RegisterName(other->assigned_register()));
LiveRange* split_off =
other->SplitAt(next_start, data()->allocation_zone());
+ // Try to get the same register after the deferred block.
+ split_off->set_controlflow_hint(other->assigned_register());
DCHECK_NE(split_off, other);
AddToUnhandled(split_off);
update_caches(other);
@@ -3574,7 +3581,7 @@ void LinearScanAllocator::AllocateRegisters() {
SplitAndSpillRangesDefinedByMemoryOperand();
data()->ResetSpillState();
- if (FLAG_trace_alloc) {
+ if (data()->is_trace_alloc()) {
PrintRangeOverview(std::cout);
}
@@ -3642,6 +3649,7 @@ void LinearScanAllocator::AllocateRegisters() {
while (!unhandled_live_ranges().empty() ||
(data()->is_turbo_control_flow_aware_allocation() &&
last_block < max_blocks)) {
+ data()->tick_counter()->DoTick();
LiveRange* current = unhandled_live_ranges().empty()
? nullptr
: *unhandled_live_ranges().begin();
@@ -3824,7 +3832,7 @@ void LinearScanAllocator::AllocateRegisters() {
ProcessCurrentRange(current, spill_mode);
}
- if (FLAG_trace_alloc) {
+ if (data()->is_trace_alloc()) {
PrintRangeOverview(std::cout);
}
}
@@ -4557,6 +4565,14 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LiveRange* third_part =
SplitBetween(second_part, split_start, third_part_end);
+ if (GetInstructionBlock(data()->code(), second_part->Start())
+ ->IsDeferred()) {
+ // Try to use the same register as before.
+ TRACE("Setting control flow hint for %d:%d to %s\n",
+ third_part->TopLevel()->vreg(), third_part->relative_id(),
+ RegisterName(range->controlflow_hint()));
+ third_part->set_controlflow_hint(range->controlflow_hint());
+ }
AddToUnhandled(third_part);
// This can happen, even if we checked for start < end above, as we fiddle
@@ -4601,6 +4617,7 @@ OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
void OperandAssigner::DecideSpillingMode() {
if (data()->is_turbo_control_flow_aware_allocation()) {
for (auto range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
int max_blocks = data()->code()->InstructionBlockCount();
if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
// If the range is spilled only in deferred blocks and starts in
@@ -4629,6 +4646,7 @@ void OperandAssigner::DecideSpillingMode() {
void OperandAssigner::AssignSpillSlots() {
for (auto range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
if (range != nullptr && range->get_bundle() != nullptr) {
range->get_bundle()->MergeSpillRanges();
}
@@ -4636,6 +4654,7 @@ void OperandAssigner::AssignSpillSlots() {
ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
// Merge disjoint spill ranges
for (size_t i = 0; i < spill_ranges.size(); ++i) {
+ data()->tick_counter()->DoTick();
SpillRange* range = spill_ranges[i];
if (range == nullptr) continue;
if (range->IsEmpty()) continue;
@@ -4648,6 +4667,7 @@ void OperandAssigner::AssignSpillSlots() {
}
// Allocate slots for the merged spill ranges.
for (SpillRange* range : spill_ranges) {
+ data()->tick_counter()->DoTick();
if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
if (!range->HasSlot()) {
@@ -4660,6 +4680,7 @@ void OperandAssigner::AssignSpillSlots() {
void OperandAssigner::CommitAssignment() {
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* top_range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (top_range == nullptr || top_range->IsEmpty()) continue;
@@ -4859,6 +4880,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
+ data()->tick_counter()->DoTick();
int vreg = iterator.Current();
LiveRangeBoundArray* array = finder.ArrayFor(vreg);
for (const RpoNumber& pred : block->predecessors()) {
@@ -5130,6 +5152,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
}
#undef TRACE
+#undef TRACE_COND
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 8929fb2ee6..55f8a8dd1f 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -16,6 +16,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
static const int32_t kUnassignedRegister = RegisterConfiguration::kMaxRegisters;
@@ -175,7 +178,8 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos);
enum class RegisterAllocationFlag : unsigned {
kTurboControlFlowAwareAllocation = 1 << 0,
- kTurboPreprocessRanges = 1 << 1
+ kTurboPreprocessRanges = 1 << 1,
+ kTraceAllocation = 1 << 2
};
using RegisterAllocationFlags = base::Flags<RegisterAllocationFlag>;
@@ -198,6 +202,10 @@ class RegisterAllocationData final : public ZoneObject {
return flags_ & RegisterAllocationFlag::kTurboPreprocessRanges;
}
+ bool is_trace_alloc() {
+ return flags_ & RegisterAllocationFlag::kTraceAllocation;
+ }
+
static constexpr int kNumberOfFixedRangesPerRegister = 2;
class PhiMapValue : public ZoneObject {
@@ -238,6 +246,7 @@ class RegisterAllocationData final : public ZoneObject {
Zone* allocation_zone, Frame* frame,
InstructionSequence* code,
RegisterAllocationFlags flags,
+ TickCounter* tick_counter,
const char* debug_name = nullptr);
const ZoneVector<TopLevelLiveRange*>& live_ranges() const {
@@ -328,6 +337,8 @@ class RegisterAllocationData final : public ZoneObject {
void ResetSpillState() { spill_state_.clear(); }
+ TickCounter* tick_counter() { return tick_counter_; }
+
private:
int GetNextLiveRangeId();
@@ -354,6 +365,7 @@ class RegisterAllocationData final : public ZoneObject {
RangesWithPreassignedSlots preassigned_slot_ranges_;
ZoneVector<ZoneVector<LiveRange*>> spill_state_;
RegisterAllocationFlags flags_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
@@ -741,7 +753,7 @@ class LiveRangeBundle : public ZoneObject {
: ranges_(zone), uses_(zone), id_(id) {}
bool TryAddRange(LiveRange* range);
- bool TryMerge(LiveRangeBundle* other);
+ bool TryMerge(LiveRangeBundle* other, bool trace_alloc);
ZoneSet<LiveRange*, LiveRangeOrdering> ranges_;
ZoneSet<Range, RangeOrdering> uses_;
@@ -785,12 +797,14 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); }
// Add a new interval or a new use position to this live range.
- void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUsePosition(UsePosition* pos);
+ void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone,
+ bool trace_alloc);
+ void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone,
+ bool trace_alloc);
+ void AddUsePosition(UsePosition* pos, bool trace_alloc);
// Shorten the most recently added interval by setting a new start.
- void ShortenTo(LifetimePosition start);
+ void ShortenTo(LifetimePosition start, bool trace_alloc);
// Detaches between start and end, and attributes the resulting range to
// result.
@@ -1279,11 +1293,13 @@ class LinearScanAllocator final : public RegisterAllocator {
RangeWithRegister::Equals>;
void MaybeUndoPreviousSplit(LiveRange* range);
- void SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
- LifetimePosition position, SpillMode spill_mode);
+ void SpillNotLiveRanges(
+ RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
+ LifetimePosition position, SpillMode spill_mode);
LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
- void ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
- LifetimePosition position);
+ void ReloadLiveRanges(
+ RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
+ LifetimePosition position);
void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block);
bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 595800268d..6457b7c8b4 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -73,6 +73,7 @@ class S390OperandConverter final : public InstructionOperandConverter {
case Constant::kDelayedStringConstant:
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -1245,8 +1246,9 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, Instruction* instr,
+ S390OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
@@ -1380,8 +1382,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1509,6 +1511,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ // Put the return address in a stack slot.
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ larl(r0, &return_location);
+ __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -1516,6 +1525,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1547,22 +1558,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r3);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -2891,7 +2900,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3014,8 +3023,14 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(r14, fp);
- __ LoadRR(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ } else {
+ __ Push(r14, fp);
+ __ LoadRR(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue(ip);
if (call_descriptor->PushArgumentCount()) {
@@ -3028,7 +3043,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(type);
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3038,12 +3054,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadP(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -3089,7 +3109,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
@@ -3247,6 +3267,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ break;
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
break;
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index d982605efc..99d3b0fa0f 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -447,11 +447,13 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
#endif
template <class CanCombineWithLoad>
-void GenerateRightOperands(InstructionSelector* selector, Node* node,
- Node* right, InstructionCode& opcode,
- OperandModes& operand_mode,
- InstructionOperand* inputs, size_t& input_count,
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateRightOperands(
+ InstructionSelector* selector, Node* node, Node* right,
+ InstructionCode& opcode, // NOLINT(runtime/references)
+ OperandModes& operand_mode, // NOLINT(runtime/references)
+ InstructionOperand* inputs,
+ size_t& input_count, // NOLINT(runtime/references)
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
if ((operand_mode & OperandMode::kAllowImmediate) &&
@@ -491,11 +493,13 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
}
template <class CanCombineWithLoad>
-void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
- Node* left, Node* right, InstructionCode& opcode,
- OperandModes& operand_mode,
- InstructionOperand* inputs, size_t& input_count,
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateBinOpOperands(
+ InstructionSelector* selector, Node* node, Node* left, Node* right,
+ InstructionCode& opcode, // NOLINT(runtime/references)
+ OperandModes& operand_mode, // NOLINT(runtime/references)
+ InstructionOperand* inputs,
+ size_t& input_count, // NOLINT(runtime/references)
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
// left is always register
InstructionOperand const left_input = g.UseRegister(left);
@@ -686,9 +690,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
S390OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -2194,6 +2198,11 @@ void InstructionSelector::EmitPrepareArguments(
}
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kArchNop, g.NoOutput());
+}
+
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
diff --git a/deps/v8/src/compiler/backend/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h
index 590a839a06..d3a52b34b7 100644
--- a/deps/v8/src/compiler/backend/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h
@@ -23,6 +23,7 @@ namespace v8 {
namespace internal {
class EhFrameWriter;
+class Zone;
namespace compiler {
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index c6667292fc..a108edeff0 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -349,7 +349,8 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i, int pc) {
+ X64OperandConverter& i, // NOLINT(runtime/references)
+ int pc) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessProtected) {
@@ -357,9 +358,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ X64OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -575,6 +576,19 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \
} while (false)
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ CpuFeatureScope sse_scope(tasm(), SSE4_1); \
+ Register dst = i.OutputRegister(); \
+ Register tmp = i.TempRegister(0); \
+ __ movq(tmp, Immediate(1)); \
+ __ xorq(dst, dst); \
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg); \
+ __ opcode(kScratchDoubleReg, i.InputSimd128Register(0)); \
+ __ ptest(kScratchDoubleReg, kScratchDoubleReg); \
+ __ cmovq(zero, dst, tmp); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
@@ -752,8 +766,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!HasImmediateInput(instr, 0));
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -952,17 +966,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == rdx);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
__ int3();
unwinding_info_writer_.MarkBlockWillExit();
@@ -1029,9 +1041,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1,
mode, DetermineStubCallMode());
__ StoreTaggedField(operand, value);
- if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(object, object);
- }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
@@ -1042,7 +1051,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ andq(i.InputRegister(0), kSpeculationPoisonRegister);
break;
- case kLFence:
+ case kX64MFence:
+ __ mfence();
+ break;
+ case kX64LFence:
__ lfence();
break;
case kArchStackSlot: {
@@ -1309,16 +1321,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, 33);
+ __ Andps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, 31);
+ __ Xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Sqrt:
@@ -1517,18 +1529,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
+ case kX64F64x2Abs:
case kSSEFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
- __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, 1);
+ __ Andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
+ case kX64F64x2Neg:
case kSSEFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
- __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, 63);
+ __ Xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Sqrt:
@@ -1944,16 +1958,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqCompressTagged: {
@@ -1970,16 +1987,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64DecompressSigned: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressTaggedSigned);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64DecompressPointer: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressTaggedPointer);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64DecompressAny: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressAnyTagged);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64CompressSigned: // Fall through.
@@ -2006,11 +2026,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
- __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movss(operand, i.InputDoubleRegister(index));
+ __ Movss(operand, i.InputDoubleRegister(index));
}
break;
case kX64Movsd: {
@@ -2039,11 +2059,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSSE3);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
- __ movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+ __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movdqu(operand, i.InputSimd128Register(index));
+ __ Movdqu(operand, i.InputSimd128Register(index));
}
break;
}
@@ -2065,7 +2085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsRegister()) {
__ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Movss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64BitcastLD:
@@ -2235,6 +2255,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64F64x2Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ pshufd(dst, i.InputDoubleRegister(0), 0x44);
+ } else {
+ __ pshufd(dst, i.InputOperand(0), 0x44);
+ }
+ break;
+ }
+ case kX64F64x2ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ if (instr->InputAt(2)->IsFPRegister()) {
+ __ movq(kScratchRegister, i.InputDoubleRegister(2));
+ __ pinsrq(i.OutputSimd128Register(), kScratchRegister, i.InputInt8(1));
+ } else {
+ __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64F64x2ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movq(i.OutputDoubleRegister(), kScratchRegister);
+ break;
+ }
+ case kX64F64x2Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
// TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
case kX64F32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
@@ -2400,6 +2465,171 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I64x2Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(dst, i.InputRegister(0));
+ } else {
+ __ movq(dst, i.InputOperand(0));
+ }
+ __ pshufd(dst, dst, 0x44);
+ break;
+ }
+ case kX64I64x2ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ pinsrq(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64I64x2Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ movapd(kScratchDoubleReg, src);
+ src = kScratchDoubleReg;
+ }
+ __ pxor(dst, dst);
+ __ psubq(dst, src);
+ break;
+ }
+ case kX64I64x2Shl: {
+ __ psllq(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2ShrS: {
+ // TODO(zhin): there is vpsraq but requires AVX512
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ // ShrS on each quadword one at a time
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+
+ // lower quadword
+ __ pextrq(kScratchRegister, src, 0x0);
+ __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
+ __ pinsrq(dst, kScratchRegister, 0x0);
+
+ // upper quadword
+ __ pextrq(kScratchRegister, src, 0x1);
+ __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
+ __ pinsrq(dst, kScratchRegister, 0x1);
+ break;
+ }
+ case kX64I64x2Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister left = i.InputSimd128Register(0);
+ XMMRegister right = i.InputSimd128Register(1);
+ XMMRegister tmp1 = i.ToSimd128Register(instr->TempAt(0));
+ XMMRegister tmp2 = i.ToSimd128Register(instr->TempAt(1));
+
+ __ movaps(tmp1, left);
+ __ movaps(tmp2, right);
+
+ // Multiply high dword of each qword of left with right.
+ __ psrlq(tmp1, 32);
+ __ pmuludq(tmp1, right);
+
+ // Multiply high dword of each qword of right with left.
+ __ psrlq(tmp2, 32);
+ __ pmuludq(tmp2, left);
+
+ __ paddq(tmp2, tmp1);
+ __ psllq(tmp2, 32);
+
+ __ pmuludq(left, right);
+ __ paddq(left, tmp2); // left == dst
+ break;
+ }
+ case kX64I64x2Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kX64I64x2GtS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ __ pcmpgtq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2GeS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ movaps(tmp, src);
+ __ pcmpgtq(tmp, dst);
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, tmp);
+ break;
+ }
+ case kX64I64x2ShrU: {
+ __ psrlq(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2GtU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+
+ __ movaps(tmp, src);
+ __ pxor(tmp, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ __ pcmpgtq(dst, tmp);
+ break;
+ }
+ case kX64I64x2GeU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+
+ __ movaps(tmp, src);
+ __ pxor(dst, kScratchDoubleReg);
+ __ pxor(tmp, kScratchDoubleReg);
+ __ pcmpgtq(tmp, dst);
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, tmp);
+ break;
+ }
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputAt(0)->IsRegister()) {
@@ -3297,6 +3527,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ por(dst, kScratchDoubleReg);
break;
}
+ case kX64S1x2AnyTrue:
case kX64S1x4AnyTrue:
case kX64S1x8AnyTrue:
case kX64S1x16AnyTrue: {
@@ -3310,19 +3541,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmovq(zero, dst, tmp);
break;
}
- case kX64S1x4AllTrue:
- case kX64S1x8AllTrue:
+ // Need to split up all the different lane structures because the
+ // comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
+ // 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
+ // respectively.
+ case kX64S1x2AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqq);
+ break;
+ }
+ case kX64S1x4AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqd);
+ break;
+ }
+ case kX64S1x8AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+ break;
+ }
case kX64S1x16AllTrue: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- Register dst = i.OutputRegister();
- XMMRegister src = i.InputSimd128Register(0);
- Register tmp = i.TempRegister(0);
- __ movq(tmp, Immediate(1));
- __ xorq(dst, dst);
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(kScratchDoubleReg, src);
- __ ptest(kScratchDoubleReg, kScratchDoubleReg);
- __ cmovq(zero, dst, tmp);
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
case kX64StackCheck:
@@ -3507,6 +3743,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_INSTR
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
+#undef ASSEMBLE_SIMD_ALL_TRUE
namespace {
@@ -3734,6 +3971,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3765,8 +4007,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3835,7 +4077,7 @@ void CodeGenerator::AssembleConstructFrame() {
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
if (!((1 << i) & saves_fp)) continue;
- __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
+ __ Movdqu(Operand(rsp, kQuadWordSize * slot_idx),
XMMRegister::from_code(i));
slot_idx++;
}
@@ -3877,7 +4119,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
if (!((1 << i) & saves_fp)) continue;
- __ movdqu(XMMRegister::from_code(i),
+ __ Movdqu(XMMRegister::from_code(i),
Operand(rsp, kQuadWordSize * slot_idx));
slot_idx++;
}
@@ -3970,6 +4212,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+ }
+ break;
+ }
case Constant::kDelayedStringConstant: {
const StringConstantBase* src_constant = src.ToDelayedStringConstant();
__ MoveStringConstant(dst, src_constant);
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 57ef26dbd7..d6ac3f43df 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -58,7 +58,8 @@ namespace compiler {
V(X64Popcnt32) \
V(X64Bswap) \
V(X64Bswap32) \
- V(LFence) \
+ V(X64MFence) \
+ V(X64LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -158,6 +159,15 @@ namespace compiler {
V(X64Poke) \
V(X64Peek) \
V(X64StackCheck) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -177,6 +187,22 @@ namespace compiler {
V(X64F32x4Ne) \
V(X64F32x4Lt) \
V(X64F32x4Le) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2ReplaceLane) \
+ V(X64I64x2Neg) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2Ne) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2GtU) \
+ V(X64I64x2GeU) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
@@ -293,6 +319,8 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
+ V(X64S1x2AnyTrue) \
+ V(X64S1x2AllTrue) \
V(X64S1x4AnyTrue) \
V(X64S1x4AllTrue) \
V(X64S1x8AnyTrue) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 9d48e9175a..6389ef2e50 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -124,6 +124,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64F64x2Splat:
+ case kX64F64x2ExtractLane:
+ case kX64F64x2ReplaceLane:
+ case kX64F64x2Abs:
+ case kX64F64x2Neg:
+ case kX64F64x2Eq:
+ case kX64F64x2Ne:
+ case kX64F64x2Lt:
+ case kX64F64x2Le:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -143,6 +152,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Ne:
case kX64F32x4Lt:
case kX64F32x4Le:
+ case kX64I64x2Splat:
+ case kX64I64x2ExtractLane:
+ case kX64I64x2ReplaceLane:
+ case kX64I64x2Neg:
+ case kX64I64x2Shl:
+ case kX64I64x2ShrS:
+ case kX64I64x2Add:
+ case kX64I64x2Sub:
+ case kX64I64x2Mul:
+ case kX64I64x2Eq:
+ case kX64I64x2Ne:
+ case kX64I64x2GtS:
+ case kX64I64x2GeS:
+ case kX64I64x2ShrU:
+ case kX64I64x2GtU:
+ case kX64I64x2GeU:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
@@ -233,6 +258,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Not:
case kX64S128Select:
case kX64S128Zero:
+ case kX64S1x2AnyTrue:
+ case kX64S1x2AllTrue:
case kX64S1x4AnyTrue:
case kX64S1x4AllTrue:
case kX64S1x8AnyTrue:
@@ -327,7 +354,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Poke:
return kHasSideEffect;
- case kLFence:
+ case kX64MFence:
+ case kX64LFence:
return kHasSideEffect;
case kX64Word64AtomicLoadUint8:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index a20590b8d3..a4908fb846 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -309,21 +309,19 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
X64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
X64OperandGenerator g(this);
-
- ArchOpcode opcode = GetLoadOpcode(load_rep);
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= MiscField::encode(kMemoryAccessProtected);
@@ -334,6 +332,11 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
+
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
@@ -898,7 +901,8 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
// Omit truncation and turn subtractions of constant values into immediate
// "leal" instructions by negating the value.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm));
+ g.DefineAsRegister(node), int64_input,
+ g.TempImmediate(base::NegateWithWraparound(imm)));
}
return;
}
@@ -907,9 +911,9 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
if (m.left().Is(0)) {
Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else if (m.right().Is(0)) {
- // TODO(jarin): We should be able to use {EmitIdentity} here
- // (https://crbug.com/v8/7947).
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+ // {EmitIdentity} reuses the virtual register of the first input
+ // for the output. This is exactly what we want here.
+ EmitIdentity(node);
} else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leal" instructions
// by negating the value.
@@ -1254,23 +1258,47 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
}
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressed);
+ VisitLoad(node, value, kX64MovqDecompressAnyTagged);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
+ }
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedPointer);
+ VisitLoad(node, value, kX64MovqDecompressTaggedPointer);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
+ }
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedSigned);
+ VisitLoad(node, value, kX64MovqDecompressTaggedSigned);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
+ }
}
namespace {
@@ -2343,6 +2371,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64MFence, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -2545,12 +2578,18 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) \
+ V(F64x2) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
#define SIMD_BINOP_LIST(V) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
V(F32x4Add) \
V(F32x4AddHoriz) \
V(F32x4Sub) \
@@ -2561,6 +2600,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2615,12 +2659,18 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Or) \
V(S128Xor)
+#define SIMD_BINOP_ONE_TEMP_LIST(V) \
+ V(I64x2GeS) \
+ V(I64x2GtU) \
+ V(I64x2GeU)
+
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
+ V(I64x2Neg) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
V(I32x4Neg) \
@@ -2635,6 +2685,9 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU) \
@@ -2646,11 +2699,13 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
+ V(S1x2AnyTrue) \
V(S1x4AnyTrue) \
V(S1x8AnyTrue) \
V(S1x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
+ V(S1x2AllTrue) \
V(S1x4AllTrue) \
V(S1x8AllTrue) \
V(S1x16AllTrue)
@@ -2721,6 +2776,18 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
+#define VISIT_SIMD_BINOP_ONE_TEMP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ arraysize(temps), temps); \
+ }
+SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP)
+#undef VISIT_SIMD_BINOP_ONE_TEMP
+#undef SIMD_BINOP_ONE_TEMP_LIST
+
#define VISIT_SIMD_ANYTRUE(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2751,12 +2818,33 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+void InstructionSelector::VisitF64x2Abs(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF64x2Neg(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
X64OperandGenerator g(this);
Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitI64x2Mul(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register(),
+ g.TempSimd128Register()};
+ Emit(kX64I64x2Mul, g.DefineSameAsFirst(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 9c23cd460a..b44bec5fc8 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -79,22 +79,28 @@ ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
}
BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
- Zone* zone, bool do_liveness_analysis)
+ Zone* zone, BailoutId osr_bailout_id,
+ bool analyze_liveness)
: bytecode_array_(bytecode_array),
- do_liveness_analysis_(do_liveness_analysis),
zone_(zone),
+ osr_bailout_id_(osr_bailout_id),
+ analyze_liveness_(analyze_liveness),
loop_stack_(zone),
loop_end_index_queue_(zone),
resume_jump_targets_(zone),
end_to_header_(zone),
header_to_info_(zone),
osr_entry_point_(-1),
- liveness_map_(bytecode_array->length(), zone) {}
+ liveness_map_(bytecode_array->length(), zone) {
+ Analyze();
+}
namespace {
-void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateInLiveness(
+ Bytecode bytecode,
+ BytecodeLivenessState& in_liveness, // NOLINT(runtime/references)
+ const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -201,12 +207,14 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
}
}
-void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
- BytecodeLivenessState* next_bytecode_in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor,
- const BytecodeLivenessMap& liveness_map) {
+void UpdateOutLiveness(
+ Bytecode bytecode,
+ BytecodeLivenessState& out_liveness, // NOLINT(runtime/references)
+ BytecodeLivenessState* next_bytecode_in_liveness,
+ const interpreter::BytecodeArrayAccessor& accessor,
+ Handle<BytecodeArray> bytecode_array,
+ const BytecodeLivenessMap& liveness_map) {
int current_offset = accessor.current_offset();
- const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
// Special case Suspend and Resume to just pass through liveness.
if (bytecode == Bytecode::kSuspendGenerator ||
@@ -261,20 +269,24 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
}
}
-void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+void UpdateLiveness(Bytecode bytecode,
+ BytecodeLiveness& liveness, // NOLINT(runtime/references)
BytecodeLivenessState** next_bytecode_in_liveness,
const interpreter::BytecodeArrayAccessor& accessor,
+ Handle<BytecodeArray> bytecode_array,
const BytecodeLivenessMap& liveness_map) {
UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness,
- accessor, liveness_map);
+ accessor, bytecode_array, liveness_map);
liveness.in->CopyFrom(*liveness.out);
UpdateInLiveness(bytecode, *liveness.in, accessor);
*next_bytecode_in_liveness = liveness.in;
}
-void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
- const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateAssignments(
+ Bytecode bytecode,
+ BytecodeLoopAssignments& assignments, // NOLINT(runtime/references)
+ const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -307,15 +319,13 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
} // namespace
-void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
+void BytecodeAnalysis::Analyze() {
loop_stack_.push({-1, nullptr});
BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
-
- bool is_osr = !osr_bailout_id.IsNone();
- int osr_loop_end_offset = is_osr ? osr_bailout_id.ToInt() : -1;
-
int generator_switch_index = -1;
+ int osr_loop_end_offset = osr_bailout_id_.ToInt();
+ DCHECK_EQ(osr_loop_end_offset < 0, osr_bailout_id_.IsNone());
interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
@@ -337,14 +347,14 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
if (current_offset == osr_loop_end_offset) {
osr_entry_point_ = loop_header;
} else if (current_offset < osr_loop_end_offset) {
- // Check we've found the osr_entry_point if we've gone past the
+ // Assert that we've found the osr_entry_point if we've gone past the
// osr_loop_end_offset. Note, we are iterating the bytecode in reverse,
- // so the less than in the check is correct.
- DCHECK_NE(-1, osr_entry_point_);
+ // so the less-than in the above condition is correct.
+ DCHECK_LE(0, osr_entry_point_);
}
// Save the index so that we can do another pass later.
- if (do_liveness_analysis_) {
+ if (analyze_liveness_) {
loop_end_index_queue_.push_back(iterator.current_index());
}
} else if (loop_stack_.size() > 1) {
@@ -357,8 +367,8 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
// information we currently have.
UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
- // Update suspend counts for this loop, though only if not OSR.
- if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ // Update suspend counts for this loop.
+ if (bytecode == Bytecode::kSuspendGenerator) {
int suspend_id = iterator.GetUnsignedImmediateOperand(3);
int resume_offset = current_offset + iterator.current_bytecode_size();
current_loop_info->AddResumeTarget(
@@ -412,7 +422,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
}
}
}
- } else if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ } else if (bytecode == Bytecode::kSuspendGenerator) {
// If we're not in a loop, we still need to look for suspends.
// TODO(leszeks): It would be nice to de-duplicate this with the in-loop
// case
@@ -422,11 +432,11 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
ResumeJumpTarget::Leaf(suspend_id, resume_offset));
}
- if (do_liveness_analysis_) {
+ if (analyze_liveness_) {
BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
current_offset, bytecode_array()->register_count(), zone());
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
}
@@ -435,7 +445,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
DCHECK(ResumeJumpTargetsAreValid());
- if (!do_liveness_analysis_) return;
+ if (!analyze_liveness_) return;
// At this point, every bytecode has a valid in and out liveness, except for
// propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness
@@ -489,12 +499,13 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
// Now we are at the loop header. Since the in-liveness of the header
// can't change, we need only to update the out-liveness.
UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
- next_bytecode_in_liveness, iterator, liveness_map_);
+ next_bytecode_in_liveness, iterator, bytecode_array(),
+ liveness_map_);
}
// Process the generator switch statement separately, once the loops are done.
@@ -533,12 +544,12 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
DCHECK_NE(bytecode, Bytecode::kJumpLoop);
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
}
}
- DCHECK(do_liveness_analysis_);
+ DCHECK(analyze_liveness_);
if (FLAG_trace_environment_liveness) {
StdoutStream of;
PrintLivenessTo(of);
@@ -610,14 +621,14 @@ const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
int offset) const {
- if (!do_liveness_analysis_) return nullptr;
+ if (!analyze_liveness_) return nullptr;
return liveness_map_.GetInLiveness(offset);
}
const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor(
int offset) const {
- if (!do_liveness_analysis_) return nullptr;
+ if (!analyze_liveness_) return nullptr;
return liveness_map_.GetOutLiveness(offset);
}
@@ -662,9 +673,8 @@ bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
}
// If the iterator is invalid, we've reached the end without finding the
- // generator switch. Similarly, if we are OSR-ing, we're not resuming, so we
- // need no jump targets. So, ensure there are no jump targets and exit.
- if (!iterator.IsValid() || HasOsrEntryPoint()) {
+ // generator switch. So, ensure there are no jump targets and exit.
+ if (!iterator.IsValid()) {
// Check top-level.
if (!resume_jump_targets().empty()) {
PrintF(stderr,
@@ -758,14 +768,14 @@ bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds(
valid = false;
} else {
// Make sure we're resuming to a Resume bytecode
- interpreter::BytecodeArrayAccessor assessor(bytecode_array(),
+ interpreter::BytecodeArrayAccessor accessor(bytecode_array(),
target.target_offset());
- if (assessor.current_bytecode() != Bytecode::kResumeGenerator) {
+ if (accessor.current_bytecode() != Bytecode::kResumeGenerator) {
PrintF(stderr,
"Expected resume target for id %d, offset %d, to be "
"ResumeGenerator, but found %s\n",
target.suspend_id(), target.target_offset(),
- Bytecodes::ToString(assessor.current_bytecode()));
+ Bytecodes::ToString(accessor.current_bytecode()));
valid = false;
}
@@ -820,7 +830,7 @@ bool BytecodeAnalysis::LivenessIsValid() {
previous_liveness.CopyFrom(*liveness.out);
UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
- iterator, liveness_map_);
+ iterator, bytecode_array(), liveness_map_);
// UpdateOutLiveness skips kJumpLoop, so we update it manually.
if (bytecode == Bytecode::kJumpLoop) {
int target_offset = iterator.GetJumpTargetOffset();
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 53f86ca306..32c5168466 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -92,18 +92,14 @@ struct V8_EXPORT_PRIVATE LoopInfo {
ZoneVector<ResumeJumpTarget> resume_jump_targets_;
};
-class V8_EXPORT_PRIVATE BytecodeAnalysis {
+// Analyze the bytecodes to find the loop ranges, loop nesting, loop assignments
+// and liveness. NOTE: The broker/serializer relies on the fact that an
+// analysis for OSR (osr_bailout_id is not None) subsumes an analysis for
+// non-OSR (osr_bailout_id is None).
+class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
public:
BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
- bool do_liveness_analysis);
-
- // Analyze the bytecodes to find the loop ranges, loop nesting, loop
- // assignments and liveness, under the assumption that there is an OSR bailout
- // at {osr_bailout_id}.
- //
- // No other methods in this class return valid information until this has been
- // called.
- void Analyze(BailoutId osr_bailout_id);
+ BailoutId osr_bailout_id, bool analyze_liveness);
// Return true if the given offset is a loop header
bool IsLoopHeader(int offset) const;
@@ -118,23 +114,30 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis {
return resume_jump_targets_;
}
- // True if the current analysis has an OSR entry point.
- bool HasOsrEntryPoint() const { return osr_entry_point_ != -1; }
-
- int osr_entry_point() const { return osr_entry_point_; }
-
- // Gets the in-liveness for the bytecode at {offset}.
+ // Gets the in-/out-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetInLivenessFor(int offset) const;
-
- // Gets the out-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetOutLivenessFor(int offset) const;
+ // In the case of OSR, the analysis also computes the (bytecode offset of the)
+ // OSR entry point from the {osr_bailout_id} that was given to the
+ // constructor.
+ int osr_entry_point() const {
+ CHECK_LE(0, osr_entry_point_);
+ return osr_entry_point_;
+ }
+ // Return the osr_bailout_id (for verification purposes).
+ BailoutId osr_bailout_id() const { return osr_bailout_id_; }
+
+ // Return whether liveness analysis was performed (for verification purposes).
+ bool liveness_analyzed() const { return analyze_liveness_; }
+
private:
struct LoopStackEntry {
int header_offset;
LoopInfo* loop_info;
};
+ void Analyze();
void PushLoop(int loop_header, int loop_end);
#if DEBUG
@@ -153,17 +156,15 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis {
std::ostream& PrintLivenessTo(std::ostream& os) const;
Handle<BytecodeArray> const bytecode_array_;
- bool const do_liveness_analysis_;
Zone* const zone_;
-
+ BailoutId const osr_bailout_id_;
+ bool const analyze_liveness_;
ZoneStack<LoopStackEntry> loop_stack_;
ZoneVector<int> loop_end_index_queue_;
ZoneVector<ResumeJumpTarget> resume_jump_targets_;
-
ZoneMap<int, int> end_to_header_;
ZoneMap<int, LoopInfo> header_to_info_;
int osr_entry_point_;
-
BytecodeLivenessMap liveness_map_;
DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 0ab8f85670..7c71446320 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -6,9 +6,11 @@
#include "src/ast/ast.h"
#include "src/codegen/source-position-table.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
@@ -32,14 +34,15 @@ namespace compiler {
class BytecodeGraphBuilder {
public:
BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone,
- Handle<BytecodeArray> bytecode_array,
- Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> feedback_vector,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeArrayRef bytecode_array,
+ SharedFunctionInfoRef shared,
+ FeedbackVectorRef feedback_vector, BailoutId osr_offset,
+ JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags);
+ NativeContextRef native_context, int inlining_id,
+ BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter);
// Creates a graph by visiting bytecodes.
void CreateGraph();
@@ -318,12 +321,8 @@ class BytecodeGraphBuilder {
return jsgraph_->simplified();
}
Zone* local_zone() const { return local_zone_; }
- const Handle<BytecodeArray>& bytecode_array() const {
- return bytecode_array_;
- }
- const Handle<FeedbackVector>& feedback_vector() const {
- return feedback_vector_;
- }
+ const BytecodeArrayRef bytecode_array() const { return bytecode_array_; }
+ FeedbackVectorRef feedback_vector() const { return feedback_vector_; }
const JSTypeHintLowering& type_hint_lowering() const {
return type_hint_lowering_;
}
@@ -332,7 +331,7 @@ class BytecodeGraphBuilder {
}
SourcePositionTableIterator& source_position_iterator() {
- return source_position_iterator_;
+ return *source_position_iterator_.get();
}
interpreter::BytecodeArrayIterator& bytecode_iterator() {
@@ -343,8 +342,6 @@ class BytecodeGraphBuilder {
return bytecode_analysis_;
}
- void RunBytecodeAnalysis() { bytecode_analysis_.Analyze(osr_offset_); }
-
int currently_peeled_loop_offset() const {
return currently_peeled_loop_offset_;
}
@@ -368,9 +365,9 @@ class BytecodeGraphBuilder {
needs_eager_checkpoint_ = value;
}
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+ SharedFunctionInfoRef shared_info() const { return shared_info_; }
- Handle<Context> native_context() const { return native_context_; }
+ NativeContextRef native_context() const { return native_context_; }
JSHeapBroker* broker() const { return broker_; }
@@ -382,15 +379,15 @@ class BytecodeGraphBuilder {
Zone* const local_zone_;
JSGraph* const jsgraph_;
CallFrequency const invocation_frequency_;
- Handle<BytecodeArray> const bytecode_array_;
- Handle<FeedbackVector> const feedback_vector_;
+ BytecodeArrayRef const bytecode_array_;
+ FeedbackVectorRef feedback_vector_;
JSTypeHintLowering const type_hint_lowering_;
const FrameStateFunctionInfo* const frame_state_function_info_;
- SourcePositionTableIterator source_position_iterator_;
+ std::unique_ptr<SourcePositionTableIterator> source_position_iterator_;
interpreter::BytecodeArrayIterator bytecode_iterator_;
- BytecodeAnalysis bytecode_analysis_;
+ BytecodeAnalysis const& bytecode_analysis_;
Environment* environment_;
- BailoutId const osr_offset_;
+ bool const osr_;
int currently_peeled_loop_offset_;
bool skip_next_stack_check_;
@@ -434,10 +431,12 @@ class BytecodeGraphBuilder {
SourcePosition const start_position_;
- Handle<SharedFunctionInfo> const shared_info_;
+ SharedFunctionInfoRef const shared_info_;
// The native context for which we optimize.
- Handle<Context> const native_context_;
+ NativeContextRef const native_context_;
+
+ TickCounter* const tick_counter_;
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
@@ -938,13 +937,12 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
}
BytecodeGraphBuilder::BytecodeGraphBuilder(
- JSHeapBroker* broker, Zone* local_zone,
- Handle<BytecodeArray> bytecode_array,
- Handle<SharedFunctionInfo> shared_info,
- Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
- JSGraph* jsgraph, CallFrequency const& invocation_frequency,
- SourcePositionTable* source_positions, Handle<Context> native_context,
- int inlining_id, BytecodeGraphBuilderFlags flags)
+ JSHeapBroker* broker, Zone* local_zone, BytecodeArrayRef bytecode_array,
+ SharedFunctionInfoRef shared_info, FeedbackVectorRef feedback_vector,
+ BailoutId osr_offset, JSGraph* jsgraph,
+ CallFrequency const& invocation_frequency,
+ SourcePositionTable* source_positions, NativeContextRef native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
: broker_(broker),
local_zone_(local_zone),
jsgraph_(jsgraph),
@@ -952,22 +950,22 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_array_(bytecode_array),
feedback_vector_(feedback_vector),
type_hint_lowering_(
- jsgraph, feedback_vector,
+ jsgraph, feedback_vector.object(),
(flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
? JSTypeHintLowering::kBailoutOnUninitialized
: JSTypeHintLowering::kNoFlags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
- bytecode_array->parameter_count(), bytecode_array->register_count(),
- shared_info)),
- source_position_iterator_(
- handle(bytecode_array->SourcePositionTableIfCollected(), isolate())),
- bytecode_iterator_(bytecode_array),
- bytecode_analysis_(
- bytecode_array, local_zone,
- flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness),
+ bytecode_array.parameter_count(), bytecode_array.register_count(),
+ shared_info.object())),
+ bytecode_iterator_(
+ base::make_unique<OffHeapBytecodeArray>(bytecode_array)),
+ bytecode_analysis_(broker_->GetBytecodeAnalysis(
+ bytecode_array.object(), osr_offset,
+ flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
+ !FLAG_concurrent_inlining)),
environment_(nullptr),
- osr_offset_(osr_offset),
+ osr_(!osr_offset.IsNone()),
currently_peeled_loop_offset_(-1),
skip_next_stack_check_(flags &
BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
@@ -981,9 +979,23 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
exit_controls_(local_zone),
state_values_cache_(jsgraph),
source_positions_(source_positions),
- start_position_(shared_info->StartPosition(), inlining_id),
+ start_position_(shared_info.StartPosition(), inlining_id),
shared_info_(shared_info),
- native_context_(native_context) {}
+ native_context_(native_context),
+ tick_counter_(tick_counter) {
+ if (FLAG_concurrent_inlining) {
+ // With concurrent inlining on, the source position address doesn't change
+ // because it's been copied from the heap.
+ source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ Vector<const byte>(bytecode_array.source_positions_address(),
+ bytecode_array.source_positions_size()));
+ } else {
+ // Otherwise, we need to access the table through a handle.
+ source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ handle(bytecode_array.object()->SourcePositionTableIfCollected(),
+ isolate()));
+ }
+}
Node* BytecodeGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
@@ -997,33 +1009,30 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* result = NewNode(javascript()->LoadContext(0, index, true));
- NodeProperties::ReplaceContextInput(
- result, jsgraph()->HeapConstant(native_context()));
+ NodeProperties::ReplaceContextInput(result,
+ jsgraph()->Constant(native_context()));
return result;
}
VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
- FeedbackNexus nexus(feedback_vector(), slot);
- return VectorSlotPair(feedback_vector(), slot, nexus.ic_state());
+ FeedbackNexus nexus(feedback_vector().object(), slot);
+ return VectorSlotPair(feedback_vector().object(), slot, nexus.ic_state());
}
void BytecodeGraphBuilder::CreateGraph() {
- BytecodeArrayRef bytecode_array_ref(broker(), bytecode_array());
-
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
// Set up the basic structure of the graph. Outputs for {Start} are the formal
// parameters (including the receiver) plus new target, number of arguments,
// context and closure.
- int actual_parameter_count = bytecode_array_ref.parameter_count() + 4;
+ int actual_parameter_count = bytecode_array().parameter_count() + 4;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
- Environment env(
- this, bytecode_array_ref.register_count(),
- bytecode_array_ref.parameter_count(),
- bytecode_array_ref.incoming_new_target_or_generator_register(),
- graph()->start());
+ Environment env(this, bytecode_array().register_count(),
+ bytecode_array().parameter_count(),
+ bytecode_array().incoming_new_target_or_generator_register(),
+ graph()->start());
set_environment(&env);
VisitBytecodes();
@@ -1112,19 +1121,17 @@ class BytecodeGraphBuilder::OsrIteratorState {
void ProcessOsrPrelude() {
ZoneVector<int> outer_loop_offsets(graph_builder_->local_zone());
- BytecodeAnalysis const& bytecode_analysis =
- graph_builder_->bytecode_analysis();
- int osr_offset = bytecode_analysis.osr_entry_point();
+ int osr_entry = graph_builder_->bytecode_analysis().osr_entry_point();
// We find here the outermost loop which contains the OSR loop.
- int outermost_loop_offset = osr_offset;
- while ((outermost_loop_offset =
- bytecode_analysis.GetLoopInfoFor(outermost_loop_offset)
- .parent_offset()) != -1) {
+ int outermost_loop_offset = osr_entry;
+ while ((outermost_loop_offset = graph_builder_->bytecode_analysis()
+ .GetLoopInfoFor(outermost_loop_offset)
+ .parent_offset()) != -1) {
outer_loop_offsets.push_back(outermost_loop_offset);
}
outermost_loop_offset =
- outer_loop_offsets.empty() ? osr_offset : outer_loop_offsets.back();
+ outer_loop_offsets.empty() ? osr_entry : outer_loop_offsets.back();
graph_builder_->AdvanceIteratorsTo(outermost_loop_offset);
// We save some iterators states at the offsets of the loop headers of the
@@ -1142,14 +1149,16 @@ class BytecodeGraphBuilder::OsrIteratorState {
}
// Finishing by advancing to the OSR entry
- graph_builder_->AdvanceIteratorsTo(osr_offset);
+ graph_builder_->AdvanceIteratorsTo(osr_entry);
// Enters all remaining exception handler which end before the OSR loop
// so that on next call of VisitSingleBytecode they will get popped from
// the exception handlers stack.
- graph_builder_->ExitThenEnterExceptionHandlers(osr_offset);
+ graph_builder_->ExitThenEnterExceptionHandlers(osr_entry);
graph_builder_->set_currently_peeled_loop_offset(
- bytecode_analysis.GetLoopInfoFor(osr_offset).parent_offset());
+ graph_builder_->bytecode_analysis()
+ .GetLoopInfoFor(osr_entry)
+ .parent_offset());
}
void RestoreState(int target_offset, int new_parent_offset) {
@@ -1198,8 +1207,8 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset(
void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
OsrIteratorState iterator_states(this);
iterator_states.ProcessOsrPrelude();
- int osr_offset = bytecode_analysis().osr_entry_point();
- DCHECK_EQ(bytecode_iterator().current_offset(), osr_offset);
+ int osr_entry = bytecode_analysis().osr_entry_point();
+ DCHECK_EQ(bytecode_iterator().current_offset(), osr_entry);
environment()->FillWithOsrValues();
@@ -1217,7 +1226,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
// parent loop entirely, and so on.
int current_parent_offset =
- bytecode_analysis().GetLoopInfoFor(osr_offset).parent_offset();
+ bytecode_analysis().GetLoopInfoFor(osr_entry).parent_offset();
while (current_parent_offset != -1) {
const LoopInfo& current_parent_loop =
bytecode_analysis().GetLoopInfoFor(current_parent_offset);
@@ -1261,6 +1270,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
}
void BytecodeGraphBuilder::VisitSingleBytecode() {
+ tick_counter_->DoTick();
int current_offset = bytecode_iterator().current_offset();
UpdateSourcePosition(current_offset);
ExitThenEnterExceptionHandlers(current_offset);
@@ -1289,14 +1299,12 @@ void BytecodeGraphBuilder::VisitSingleBytecode() {
}
void BytecodeGraphBuilder::VisitBytecodes() {
- RunBytecodeAnalysis();
-
if (!bytecode_analysis().resume_jump_targets().empty()) {
environment()->BindGeneratorState(
jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
}
- if (bytecode_analysis().HasOsrEntryPoint()) {
+ if (osr_) {
// We peel the OSR loop and any outer loop containing it except that we
// leave the nodes corresponding to the whole outermost loop (including
// the last copies of the loops it contains) to be generated by the normal
@@ -1333,7 +1341,7 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
void BytecodeGraphBuilder::VisitLdaConstant() {
Node* node = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
environment()->BindAccumulator(node);
}
@@ -1383,15 +1391,16 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
uint32_t feedback_slot_index,
TypeofMode typeof_mode) {
VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
- DCHECK(IsLoadGlobalICKind(feedback_vector()->GetKind(feedback.slot())));
+ DCHECK(
+ IsLoadGlobalICKind(feedback_vector().object()->GetKind(feedback.slot())));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
return NewNode(op);
}
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
@@ -1400,8 +1409,8 @@ void BytecodeGraphBuilder::VisitLdaGlobal() {
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
@@ -1410,8 +1419,8 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
@@ -1537,7 +1546,7 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
PrepareEagerCheckpoint();
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
@@ -1622,7 +1631,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1657,9 +1666,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1675,7 +1683,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1705,7 +1713,7 @@ void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
LanguageMode language_mode = static_cast<LanguageMode>(
interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -1729,8 +1737,8 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
@@ -1753,8 +1761,8 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->LoadNamed(name, VectorSlotPair());
Node* node = NewNode(op, object);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1788,8 +1796,8 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
@@ -1828,8 +1836,8 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
LanguageMode language_mode =
static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
const Operator* op =
@@ -1902,10 +1910,8 @@ void BytecodeGraphBuilder::VisitPopContext() {
}
void BytecodeGraphBuilder::VisitCreateClosure() {
- Handle<SharedFunctionInfo> shared_info(
- SharedFunctionInfo::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
AllocationType allocation =
interpreter::CreateClosureFlags::PretenuredBit::decode(
bytecode_iterator().GetFlagOperand(2))
@@ -1913,7 +1919,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
: AllocationType::kYoung;
const Operator* op = javascript()->CreateClosure(
shared_info,
- feedback_vector()->GetClosureFeedbackCell(
+ feedback_vector().object()->GetClosureFeedbackCell(
bytecode_iterator().GetIndexOperand(1)),
handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy),
isolate()),
@@ -1923,9 +1929,8 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
}
void BytecodeGraphBuilder::VisitCreateBlockContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op = javascript()->CreateBlockContext(scope_info);
Node* context = NewNode(op);
@@ -1933,9 +1938,8 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op =
javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE);
@@ -1944,9 +1948,8 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
}
void BytecodeGraphBuilder::VisitCreateEvalContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op =
javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE);
@@ -1957,9 +1960,8 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
void BytecodeGraphBuilder::VisitCreateCatchContext() {
interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
Node* exception = environment()->LookupRegister(reg);
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->CreateCatchContext(scope_info);
Node* context = NewNode(op, exception);
@@ -1969,9 +1971,8 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
void BytecodeGraphBuilder::VisitCreateWithContext() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->CreateWithContext(scope_info);
Node* context = NewNode(op, object);
@@ -1997,9 +1998,8 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
}
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
- Handle<String> constant_pattern(
- String::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<String> constant_pattern = Handle<String>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2009,10 +2009,9 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
- Handle<ArrayBoilerplateDescription> array_boilerplate_description(
- ArrayBoilerplateDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description =
+ Handle<ArrayBoilerplateDescription>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2046,10 +2045,9 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() {
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
- Handle<ObjectBoilerplateDescription> constant_properties(
- ObjectBoilerplateDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ObjectBoilerplateDescription> constant_properties =
+ Handle<ObjectBoilerplateDescription>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2082,29 +2080,13 @@ void BytecodeGraphBuilder::VisitCloneObject() {
}
void BytecodeGraphBuilder::VisitGetTemplateObject() {
- Handle<TemplateObjectDescription> description(
- TemplateObjectDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- FeedbackNexus nexus(feedback_vector(), slot);
-
- Handle<JSArray> cached_value;
- if (nexus.GetFeedback() == MaybeObject::FromSmi(Smi::zero())) {
- // It's not observable when the template object is created, so we
- // can just create it eagerly during graph building and bake in
- // the JSArray constant here.
- cached_value = TemplateObjectDescription::GetTemplateObject(
- isolate(), native_context(), description, shared_info(), slot.ToInt());
- nexus.vector().Set(slot, *cached_value);
- } else {
- cached_value =
- handle(JSArray::cast(nexus.GetFeedback()->GetHeapObjectAssumeStrong()),
- isolate());
- }
-
- Node* template_object = jsgraph()->HeapConstant(cached_value);
- environment()->BindAccumulator(template_object);
+ ObjectRef description(
+ broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ JSArrayRef template_object =
+ shared_info().GetTemplateObject(description, feedback_vector(), slot);
+ environment()->BindAccumulator(jsgraph()->Constant(template_object));
}
Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
@@ -2587,7 +2569,7 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
jsgraph()->TheHoleConstant());
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
BuildHoleCheckAndThrow(check_for_hole,
Runtime::kThrowAccessedUninitializedVariable, name);
}
@@ -2658,7 +2640,7 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
int operand_index) {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
return nexus.GetBinaryOperationFeedback();
}
@@ -2666,14 +2648,14 @@ BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
// feedback.
CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
return nexus.GetCompareOperationFeedback();
}
// Helper function to create for-in mode from the recorded type feedback.
ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
switch (nexus.GetForInFeedback()) {
case ForInHint::kNone:
case ForInHint::kEnumCacheKeysAndIndices:
@@ -2688,7 +2670,8 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
if (invocation_frequency_.IsUnknown()) return CallFrequency();
- FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector().object(),
+ FeedbackVector::ToSlot(slot_id));
float feedback_frequency = nexus.ComputeCallFrequency();
if (feedback_frequency == 0.0f) {
// This is to prevent multiplying zero and infinity.
@@ -2699,7 +2682,8 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
}
SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
- FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector().object(),
+ FeedbackVector::ToSlot(slot_id));
return nexus.GetSpeculationMode();
}
@@ -3301,8 +3285,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
CHECK_EQ(0, first_reg.index());
int register_count =
static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
- int parameter_count_without_receiver =
- bytecode_array()->parameter_count() - 1;
+ int parameter_count_without_receiver = bytecode_array().parameter_count() - 1;
Node* suspend_id = jsgraph()->SmiConstant(
bytecode_iterator().GetUnsignedImmediateOperand(3));
@@ -3442,8 +3425,7 @@ void BytecodeGraphBuilder::VisitResumeGenerator() {
const BytecodeLivenessState* liveness = bytecode_analysis().GetOutLivenessFor(
bytecode_iterator().current_offset());
- int parameter_count_without_receiver =
- bytecode_array()->parameter_count() - 1;
+ int parameter_count_without_receiver = bytecode_array().parameter_count() - 1;
// Mapping between registers and array indices must match that used in
// InterpreterAssembler::ExportParametersAndRegisterFile.
@@ -3836,7 +3818,10 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
}
void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) {
- HandlerTable table(*bytecode_array());
+ DisallowHeapAllocation no_allocation;
+ HandlerTable table(bytecode_array().handler_table_address(),
+ bytecode_array().handler_table_size(),
+ HandlerTable::kRangeBasedEncoding);
// Potentially exit exception handlers.
while (!exception_handlers_.empty()) {
@@ -3890,7 +3875,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_context) {
*current_input++ = OperatorProperties::NeedsExactContext(op)
? environment()->Context()
- : jsgraph()->HeapConstant(native_context());
+ : jsgraph()->Constant(native_context());
}
if (has_frame_state) {
// The frame state will be inserted later. Here we misuse the {Dead} node
@@ -4037,12 +4022,19 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags) {
- BytecodeGraphBuilder builder(broker, local_zone, bytecode_array, shared,
- feedback_vector, osr_offset, jsgraph,
- invocation_frequency, source_positions,
- native_context, inlining_id, flags);
+ Handle<NativeContext> native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter) {
+ BytecodeArrayRef bytecode_array_ref(broker, bytecode_array);
+ DCHECK(bytecode_array_ref.IsSerializedForCompilation());
+ FeedbackVectorRef feedback_vector_ref(broker, feedback_vector);
+ SharedFunctionInfoRef shared_ref(broker, shared);
+ DCHECK(shared_ref.IsSerializedForCompilation(feedback_vector_ref));
+ NativeContextRef native_context_ref(broker, native_context);
+ BytecodeGraphBuilder builder(
+ broker, local_zone, bytecode_array_ref, shared_ref, feedback_vector_ref,
+ osr_offset, jsgraph, invocation_frequency, source_positions,
+ native_context_ref, inlining_id, flags, tick_counter);
builder.CreateGraph();
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index b9504a6086..682569778f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -11,6 +11,9 @@
#include "src/handles/handles.h"
namespace v8 {
+
+class TickCounter;
+
namespace internal {
class BytecodeArray;
@@ -25,6 +28,9 @@ class SourcePositionTable;
enum class BytecodeGraphBuilderFlag : uint8_t {
kSkipFirstStackCheck = 1 << 0,
+ // TODO(neis): Remove liveness flag here when concurrent inlining is always
+ // on, because then the serializer will be the only place where we perform
+ // bytecode analysis.
kAnalyzeEnvironmentLiveness = 1 << 1,
kBailoutOnUninitialized = 1 << 2,
};
@@ -39,8 +45,9 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags);
+ Handle<NativeContext> native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index d8a01d6308..af0ba98ffd 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -226,8 +226,12 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node,
IntPtrConstant(kHeapObjectTagMask)),
IntPtrConstant(kWeakHeapObjectTag)),
&ok);
- Node* message_node = StringConstant(location);
- DebugAbort(message_node);
+ EmbeddedVector<char, 1024> message;
+ SNPrintF(message, "no Object: %s", location);
+ Node* message_node = StringConstant(message.begin());
+ // This somewhat misuses the AbortCSAAssert runtime function. This will print
+ // "abort: CSA_ASSERT failed: <message>", which is good enough.
+ AbortCSAAssert(message_node);
Unreachable();
Bind(&ok);
}
@@ -409,8 +413,8 @@ void CodeAssembler::ReturnRaw(Node* value) {
return raw_assembler()->Return(value);
}
-void CodeAssembler::DebugAbort(Node* message) {
- raw_assembler()->DebugAbort(message);
+void CodeAssembler::AbortCSAAssert(Node* message) {
+ raw_assembler()->AbortCSAAssert(message);
}
void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
@@ -441,16 +445,16 @@ void CodeAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
}
#endif // DEBUG
-Node* CodeAssembler::LoadFramePointer() {
- return raw_assembler()->LoadFramePointer();
+TNode<RawPtrT> CodeAssembler::LoadFramePointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadFramePointer());
}
-Node* CodeAssembler::LoadParentFramePointer() {
- return raw_assembler()->LoadParentFramePointer();
+TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
-Node* CodeAssembler::LoadStackPointer() {
- return raw_assembler()->LoadStackPointer();
+TNode<RawPtrT> CodeAssembler::LoadStackPointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadStackPointer());
}
TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(
@@ -1140,14 +1144,6 @@ Node* CodeAssembler::Retain(Node* value) {
return raw_assembler()->Retain(value);
}
-Node* CodeAssembler::ChangeTaggedToCompressed(Node* tagged) {
- return raw_assembler()->ChangeTaggedToCompressed(tagged);
-}
-
-Node* CodeAssembler::ChangeCompressedToTagged(Node* compressed) {
- return raw_assembler()->ChangeCompressedToTagged(compressed);
-}
-
Node* CodeAssembler::Projection(int index, Node* value) {
DCHECK_LT(index, value->op()->ValueOutputCount());
return raw_assembler()->Projection(index, value);
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 0f7ae64082..cc432214aa 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -73,6 +73,9 @@ class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
class WasmDebugInfo;
class Zone;
+#define MAKE_FORWARD_DECLARATION(V, NAME, Name, name) class Name;
+TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED)
+#undef MAKE_FORWARD_DECLARATION
template <typename T>
class Signature;
@@ -107,13 +110,13 @@ struct Uint32T : Word32T {
struct Int16T : Int32T {
static constexpr MachineType kMachineType = MachineType::Int16();
};
-struct Uint16T : Uint32T {
+struct Uint16T : Uint32T, Int32T {
static constexpr MachineType kMachineType = MachineType::Uint16();
};
struct Int8T : Int16T {
static constexpr MachineType kMachineType = MachineType::Int8();
};
-struct Uint8T : Uint16T {
+struct Uint8T : Uint16T, Int16T {
static constexpr MachineType kMachineType = MachineType::Uint8();
};
@@ -147,6 +150,12 @@ struct Float64T : UntaggedT {
static constexpr MachineType kMachineType = MachineType::Float64();
};
+#ifdef V8_COMPRESS_POINTERS
+using TaggedT = Int32T;
+#else
+using TaggedT = IntPtrT;
+#endif
+
// Result of a comparison operation.
struct BoolT : Word32T {};
@@ -329,6 +338,7 @@ class WasmExceptionObject;
class WasmExceptionTag;
class WasmExportedFunctionData;
class WasmGlobalObject;
+class WasmIndirectFunctionTable;
class WasmJSFunctionData;
class WasmMemoryObject;
class WasmModuleObject;
@@ -413,6 +423,10 @@ struct types_have_common_values {
static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value;
};
template <class U>
+struct types_have_common_values<BoolT, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
struct types_have_common_values<Uint32T, U> {
static const bool value = types_have_common_values<Word32T, U>::value;
};
@@ -611,14 +625,15 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64Sqrt, Float64T, Float64T) \
V(Float64Tan, Float64T, Float64T) \
V(Float64Tanh, Float64T, Float64T) \
- V(Float64ExtractLowWord32, Word32T, Float64T) \
- V(Float64ExtractHighWord32, Word32T, Float64T) \
+ V(Float64ExtractLowWord32, Uint32T, Float64T) \
+ V(Float64ExtractHighWord32, Uint32T, Float64T) \
V(BitcastTaggedToWord, IntPtrT, Object) \
+ V(BitcastTaggedSignedToWord, IntPtrT, Smi) \
V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \
V(BitcastWordToTagged, Object, WordT) \
V(BitcastWordToTaggedSigned, Smi, WordT) \
V(TruncateFloat64ToFloat32, Float32T, Float64T) \
- V(TruncateFloat64ToWord32, Word32T, Float64T) \
+ V(TruncateFloat64ToWord32, Uint32T, Float64T) \
V(TruncateInt64ToInt32, Int32T, Int64T) \
V(ChangeFloat32ToFloat64, Float64T, Float32T) \
V(ChangeFloat64ToUint32, Uint32T, Float64T) \
@@ -628,7 +643,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(ChangeUint32ToFloat64, Float64T, Word32T) \
V(ChangeUint32ToUint64, Uint64T, Word32T) \
V(BitcastInt32ToFloat32, Float32T, Word32T) \
- V(BitcastFloat32ToInt32, Word32T, Float32T) \
+ V(BitcastFloat32ToInt32, Uint32T, Float32T) \
V(RoundFloat64ToInt32, Int32T, Float64T) \
V(RoundInt32ToFloat32, Int32T, Float32T) \
V(Float64SilenceNaN, Float64T, Float64T) \
@@ -840,10 +855,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// TODO(jkummerow): The style guide wants pointers for output parameters.
// https://google.github.io/styleguide/cppguide.html#Output_Parameters
- bool ToInt32Constant(Node* node, int32_t& out_value);
- bool ToInt64Constant(Node* node, int64_t& out_value);
+ bool ToInt32Constant(Node* node,
+ int32_t& out_value); // NOLINT(runtime/references)
+ bool ToInt64Constant(Node* node,
+ int64_t& out_value); // NOLINT(runtime/references)
bool ToSmiConstant(Node* node, Smi* out_value);
- bool ToIntPtrConstant(Node* node, intptr_t& out_value);
+ bool ToIntPtrConstant(Node* node,
+ intptr_t& out_value); // NOLINT(runtime/references)
bool IsUndefinedConstant(TNode<Object> node);
bool IsNullConstant(TNode<Object> node);
@@ -872,7 +890,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void ReturnRaw(Node* value);
- void DebugAbort(Node* message);
+ void AbortCSAAssert(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const char* msg) {
@@ -938,11 +956,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Label** case_labels, size_t case_count);
// Access to the frame pointer
- Node* LoadFramePointer();
- Node* LoadParentFramePointer();
+ TNode<RawPtrT> LoadFramePointer();
+ TNode<RawPtrT> LoadParentFramePointer();
// Access to the stack pointer
- Node* LoadStackPointer();
+ TNode<RawPtrT> LoadStackPointer();
// Poison |value| on speculative paths.
TNode<Object> TaggedPoisonOnSpeculation(SloppyTNode<Object> value);
@@ -1047,20 +1065,60 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
- TNode<IntPtrT> WordShr(TNode<IntPtrT> left, TNode<IntegralT> right) {
- return UncheckedCast<IntPtrT>(
+ TNode<UintPtrT> WordShr(TNode<UintPtrT> left, TNode<IntegralT> right) {
+ return Unsigned(
WordShr(static_cast<Node*>(left), static_cast<Node*>(right)));
}
TNode<IntPtrT> WordSar(TNode<IntPtrT> left, TNode<IntegralT> right) {
- return UncheckedCast<IntPtrT>(
- WordSar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(WordSar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<IntPtrT> WordShl(TNode<IntPtrT> left, TNode<IntegralT> right) {
+ return Signed(WordShl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<UintPtrT> WordShl(TNode<UintPtrT> left, TNode<IntegralT> right) {
+ return Unsigned(
+ WordShl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Word32Shl(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32Shl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Shl(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Shl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Shr(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Shr(static_cast<Node*>(left), static_cast<Node*>(right)));
}
TNode<IntPtrT> WordAnd(TNode<IntPtrT> left, TNode<IntPtrT> right) {
- return UncheckedCast<IntPtrT>(
+ return Signed(WordAnd(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<UintPtrT> WordAnd(TNode<UintPtrT> left, TNode<UintPtrT> right) {
+ return Unsigned(
WordAnd(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<Int32T> Word32And(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32And(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Word32Or(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Or(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
template <class Left, class Right,
class = typename std::enable_if<
std::is_base_of<Object, Left>::value &&
@@ -1106,6 +1164,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<BoolT> Word64NotEqual(SloppyTNode<Word64T> left,
SloppyTNode<Word64T> right);
+ TNode<BoolT> Word32Or(TNode<BoolT> left, TNode<BoolT> right) {
+ return UncheckedCast<BoolT>(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<BoolT> Word32And(TNode<BoolT> left, TNode<BoolT> right) {
+ return UncheckedCast<BoolT>(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
TNode<Int32T> Int32Add(TNode<Int32T> left, TNode<Int32T> right) {
return Signed(
Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
@@ -1116,6 +1183,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<Int32T> Int32Sub(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Int32Sub(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Int32Mul(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Int32Mul(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
TNode<WordT> IntPtrAdd(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<IntPtrT> IntPtrDiv(TNode<IntPtrT> left, TNode<IntPtrT> right);
TNode<WordT> IntPtrSub(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
@@ -1195,6 +1272,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
+ template <class Dummy = void>
+ TNode<IntPtrT> BitcastTaggedToWord(TNode<Smi> node) {
+ static_assert(sizeof(Dummy) < 0,
+ "Should use BitcastTaggedSignedToWord instead.");
+ }
+
// Changes a double to an inptr_t for pointer arithmetic outside of Smi range.
// Assumes that the double can be exactly represented as an int.
TNode<UintPtrT> ChangeFloat64ToUintPtr(SloppyTNode<Float64T> value);
@@ -1217,10 +1300,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Projections
Node* Projection(int index, Node* value);
- // Pointer compression and decompression.
- Node* ChangeTaggedToCompressed(Node* tagged);
- Node* ChangeCompressedToTagged(Node* compressed);
-
template <int index, class T1, class T2>
TNode<typename std::tuple_element<index, std::tuple<T1, T2>>::type>
Projection(TNode<PairT<T1, T2>> value) {
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index fa727748f6..5dd765527f 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -337,9 +337,9 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
// End
// Now the effect input to the {Return} node can be either an {EffectPhi}
- // hanging off the same {Merge}, or the {Merge} node is only connected to
- // the {Return} and the {Phi}, in which case we know that the effect input
- // must somehow dominate all merged branches.
+ // hanging off the same {Merge}, or the effect chain doesn't depend on the
+ // {Phi} or the {Merge}, in which case we know that the effect input must
+ // somehow dominate all merged branches.
Node::Inputs control_inputs = control->inputs();
Node::Inputs value_inputs = value->inputs();
@@ -347,7 +347,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
DCHECK_NE(0, graph()->end()->InputCount());
- if (control->OwnedBy(node, value)) {
+ if (control->OwnedBy(node, value) && value->OwnedBy(node)) {
for (int i = 0; i < control_inputs.count(); ++i) {
// Create a new {Return} and connect it to {end}. We don't need to mark
// {end} as revisit, because we mark {node} as {Dead} below, which was
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 45e558f609..0ef6402264 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -1216,8 +1216,18 @@ const Operator* CommonOperatorBuilder::HeapConstant(
value); // parameter
}
+const Operator* CommonOperatorBuilder::CompressedHeapConstant(
+ const Handle<HeapObject>& value) {
+ return new (zone()) Operator1<Handle<HeapObject>>( // --
+ IrOpcode::kCompressedHeapConstant, Operator::kPure, // opcode
+ "CompressedHeapConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
+}
+
Handle<HeapObject> HeapConstantOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
+ DCHECK(IrOpcode::kHeapConstant == op->opcode() ||
+ IrOpcode::kCompressedHeapConstant == op->opcode());
return OpParameter<Handle<HeapObject>>(op);
}
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 43a689b5c2..9f634e72ec 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -499,6 +499,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* NumberConstant(volatile double);
const Operator* PointerConstant(intptr_t);
const Operator* HeapConstant(const Handle<HeapObject>&);
+ const Operator* CompressedHeapConstant(const Handle<HeapObject>&);
const Operator* ObjectId(uint32_t);
const Operator* RelocatableInt32Constant(int32_t value,
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index f0bb797b68..673f4a341b 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -4,6 +4,7 @@
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/compilation-dependency.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/objects-inl.h"
@@ -17,18 +18,7 @@ CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
Zone* zone)
: zone_(zone), broker_(broker), dependencies_(zone) {}
-class CompilationDependencies::Dependency : public ZoneObject {
- public:
- virtual bool IsValid() const = 0;
- virtual void PrepareInstall() const {}
- virtual void Install(const MaybeObjectHandle& code) const = 0;
-
-#ifdef DEBUG
- virtual bool IsPretenureModeDependency() const { return false; }
-#endif
-};
-
-class InitialMapDependency final : public CompilationDependencies::Dependency {
+class InitialMapDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the initial map.
@@ -56,8 +46,7 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
MapRef initial_map_;
};
-class PrototypePropertyDependency final
- : public CompilationDependencies::Dependency {
+class PrototypePropertyDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the prototype.
@@ -96,7 +85,7 @@ class PrototypePropertyDependency final
ObjectRef prototype_;
};
-class StableMapDependency final : public CompilationDependencies::Dependency {
+class StableMapDependency final : public CompilationDependency {
public:
explicit StableMapDependency(const MapRef& map) : map_(map) {
DCHECK(map_.is_stable());
@@ -114,7 +103,7 @@ class StableMapDependency final : public CompilationDependencies::Dependency {
MapRef map_;
};
-class TransitionDependency final : public CompilationDependencies::Dependency {
+class TransitionDependency final : public CompilationDependency {
public:
explicit TransitionDependency(const MapRef& map) : map_(map) {
DCHECK(!map_.is_deprecated());
@@ -132,8 +121,7 @@ class TransitionDependency final : public CompilationDependencies::Dependency {
MapRef map_;
};
-class PretenureModeDependency final
- : public CompilationDependencies::Dependency {
+class PretenureModeDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the mode.
@@ -163,8 +151,7 @@ class PretenureModeDependency final
AllocationType allocation_;
};
-class FieldRepresentationDependency final
- : public CompilationDependencies::Dependency {
+class FieldRepresentationDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the representation.
@@ -197,7 +184,7 @@ class FieldRepresentationDependency final
Representation representation_;
};
-class FieldTypeDependency final : public CompilationDependencies::Dependency {
+class FieldTypeDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type.
@@ -227,8 +214,7 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
ObjectRef type_;
};
-class FieldConstnessDependency final
- : public CompilationDependencies::Dependency {
+class FieldConstnessDependency final : public CompilationDependency {
public:
FieldConstnessDependency(const MapRef& owner, int descriptor)
: owner_(owner), descriptor_(descriptor) {
@@ -255,8 +241,7 @@ class FieldConstnessDependency final
int descriptor_;
};
-class GlobalPropertyDependency final
- : public CompilationDependencies::Dependency {
+class GlobalPropertyDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type and the read_only flag.
@@ -294,7 +279,7 @@ class GlobalPropertyDependency final
bool read_only_;
};
-class ProtectorDependency final : public CompilationDependencies::Dependency {
+class ProtectorDependency final : public CompilationDependency {
public:
explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid);
@@ -315,8 +300,7 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
PropertyCellRef cell_;
};
-class ElementsKindDependency final
- : public CompilationDependencies::Dependency {
+class ElementsKindDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the elements kind.
@@ -349,7 +333,7 @@ class ElementsKindDependency final
};
class InitialMapInstanceSizePredictionDependency final
- : public CompilationDependencies::Dependency {
+ : public CompilationDependency {
public:
InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
int instance_size)
@@ -380,7 +364,8 @@ class InitialMapInstanceSizePredictionDependency final
int instance_size_;
};
-void CompilationDependencies::RecordDependency(Dependency const* dependency) {
+void CompilationDependencies::RecordDependency(
+ CompilationDependency const* dependency) {
if (dependency != nullptr) dependencies_.push_front(dependency);
}
@@ -565,6 +550,11 @@ namespace {
// This function expects to never see a JSProxy.
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
+ // TODO(neis): Remove heap access (SerializePrototype call).
+ AllowCodeDependencyChange dependency_change_;
+ AllowHandleAllocation handle_allocation_;
+ AllowHandleDereference handle_dereference_;
+ AllowHeapAllocation heap_allocation_;
while (true) {
map.SerializePrototype();
HeapObjectRef proto = map.prototype();
@@ -635,7 +625,7 @@ CompilationDependencies::DependOnInitialMapInstanceSizePrediction(
return SlackTrackingPrediction(initial_map, instance_size);
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::TransitionDependencyOffTheRecord(
const MapRef& target_map) const {
if (target_map.CanBeDeprecated()) {
@@ -646,7 +636,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord(
}
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
const MapRef& map, int descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
@@ -657,7 +647,7 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
details.representation());
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map,
int descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 37a2bc3a28..cb6cea0685 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -25,6 +25,8 @@ class SlackTrackingPrediction {
int inobject_property_count_;
};
+class CompilationDependency;
+
// Collects and installs dependencies of the code that is being generated.
class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
public:
@@ -113,14 +115,13 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// DependOnTransition(map);
// is equivalent to:
// RecordDependency(TransitionDependencyOffTheRecord(map));
- class Dependency;
- void RecordDependency(Dependency const* dependency);
- Dependency const* TransitionDependencyOffTheRecord(
+ void RecordDependency(CompilationDependency const* dependency);
+ CompilationDependency const* TransitionDependencyOffTheRecord(
const MapRef& target_map) const;
- Dependency const* FieldRepresentationDependencyOffTheRecord(
+ CompilationDependency const* FieldRepresentationDependencyOffTheRecord(
+ const MapRef& map, int descriptor) const;
+ CompilationDependency const* FieldTypeDependencyOffTheRecord(
const MapRef& map, int descriptor) const;
- Dependency const* FieldTypeDependencyOffTheRecord(const MapRef& map,
- int descriptor) const;
// Exposed only for testing purposes.
bool AreValid() const;
@@ -128,7 +129,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
private:
Zone* const zone_;
JSHeapBroker* const broker_;
- ZoneForwardList<Dependency const*> dependencies_;
+ ZoneForwardList<CompilationDependency const*> dependencies_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h
new file mode 100644
index 0000000000..e5726a0ddb
--- /dev/null
+++ b/deps/v8/src/compiler/compilation-dependency.h
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_
+#define V8_COMPILER_COMPILATION_DEPENDENCY_H_
+
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class MaybeObjectHandle;
+
+namespace compiler {
+
+class CompilationDependency : public ZoneObject {
+ public:
+ virtual bool IsValid() const = 0;
+ virtual void PrepareInstall() const {}
+ virtual void Install(const MaybeObjectHandle& code) const = 0;
+
+#ifdef DEBUG
+ virtual bool IsPretenureModeDependency() const { return false; }
+#endif
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 7177a6069d..600db1d160 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/control-flow-optimizer.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-matchers.h"
@@ -16,18 +17,20 @@ namespace compiler {
ControlFlowOptimizer::ControlFlowOptimizer(Graph* graph,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
+ TickCounter* tick_counter,
Zone* zone)
: graph_(graph),
common_(common),
machine_(machine),
queue_(zone),
queued_(graph, 2),
- zone_(zone) {}
-
+ zone_(zone),
+ tick_counter_(tick_counter) {}
void ControlFlowOptimizer::Optimize() {
Enqueue(graph()->start());
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
if (node->IsDead()) continue;
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index 0a688a7c39..07fc9e6fc2 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -11,6 +11,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -22,7 +25,8 @@ class Node;
class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
public:
ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine, Zone* zone);
+ MachineOperatorBuilder* machine,
+ TickCounter* tick_counter, Zone* zone);
void Optimize();
@@ -45,6 +49,7 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
ZoneQueue<Node*> queue_;
NodeMarker<bool> queued_;
Zone* const zone_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer);
};
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
new file mode 100644
index 0000000000..620d98019f
--- /dev/null
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -0,0 +1,336 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/csa-load-elimination.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction CsaLoadElimination::Reduce(Node* node) {
+ if (FLAG_trace_turbo_load_elimination) {
+ if (node->op()->EffectInputCount() > 0) {
+ PrintF(" visit #%d:%s", node->id(), node->op()->mnemonic());
+ if (node->op()->ValueInputCount() > 0) {
+ PrintF("(");
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ if (i > 0) PrintF(", ");
+ Node* const value = NodeProperties::GetValueInput(node, i);
+ PrintF("#%d:%s", value->id(), value->op()->mnemonic());
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (AbstractState const* const state = node_states_.Get(effect)) {
+ PrintF(" state[%i]: #%d:%s\n", i, effect->id(),
+ effect->op()->mnemonic());
+ state->Print();
+ } else {
+ PrintF(" no state[%i]: #%d:%s\n", i, effect->id(),
+ effect->op()->mnemonic());
+ }
+ }
+ }
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kLoadFromObject:
+ return ReduceLoadFromObject(node, ObjectAccessOf(node->op()));
+ case IrOpcode::kStoreToObject:
+ return ReduceStoreToObject(node, ObjectAccessOf(node->op()));
+ case IrOpcode::kDebugBreak:
+ case IrOpcode::kAbortCSAAssert:
+ // Avoid changing optimizations in the presence of debug instructions.
+ return PropagateInputState(node);
+ case IrOpcode::kCall:
+ return ReduceCall(node);
+ case IrOpcode::kEffectPhi:
+ return ReduceEffectPhi(node);
+ case IrOpcode::kDead:
+ break;
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ return ReduceOtherNode(node);
+ }
+ return NoChange();
+}
+
+namespace CsaLoadEliminationHelpers {
+
+bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
+ if (r1 == r2) return true;
+ return IsAnyCompressedTagged(r1) && IsAnyCompressedTagged(r2);
+}
+
+bool ObjectMayAlias(Node* a, Node* b) {
+ if (a != b) {
+ if (b->opcode() == IrOpcode::kAllocate) {
+ std::swap(a, b);
+ }
+ if (a->opcode() == IrOpcode::kAllocate) {
+ switch (b->opcode()) {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kParameter:
+ return false;
+ default:
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2,
+ MachineRepresentation repr2) {
+ IntPtrMatcher matcher1(offset1);
+ IntPtrMatcher matcher2(offset2);
+ // If either of the offsets is variable, accesses may alias
+ if (!matcher1.HasValue() || !matcher2.HasValue()) {
+ return true;
+ }
+ // Otherwise, we return whether accesses overlap
+ intptr_t start1 = matcher1.Value();
+ intptr_t end1 = start1 + ElementSizeInBytes(repr1);
+ intptr_t start2 = matcher2.Value();
+ intptr_t end2 = start2 + ElementSizeInBytes(repr2);
+ return !(end1 <= start2 || end2 <= start1);
+}
+
+} // namespace CsaLoadEliminationHelpers
+
+namespace Helpers = CsaLoadEliminationHelpers;
+
+void CsaLoadElimination::AbstractState::Merge(AbstractState const* that,
+ Zone* zone) {
+ FieldInfo empty_info;
+ for (std::pair<Field, FieldInfo> entry : field_infos_) {
+ if (that->field_infos_.Get(entry.first) != entry.second) {
+ field_infos_.Set(entry.first, empty_info);
+ }
+ }
+}
+
+CsaLoadElimination::AbstractState const*
+CsaLoadElimination::AbstractState::KillField(Node* kill_object,
+ Node* kill_offset,
+ MachineRepresentation kill_repr,
+ Zone* zone) const {
+ FieldInfo empty_info;
+ AbstractState* that = new (zone) AbstractState(*this);
+ for (std::pair<Field, FieldInfo> entry : that->field_infos_) {
+ Field field = entry.first;
+ MachineRepresentation field_repr = entry.second.representation;
+ if (Helpers::OffsetMayAlias(kill_offset, kill_repr, field.second,
+ field_repr) &&
+ Helpers::ObjectMayAlias(kill_object, field.first)) {
+ that->field_infos_.Set(field, empty_info);
+ }
+ }
+ return that;
+}
+
+CsaLoadElimination::AbstractState const*
+CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
+ CsaLoadElimination::FieldInfo info,
+ Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ that->field_infos_.Set({object, offset}, info);
+ return that;
+}
+
+CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
+ Node* object, Node* offset) const {
+ if (object->IsDead()) {
+ return {};
+ }
+ return field_infos_.Get({object, offset});
+}
+
+void CsaLoadElimination::AbstractState::Print() const {
+ for (std::pair<Field, FieldInfo> entry : field_infos_) {
+ Field field = entry.first;
+ Node* object = field.first;
+ Node* offset = field.second;
+ FieldInfo info = entry.second;
+ PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(),
+ object->op()->mnemonic(), info.value->id(),
+ info.value->op()->mnemonic(),
+ MachineReprToString(info.representation));
+ }
+}
+
+Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
+ ObjectAccess const& access) {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* offset = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ MachineRepresentation representation = access.machine_type.representation();
+ FieldInfo lookup_result = state->Lookup(object, offset);
+ if (!lookup_result.IsEmpty()) {
+ // Make sure we don't reuse values that were recorded with a different
+ // representation or resurrect dead {replacement} nodes.
+ Node* replacement = lookup_result.value;
+ if (Helpers::IsCompatible(representation, lookup_result.representation) &&
+ !replacement->IsDead()) {
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
+ }
+ }
+ FieldInfo info(node, representation);
+ state = state->AddField(object, offset, info, zone());
+
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceStoreToObject(Node* node,
+ ObjectAccess const& access) {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* offset = NodeProperties::GetValueInput(node, 1);
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ FieldInfo info(value, access.machine_type.representation());
+ state = state->KillField(object, offset, info.representation, zone());
+ state = state->AddField(object, offset, info, zone());
+
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) {
+ Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ AbstractState const* state0 = node_states_.Get(effect0);
+ if (state0 == nullptr) return NoChange();
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just take
+ // the state from the first input, and compute the loop state based on it.
+ AbstractState const* state = ComputeLoopState(node, state0);
+ return UpdateState(node, state);
+ }
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+ // Shortcut for the case when we do not know anything about some input.
+ int const input_count = node->op()->EffectInputCount();
+ for (int i = 1; i < input_count; ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (node_states_.Get(effect) == nullptr) return NoChange();
+ }
+
+ // Make a copy of the first input's state and merge with the state
+ // from other inputs.
+ AbstractState* state = new (zone()) AbstractState(*state0);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = NodeProperties::GetEffectInput(node, i);
+ state->Merge(node_states_.Get(input), zone());
+ }
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceStart(Node* node) {
+ return UpdateState(node, empty_state());
+}
+
+Reduction CsaLoadElimination::ReduceCall(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ ExternalReferenceMatcher m(value);
+ if (m.Is(ExternalReference::check_object_type())) {
+ return PropagateInputState(node);
+ }
+ return ReduceOtherNode(node);
+}
+
+Reduction CsaLoadElimination::ReduceOtherNode(Node* node) {
+ if (node->op()->EffectInputCount() == 1) {
+ if (node->op()->EffectOutputCount() == 1) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate
+ // just yet because we will have to recompute anyway once we compute
+ // the predecessor.
+ if (state == nullptr) return NoChange();
+ // Check if this {node} has some uncontrolled side effects.
+ if (!node->op()->HasProperty(Operator::kNoWrite)) {
+ state = empty_state();
+ }
+ return UpdateState(node, state);
+ } else {
+ return NoChange();
+ }
+ }
+ DCHECK_EQ(0, node->op()->EffectInputCount());
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+ return NoChange();
+}
+
+Reduction CsaLoadElimination::UpdateState(Node* node,
+ AbstractState const* state) {
+ AbstractState const* original = node_states_.Get(node);
+ // Only signal that the {node} has Changed, if the information about {state}
+ // has changed wrt. the {original}.
+ if (state != original) {
+ if (original == nullptr || !state->Equals(original)) {
+ node_states_.Set(node, state);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+Reduction CsaLoadElimination::PropagateInputState(Node* node) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ return UpdateState(node, state);
+}
+
+CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState(
+ Node* node, AbstractState const* state) const {
+ DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
+ Node* const control = NodeProperties::GetControlInput(node);
+ ZoneQueue<Node*> queue(zone());
+ ZoneSet<Node*> visited(zone());
+ visited.insert(node);
+ for (int i = 1; i < control->InputCount(); ++i) {
+ queue.push(node->InputAt(i));
+ }
+ while (!queue.empty()) {
+ Node* const current = queue.front();
+ queue.pop();
+ if (visited.insert(current).second) {
+ if (!current->op()->HasProperty(Operator::kNoWrite)) {
+ return empty_state();
+ }
+ for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
+ queue.push(NodeProperties::GetEffectInput(current, i));
+ }
+ }
+ }
+ return state;
+}
+
+CommonOperatorBuilder* CsaLoadElimination::common() const {
+ return jsgraph()->common();
+}
+
+Graph* CsaLoadElimination::graph() const { return jsgraph()->graph(); }
+
+Isolate* CsaLoadElimination::isolate() const { return jsgraph()->isolate(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h
new file mode 100644
index 0000000000..9460858d04
--- /dev/null
+++ b/deps/v8/src/compiler/csa-load-elimination.h
@@ -0,0 +1,118 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CSA_LOAD_ELIMINATION_H_
+#define V8_COMPILER_CSA_LOAD_ELIMINATION_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/persistent-map.h"
+#include "src/handles/maybe-handles.h"
+#include "src/zone/zone-handle-set.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ObjectAccess;
+class Graph;
+class JSGraph;
+
+class V8_EXPORT_PRIVATE CsaLoadElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ CsaLoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ empty_state_(zone),
+ node_states_(jsgraph->graph()->NodeCount(), zone),
+ jsgraph_(jsgraph),
+ zone_(zone) {}
+ ~CsaLoadElimination() final = default;
+
+ const char* reducer_name() const override { return "CsaLoadElimination"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct FieldInfo {
+ FieldInfo() = default;
+ FieldInfo(Node* value, MachineRepresentation representation)
+ : value(value), representation(representation) {}
+
+ bool operator==(const FieldInfo& other) const {
+ return value == other.value && representation == other.representation;
+ }
+
+ bool operator!=(const FieldInfo& other) const { return !(*this == other); }
+
+ bool IsEmpty() const { return value == nullptr; }
+
+ Node* value = nullptr;
+ MachineRepresentation representation = MachineRepresentation::kNone;
+ };
+
+ class AbstractState final : public ZoneObject {
+ public:
+ explicit AbstractState(Zone* zone) : field_infos_(zone) {}
+
+ bool Equals(AbstractState const* that) const {
+ return field_infos_ == that->field_infos_;
+ }
+ void Merge(AbstractState const* that, Zone* zone);
+
+ AbstractState const* KillField(Node* object, Node* offset,
+ MachineRepresentation repr,
+ Zone* zone) const;
+ AbstractState const* AddField(Node* object, Node* offset, FieldInfo info,
+ Zone* zone) const;
+ FieldInfo Lookup(Node* object, Node* offset) const;
+
+ void Print() const;
+
+ private:
+ using Field = std::pair<Node*, Node*>;
+ using FieldInfos = PersistentMap<Field, FieldInfo>;
+ FieldInfos field_infos_;
+ };
+
+ Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access);
+ Reduction ReduceStoreToObject(Node* node, ObjectAccess const& access);
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceCall(Node* node);
+ Reduction ReduceOtherNode(Node* node);
+
+ Reduction UpdateState(Node* node, AbstractState const* state);
+ Reduction PropagateInputState(Node* node);
+
+ AbstractState const* ComputeLoopState(Node* node,
+ AbstractState const* state) const;
+
+ CommonOperatorBuilder* common() const;
+ Isolate* isolate() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Zone* zone() const { return zone_; }
+ AbstractState const* empty_state() const { return &empty_state_; }
+
+ AbstractState const empty_state_;
+ NodeAuxData<AbstractState const*> node_states_;
+ JSGraph* const jsgraph_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(CsaLoadElimination);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CSA_LOAD_ELIMINATION_H_
diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc
index e69e61fac5..537744652b 100644
--- a/deps/v8/src/compiler/decompression-elimination.cc
+++ b/deps/v8/src/compiler/decompression-elimination.cc
@@ -21,10 +21,8 @@ bool DecompressionElimination::IsReducibleConstantOpcode(
IrOpcode::Value opcode) {
switch (opcode) {
case IrOpcode::kInt64Constant:
- return true;
- // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
- // exists, since it breaks with verify CSA on.
case IrOpcode::kHeapConstant:
+ return true;
default:
return false;
}
@@ -55,13 +53,8 @@ Node* DecompressionElimination::GetCompressedConstant(Node* constant) {
static_cast<int32_t>(OpParameter<int64_t>(constant->op()))));
break;
case IrOpcode::kHeapConstant:
- // TODO(v8:8977): The HeapConstant remains as 64 bits. This does not
- // affect the comparison and it will still work correctly. However, we are
- // introducing a 64 bit value in the stream where a 32 bit one will
- // suffice. Currently there is no "CompressedHeapConstant", and
- // introducing a new opcode and handling it correctly throught the
- // pipeline seems that it will involve quite a bit of work.
- return constant;
+ return graph()->NewNode(
+ common()->CompressedHeapConstant(HeapConstantOf(constant->op())));
default:
UNREACHABLE();
}
@@ -84,6 +77,21 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) {
}
}
+Reduction DecompressionElimination::ReduceDecompress(Node* node) {
+ DCHECK(IrOpcode::IsDecompressOpcode(node->opcode()));
+
+ DCHECK_EQ(node->InputCount(), 1);
+ Node* input_node = node->InputAt(0);
+ IrOpcode::Value input_opcode = input_node->opcode();
+ if (IrOpcode::IsCompressOpcode(input_opcode)) {
+ DCHECK(IsValidDecompress(input_opcode, node->opcode()));
+ DCHECK_EQ(input_node->InputCount(), 1);
+ return Replace(input_node->InputAt(0));
+ } else {
+ return NoChange();
+ }
+}
+
Reduction DecompressionElimination::ReducePhi(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kPhi);
@@ -138,7 +146,10 @@ Reduction DecompressionElimination::ReducePhi(Node* node) {
// Add a decompress after the Phi. To do this, we need to replace the Phi with
// "Phi <- Decompress".
- return Replace(graph()->NewNode(op, node));
+ Node* decompress = graph()->NewNode(op, node);
+ ReplaceWithValue(node, decompress);
+ decompress->ReplaceInput(0, node);
+ return Changed(node);
}
Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) {
@@ -201,6 +212,10 @@ Reduction DecompressionElimination::Reduce(Node* node) {
case IrOpcode::kChangeTaggedSignedToCompressedSigned:
case IrOpcode::kChangeTaggedPointerToCompressedPointer:
return ReduceCompress(node);
+ case IrOpcode::kChangeCompressedToTagged:
+ case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+ case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+ return ReduceDecompress(node);
case IrOpcode::kPhi:
return ReducePhi(node);
case IrOpcode::kTypedStateValues:
diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h
index c850f064a9..85a6c98aa0 100644
--- a/deps/v8/src/compiler/decompression-elimination.h
+++ b/deps/v8/src/compiler/decompression-elimination.h
@@ -38,7 +38,7 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
// elimination.
bool IsReducibleConstantOpcode(IrOpcode::Value opcode);
- // Get the new 32 bit node constant given the 64 bit one
+ // Get the new 32 bit node constant given the 64 bit one.
Node* GetCompressedConstant(Node* constant);
// Removes direct Decompressions & Compressions, going from
@@ -48,6 +48,9 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
// Can be used for Any, Signed, and Pointer compressions.
Reduction ReduceCompress(Node* node);
+ // Removes direct Compressions & Decompressions, analogously to ReduceCompress.
+ Reduction ReduceDecompress(Node* node);
+
// Replaces Phi's input decompressions with their input node, if and only if
// all of the Phi's inputs are Decompress nodes.
Reduction ReducePhi(Node* node);
diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h
index cc6ca954f3..cac1b1726b 100644
--- a/deps/v8/src/compiler/diamond.h
+++ b/deps/v8/src/compiler/diamond.h
@@ -33,13 +33,13 @@ struct Diamond {
}
// Place {this} after {that} in control flow order.
- void Chain(Diamond& that) { branch->ReplaceInput(1, that.merge); }
+ void Chain(Diamond const& that) { branch->ReplaceInput(1, that.merge); }
// Place {this} after {that} in control flow order.
void Chain(Node* that) { branch->ReplaceInput(1, that); }
// Nest {this} into either the if_true or if_false branch of {that}.
- void Nest(Diamond& that, bool if_true) {
+ void Nest(Diamond const& that, bool if_true) {
if (if_true) {
branch->ReplaceInput(1, that.if_true);
that.merge->ReplaceInput(0, merge);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index ced078a178..788638fe68 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -17,6 +17,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
+#include "src/execution/frames.h"
#include "src/heap/factory-inl.h"
#include "src/objects/heap-number.h"
#include "src/objects/oddball.h"
@@ -51,6 +52,7 @@ class EffectControlLinearizer {
bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
Node** control);
Node* LowerChangeBitToTagged(Node* node);
+ Node* LowerChangeInt31ToCompressedSigned(Node* node);
Node* LowerChangeInt31ToTaggedSigned(Node* node);
Node* LowerChangeInt32ToTagged(Node* node);
Node* LowerChangeInt64ToTagged(Node* node);
@@ -58,6 +60,7 @@ class EffectControlLinearizer {
Node* LowerChangeUint64ToTagged(Node* node);
Node* LowerChangeFloat64ToTagged(Node* node);
Node* LowerChangeFloat64ToTaggedPointer(Node* node);
+ Node* LowerChangeCompressedSignedToInt32(Node* node);
Node* LowerChangeTaggedSignedToInt32(Node* node);
Node* LowerChangeTaggedSignedToInt64(Node* node);
Node* LowerChangeTaggedToBit(Node* node);
@@ -75,6 +78,7 @@ class EffectControlLinearizer {
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
+ Node* LowerCheckBigInt(Node* node, Node* frame_state);
Node* LowerCheckSymbol(Node* node, Node* frame_state);
void LowerCheckIf(Node* node, Node* frame_state);
Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
@@ -84,6 +88,7 @@ class EffectControlLinearizer {
Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32ToCompressedSigned(Node* node, Node* frame_state);
Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
@@ -101,6 +106,9 @@ class EffectControlLinearizer {
Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+ Node* LowerBigIntAsUintN(Node* node, Node* frame_state);
+ Node* LowerChangeUint64ToBigInt(Node* node);
+ Node* LowerTruncateBigIntToUint64(Node* node);
Node* LowerCheckedCompressedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedCompressedToTaggedPointer(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToCompressedSigned(Node* node, Node* frame_state);
@@ -150,17 +158,20 @@ class EffectControlLinearizer {
Node* LowerStringConcat(Node* node);
Node* LowerStringToNumber(Node* node);
Node* LowerStringCharCodeAt(Node* node);
- Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
+ Node* LowerStringCodePointAt(Node* node);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromSingleCharCode(Node* node);
Node* LowerStringFromSingleCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
Node* LowerStringSubstring(Node* node);
+ Node* LowerStringFromCodePointAt(Node* node);
Node* LowerStringLength(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
+ Node* LowerBigIntAdd(Node* node, Node* frame_state);
+ Node* LowerBigIntNegate(Node* node);
Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
Node* LowerConvertTaggedHoleToUndefined(Node* node);
@@ -186,6 +197,7 @@ class EffectControlLinearizer {
void LowerTransitionAndStoreNumberElement(Node* node);
void LowerTransitionAndStoreNonNumberElement(Node* node);
void LowerRuntimeAbort(Node* node);
+ Node* LowerAssertType(Node* node);
Node* LowerConvertReceiver(Node* node);
Node* LowerDateNow(Node* node);
@@ -214,6 +226,7 @@ class EffectControlLinearizer {
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
+ Node* ChangeInt32ToCompressedSmi(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
Node* ChangeInt64ToSmi(Node* value);
@@ -222,6 +235,7 @@ class EffectControlLinearizer {
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
+ Node* ChangeCompressedSmiToInt32(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ChangeSmiToInt64(Node* value);
Node* ObjectIsSmi(Node* value);
@@ -827,6 +841,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeBitToTagged:
result = LowerChangeBitToTagged(node);
break;
+ case IrOpcode::kChangeInt31ToCompressedSigned:
+ result = LowerChangeInt31ToCompressedSigned(node);
+ break;
case IrOpcode::kChangeInt31ToTaggedSigned:
result = LowerChangeInt31ToTaggedSigned(node);
break;
@@ -848,6 +865,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeFloat64ToTaggedPointer:
result = LowerChangeFloat64ToTaggedPointer(node);
break;
+ case IrOpcode::kChangeCompressedSignedToInt32:
+ result = LowerChangeCompressedSignedToInt32(node);
+ break;
case IrOpcode::kChangeTaggedSignedToInt32:
result = LowerChangeTaggedSignedToInt32(node);
break;
@@ -911,6 +931,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckString:
result = LowerCheckString(node, frame_state);
break;
+ case IrOpcode::kCheckBigInt:
+ result = LowerCheckBigInt(node, frame_state);
+ break;
case IrOpcode::kCheckInternalizedString:
result = LowerCheckInternalizedString(node, frame_state);
break;
@@ -938,6 +961,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedInt32Mul:
result = LowerCheckedInt32Mul(node, frame_state);
break;
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
+ result = LowerCheckedInt32ToCompressedSigned(node, frame_state);
+ break;
case IrOpcode::kCheckedInt32ToTaggedSigned:
result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
break;
@@ -993,6 +1019,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTaggedToTaggedPointer:
result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
break;
+ case IrOpcode::kBigIntAsUintN:
+ result = LowerBigIntAsUintN(node, frame_state);
+ break;
+ case IrOpcode::kChangeUint64ToBigInt:
+ result = LowerChangeUint64ToBigInt(node);
+ break;
+ case IrOpcode::kTruncateBigIntToUint64:
+ result = LowerTruncateBigIntToUint64(node);
+ break;
case IrOpcode::kCheckedCompressedToTaggedSigned:
result = LowerCheckedCompressedToTaggedSigned(node, frame_state);
break;
@@ -1110,6 +1145,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringIndexOf:
result = LowerStringIndexOf(node);
break;
+ case IrOpcode::kStringFromCodePointAt:
+ result = LowerStringFromCodePointAt(node);
+ break;
case IrOpcode::kStringLength:
result = LowerStringLength(node);
break;
@@ -1120,7 +1158,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerStringCharCodeAt(node);
break;
case IrOpcode::kStringCodePointAt:
- result = LowerStringCodePointAt(node, UnicodeEncodingOf(node->op()));
+ result = LowerStringCodePointAt(node);
break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
@@ -1140,6 +1178,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringLessThanOrEqual:
result = LowerStringLessThanOrEqual(node);
break;
+ case IrOpcode::kBigIntAdd:
+ result = LowerBigIntAdd(node, frame_state);
+ break;
+ case IrOpcode::kBigIntNegate:
+ result = LowerBigIntNegate(node);
+ break;
case IrOpcode::kNumberIsFloat64Hole:
result = LowerNumberIsFloat64Hole(node);
break;
@@ -1233,6 +1277,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kRuntimeAbort:
LowerRuntimeAbort(node);
break;
+ case IrOpcode::kAssertType:
+ result = LowerAssertType(node);
+ break;
case IrOpcode::kConvertReceiver:
result = LowerConvertReceiver(node);
break;
@@ -1357,6 +1404,11 @@ Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerChangeInt31ToCompressedSigned(Node* node) {
+ Node* value = node->InputAt(0);
+ return ChangeInt32ToCompressedSmi(value);
+}
+
Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* value = node->InputAt(0);
return ChangeInt32ToSmi(value);
@@ -1461,6 +1513,11 @@ Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
return ChangeSmiToInt32(value);
}
+Node* EffectControlLinearizer::LowerChangeCompressedSignedToInt32(Node* node) {
+ Node* value = node->InputAt(0);
+ return ChangeCompressedSmiToInt32(value);
+}
+
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
Node* value = node->InputAt(0);
return ChangeSmiToInt64(value);
@@ -1684,8 +1741,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToCompressedSigned(Node* node) {
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
vfalse = __ ChangeFloat64ToInt32(vfalse);
- vfalse = ChangeInt32ToSmi(vfalse);
- vfalse = __ ChangeTaggedSignedToCompressedSigned(vfalse);
+ vfalse = ChangeInt32ToCompressedSmi(vfalse);
__ Goto(&done, vfalse);
__ Bind(&done);
@@ -2283,6 +2339,19 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
return value;
}
+Node* EffectControlLinearizer::LowerCheckedInt32ToCompressedSigned(
+ Node* node, Node* frame_state) {
+ DCHECK(SmiValuesAre31Bits());
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ Node* add = __ Int32AddWithOverflow(value, value);
+ Node* check = __ Projection(1, add);
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
+ return __ Projection(0, add);
+}
+
Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* node, Node* frame_state) {
DCHECK(SmiValuesAre31Bits());
@@ -2651,6 +2720,121 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
return value;
}
+Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ // Check for Smi.
+ Node* smi_check = ObjectIsSmi(value);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), smi_check,
+ frame_state);
+
+ // Check for BigInt.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* bi_check = __ WordEqual(value_map, __ BigIntMapConstant());
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
+ bi_check, frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntAsUintN(Node* node,
+ Node* frame_state) {
+ DCHECK(machine()->Is64());
+
+ const int bits = OpParameter<int>(node->op());
+ DCHECK(0 <= bits && bits <= 64);
+
+ if (bits == 64) {
+ // Reduce to nop.
+ return node->InputAt(0);
+ } else {
+ const uint64_t msk = (1ULL << bits) - 1ULL;
+ return __ Word64And(node->InputAt(0), __ Int64Constant(msk));
+ }
+}
+
+Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
+ DCHECK(machine()->Is64());
+
+ Node* value = node->InputAt(0);
+ Node* map = jsgraph()->HeapConstant(factory()->bigint_map());
+ // BigInts with value 0 must be of size 0 (canonical form).
+ auto if_zerodigits = __ MakeLabel();
+ auto if_onedigit = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+
+ __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &if_zerodigits);
+ __ Goto(&if_onedigit);
+
+ __ Bind(&if_onedigit);
+ {
+ Node* result = __ Allocate(AllocationType::kYoung,
+ __ IntPtrConstant(BigInt::SizeFor(1)));
+ const auto bitfield = BigInt::LengthBits::update(0, 1);
+ __ StoreField(AccessBuilder::ForMap(), result, map);
+ __ StoreField(AccessBuilder::ForBigIntBitfield(), result,
+ __ IntPtrConstant(bitfield));
+ // BigInts have no padding on 64 bit architectures with pointer compression.
+ if (BigInt::HasOptionalPadding()) {
+ __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result,
+ __ IntPtrConstant(0));
+ }
+ __ StoreField(AccessBuilder::ForBigIntLeastSignificantDigit64(), result,
+ value);
+ __ Goto(&done, result);
+ }
+
+ __ Bind(&if_zerodigits);
+ {
+ Node* result = __ Allocate(AllocationType::kYoung,
+ __ IntPtrConstant(BigInt::SizeFor(0)));
+ const auto bitfield = BigInt::LengthBits::update(0, 0);
+ __ StoreField(AccessBuilder::ForMap(), result, map);
+ __ StoreField(AccessBuilder::ForBigIntBitfield(), result,
+ __ IntPtrConstant(bitfield));
+ // BigInts have no padding on 64 bit architectures with pointer compression.
+ if (BigInt::HasOptionalPadding()) {
+ __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result,
+ __ IntPtrConstant(0));
+ }
+ __ Goto(&done, result);
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerTruncateBigIntToUint64(Node* node) {
+ DCHECK(machine()->Is64());
+
+ auto done = __ MakeLabel(MachineRepresentation::kWord64);
+ auto if_neg = __ MakeLabel();
+ auto if_not_zero = __ MakeLabel();
+
+ Node* value = node->InputAt(0);
+
+ Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
+ __ GotoIfNot(__ Word32Equal(bitfield, __ Int32Constant(0)), &if_not_zero);
+ __ Goto(&done, __ Int64Constant(0));
+
+ __ Bind(&if_not_zero);
+ {
+ Node* lsd =
+ __ LoadField(AccessBuilder::ForBigIntLeastSignificantDigit64(), value);
+ Node* sign =
+ __ Word32And(bitfield, __ Int32Constant(BigInt::SignBits::kMask));
+ __ GotoIf(__ Word32Equal(sign, __ Int32Constant(1)), &if_neg);
+ __ Goto(&done, lsd);
+
+ __ Bind(&if_neg);
+ __ Goto(&done, __ Int64Sub(__ Int64Constant(0), lsd));
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
@@ -3726,16 +3910,12 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
return loop_done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerStringCodePointAt(
- Node* node, UnicodeEncoding encoding) {
+Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Builtins::Name builtin = encoding == UnicodeEncoding::UTF16
- ? Builtins::kStringCodePointAtUTF16
- : Builtins::kStringCodePointAtUTF32;
-
- Callable const callable = Builtins::CallableFor(isolate(), builtin);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -3968,31 +4148,23 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
__ Bind(&if_not_single_code);
// Generate surrogate pair string
{
- switch (UnicodeEncodingOf(node->op())) {
- case UnicodeEncoding::UTF16:
- break;
+ // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
+ Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
- case UnicodeEncoding::UTF32: {
- // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
- Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
+ // lead = (codepoint >> 10) + LEAD_OFFSET
+ Node* lead =
+ __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
- // lead = (codepoint >> 10) + LEAD_OFFSET
- Node* lead =
- __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
+ // trail = (codepoint & 0x3FF) + 0xDC00;
+ Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
+ __ Int32Constant(0xDC00));
- // trail = (codepoint & 0x3FF) + 0xDC00;
- Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
- __ Int32Constant(0xDC00));
-
- // codpoint = (trail << 16) | lead;
+ // codpoint = (trail << 16) | lead;
#if V8_TARGET_BIG_ENDIAN
- code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
+ code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
#else
- code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
+ code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
#endif
- break;
- }
- }
// Allocate a new SeqTwoByteString for {code}.
Node* vfalse0 =
@@ -4032,6 +4204,21 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
search_string, position, __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) {
+ Node* string = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringFromCodePointAt);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
+ index, __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerStringLength(Node* node) {
Node* subject = node->InputAt(0);
@@ -4083,6 +4270,41 @@ Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringLessThanOrEqual), node);
}
+Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kBigIntAddNoThrow);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kFoldable | Operator::kNoThrow);
+ Node* value =
+ __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs,
+ rhs, __ NoContextConstant());
+
+ // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
+ __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, VectorSlotPair{},
+ ObjectIsSmi(value), frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kBigIntUnaryMinus);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kFoldable | Operator::kNoThrow);
+ Node* value =
+ __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()),
+ node->InputAt(0), __ NoContextConstant());
+
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
Node* frame_state) {
// If we reach this point w/o eliminating the {node} that's marked
@@ -4256,6 +4478,11 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
return value;
}
+Node* EffectControlLinearizer::ChangeInt32ToCompressedSmi(Node* value) {
+ CHECK(machine()->Is64() && SmiValuesAre31Bits());
+ return __ Word32Shl(value, SmiShiftBitsConstant());
+}
+
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
// Do shift on 32bit values if Smis are stored in the lower word.
if (machine()->Is64() && SmiValuesAre31Bits()) {
@@ -4305,6 +4532,11 @@ Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
return ChangeSmiToIntPtr(value);
}
+Node* EffectControlLinearizer::ChangeCompressedSmiToInt32(Node* value) {
+ CHECK(machine()->Is64() && SmiValuesAre31Bits());
+ return __ Word32Sar(value, SmiShiftBitsConstant());
+}
+
Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
CHECK(machine()->Is64());
return ChangeSmiToIntPtr(value);
@@ -5163,6 +5395,30 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
__ Int32Constant(1), __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerAssertType(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAssertType);
+ Type type = OpParameter<Type>(node->op());
+ DCHECK(type.IsRange());
+ auto range = type.AsRange();
+
+ Node* const input = node->InputAt(0);
+ Node* const min = __ NumberConstant(range->Min());
+ Node* const max = __ NumberConstant(range->Max());
+
+ {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kCheckNumberInRange);
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
+ __ Call(call_descriptor, __ HeapConstant(callable.code()), input, min, max,
+ __ NoContextConstant());
+ return input;
+ }
+}
+
Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op());
Node* value = node->InputAt(0);
@@ -5187,7 +5443,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
__ GotoIf(check, &convert_to_object);
__ Goto(&done_convert, value);
- // Wrap the primitive {value} into a JSValue.
+ // Wrap the primitive {value} into a JSPrimitiveWrapper.
__ Bind(&convert_to_object);
Operator::Properties properties = Operator::kEliminatable;
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
@@ -5220,7 +5476,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
__ GotoIf(check, &convert_to_object);
__ Goto(&done_convert, value);
- // Wrap the primitive {value} into a JSValue.
+ // Wrap the primitive {value} into a JSPrimitiveWrapper.
__ Bind(&convert_to_object);
__ GotoIf(__ WordEqual(value, __ UndefinedConstant()),
&convert_global_proxy);
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index dc0db4d780..aee0121384 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -4,6 +4,7 @@
#include "src/compiler/escape-analysis.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
@@ -153,6 +154,7 @@ class VariableTracker {
ZoneVector<Node*> buffer_;
EffectGraphReducer* reducer_;
int next_variable_ = 0;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(VariableTracker);
};
@@ -279,12 +281,14 @@ class EscapeAnalysisTracker : public ZoneObject {
};
EffectGraphReducer::EffectGraphReducer(
- Graph* graph, std::function<void(Node*, Reduction*)> reduce, Zone* zone)
+ Graph* graph, std::function<void(Node*, Reduction*)> reduce,
+ TickCounter* tick_counter, Zone* zone)
: graph_(graph),
state_(graph, kNumStates),
revisit_(zone),
stack_(zone),
- reduce_(std::move(reduce)) {}
+ reduce_(std::move(reduce)),
+ tick_counter_(tick_counter) {}
void EffectGraphReducer::ReduceFrom(Node* node) {
// Perform DFS and eagerly trigger revisitation as soon as possible.
@@ -293,6 +297,7 @@ void EffectGraphReducer::ReduceFrom(Node* node) {
DCHECK(stack_.empty());
stack_.push({node, 0});
while (!stack_.empty()) {
+ tick_counter_->DoTick();
Node* current = stack_.top().node;
int& input_index = stack_.top().input_index;
if (input_index < current->InputCount()) {
@@ -357,7 +362,8 @@ VariableTracker::VariableTracker(JSGraph* graph, EffectGraphReducer* reducer,
graph_(graph),
table_(zone, State(zone)),
buffer_(zone),
- reducer_(reducer) {}
+ reducer_(reducer),
+ tick_counter_(reducer->tick_counter()) {}
VariableTracker::Scope::Scope(VariableTracker* states, Node* node,
Reduction* reduction)
@@ -406,6 +412,7 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
State first_input = table_.Get(NodeProperties::GetEffectInput(effect_phi, 0));
State result = first_input;
for (std::pair<Variable, Node*> var_value : first_input) {
+ tick_counter_->DoTick();
if (Node* value = var_value.second) {
Variable var = var_value.first;
TRACE("var %i:\n", var.id_);
@@ -441,10 +448,12 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
// [old_value] cannot originate from the inputs. Thus [old_value]
// must have been created by a previous reduction of this [effect_phi].
for (int i = 0; i < arity; ++i) {
- NodeProperties::ReplaceValueInput(
- old_value, buffer_[i] ? buffer_[i] : graph_->Dead(), i);
- // This change cannot affect the rest of the reducer, so there is no
- // need to trigger additional revisitations.
+ Node* old_input = NodeProperties::GetValueInput(old_value, i);
+ Node* new_input = buffer_[i] ? buffer_[i] : graph_->Dead();
+ if (old_input != new_input) {
+ NodeProperties::ReplaceValueInput(old_value, new_input, i);
+ reducer_->Revisit(old_value);
+ }
}
result.Set(var, old_value);
} else {
@@ -701,21 +710,19 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
} else if (right_object && !right_object->HasEscaped()) {
replacement = jsgraph->FalseConstant();
}
- if (replacement) {
- // TODO(tebbi) This is a workaround for uninhabited types. If we
- // replaced a value of uninhabited type with a constant, we would
- // widen the type of the node. This could produce inconsistent
- // types (which might confuse representation selection). We get
- // around this by refusing to constant-fold and escape-analyze
- // if the type is not inhabited.
- if (!NodeProperties::GetType(left).IsNone() &&
- !NodeProperties::GetType(right).IsNone()) {
- current->SetReplacement(replacement);
- } else {
- current->SetEscaped(left);
- current->SetEscaped(right);
- }
+ // TODO(tebbi) This is a workaround for uninhabited types. If we
+ // replaced a value of uninhabited type with a constant, we would
+ // widen the type of the node. This could produce inconsistent
+ // types (which might confuse representation selection). We get
+ // around this by refusing to constant-fold and escape-analyze
+ // if the type is not inhabited.
+ if (replacement && !NodeProperties::GetType(left).IsNone() &&
+ !NodeProperties::GetType(right).IsNone()) {
+ current->SetReplacement(replacement);
+ break;
}
+ current->SetEscaped(left);
+ current->SetEscaped(right);
break;
}
case IrOpcode::kCheckMaps: {
@@ -817,11 +824,12 @@ void EscapeAnalysis::Reduce(Node* node, Reduction* reduction) {
ReduceNode(op, &current, jsgraph());
}
-EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone)
+EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter,
+ Zone* zone)
: EffectGraphReducer(
jsgraph->graph(),
[this](Node* node, Reduction* reduction) { Reduce(node, reduction); },
- zone),
+ tick_counter, zone),
tracker_(new (zone) EscapeAnalysisTracker(jsgraph, this, zone)),
jsgraph_(jsgraph) {}
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index c3dcd2f74d..0fbc7d0bdd 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -14,6 +14,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
class CommonOperatorBuilder;
@@ -38,7 +41,8 @@ class EffectGraphReducer {
};
EffectGraphReducer(Graph* graph,
- std::function<void(Node*, Reduction*)> reduce, Zone* zone);
+ std::function<void(Node*, Reduction*)> reduce,
+ TickCounter* tick_counter, Zone* zone);
void ReduceGraph() { ReduceFrom(graph_->end()); }
@@ -56,6 +60,8 @@ class EffectGraphReducer {
bool Complete() { return stack_.empty() && revisit_.empty(); }
+ TickCounter* tick_counter() const { return tick_counter_; }
+
private:
struct NodeState {
Node* node;
@@ -69,6 +75,7 @@ class EffectGraphReducer {
ZoneStack<Node*> revisit_;
ZoneStack<NodeState> stack_;
std::function<void(Node*, Reduction*)> reduce_;
+ TickCounter* const tick_counter_;
};
// A variable is an abstract storage location, which is lowered to SSA values
@@ -164,7 +171,7 @@ class EscapeAnalysisResult {
class V8_EXPORT_PRIVATE EscapeAnalysis final
: public NON_EXPORTED_BASE(EffectGraphReducer) {
public:
- EscapeAnalysis(JSGraph* jsgraph, Zone* zone);
+ EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter, Zone* zone);
EscapeAnalysisResult analysis_result() {
DCHECK(Complete());
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index cc9dbd9dfd..50f29d968b 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -52,6 +52,9 @@ Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) {
return jsgraph()->HeapConstant(object);
}
+Node* GraphAssembler::NumberConstant(double value) {
+ return jsgraph()->Constant(value);
+}
Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
return jsgraph()->ExternalConstant(ref);
@@ -221,6 +224,12 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) {
current_effect_, current_control_);
}
+Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value,
+ current_effect_, current_control_);
+}
+
Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
return current_effect_ =
graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 74b885b788..e2c0005d15 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -57,6 +57,7 @@ namespace compiler {
V(Word32Shr) \
V(Word32Shl) \
V(Word32Sar) \
+ V(Word64And) \
V(IntAdd) \
V(IntSub) \
V(IntMul) \
@@ -71,6 +72,7 @@ namespace compiler {
V(Uint64LessThan) \
V(Uint64LessThanOrEqual) \
V(Int32LessThan) \
+ V(Int64Sub) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Div) \
@@ -93,22 +95,24 @@ namespace compiler {
V(Uint32Mod) \
V(Uint32Div)
-#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
- V(TrueConstant) \
- V(FalseConstant) \
- V(NullConstant) \
- V(BigIntMapConstant) \
- V(BooleanMapConstant) \
- V(HeapNumberMapConstant) \
- V(NoContextConstant) \
- V(EmptyStringConstant) \
- V(UndefinedConstant) \
- V(TheHoleConstant) \
- V(FixedArrayMapConstant) \
- V(FixedDoubleArrayMapConstant) \
- V(ToNumberBuiltinConstant) \
- V(AllocateInYoungGenerationStubConstant) \
- V(AllocateInOldGenerationStubConstant)
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+ V(TrueConstant) \
+ V(FalseConstant) \
+ V(NullConstant) \
+ V(BigIntMapConstant) \
+ V(BooleanMapConstant) \
+ V(HeapNumberMapConstant) \
+ V(NoContextConstant) \
+ V(EmptyStringConstant) \
+ V(UndefinedConstant) \
+ V(TheHoleConstant) \
+ V(FixedArrayMapConstant) \
+ V(FixedDoubleArrayMapConstant) \
+ V(ToNumberBuiltinConstant) \
+ V(AllocateInYoungGenerationStubConstant) \
+ V(AllocateRegularInYoungGenerationStubConstant) \
+ V(AllocateInOldGenerationStubConstant) \
+ V(AllocateRegularInOldGenerationStubConstant)
class GraphAssembler;
@@ -196,6 +200,7 @@ class GraphAssembler {
Node* Float64Constant(double value);
Node* Projection(int index, Node* value);
Node* HeapConstant(Handle<HeapObject> object);
+ Node* NumberConstant(double value);
Node* CEntryStubConstant(int result_size);
Node* ExternalConstant(ExternalReference ref);
@@ -225,6 +230,7 @@ class GraphAssembler {
Node* ToNumber(Node* value);
Node* BitcastWordToTagged(Node* value);
Node* BitcastTaggedToWord(Node* value);
+ Node* BitcastTaggedSignedToWord(Node* value);
Node* Allocate(AllocationType allocation, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index fafa322d87..9a0dea6b26 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -5,10 +5,11 @@
#include <functional>
#include <limits>
-#include "src/compiler/graph.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/node.h"
+#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/verifier.h"
namespace v8 {
@@ -25,13 +26,15 @@ enum class GraphReducer::State : uint8_t {
void Reducer::Finalize() {}
-GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
+GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
+ Node* dead)
: graph_(graph),
dead_(dead),
state_(graph, 4),
reducers_(zone),
revisit_(zone),
- stack_(zone) {
+ stack_(zone),
+ tick_counter_(tick_counter) {
if (dead != nullptr) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -82,6 +85,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
auto skip = reducers_.end();
for (auto i = reducers_.begin(); i != reducers_.end();) {
if (i != skip) {
+ tick_counter_->DoTick();
Reduction reduction = (*i)->Reduce(node);
if (!reduction.Changed()) {
// No change from this reducer.
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 3bb20a4625..bbcc67b074 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -12,13 +12,15 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
class Graph;
class Node;
-
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
using NodeId = uint32_t;
@@ -129,7 +131,8 @@ class AdvancedReducer : public Reducer {
class V8_EXPORT_PRIVATE GraphReducer
: public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
public:
- GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr);
+ GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
+ Node* dead = nullptr);
~GraphReducer() override;
Graph* graph() const { return graph_; }
@@ -181,6 +184,7 @@ class V8_EXPORT_PRIVATE GraphReducer
ZoneVector<Reducer*> reducers_;
ZoneQueue<Node*> revisit_;
ZoneStack<NodeState> stack_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(GraphReducer);
};
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
new file mode 100644
index 0000000000..5547039fa6
--- /dev/null
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -0,0 +1,906 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_HEAP_REFS_H_
+#define V8_COMPILER_HEAP_REFS_H_
+
+#include "src/base/optional.h"
+#include "src/ic/call-optimization.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/instance-type.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+class CallHandlerInfo;
+class FixedDoubleArray;
+class FunctionTemplateInfo;
+class HeapNumber;
+class InternalizedString;
+class JSBoundFunction;
+class JSDataView;
+class JSGlobalProxy;
+class JSRegExp;
+class JSTypedArray;
+class NativeContext;
+class ScriptContextTable;
+class VectorSlotPair;
+
+namespace compiler {
+
+// Whether we are loading a property or storing to a property.
+// For a store during literal creation, do not walk up the prototype chain.
+enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
+
+enum class OddballType : uint8_t {
+ kNone, // Not an Oddball.
+ kBoolean, // True or False.
+ kUndefined,
+ kNull,
+ kHole,
+ kUninitialized,
+ kOther // Oddball, but none of the above.
+};
+
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
+#define HEAP_BROKER_OBJECT_LIST(V) \
+ /* Subtypes of JSObject */ \
+ V(JSArray) \
+ V(JSBoundFunction) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSGlobalProxy) \
+ V(JSRegExp) \
+ V(JSTypedArray) \
+ /* Subtypes of Context */ \
+ V(NativeContext) \
+ /* Subtypes of FixedArray */ \
+ V(Context) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ V(InternalizedString) \
+ V(String) \
+ V(Symbol) \
+ /* Subtypes of HeapObject */ \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(CallHandlerInfo) \
+ V(Cell) \
+ V(Code) \
+ V(DescriptorArray) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArrayBase) \
+ V(FunctionTemplateInfo) \
+ V(HeapNumber) \
+ V(JSObject) \
+ V(Map) \
+ V(MutableHeapNumber) \
+ V(Name) \
+ V(PropertyCell) \
+ V(SharedFunctionInfo) \
+ V(SourceTextModule) \
+ /* Subtypes of Object */ \
+ V(HeapObject)
+
+class CompilationDependencies;
+class JSHeapBroker;
+class ObjectData;
+class PerIsolateCompilerCache;
+class PropertyAccessInfo;
+#define FORWARD_DECL(Name) class Name##Ref;
+HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
+
+class V8_EXPORT_PRIVATE ObjectRef {
+ public:
+ ObjectRef(JSHeapBroker* broker, Handle<Object> object);
+ ObjectRef(JSHeapBroker* broker, ObjectData* data)
+ : data_(data), broker_(broker) {
+ CHECK_NOT_NULL(data_);
+ }
+
+ Handle<Object> object() const;
+
+ bool equals(const ObjectRef& other) const;
+
+ bool IsSmi() const;
+ int AsSmi() const;
+
+#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+#undef HEAP_IS_METHOD_DECL
+
+#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+#undef HEAP_AS_METHOD_DECL
+
+ bool IsNullOrUndefined() const;
+
+ bool BooleanValue() const;
+ Maybe<double> OddballToNumber() const;
+
+ // Return the element at key {index} if {index} is known to be an own data
+ // property of the object that is non-writable and non-configurable.
+ base::Optional<ObjectRef> GetOwnConstantElement(uint32_t index,
+ bool serialize = false) const;
+
+ Isolate* isolate() const;
+
+ struct Hash {
+ size_t operator()(const ObjectRef& ref) const {
+ return base::hash_combine(ref.object().address());
+ }
+ };
+ struct Equal {
+ bool operator()(const ObjectRef& lhs, const ObjectRef& rhs) const {
+ return lhs.equals(rhs);
+ }
+ };
+
+ protected:
+ JSHeapBroker* broker() const;
+ ObjectData* data() const;
+ ObjectData* data_; // Should be used only by object() getters.
+
+ private:
+ friend class FunctionTemplateInfoRef;
+ friend class JSArrayData;
+ friend class JSGlobalProxyRef;
+ friend class JSGlobalProxyData;
+ friend class JSObjectData;
+ friend class StringData;
+
+ friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
+
+ JSHeapBroker* broker_;
+};
+
+// Temporary class that carries information from a Map. We'd like to remove
+// this class and use MapRef instead, but we can't as long as we support the
+// kDisabled broker mode. That's because obtaining the MapRef via
+// HeapObjectRef::map() requires a HandleScope when the broker is disabled.
+// During OptimizeGraph we generally don't have a HandleScope, however. There
+// are two places where we therefore use GetHeapObjectType() instead. Both that
+// function and this class should eventually be removed.
+class HeapObjectType {
+ public:
+ enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
+
+ using Flags = base::Flags<Flag>;
+
+ HeapObjectType(InstanceType instance_type, Flags flags,
+ OddballType oddball_type)
+ : instance_type_(instance_type),
+ oddball_type_(oddball_type),
+ flags_(flags) {
+ DCHECK_EQ(instance_type == ODDBALL_TYPE,
+ oddball_type != OddballType::kNone);
+ }
+
+ OddballType oddball_type() const { return oddball_type_; }
+ InstanceType instance_type() const { return instance_type_; }
+ Flags flags() const { return flags_; }
+
+ bool is_callable() const { return flags_ & kCallable; }
+ bool is_undetectable() const { return flags_ & kUndetectable; }
+
+ private:
+ InstanceType const instance_type_;
+ OddballType const oddball_type_;
+ Flags const flags_;
+};
+
+class HeapObjectRef : public ObjectRef {
+ public:
+ using ObjectRef::ObjectRef;
+ Handle<HeapObject> object() const;
+
+ MapRef map() const;
+
+ // See the comment on the HeapObjectType class.
+ HeapObjectType GetHeapObjectType() const;
+};
+
+class PropertyCellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<PropertyCell> object() const;
+
+ PropertyDetails property_details() const;
+
+ void Serialize();
+ ObjectRef value() const;
+};
+
+class JSObjectRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<JSObject> object() const;
+
+ uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
+ double RawFastDoublePropertyAt(FieldIndex index) const;
+ ObjectRef RawFastPropertyAt(FieldIndex index) const;
+
+ // Return the value of the property identified by the field {index}
+ // if {index} is known to be an own data property of the object.
+ base::Optional<ObjectRef> GetOwnProperty(Representation field_representation,
+ FieldIndex index,
+ bool serialize = false) const;
+
+ FixedArrayBaseRef elements() const;
+ void SerializeElements();
+ void EnsureElementsTenured();
+ ElementsKind GetElementsKind() const;
+
+ void SerializeObjectCreateMap();
+ base::Optional<MapRef> GetObjectCreateMap() const;
+};
+
+class JSDataViewRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSDataView> object() const;
+
+ size_t byte_length() const;
+ size_t byte_offset() const;
+};
+
+class JSBoundFunctionRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSBoundFunction> object() const;
+
+ void Serialize();
+
+ // The following are available only after calling Serialize().
+ ObjectRef bound_target_function() const;
+ ObjectRef bound_this() const;
+ FixedArrayRef bound_arguments() const;
+};
+
+class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSFunction> object() const;
+
+ bool has_feedback_vector() const;
+ bool has_initial_map() const;
+ bool has_prototype() const;
+ bool PrototypeRequiresRuntimeLookup() const;
+
+ void Serialize();
+ bool serialized() const;
+
+ // The following are available only after calling Serialize().
+ ObjectRef prototype() const;
+ MapRef initial_map() const;
+ ContextRef context() const;
+ NativeContextRef native_context() const;
+ SharedFunctionInfoRef shared() const;
+ FeedbackVectorRef feedback_vector() const;
+ int InitialMapInstanceSizeWithMinSlack() const;
+
+ bool IsSerializedForCompilation() const;
+};
+
+class JSRegExpRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSRegExp> object() const;
+
+ ObjectRef raw_properties_or_hash() const;
+ ObjectRef data() const;
+ ObjectRef source() const;
+ ObjectRef flags() const;
+ ObjectRef last_index() const;
+};
+
+class HeapNumberRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<HeapNumber> object() const;
+
+ double value() const;
+};
+
+class MutableHeapNumberRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<MutableHeapNumber> object() const;
+
+ double value() const;
+};
+
+class ContextRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Context> object() const;
+
+ // {previous} decrements {depth} by 1 for each previous link successfully
+ // followed. If {depth} != 0 on function return, then it only got
+ // partway to the desired depth. If {serialize} is true, then
+ // {previous} will cache its findings.
+ ContextRef previous(size_t* depth, bool serialize = false) const;
+
+ // Only returns a value if the index is valid for this ContextRef.
+ base::Optional<ObjectRef> get(int index, bool serialize = false) const;
+
+ // We only serialize the ScopeInfo if certain Promise
+ // builtins are called.
+ void SerializeScopeInfo();
+ base::Optional<ScopeInfoRef> scope_info() const;
+};
+
+#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, array_function) \
+ V(JSFunction, boolean_function) \
+ V(JSFunction, bigint_function) \
+ V(JSFunction, number_function) \
+ V(JSFunction, object_function) \
+ V(JSFunction, promise_function) \
+ V(JSFunction, promise_then) \
+ V(JSFunction, string_function) \
+ V(JSFunction, symbol_function) \
+ V(JSGlobalProxy, global_proxy_object) \
+ V(JSObject, promise_prototype) \
+ V(Map, bound_function_with_constructor_map) \
+ V(Map, bound_function_without_constructor_map) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, initial_string_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, js_array_holey_double_elements_map) \
+ V(Map, js_array_holey_elements_map) \
+ V(Map, js_array_holey_smi_elements_map) \
+ V(Map, js_array_packed_double_elements_map) \
+ V(Map, js_array_packed_elements_map) \
+ V(Map, js_array_packed_smi_elements_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, slow_object_with_null_prototype_map) \
+ V(Map, strict_arguments_map) \
+ V(ScriptContextTable, script_context_table) \
+ V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \
+ V(SharedFunctionInfo, promise_catch_finally_shared_fun) \
+ V(SharedFunctionInfo, promise_then_finally_shared_fun) \
+ V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun)
+
+// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
+// happened when Turbofan is invoked via --always-opt.
+#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, async_function_object_map) \
+ V(Map, map_key_iterator_map) \
+ V(Map, map_key_value_iterator_map) \
+ V(Map, map_value_iterator_map) \
+ V(JSFunction, regexp_exec_function) \
+ V(Map, set_key_value_iterator_map) \
+ V(Map, set_value_iterator_map)
+
+#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
+
+class NativeContextRef : public ContextRef {
+ public:
+ using ContextRef::ContextRef;
+ Handle<NativeContext> object() const;
+
+ void Serialize();
+
+#define DECL_ACCESSOR(type, name) type##Ref name() const;
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ ScopeInfoRef scope_info() const;
+ MapRef GetFunctionMapFromIndex(int index) const;
+ MapRef GetInitialJSArrayMap(ElementsKind kind) const;
+ base::Optional<JSFunctionRef> GetConstructorFunction(const MapRef& map) const;
+};
+
+class NameRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Name> object() const;
+
+ bool IsUniqueName() const;
+};
+
+class ScriptContextTableRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<ScriptContextTable> object() const;
+
+ struct LookupResult {
+ ContextRef context;
+ bool immutable;
+ int index;
+ };
+
+ base::Optional<LookupResult> lookup(const NameRef& name) const;
+};
+
+class DescriptorArrayRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<DescriptorArray> object() const;
+};
+
+class FeedbackCellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FeedbackCell> object() const;
+
+ HeapObjectRef value() const;
+};
+
+class FeedbackVectorRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FeedbackVector> object() const;
+
+ ObjectRef get(FeedbackSlot slot) const;
+
+ void SerializeSlots();
+};
+
+class CallHandlerInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<CallHandlerInfo> object() const;
+
+ Address callback() const;
+
+ void Serialize();
+ ObjectRef data() const;
+};
+
+class AllocationSiteRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<AllocationSite> object() const;
+
+ bool PointsToLiteral() const;
+ AllocationType GetAllocationType() const;
+ ObjectRef nested_site() const;
+
+ // {IsFastLiteral} determines whether the given array or object literal
+ // boilerplate satisfies all limits to be considered for fast deep-copying
+ // and computes the total size of all objects that are part of the graph.
+ //
+ // If PointsToLiteral() is false, then IsFastLiteral() is also false.
+ bool IsFastLiteral() const;
+ // We only serialize boilerplate if IsFastLiteral is true.
+ base::Optional<JSObjectRef> boilerplate() const;
+
+ ElementsKind GetElementsKind() const;
+ bool CanInlineCall() const;
+};
+
+class BigIntRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<BigInt> object() const;
+
+ uint64_t AsUint64() const;
+};
+
+class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Map> object() const;
+
+ int instance_size() const;
+ InstanceType instance_type() const;
+ int GetInObjectProperties() const;
+ int GetInObjectPropertiesStartInWords() const;
+ int NumberOfOwnDescriptors() const;
+ int GetInObjectPropertyOffset(int index) const;
+ int constructor_function_index() const;
+ int NextFreePropertyIndex() const;
+ int UnusedPropertyFields() const;
+ ElementsKind elements_kind() const;
+ bool is_stable() const;
+ bool is_extensible() const;
+ bool is_constructor() const;
+ bool has_prototype_slot() const;
+ bool is_access_check_needed() const;
+ bool is_deprecated() const;
+ bool CanBeDeprecated() const;
+ bool CanTransition() const;
+ bool IsInobjectSlackTrackingInProgress() const;
+ bool is_dictionary_map() const;
+ bool IsFixedCowArrayMap() const;
+ bool IsPrimitiveMap() const;
+ bool is_undetectable() const;
+ bool is_callable() const;
+ bool has_indexed_interceptor() const;
+ bool is_migration_target() const;
+ bool supports_fast_array_iteration() const;
+ bool supports_fast_array_resize() const;
+ bool IsMapOfCurrentGlobalProxy() const;
+
+ OddballType oddball_type() const;
+
+#define DEF_TESTER(Type, ...) bool Is##Type##Map() const;
+ INSTANCE_TYPE_CHECKERS(DEF_TESTER)
+#undef DEF_TESTER
+
+ void SerializeBackPointer();
+ HeapObjectRef GetBackPointer() const;
+
+ void SerializePrototype();
+ bool serialized_prototype() const;
+ HeapObjectRef prototype() const;
+
+ void SerializeForElementLoad();
+
+ void SerializeForElementStore();
+ bool HasOnlyStablePrototypesWithFastElements(
+ ZoneVector<MapRef>* prototype_maps);
+
+ // Concerning the underlying instance_descriptors:
+ void SerializeOwnDescriptors();
+ void SerializeOwnDescriptor(int descriptor_index);
+ MapRef FindFieldOwner(int descriptor_index) const;
+ PropertyDetails GetPropertyDetails(int descriptor_index) const;
+ NameRef GetPropertyKey(int descriptor_index) const;
+ FieldIndex GetFieldIndexFor(int descriptor_index) const;
+ ObjectRef GetFieldType(int descriptor_index) const;
+ bool IsUnboxedDoubleField(int descriptor_index) const;
+
+ // Available after calling JSFunctionRef::Serialize on a function that has
+ // this map as initial map.
+ ObjectRef GetConstructor() const;
+ base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
+};
+
+struct HolderLookupResult {
+ HolderLookupResult(CallOptimization::HolderLookup lookup_ =
+ CallOptimization::kHolderNotFound,
+ base::Optional<JSObjectRef> holder_ = base::nullopt)
+ : lookup(lookup_), holder(holder_) {}
+ CallOptimization::HolderLookup lookup;
+ base::Optional<JSObjectRef> holder;
+};
+
+class FunctionTemplateInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FunctionTemplateInfo> object() const;
+
+ bool is_signature_undefined() const;
+ bool accept_any_receiver() const;
+ // The following returns true if the CallHandlerInfo is present.
+ bool has_call_code() const;
+
+ void SerializeCallCode();
+ base::Optional<CallHandlerInfoRef> call_code() const;
+
+ HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map,
+ bool serialize);
+};
+
+class FixedArrayBaseRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FixedArrayBase> object() const;
+
+ int length() const;
+};
+
+class FixedArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<FixedArray> object() const;
+
+ ObjectRef get(int i) const;
+};
+
+class FixedDoubleArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<FixedDoubleArray> object() const;
+
+ double get_scalar(int i) const;
+ bool is_the_hole(int i) const;
+};
+
+class BytecodeArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<BytecodeArray> object() const;
+
+ int register_count() const;
+ int parameter_count() const;
+ interpreter::Register incoming_new_target_or_generator_register() const;
+
+ // Bytecode access methods.
+ uint8_t get(int index) const;
+ Address GetFirstBytecodeAddress() const;
+
+ // Source position table.
+ const byte* source_positions_address() const;
+ int source_positions_size() const;
+
+ // Constant pool access.
+ Handle<Object> GetConstantAtIndex(int index) const;
+ bool IsConstantAtIndexSmi(int index) const;
+ Smi GetConstantAtIndexAsSmi(int index) const;
+
+ // Exception handler table.
+ Address handler_table_address() const;
+ int handler_table_size() const;
+
+ bool IsSerializedForCompilation() const;
+ void SerializeForCompilation();
+};
+
+class JSArrayRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSArray> object() const;
+
+ ObjectRef length() const;
+
+ // Return the element at key {index} if the array has a copy-on-write elements
+ // storage and {index} is known to be an own data property.
+ base::Optional<ObjectRef> GetOwnCowElement(uint32_t index,
+ bool serialize = false) const;
+};
+
+class ScopeInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<ScopeInfo> object() const;
+
+ int ContextLength() const;
+};
+
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(bool, is_safe_to_skip_arguments_adaptor) \
+ V(bool, IsInlineable) \
+ V(int, StartPosition) \
+ V(bool, is_compiled)
+
+class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<SharedFunctionInfo> object() const;
+
+ int builtin_id() const;
+ BytecodeArrayRef GetBytecodeArray() const;
+
+#define DECL_ACCESSOR(type, name) type name() const;
+ BROKER_SFI_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
+ void SetSerializedForCompilation(FeedbackVectorRef feedback);
+
+ // Template objects may not be created at compilation time. This method
+ // wraps the retrieval of the template object and creates it if
+ // necessary.
+ JSArrayRef GetTemplateObject(ObjectRef description, FeedbackVectorRef vector,
+ FeedbackSlot slot, bool serialize = false);
+
+ void SerializeFunctionTemplateInfo();
+ base::Optional<FunctionTemplateInfoRef> function_template_info() const;
+};
+
+class StringRef : public NameRef {
+ public:
+ using NameRef::NameRef;
+ Handle<String> object() const;
+
+ int length() const;
+ uint16_t GetFirstChar();
+ base::Optional<double> ToNumber();
+ bool IsSeqString() const;
+ bool IsExternalString() const;
+};
+
+class SymbolRef : public NameRef {
+ public:
+ using NameRef::NameRef;
+ Handle<Symbol> object() const;
+};
+
+class JSTypedArrayRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSTypedArray> object() const;
+
+ bool is_on_heap() const;
+ size_t length() const;
+ void* external_pointer() const;
+
+ void Serialize();
+ bool serialized() const;
+
+ HeapObjectRef buffer() const;
+};
+
+class SourceTextModuleRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<SourceTextModule> object() const;
+
+ void Serialize();
+
+ CellRef GetCell(int cell_index) const;
+};
+
+class CellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Cell> object() const;
+
+ ObjectRef value() const;
+};
+
+class JSGlobalProxyRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSGlobalProxy> object() const;
+
+ // If {serialize} is false:
+ // If the property is known to exist as a property cell (on the global
+ // object), return that property cell. Otherwise (not known to exist as a
+ // property cell or known not to exist as a property cell) return nothing.
+ // If {serialize} is true:
+ // Like above but potentially access the heap and serialize the necessary
+ // information.
+ base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name,
+ bool serialize = false) const;
+};
+
+class CodeRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Code> object() const;
+};
+
+class InternalizedStringRef : public StringRef {
+ public:
+ using StringRef::StringRef;
+ Handle<InternalizedString> object() const;
+};
+
+class ElementAccessFeedback;
+class NamedAccessFeedback;
+
+class ProcessedFeedback : public ZoneObject {
+ public:
+ enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
+ Kind kind() const { return kind_; }
+
+ ElementAccessFeedback const* AsElementAccess() const;
+ NamedAccessFeedback const* AsNamedAccess() const;
+
+ protected:
+ explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
+
+ private:
+ Kind const kind_;
+};
+
+class InsufficientFeedback final : public ProcessedFeedback {
+ public:
+ InsufficientFeedback();
+};
+
+class GlobalAccessFeedback : public ProcessedFeedback {
+ public:
+ explicit GlobalAccessFeedback(PropertyCellRef cell);
+ GlobalAccessFeedback(ContextRef script_context, int slot_index,
+ bool immutable);
+
+ bool IsPropertyCell() const;
+ PropertyCellRef property_cell() const;
+
+ bool IsScriptContextSlot() const { return !IsPropertyCell(); }
+ ContextRef script_context() const;
+ int slot_index() const;
+ bool immutable() const;
+
+ base::Optional<ObjectRef> GetConstantHint() const;
+
+ private:
+ ObjectRef const cell_or_context_;
+ int const index_and_immutable_;
+};
+
+class KeyedAccessMode {
+ public:
+ static KeyedAccessMode FromNexus(FeedbackNexus const& nexus);
+
+ AccessMode access_mode() const;
+ bool IsLoad() const;
+ bool IsStore() const;
+ KeyedAccessLoadMode load_mode() const;
+ KeyedAccessStoreMode store_mode() const;
+
+ private:
+ AccessMode const access_mode_;
+ union LoadStoreMode {
+ LoadStoreMode(KeyedAccessLoadMode load_mode);
+ LoadStoreMode(KeyedAccessStoreMode store_mode);
+ KeyedAccessLoadMode load_mode;
+ KeyedAccessStoreMode store_mode;
+ } const load_store_mode_;
+
+ KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode);
+ KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode);
+};
+
+class ElementAccessFeedback : public ProcessedFeedback {
+ public:
+ ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode);
+
+ // No transition sources appear in {receiver_maps}.
+ // All transition targets appear in {receiver_maps}.
+ ZoneVector<Handle<Map>> receiver_maps;
+ ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
+
+ KeyedAccessMode const keyed_mode;
+
+ class MapIterator {
+ public:
+ bool done() const;
+ void advance();
+ MapRef current() const;
+
+ private:
+ friend class ElementAccessFeedback;
+
+ explicit MapIterator(ElementAccessFeedback const& processed,
+ JSHeapBroker* broker);
+
+ ElementAccessFeedback const& processed_;
+ JSHeapBroker* const broker_;
+ size_t index_ = 0;
+ };
+
+ // Iterator over all maps: first {receiver_maps}, then transition sources.
+ MapIterator all_maps(JSHeapBroker* broker) const;
+};
+
+class NamedAccessFeedback : public ProcessedFeedback {
+ public:
+ NamedAccessFeedback(NameRef const& name,
+ ZoneVector<PropertyAccessInfo> const& access_infos);
+
+ NameRef const& name() const { return name_; }
+ ZoneVector<PropertyAccessInfo> const& access_infos() const {
+ return access_infos_;
+ }
+
+ private:
+ NameRef const name_;
+ ZoneVector<PropertyAccessInfo> const access_infos_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_HEAP_REFS_H_
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 3430b6b339..eda866e5f2 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -97,7 +97,10 @@ int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) {
int GetParameterIndexAfterLowering(
Signature<MachineRepresentation>* signature, int old_index) {
int result = old_index;
- for (int i = 0; i < old_index; i++) {
+ // Be robust towards special indexes (>= param count).
+ int max_to_check =
+ std::min(old_index, static_cast<int>(signature->parameter_count()));
+ for (int i = 0; i < max_to_check; i++) {
if (signature->GetParam(i) == MachineRepresentation::kWord64) {
result++;
}
@@ -142,16 +145,16 @@ int Int64Lowering::GetParameterCountAfterLowering(
signature, static_cast<int>(signature->parameter_count()));
}
-void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
- Node*& index_high) {
+void Int64Lowering::GetIndexNodes(Node* index, Node** index_low,
+ Node** index_high) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- index_low = index;
- index_high = graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
+ *index_low = index;
+ *index_high = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
#elif defined(V8_TARGET_BIG_ENDIAN)
- index_low = graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
- index_high = index;
+ *index_low = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+ *index_high = index;
#endif
}
@@ -182,7 +185,7 @@ void Int64Lowering::LowerNode(Node* node) {
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
- GetIndexNodes(index, index_low, index_high);
+ GetIndexNodes(index, &index_low, &index_high);
const Operator* load_op;
if (node->opcode() == IrOpcode::kLoad) {
@@ -232,7 +235,7 @@ void Int64Lowering::LowerNode(Node* node) {
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
- GetIndexNodes(index, index_low, index_high);
+ GetIndexNodes(index, &index_low, &index_high);
Node* value = node->InputAt(2);
DCHECK(HasReplacementLow(value));
DCHECK(HasReplacementHigh(value));
@@ -291,12 +294,6 @@ void Int64Lowering::LowerNode(Node* node) {
// changes.
if (GetParameterCountAfterLowering(signature()) != param_count) {
int old_index = ParameterIndexOf(node->op());
- // Prevent special lowering of wasm's instance or JS
- // context/closure parameters.
- if (old_index <= 0 || old_index > param_count) {
- DefaultLowering(node);
- break;
- }
// Adjust old_index to be compliant with the signature.
--old_index;
int new_index = GetParameterIndexAfterLowering(signature(), old_index);
@@ -304,6 +301,12 @@ void Int64Lowering::LowerNode(Node* node) {
++new_index;
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
+ if (old_index < 0 || old_index >= param_count) {
+ // Special parameters (JS closure/context) don't have kWord64
+ // representation anyway.
+ break;
+ }
+
if (signature()->GetParam(old_index) ==
MachineRepresentation::kWord64) {
Node* high_node = graph()->NewNode(common()->Parameter(new_index + 1),
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index b083805771..9c77cf41a3 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -59,7 +59,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
bool HasReplacementHigh(Node* node);
Node* GetReplacementHigh(Node* node);
void PreparePhiReplacement(Node* phi);
- void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
+ void GetIndexNodes(Node* index, Node** index_low, Node** index_high);
void ReplaceNodeWithProjections(Node* node);
void LowerMemoryBaseAndIndex(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index d58331c85e..8128f89949 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -179,6 +179,100 @@ Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op,
return Replace(value);
}
+// ES section #sec-math.hypot Math.hypot ( value1, value2, ...values )
+Reduction JSCallReducer::ReduceMathHypot(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->ZeroConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ NodeVector values(graph()->zone());
+
+ Node* max = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ NodeProperties::GetValueInput(node, 2), effect, control);
+ max = graph()->NewNode(simplified()->NumberAbs(), max);
+ values.push_back(max);
+ for (int i = 3; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ NodeProperties::GetValueInput(node, i), effect, control);
+ input = graph()->NewNode(simplified()->NumberAbs(), input);
+ values.push_back(input);
+
+ // Make sure {max} is NaN in the end in case any argument was NaN.
+ max = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), input, max),
+ max, input);
+ }
+
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), max,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0 = jsgraph()->ZeroConstant();
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(simplified()->NumberEqual(), max,
+ jsgraph()->Constant(V8_INFINITY));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = jsgraph()->Constant(V8_INFINITY);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ // Kahan summation to avoid rounding errors.
+ // Normalize the numbers to the largest one to avoid overflow.
+ Node* sum = jsgraph()->ZeroConstant();
+ Node* compensation = jsgraph()->ZeroConstant();
+ for (Node* value : values) {
+ Node* n = graph()->NewNode(simplified()->NumberDivide(), value, max);
+ Node* summand = graph()->NewNode(
+ simplified()->NumberSubtract(),
+ graph()->NewNode(simplified()->NumberMultiply(), n, n),
+ compensation);
+ Node* preliminary =
+ graph()->NewNode(simplified()->NumberAdd(), sum, summand);
+ compensation = graph()->NewNode(
+ simplified()->NumberSubtract(),
+ graph()->NewNode(simplified()->NumberSubtract(), preliminary, sum),
+ summand);
+ sum = preliminary;
+ }
+ vfalse1 = graph()->NewNode(
+ simplified()->NumberMultiply(),
+ graph()->NewNode(simplified()->NumberSqrt(), sum), max);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
+ vfalse0, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
@@ -274,6 +368,8 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
size_t arity = p.arity();
@@ -381,9 +477,17 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
}
}
// Change {node} to the new {JSCall} operator.
+ // TODO(mslekova): Since this introduces a Call that will get optimized by
+ // the JSCallReducer, we basically might have to do all the serialization
+ // that we do for that here as well. The only difference is that here we
+ // disable speculation (cf. the empty VectorSlotPair above), causing the
+ // JSCallReducer to do much less work. We should revisit this later.
NodeProperties::ChangeOp(
node,
javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
+ // TODO(mslekova): Remove once ReduceJSCall is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -496,6 +600,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
Node* target = NodeProperties::GetValueInput(node, 0);
@@ -508,6 +614,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
HeapObjectMatcher m(target);
if (m.HasValue()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
+ if (FLAG_concurrent_inlining && !function.serialized()) {
+ TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function);
+ return NoChange();
+ }
context = jsgraph()->Constant(function.context());
} else {
context = effect = graph()->NewNode(
@@ -537,6 +647,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
NodeProperties::ChangeOp(
node,
javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
+ // TODO(mslekova): Remove once ReduceJSCall is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -588,7 +701,6 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
MapRef object_map(broker(), object_maps[i]);
object_map.SerializePrototype();
if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
- object_map.has_hidden_prototype() ||
!object_map.prototype().equals(candidate_prototype)) {
// We exclude special receivers, like JSProxy or API objects that
// might require access checks here; we also don't want to deal
@@ -1002,27 +1114,28 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
return true;
}
-bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
- MapHandles const& receiver_maps,
- ElementsKind* kind_return,
- bool builtin_is_push = false) {
+bool CanInlineArrayResizingBuiltin(
+ JSHeapBroker* broker, MapHandles const& receiver_maps,
+ std::vector<ElementsKind>& kinds, // NOLINT(runtime/references)
+ bool builtin_is_push = false) {
DCHECK_NE(0, receiver_maps.size());
- *kind_return = MapRef(broker, receiver_maps[0]).elements_kind();
for (auto receiver_map : receiver_maps) {
MapRef map(broker, receiver_map);
if (!map.supports_fast_array_resize()) return false;
- if (builtin_is_push) {
- if (!UnionElementsKindUptoPackedness(kind_return, map.elements_kind())) {
- return false;
- }
- } else {
- // TODO(turbofan): We should also handle fast holey double elements once
- // we got the hole NaN mess sorted out in TurboFan/V8.
- if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS ||
- !UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
- return false;
+ // TODO(turbofan): We should also handle fast holey double elements once
+ // we got the hole NaN mess sorted out in TurboFan/V8.
+ if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS && !builtin_is_push) {
+ return false;
+ }
+ ElementsKind current_kind = map.elements_kind();
+ auto kind_ptr = kinds.data();
+ size_t i;
+ for (i = 0; i < kinds.size(); i++, kind_ptr++) {
+ if (UnionElementsKindUptoPackedness(kind_ptr, current_kind)) {
+ break;
}
}
+ if (i == kinds.size()) kinds.push_back(current_kind);
}
return true;
}
@@ -2735,6 +2848,8 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
Reduction JSCallReducer::ReduceCallApiFunction(
Node* node, const SharedFunctionInfoRef& shared) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
@@ -2750,78 +2865,21 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
- // See if we can optimize this API call to {shared}.
- Handle<FunctionTemplateInfo> function_template_info(
- FunctionTemplateInfo::cast(shared.object()->function_data()), isolate());
- CallOptimization call_optimization(isolate(), function_template_info);
- if (!call_optimization.is_simple_api_call()) return NoChange();
-
- // Try to infer the {receiver} maps from the graph.
- MapInference inference(broker(), receiver, effect);
- if (inference.HaveMaps()) {
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check that all {receiver_maps} are actually JSReceiver maps and
- // that the {function_template_info} accepts them without access
- // checks (even if "access check needed" is set for {receiver}).
- //
- // Note that we don't need to know the concrete {receiver} maps here,
- // meaning it's fine if the {receiver_maps} are unreliable, and we also
- // don't need to install any stability dependencies, since the only
- // relevant information regarding the {receiver} is the Map::constructor
- // field on the root map (which is different from the JavaScript exposed
- // "constructor" property) and that field cannot change.
- //
- // So if we know that {receiver} had a certain constructor at some point
- // in the past (i.e. it had a certain map), then this constructor is going
- // to be the same later, since this information cannot change with map
- // transitions.
- //
- // The same is true for the instance type, e.g. we still know that the
- // instance type is JSObject even if that information is unreliable, and
- // the "access check needed" bit, which also cannot change later.
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() &&
- !function_template_info->accept_any_receiver())) {
- return inference.NoChange();
- }
- }
-
- // See if we can constant-fold the compatible receiver checks.
- CallOptimization::HolderLookup lookup;
- Handle<JSObject> api_holder =
- call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup);
- if (lookup == CallOptimization::kHolderNotFound)
- return inference.NoChange();
- for (size_t i = 1; i < receiver_maps.size(); ++i) {
- CallOptimization::HolderLookup lookupi;
- Handle<JSObject> holderi = call_optimization.LookupHolderOfExpectedType(
- receiver_maps[i], &lookupi);
- if (lookup != lookupi) return inference.NoChange();
- if (!api_holder.is_identical_to(holderi)) return inference.NoChange();
- }
+ if (!shared.function_template_info().has_value()) {
+ TRACE_BROKER_MISSING(
+ broker(), "FunctionTemplateInfo for function with SFI " << shared);
+ return NoChange();
+ }
- if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation &&
- !inference.RelyOnMapsViaStability(dependencies())) {
- // We were not able to make the receiver maps reliable without map checks
- // but doing map checks would lead to deopt loops, so give up.
- return inference.NoChange();
- }
+ // See if we can optimize this API call to {shared}.
+ FunctionTemplateInfoRef function_template_info(
+ shared.function_template_info().value());
- // TODO(neis): The maps were used in a way that does not actually require
- // map checks or stability dependencies.
- inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
- control, p.feedback());
+ if (!function_template_info.has_call_code()) return NoChange();
- // Determine the appropriate holder for the {lookup}.
- holder = lookup == CallOptimization::kHolderFound
- ? jsgraph()->HeapConstant(api_holder)
- : receiver;
- } else if (function_template_info->accept_any_receiver() &&
- function_template_info->signature().IsUndefined(isolate())) {
- // We haven't found any {receiver_maps}, but we might still be able to
+ if (function_template_info.accept_any_receiver() &&
+ function_template_info.is_signature_undefined()) {
+ // We might be able to
// optimize the API call depending on the {function_template_info}.
// If the API function accepts any kind of {receiver}, we only need to
// ensure that the {receiver} is actually a JSReceiver at this point,
@@ -2840,51 +2898,127 @@ Reduction JSCallReducer::ReduceCallApiFunction(
graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
receiver, global_proxy, effect, control);
} else {
- // We don't have enough information to eliminate the access check
- // and/or the compatible receiver check, so use the generic builtin
- // that does those checks dynamically. This is still significantly
- // faster than the generic call sequence.
- Builtins::Name builtin_name =
- !function_template_info->accept_any_receiver()
- ? (function_template_info->signature().IsUndefined(isolate())
- ? Builtins::kCallFunctionTemplate_CheckAccess
- : Builtins::
- kCallFunctionTemplate_CheckAccessAndCompatibleReceiver)
- : Builtins::kCallFunctionTemplate_CheckCompatibleReceiver;
-
- // The CallFunctionTemplate builtin requires the {receiver} to be
- // an actual JSReceiver, so make sure we do the proper conversion
- // first if necessary.
- receiver = holder = effect =
- graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
- receiver, global_proxy, effect, control);
+ // Try to infer the {receiver} maps from the graph.
+ MapInference inference(broker(), receiver, effect);
+ if (inference.HaveMaps()) {
+ MapHandles const& receiver_maps = inference.GetMaps();
+ MapRef first_receiver_map(broker(), receiver_maps[0]);
+
+ // See if we can constant-fold the compatible receiver checks.
+ HolderLookupResult api_holder =
+ function_template_info.LookupHolderOfExpectedType(first_receiver_map,
+ false);
+ if (api_holder.lookup == CallOptimization::kHolderNotFound)
+ return inference.NoChange();
- Callable callable = Builtins::CallableFor(isolate(), builtin_name);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->ReplaceInput(1, jsgraph()->HeapConstant(function_template_info));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
- node->ReplaceInput(3, receiver); // Update receiver input.
- node->ReplaceInput(6 + argc, effect); // Update effect input.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- return Changed(node);
+ // Check that all {receiver_maps} are actually JSReceiver maps and
+ // that the {function_template_info} accepts them without access
+ // checks (even if "access check needed" is set for {receiver}).
+ //
+ // Note that we don't need to know the concrete {receiver} maps here,
+ // meaning it's fine if the {receiver_maps} are unreliable, and we also
+ // don't need to install any stability dependencies, since the only
+ // relevant information regarding the {receiver} is the Map::constructor
+ // field on the root map (which is different from the JavaScript exposed
+ // "constructor" property) and that field cannot change.
+ //
+ // So if we know that {receiver} had a certain constructor at some point
+ // in the past (i.e. it had a certain map), then this constructor is going
+ // to be the same later, since this information cannot change with map
+ // transitions.
+ //
+ // The same is true for the instance type, e.g. we still know that the
+ // instance type is JSObject even if that information is unreliable, and
+ // the "access check needed" bit, which also cannot change later.
+ CHECK(first_receiver_map.IsJSReceiverMap());
+ CHECK(!first_receiver_map.is_access_check_needed() ||
+ function_template_info.accept_any_receiver());
+
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ MapRef receiver_map(broker(), receiver_maps[i]);
+ HolderLookupResult holder_i =
+ function_template_info.LookupHolderOfExpectedType(receiver_map,
+ false);
+
+ if (api_holder.lookup != holder_i.lookup) return inference.NoChange();
+ if (!(api_holder.holder.has_value() && holder_i.holder.has_value()))
+ return inference.NoChange();
+ if (!api_holder.holder->equals(*holder_i.holder))
+ return inference.NoChange();
+
+ CHECK(receiver_map.IsJSReceiverMap());
+ CHECK(!receiver_map.is_access_check_needed() ||
+ function_template_info.accept_any_receiver());
+ }
+
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation &&
+ !inference.RelyOnMapsViaStability(dependencies())) {
+ // We were not able to make the receiver maps reliable without map
+ // checks but doing map checks would lead to deopt loops, so give up.
+ return inference.NoChange();
+ }
+
+ // TODO(neis): The maps were used in a way that does not actually require
+ // map checks or stability dependencies.
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
+
+ // Determine the appropriate holder for the {lookup}.
+ holder = api_holder.lookup == CallOptimization::kHolderFound
+ ? jsgraph()->Constant(*api_holder.holder)
+ : receiver;
+ } else {
+ // We don't have enough information to eliminate the access check
+ // and/or the compatible receiver check, so use the generic builtin
+ // that does those checks dynamically. This is still significantly
+ // faster than the generic call sequence.
+ Builtins::Name builtin_name;
+ if (function_template_info.accept_any_receiver()) {
+ builtin_name = Builtins::kCallFunctionTemplate_CheckCompatibleReceiver;
+ } else if (function_template_info.is_signature_undefined()) {
+ builtin_name = Builtins::kCallFunctionTemplate_CheckAccess;
+ } else {
+ builtin_name =
+ Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver;
+ }
+
+ // The CallFunctionTemplate builtin requires the {receiver} to be
+ // an actual JSReceiver, so make sure we do the proper conversion
+ // first if necessary.
+ receiver = holder = effect =
+ graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
+ receiver, global_proxy, effect, control);
+
+ Callable callable = Builtins::CallableFor(isolate(), builtin_name);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->ReplaceInput(1, jsgraph()->Constant(function_template_info));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ node->ReplaceInput(3, receiver); // Update receiver input.
+ node->ReplaceInput(6 + argc, effect); // Update effect input.
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ return Changed(node);
+ }
}
// TODO(turbofan): Consider introducing a JSCallApiCallback operator for
// this and lower it during JSGenericLowering, and unify this with the
// JSNativeContextSpecialization::InlineApiCall method a bit.
- Handle<CallHandlerInfo> call_handler_info(
- CallHandlerInfo::cast(function_template_info->call_code()), isolate());
- Handle<Object> data(call_handler_info->data(), isolate());
+ if (!function_template_info.call_code().has_value()) {
+ TRACE_BROKER_MISSING(broker(), "call code for function template info "
+ << function_template_info);
+ return NoChange();
+ }
+ CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor cid = call_api_callback.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), cid, argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState);
- ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+ ApiFunction api_function(call_handler_info.callback());
ExternalReference function_reference = ExternalReference::Create(
&api_function, ExternalReference::DIRECT_API_CALL);
@@ -2895,7 +3029,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
jsgraph()->HeapConstant(call_api_callback.code()));
node->ReplaceInput(1, jsgraph()->ExternalConstant(function_reference));
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(data));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(call_handler_info.data()));
node->InsertInput(graph()->zone(), 4, holder);
node->ReplaceInput(5, receiver); // Update receiver input.
node->ReplaceInput(7 + argc, continuation_frame_state);
@@ -3495,6 +3630,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceMathUnary(node, simplified()->NumberFloor());
case Builtins::kMathFround:
return ReduceMathUnary(node, simplified()->NumberFround());
+ case Builtins::kMathHypot:
+ return ReduceMathHypot(node);
case Builtins::kMathLog:
return ReduceMathUnary(node, simplified()->NumberLog());
case Builtins::kMathLog1p:
@@ -3563,8 +3700,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceStringPrototypeStringAt(simplified()->StringCharCodeAt(),
node);
case Builtins::kStringPrototypeCodePointAt:
- return ReduceStringPrototypeStringAt(
- simplified()->StringCodePointAt(UnicodeEncoding::UTF32), node);
+ return ReduceStringPrototypeStringAt(simplified()->StringCodePointAt(),
+ node);
case Builtins::kStringPrototypeSubstring:
return ReduceStringPrototypeSubstring(node);
case Builtins::kStringPrototypeSlice:
@@ -3642,18 +3779,23 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceDateNow(node);
case Builtins::kNumberConstructor:
return ReduceNumberConstructor(node);
+ case Builtins::kBigIntAsUintN:
+ return ReduceBigIntAsUintN(node);
default:
break;
}
- if (!TracingFlags::is_runtime_stats_enabled() &&
- shared.object()->IsApiFunction()) {
+ if (shared.object()->IsApiFunction()) {
return ReduceCallApiFunction(node, shared);
}
return NoChange();
}
Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
+ // TODO(mslekova): Remove once ReduceJSCallWithArrayLike is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
VectorSlotPair feedback;
@@ -4250,6 +4392,52 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
return Changed(node);
}
+Node* JSCallReducer::LoadReceiverElementsKind(Node* receiver, Node** effect,
+ Node** control) {
+ Node* receiver_map = *effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, *effect, *control);
+ Node* receiver_bit_field2 = *effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField2()), receiver_map,
+ *effect, *control);
+ Node* receiver_elements_kind = graph()->NewNode(
+ simplified()->NumberShiftRightLogical(),
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), receiver_bit_field2,
+ jsgraph()->Constant(Map::ElementsKindBits::kMask)),
+ jsgraph()->Constant(Map::ElementsKindBits::kShift));
+ return receiver_elements_kind;
+}
+
+void JSCallReducer::CheckIfElementsKind(Node* receiver_elements_kind,
+ ElementsKind kind, Node* control,
+ Node** if_true, Node** if_false) {
+ Node* is_packed_kind =
+ graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind,
+ jsgraph()->Constant(GetPackedElementsKind(kind)));
+ Node* packed_branch =
+ graph()->NewNode(common()->Branch(), is_packed_kind, control);
+ Node* if_packed = graph()->NewNode(common()->IfTrue(), packed_branch);
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* if_not_packed = graph()->NewNode(common()->IfFalse(), packed_branch);
+ Node* is_holey_kind =
+ graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind,
+ jsgraph()->Constant(GetHoleyElementsKind(kind)));
+ Node* holey_branch =
+ graph()->NewNode(common()->Branch(), is_holey_kind, if_not_packed);
+ Node* if_holey = graph()->NewNode(common()->IfTrue(), holey_branch);
+
+ Node* if_not_packed_not_holey =
+ graph()->NewNode(common()->IfFalse(), holey_branch);
+
+ *if_true = graph()->NewNode(common()->Merge(2), if_packed, if_holey);
+ *if_false = if_not_packed_not_holey;
+ } else {
+ *if_true = if_packed;
+ *if_false = graph()->NewNode(common()->IfFalse(), packed_branch);
+ }
+}
+
// ES6 section 22.1.3.18 Array.prototype.push ( )
Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -4267,81 +4455,121 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind, true)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds, true)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Collect the value inputs to push.
- std::vector<Node*> values(num_values);
- for (int i = 0; i < num_values; ++i) {
- values[i] = NodeProperties::GetValueInput(node, 2 + i);
- }
-
- for (auto& value : values) {
- if (IsSmiElementsKind(kind)) {
- value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- value, effect, control);
- } else if (IsDoubleElementsKind(kind)) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(p.feedback()),
- value, effect, control);
- // Make sure we do not store signaling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* return_value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+ // We do not need branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
}
- }
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
- Node* value = length;
+ // Collect the value inputs to push.
+ std::vector<Node*> values(num_values);
+ for (int i = 0; i < num_values; ++i) {
+ values[i] = NodeProperties::GetValueInput(node, 2 + i);
+ }
- // Check if we have any {values} to push.
- if (num_values > 0) {
- // Compute the resulting "length" of the {receiver}.
- Node* new_length = value = graph()->NewNode(
- simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
+ for (auto& value : values) {
+ if (IsSmiElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ value, effect, control);
+ } else if (IsDoubleElementsKind(kind)) {
+ value = effect = graph()->NewNode(
+ simplified()->CheckNumber(p.feedback()), value, effect, control);
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+ }
- // Load the elements backing store of the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
- Node* elements_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
- effect, control);
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
+ return_value = length;
- GrowFastElementsMode mode =
- IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
- : GrowFastElementsMode::kSmiOrObjectElements;
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
- elements,
- graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(num_values - 1)),
- elements_length, effect, control);
-
- // Update the JSArray::length field. Since this is observable,
- // there must be no other check after this.
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, new_length, effect, control);
+ // Check if we have any {values} to push.
+ if (num_values > 0) {
+ // Compute the resulting "length" of the {receiver}.
+ Node* new_length = return_value = graph()->NewNode(
+ simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
- // Append the {values} to the {elements}.
- for (int i = 0; i < num_values; ++i) {
- Node* value = values[i];
- Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(i));
+ // Load the elements backing store of the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(kind)
+ ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
+ elements,
+ graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(num_values - 1)),
+ elements_length, effect, control);
+
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
effect = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, index, value, effect, control);
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, new_length, effect, control);
+
+ // Append the {values} to the {elements}.
+ for (int i = 0; i < num_values; ++i) {
+ Node* value = values[i];
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(i));
+ effect =
+ graph()->NewNode(simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(kind)),
+ elements, index, value, effect, control);
+ }
}
+
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(return_value);
}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
+
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
+ return_value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
+ }
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
}
// ES6 section 22.1.3.17 Array.prototype.pop ( )
@@ -4360,79 +4588,117 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+ // We do not need branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
+ }
- // Check if the {receiver} has any elements.
- Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->UndefinedConstant();
+ // Check if the {receiver} has any elements.
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // TODO(tebbi): We should trim the backing store if the capacity is too
- // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
-
- // Load the elements backing store from the {receiver}.
- Node* elements = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- efalse, if_false);
-
- // Ensure that we aren't popping from a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(kind)) {
- elements = efalse =
- graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
- elements, efalse, if_false);
- }
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // TODO(tebbi): We should trim the backing store if the capacity is too
+ // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+
+ // Load the elements backing store from the {receiver}.
+ Node* elements = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, efalse, if_false);
+
+ // Ensure that we aren't popping from a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, efalse, if_false);
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
- // Store the new {length} to the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, length, efalse, if_false);
+ // Store the new {length} to the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, efalse, if_false);
+
+ // Load the last entry from the {elements}.
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, length, efalse, if_false);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
- // Load the last entry from the {elements}.
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, length, efalse, if_false);
+ // Convert the hole to undefined. Do this last, so that we can optimize
+ // conversion operator via some smart strength reduction in many cases.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
- // Store a hole to the element we just removed from the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
- elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(value);
}
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(kind)) {
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
}
ReplaceWithValue(node, value, effect, control);
@@ -4458,151 +4724,172 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Load length of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+ // We do not need branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
+ }
- // Return undefined if {receiver} has no elements.
- Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+ // Load length of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->UndefinedConstant();
+ // Return undefined if {receiver} has no elements.
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if we should take the fast-path.
- Node* check1 =
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
- jsgraph()->Constant(JSArray::kMaxCopyElements));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->UndefinedConstant();
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
{
- Node* elements = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, etrue1, if_true1);
-
- // Load the first element here, which we return below.
- vtrue1 = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+ // Check if we should take the fast-path.
+ Node* check1 =
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+ jsgraph()->Constant(JSArray::kMaxCopyElements));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ Node* elements = etrue1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, etrue1, if_true1);
- // Ensure that we aren't shifting a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(kind)) {
- elements = etrue1 =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, etrue1, if_true1);
- }
+ // Load the first element here, which we return below.
+ vtrue1 = etrue1 = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement(kind)),
+ elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+
+ // Ensure that we aren't shifting a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = etrue1 =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, etrue1, if_true1);
+ }
- // Shift the remaining {elements} by one towards the start.
- Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
- Node* eloop =
- graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* index = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->OneConstant(),
- jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
+ // Shift the remaining {elements} by one towards the start.
+ Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* index = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->OneConstant(),
+ jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
- {
- Node* check2 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
+ {
+ Node* check2 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
- if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
- etrue1 = eloop;
+ if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
+ etrue1 = eloop;
- Node* control = graph()->NewNode(common()->IfTrue(), branch2);
- Node* effect = etrue1;
+ Node* control = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* effect = etrue1;
- ElementAccess const access = AccessBuilder::ForFixedArrayElement(kind);
- Node* value = effect =
- graph()->NewNode(simplified()->LoadElement(access), elements, index,
- effect, control);
- effect =
- graph()->NewNode(simplified()->StoreElement(access), elements,
- graph()->NewNode(simplified()->NumberSubtract(),
- index, jsgraph()->OneConstant()),
- value, effect, control);
-
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
- index->ReplaceInput(1,
- graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant()));
- }
+ ElementAccess const access =
+ AccessBuilder::ForFixedArrayElement(kind);
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadElement(access), elements,
+ index, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreElement(access), elements,
+ graph()->NewNode(simplified()->NumberSubtract(), index,
+ jsgraph()->OneConstant()),
+ value, effect, control);
+
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+ index->ReplaceInput(1,
+ graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant()));
+ }
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
- // Store the new {length} to the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, length, etrue1, if_true1);
+ // Store the new {length} to the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, etrue1, if_true1);
- // Store a hole to the element we just removed from the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
- elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
- }
+ // Store a hole to the element we just removed from the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
+ GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
+ }
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Call the generic C++ implementation.
- const int builtin_index = Builtins::kArrayShift;
- auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
- graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
- Builtins::name(builtin_index), node->op()->properties(),
- CallDescriptor::kNeedsFrameState);
- Node* stub_code =
- jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack, true);
- Address builtin_entry = Builtins::CppEntryOf(builtin_index);
- Node* entry =
- jsgraph()->ExternalConstant(ExternalReference::Create(builtin_entry));
- Node* argc =
- jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
- if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(call_descriptor), stub_code, receiver,
- jsgraph()->PaddingConstant(), argc, target,
- jsgraph()->UndefinedConstant(), entry, argc, context,
- frame_state, efalse1, if_false1);
- }
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ // Call the generic C++ implementation.
+ const int builtin_index = Builtins::kArrayShift;
+ auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
+ graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
+ Builtins::name(builtin_index), node->op()->properties(),
+ CallDescriptor::kNeedsFrameState);
+ Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs,
+ kArgvOnStack, true);
+ Address builtin_entry = Builtins::CppEntryOf(builtin_index);
+ Node* entry = jsgraph()->ExternalConstant(
+ ExternalReference::Create(builtin_entry));
+ Node* argc =
+ jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
+ if_false1 = efalse1 = vfalse1 =
+ graph()->NewNode(common()->Call(call_descriptor), stub_code,
+ receiver, jsgraph()->PaddingConstant(), argc,
+ target, jsgraph()->UndefinedConstant(), entry,
+ argc, context, frame_state, efalse1, if_false1);
+ }
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
// Convert the hole to undefined. Do this last, so that we can optimize
// conversion operator via some smart strength reduction in many cases.
@@ -4611,8 +4898,27 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(value);
+ }
+
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
+
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
+ value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
// ES6 section 22.1.3.23 Array.prototype.slice ( )
@@ -5230,8 +5536,8 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
graph()->NewNode(simplified()->CheckBounds(p.feedback()), input,
jsgraph()->Constant(0x10FFFF + 1), effect, control);
- Node* value = graph()->NewNode(
- simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF32), input);
+ Node* value =
+ graph()->NewNode(simplified()->StringFromSingleCodePoint(), input);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
@@ -5287,12 +5593,8 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
Node* vtrue0;
{
done_true = jsgraph()->FalseConstant();
- Node* codepoint = etrue0 = graph()->NewNode(
- simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string, index,
- etrue0, if_true0);
- vtrue0 = graph()->NewNode(
- simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF16),
- codepoint);
+ vtrue0 = etrue0 = graph()->NewNode(simplified()->StringFromCodePointAt(),
+ string, index, etrue0, if_true0);
// Update iterator.[[NextIndex]]
Node* char_length = graph()->NewNode(simplified()->StringLength(), vtrue0);
@@ -5396,6 +5698,8 @@ Node* JSCallReducer::CreateArtificialFrameState(
}
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
int arity = static_cast<int>(p.arity() - 2);
@@ -5404,7 +5708,6 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* executor = NodeProperties::GetValueInput(node, 1);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
-
Node* context = NodeProperties::GetContextInput(node);
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -5459,7 +5762,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
// Allocate a promise context for the closures below.
Node* promise_context = effect = graph()->NewNode(
javascript()->CreateFunctionContext(
- handle(native_context().object()->scope_info(), isolate()),
+ native_context().scope_info().object(),
PromiseBuiltins::kPromiseContextLength - Context::MIN_CONTEXT_SLOTS,
FUNCTION_SCOPE),
context, effect, control);
@@ -5477,21 +5780,13 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
promise_context, jsgraph()->TrueConstant(), effect, control);
// Allocate the closure for the resolve case.
- SharedFunctionInfoRef resolve_shared =
- native_context().promise_capability_default_resolve_shared_fun();
- Node* resolve = effect = graph()->NewNode(
- javascript()->CreateClosure(
- resolve_shared.object(), factory()->many_closures_cell(),
- handle(resolve_shared.object()->GetCode(), isolate())),
+ Node* resolve = effect = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_capability_default_resolve_shared_fun(),
promise_context, effect, control);
// Allocate the closure for the reject case.
- SharedFunctionInfoRef reject_shared =
- native_context().promise_capability_default_reject_shared_fun();
- Node* reject = effect = graph()->NewNode(
- javascript()->CreateClosure(
- reject_shared.object(), factory()->many_closures_cell(),
- handle(reject_shared.object()->GetCode(), isolate())),
+ Node* reject = effect = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_capability_default_reject_shared_fun(),
promise_context, effect, control);
const std::vector<Node*> checkpoint_parameters_continuation(
@@ -5624,6 +5919,30 @@ Reduction JSCallReducer::ReducePromiseInternalResolve(Node* node) {
return Replace(value);
}
+bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
+ if (!inference->HaveMaps()) return false;
+ MapHandles const& receiver_maps = inference->GetMaps();
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSPromiseMap()) return false;
+ if (!FLAG_concurrent_inlining) {
+ receiver_map.SerializePrototype();
+ } else if (!receiver_map.serialized_prototype()) {
+ TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map);
+ return false;
+ }
+ if (!receiver_map.prototype().equals(
+ native_context().promise_prototype())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
// ES section #sec-promise.prototype.catch
Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -5637,20 +5956,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
if (!dependencies()->DependOnPromiseThenProtector())
return inference.NoChange();
@@ -5675,8 +5981,21 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
+Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
+ SharedFunctionInfoRef shared, Node* context, Node* effect, Node* control) {
+ DCHECK(shared.HasBuiltinId());
+ Callable const callable = Builtins::CallableFor(
+ isolate(), static_cast<Builtins::Name>(shared.builtin_id()));
+ return graph()->NewNode(
+ javascript()->CreateClosure(
+ shared.object(), factory()->many_closures_cell(), callable.code()),
+ context, effect, control);
+}
+
// ES section #sec-promise.prototype.finally
Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int arity = static_cast<int>(p.arity() - 2);
@@ -5690,21 +6009,9 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
}
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
-
if (!dependencies()->DependOnPromiseHookProtector())
return inference.NoChange();
if (!dependencies()->DependOnPromiseThenProtector())
@@ -5730,13 +6037,13 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
jsgraph()->Constant(native_context().promise_function());
// Allocate shared context for the closures below.
- context = etrue = graph()->NewNode(
- javascript()->CreateFunctionContext(
- handle(native_context().object()->scope_info(), isolate()),
- PromiseBuiltins::kPromiseFinallyContextLength -
- Context::MIN_CONTEXT_SLOTS,
- FUNCTION_SCOPE),
- context, etrue, if_true);
+ context = etrue =
+ graph()->NewNode(javascript()->CreateFunctionContext(
+ native_context().scope_info().object(),
+ PromiseBuiltins::kPromiseFinallyContextLength -
+ Context::MIN_CONTEXT_SLOTS,
+ FUNCTION_SCOPE),
+ context, etrue, if_true);
etrue = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForContextSlot(PromiseBuiltins::kOnFinallySlot)),
@@ -5747,22 +6054,14 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
context, constructor, etrue, if_true);
// Allocate the closure for the reject case.
- SharedFunctionInfoRef catch_finally =
- native_context().promise_catch_finally_shared_fun();
- catch_true = etrue = graph()->NewNode(
- javascript()->CreateClosure(
- catch_finally.object(), factory()->many_closures_cell(),
- handle(catch_finally.object()->GetCode(), isolate())),
- context, etrue, if_true);
+ catch_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_catch_finally_shared_fun(), context, etrue,
+ if_true);
// Allocate the closure for the fulfill case.
- SharedFunctionInfoRef then_finally =
- native_context().promise_then_finally_shared_fun();
- then_true = etrue = graph()->NewNode(
- javascript()->CreateClosure(
- then_finally.object(), factory()->many_closures_cell(),
- handle(then_finally.object()->GetCode(), isolate())),
- context, etrue, if_true);
+ then_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_then_finally_shared_fun(), context, etrue,
+ if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -5810,6 +6109,8 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
}
Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -5829,20 +6130,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
if (!dependencies()->DependOnPromiseHookProtector())
return inference.NoChange();
@@ -5889,6 +6177,8 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// ES section #sec-promise.resolve
Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* value = node->op()->ValueInputCount() > 2
@@ -6828,8 +7118,11 @@ Reduction JSCallReducer::ReduceNumberParseInt(Node* node) {
}
Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
+ DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
if (FLAG_force_slow_path) return NoChange();
if (node->op()->ValueInputCount() < 3) return NoChange();
+
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
@@ -6846,13 +7139,24 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
}
MapHandles const& regexp_maps = inference.GetMaps();
- // Compute property access info for "exec" on {resolution}.
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- access_info_factory.ComputePropertyAccessInfos(
- MapHandles(regexp_maps.begin(), regexp_maps.end()),
- factory()->exec_string(), AccessMode::kLoad, &access_infos);
+ if (!FLAG_concurrent_inlining) {
+ // Compute property access info for "exec" on {resolution}.
+ access_info_factory.ComputePropertyAccessInfos(
+ MapHandles(regexp_maps.begin(), regexp_maps.end()),
+ factory()->exec_string(), AccessMode::kLoad, &access_infos);
+ } else {
+ // Obtain precomputed access infos from the broker.
+ for (auto map : regexp_maps) {
+ MapRef map_ref(broker(), map);
+ PropertyAccessInfo access_info =
+ broker()->GetAccessInfoForLoadingExec(map_ref);
+ access_infos.push_back(access_info);
+ }
+ }
+
PropertyAccessInfo ai_exec =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
@@ -6864,34 +7168,24 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Do not reduce if the exec method is not on the prototype chain.
if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
+ JSObjectRef holder_ref(broker(), holder);
+
// Bail out if the exec method is not the original one.
- Handle<Object> constant = JSObject::FastPropertyAt(
- holder, ai_exec.field_representation(), ai_exec.field_index());
- if (!constant.is_identical_to(isolate()->regexp_exec_function())) {
+ base::Optional<ObjectRef> constant = holder_ref.GetOwnProperty(
+ ai_exec.field_representation(), ai_exec.field_index());
+ if (!constant.has_value() ||
+ !constant->equals(native_context().regexp_exec_function())) {
return inference.NoChange();
}
- // Protect the exec method change in the holder.
- Handle<Object> exec_on_proto;
- MapRef holder_map(broker(), handle(holder->map(), isolate()));
- Handle<DescriptorArray> descriptors(
- holder_map.object()->instance_descriptors(), isolate());
- int descriptor_index =
- descriptors->Search(*(factory()->exec_string()), *holder_map.object());
- CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
- holder_map.SerializeOwnDescriptors();
- dependencies()->DependOnFieldType(holder_map, descriptor_index);
- } else {
- return inference.NoChange();
- }
-
- // Add proper dependencies on the {regexp}s [[Prototype]]s.
- Handle<JSObject> holder;
- if (ai_exec.holder().ToHandle(&holder)) {
+ // Add proper dependencies on the {regexp}s [[Prototype]]s.
dependencies()->DependOnStablePrototypeChains(
ai_exec.receiver_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
+ } else {
+ return inference.NoChange();
}
+
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -6955,12 +7249,47 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
return Changed(node);
}
+Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
+ if (!jsgraph()->machine()->Is64()) {
+ return NoChange();
+ }
+
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* bits = NodeProperties::GetValueInput(node, 2);
+ Node* value = NodeProperties::GetValueInput(node, 3);
+
+ NumberMatcher matcher(bits);
+ if (matcher.IsInteger() && matcher.IsInRange(0, 64)) {
+ const int bits_value = static_cast<int>(matcher.Value());
+ value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()),
+ value, effect, control);
+ value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+
+ return NoChange();
+}
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
Factory* JSCallReducer::factory() const { return isolate()->factory(); }
+NativeContextRef JSCallReducer::native_context() const {
+ return broker()->native_context();
+}
+
CommonOperatorBuilder* JSCallReducer::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 02821ebb0d..bf3676c5b2 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -29,6 +29,7 @@ struct FieldAccess;
class JSGraph;
class JSHeapBroker;
class JSOperatorBuilder;
+class MapInference;
class NodeProperties;
class SimplifiedOperatorBuilder;
@@ -155,6 +156,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathClz32(Node* node);
Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value);
+ Reduction ReduceMathHypot(Node* node);
Reduction ReduceNumberIsFinite(Node* node);
Reduction ReduceNumberIsInteger(Node* node);
@@ -190,6 +192,15 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceNumberConstructor(Node* node);
+ Reduction ReduceBigIntAsUintN(Node* node);
+
+ // Helper to verify promise receiver maps are as expected.
+ // On bailout from a reduction, be sure to return inference.NoChange().
+ bool DoPromiseChecks(MapInference* inference);
+
+ Node* CreateClosureFromBuiltinSharedFunctionInfo(SharedFunctionInfoRef shared,
+ Node* context, Node* effect,
+ Node* control);
// Returns the updated {to} node, and updates control and effect along the
// way.
@@ -231,12 +242,16 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
const SharedFunctionInfoRef& shared,
Node* context = nullptr);
+ void CheckIfElementsKind(Node* receiver_elements_kind, ElementsKind kind,
+ Node* control, Node** if_true, Node** if_false);
+ Node* LoadReceiverElementsKind(Node* receiver, Node** effect, Node** control);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
Factory* factory() const;
- NativeContextRef native_context() const { return broker()->native_context(); }
+ NativeContextRef native_context() const;
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index dea6d7fc2b..035e8b7ceb 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -6,6 +6,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -144,9 +145,10 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
- concrete.SerializeContextChain(); // TODO(neis): Remove later.
- for (; depth > 0; --depth) {
- concrete = concrete.previous();
+ concrete = concrete.previous(&depth);
+ if (depth > 0) {
+ TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
if (!access.immutable()) {
@@ -157,8 +159,6 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// This will hold the final value, if we can figure it out.
base::Optional<ObjectRef> maybe_value;
-
- concrete.SerializeSlot(static_cast<int>(access.index()));
maybe_value = concrete.get(static_cast<int>(access.index()));
if (maybe_value.has_value() && !maybe_value->IsSmi()) {
// Even though the context slot is immutable, the context might have escaped
@@ -174,6 +174,9 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
}
if (!maybe_value.has_value()) {
+ TRACE_BROKER_MISSING(broker(), "slot value " << access.index()
+ << " for context "
+ << concrete);
return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
@@ -207,9 +210,10 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
- concrete.SerializeContextChain(); // TODO(neis): Remove later.
- for (; depth > 0; --depth) {
- concrete = concrete.previous();
+ concrete = concrete.previous(&depth);
+ if (depth > 0) {
+ TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
+ return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
}
return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 8fc8dd1308..4e69db6b9b 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -837,7 +837,7 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
iterated_object, effect, control);
- // Create the JSArrayIterator result.
+ // Create the JSCollectionIterator result.
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(JSCollectionIterator::kSize, AllocationType::kYoung,
Type::OtherObject());
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index a3805ec125..43a4beadee 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -128,9 +128,17 @@ void JSGraph::GetCachedNodes(NodeVector* nodes) {
DEFINE_GETTER(AllocateInYoungGenerationStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), AllocateInYoungGeneration)))
+DEFINE_GETTER(AllocateRegularInYoungGenerationStubConstant,
+ HeapConstant(BUILTIN_CODE(isolate(),
+ AllocateRegularInYoungGeneration)))
+
DEFINE_GETTER(AllocateInOldGenerationStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), AllocateInOldGeneration)))
+DEFINE_GETTER(AllocateRegularInOldGenerationStubConstant,
+ HeapConstant(BUILTIN_CODE(isolate(),
+ AllocateRegularInOldGeneration)))
+
DEFINE_GETTER(ArrayConstructorStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), ArrayConstructorImpl)))
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index b5c80515ad..ec36c26034 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -80,31 +80,33 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
void GetCachedNodes(NodeVector* nodes);
// Cached global nodes.
-#define CACHED_GLOBAL_LIST(V) \
- V(AllocateInYoungGenerationStubConstant) \
- V(AllocateInOldGenerationStubConstant) \
- V(ArrayConstructorStubConstant) \
- V(BigIntMapConstant) \
- V(BooleanMapConstant) \
- V(ToNumberBuiltinConstant) \
- V(EmptyFixedArrayConstant) \
- V(EmptyStringConstant) \
- V(FixedArrayMapConstant) \
- V(PropertyArrayMapConstant) \
- V(FixedDoubleArrayMapConstant) \
- V(HeapNumberMapConstant) \
- V(OptimizedOutConstant) \
- V(StaleRegisterConstant) \
- V(UndefinedConstant) \
- V(TheHoleConstant) \
- V(TrueConstant) \
- V(FalseConstant) \
- V(NullConstant) \
- V(ZeroConstant) \
- V(OneConstant) \
- V(NaNConstant) \
- V(MinusOneConstant) \
- V(EmptyStateValues) \
+#define CACHED_GLOBAL_LIST(V) \
+ V(AllocateInYoungGenerationStubConstant) \
+ V(AllocateRegularInYoungGenerationStubConstant) \
+ V(AllocateInOldGenerationStubConstant) \
+ V(AllocateRegularInOldGenerationStubConstant) \
+ V(ArrayConstructorStubConstant) \
+ V(BigIntMapConstant) \
+ V(BooleanMapConstant) \
+ V(ToNumberBuiltinConstant) \
+ V(EmptyFixedArrayConstant) \
+ V(EmptyStringConstant) \
+ V(FixedArrayMapConstant) \
+ V(PropertyArrayMapConstant) \
+ V(FixedDoubleArrayMapConstant) \
+ V(HeapNumberMapConstant) \
+ V(OptimizedOutConstant) \
+ V(StaleRegisterConstant) \
+ V(UndefinedConstant) \
+ V(TheHoleConstant) \
+ V(TrueConstant) \
+ V(FalseConstant) \
+ V(NullConstant) \
+ V(ZeroConstant) \
+ V(OneConstant) \
+ V(NaNConstant) \
+ V(MinusOneConstant) \
+ V(EmptyStateValues) \
V(SingleDeadTypedStateValues)
// Cached global node accessor methods.
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 86250e9d1f..c79c793ae6 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/heap-refs.h"
#ifdef ENABLE_SLOW_DCHECKS
#include <algorithm>
@@ -12,6 +13,7 @@
#include "src/ast/modules.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
#include "src/compiler/vector-slot-pair.h"
@@ -26,6 +28,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/template-objects-inl.h"
#include "src/objects/templates.h"
#include "src/utils/boxed-float.h"
#include "src/utils/utils.h"
@@ -121,17 +124,31 @@ class PropertyCellData : public HeapObjectData {
ObjectData* value_ = nullptr;
};
+// TODO(mslekova): Once we have real-world usage data, we might want to
+// reimplement this as sorted vector instead, to reduce the memory overhead.
+typedef ZoneMap<MapData*, HolderLookupResult> KnownReceiversMap;
+
class FunctionTemplateInfoData : public HeapObjectData {
public:
FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<FunctionTemplateInfo> object);
- void Serialize(JSHeapBroker* broker);
- ObjectData* call_code() const { return call_code_; }
+ bool is_signature_undefined() const { return is_signature_undefined_; }
+ bool accept_any_receiver() const { return accept_any_receiver_; }
+ bool has_call_code() const { return has_call_code_; }
+
+ void SerializeCallCode(JSHeapBroker* broker);
+ CallHandlerInfoData* call_code() const { return call_code_; }
+ KnownReceiversMap& known_receivers() { return known_receivers_; }
private:
- bool serialized_ = false;
- ObjectData* call_code_ = nullptr;
+ bool serialized_call_code_ = false;
+ CallHandlerInfoData* call_code_ = nullptr;
+ bool is_signature_undefined_ = false;
+ bool accept_any_receiver_ = false;
+ bool has_call_code_ = false;
+
+ KnownReceiversMap known_receivers_;
};
class CallHandlerInfoData : public HeapObjectData {
@@ -154,7 +171,16 @@ class CallHandlerInfoData : public HeapObjectData {
FunctionTemplateInfoData::FunctionTemplateInfoData(
JSHeapBroker* broker, ObjectData** storage,
Handle<FunctionTemplateInfo> object)
- : HeapObjectData(broker, storage, object) {}
+ : HeapObjectData(broker, storage, object),
+ known_receivers_(broker->zone()) {
+ auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
+ is_signature_undefined_ =
+ function_template_info->signature().IsUndefined(broker->isolate());
+ accept_any_receiver_ = function_template_info->accept_any_receiver();
+
+ CallOptimization call_optimization(broker->isolate(), object);
+ has_call_code_ = call_optimization.is_simple_api_call();
+}
CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
ObjectData** storage,
@@ -181,18 +207,17 @@ void PropertyCellData::Serialize(JSHeapBroker* broker) {
value_ = broker->GetOrCreateData(cell->value());
}
-void FunctionTemplateInfoData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
+void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
+ if (serialized_call_code_) return;
+ serialized_call_code_ = true;
- TraceScope tracer(broker, this, "FunctionTemplateInfoData::Serialize");
+ TraceScope tracer(broker, this,
+ "FunctionTemplateInfoData::SerializeCallCode");
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
DCHECK_NULL(call_code_);
- call_code_ = broker->GetOrCreateData(function_template_info->call_code());
-
- if (call_code_->IsCallHandlerInfo()) {
- call_code_->AsCallHandlerInfo()->Serialize(broker);
- }
+ call_code_ = broker->GetOrCreateData(function_template_info->call_code())
+ ->AsCallHandlerInfo();
+ call_code_->Serialize(broker);
}
void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
@@ -231,6 +256,12 @@ class JSObjectField {
uint64_t number_bits_ = 0;
};
+struct FieldIndexHasher {
+ size_t operator()(FieldIndex field_index) const {
+ return field_index.index();
+ }
+};
+
class JSObjectData : public HeapObjectData {
public:
JSObjectData(JSHeapBroker* broker, ObjectData** storage,
@@ -253,12 +284,15 @@ class JSObjectData : public HeapObjectData {
ObjectData* GetOwnConstantElement(JSHeapBroker* broker, uint32_t index,
bool serialize);
+ ObjectData* GetOwnProperty(JSHeapBroker* broker,
+ Representation representation,
+ FieldIndex field_index, bool serialize);
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
private:
- void SerializeRecursive(JSHeapBroker* broker, int max_depths);
+ void SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int max_depths);
FixedArrayBaseData* elements_ = nullptr;
bool cow_or_empty_elements_tenured_ = false;
@@ -277,6 +311,12 @@ class JSObjectData : public HeapObjectData {
// non-configurable, or (2) are known not to (possibly they don't exist at
// all). In case (2), the second pair component is nullptr.
ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
+ // Properties that either:
+ // (1) are known to exist directly on the object, or
+ // (2) are known not to (possibly they don't exist at all).
+ // In case (2), the second pair component is nullptr.
+ // For simplicity, this may in theory overlap with inobject_fields_.
+ ZoneUnorderedMap<FieldIndex, ObjectData*, FieldIndexHasher> own_properties_;
};
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
@@ -312,6 +352,15 @@ base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
}
return base::nullopt;
}
+
+ObjectRef GetOwnPropertyFromHeap(JSHeapBroker* broker,
+ Handle<JSObject> receiver,
+ Representation representation,
+ FieldIndex field_index) {
+ Handle<Object> constant =
+ JSObject::FastPropertyAt(receiver, representation, field_index);
+ return ObjectRef(broker, constant);
+}
} // namespace
ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
@@ -333,6 +382,27 @@ ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
return result;
}
+ObjectData* JSObjectData::GetOwnProperty(JSHeapBroker* broker,
+ Representation representation,
+ FieldIndex field_index,
+ bool serialize) {
+ auto p = own_properties_.find(field_index);
+ if (p != own_properties_.end()) return p->second;
+
+ if (!serialize) {
+ TRACE_MISSING(broker, "knowledge about property with index "
+ << field_index.property_index() << " on "
+ << this);
+ return nullptr;
+ }
+
+ ObjectRef property = GetOwnPropertyFromHeap(
+ broker, Handle<JSObject>::cast(object()), representation, field_index);
+ ObjectData* result(property.data());
+ own_properties_.insert(std::make_pair(field_index, result));
+ return result;
+}
+
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -503,24 +573,18 @@ class ContextData : public HeapObjectData {
public:
ContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<Context> object);
- void SerializeContextChain(JSHeapBroker* broker);
- ContextData* previous() const {
- CHECK(serialized_context_chain_);
- return previous_;
- }
+ // {previous} will return the closest valid context possible to desired
+ // {depth}, decrementing {depth} for each previous link successfully followed.
+ // If {serialize} is true, it will serialize contexts along the way.
+ ContextData* previous(JSHeapBroker* broker, size_t* depth, bool serialize);
- void SerializeSlot(JSHeapBroker* broker, int index);
-
- ObjectData* GetSlot(int index) {
- auto search = slots_.find(index);
- CHECK(search != slots_.end());
- return search->second;
- }
+ // Returns nullptr if the slot index isn't valid or wasn't serialized
+ // (unless {serialize} is true).
+ ObjectData* GetSlot(JSHeapBroker* broker, int index, bool serialize);
private:
ZoneMap<int, ObjectData*> slots_;
- bool serialized_context_chain_ = false;
ContextData* previous_ = nullptr;
};
@@ -528,28 +592,46 @@ ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<Context> object)
: HeapObjectData(broker, storage, object), slots_(broker->zone()) {}
-void ContextData::SerializeContextChain(JSHeapBroker* broker) {
- if (serialized_context_chain_) return;
- serialized_context_chain_ = true;
+ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth,
+ bool serialize) {
+ if (*depth == 0) return this;
- TraceScope tracer(broker, this, "ContextData::SerializeContextChain");
- Handle<Context> context = Handle<Context>::cast(object());
+ if (serialize && previous_ == nullptr) {
+ TraceScope tracer(broker, this, "ContextData::previous");
+ Handle<Context> context = Handle<Context>::cast(object());
+ Object prev = context->unchecked_previous();
+ if (prev.IsContext()) {
+ previous_ = broker->GetOrCreateData(prev)->AsContext();
+ }
+ }
- DCHECK_NULL(previous_);
- // Context::previous DCHECK-fails when called on the native context.
- if (!context->IsNativeContext()) {
- previous_ = broker->GetOrCreateData(context->previous())->AsContext();
- previous_->SerializeContextChain(broker);
+ if (previous_ != nullptr) {
+ *depth = *depth - 1;
+ return previous_->previous(broker, depth, serialize);
}
+ return this;
}
-void ContextData::SerializeSlot(JSHeapBroker* broker, int index) {
- TraceScope tracer(broker, this, "ContextData::SerializeSlot");
- TRACE(broker, "Serializing script context slot " << index);
- Handle<Context> context = Handle<Context>::cast(object());
- CHECK(index >= 0 && index < context->length());
- ObjectData* odata = broker->GetOrCreateData(context->get(index));
- slots_.insert(std::make_pair(index, odata));
+ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
+ bool serialize) {
+ CHECK_GE(index, 0);
+ auto search = slots_.find(index);
+ if (search != slots_.end()) {
+ return search->second;
+ }
+
+ if (serialize) {
+ Handle<Context> context = Handle<Context>::cast(object());
+ if (index < context->length()) {
+ TraceScope tracer(broker, this, "ContextData::GetSlot");
+ TRACE(broker, "Serializing context slot " << index);
+ ObjectData* odata = broker->GetOrCreateData(context->get(index));
+ slots_.insert(std::make_pair(index, odata));
+ return odata;
+ }
+ }
+
+ return nullptr;
}
class NativeContextData : public ContextData {
@@ -564,6 +646,11 @@ class NativeContextData : public ContextData {
return function_maps_;
}
+ ScopeInfoData* scope_info() const {
+ CHECK(serialized_);
+ return scope_info_;
+ }
+
NativeContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<NativeContext> object);
void Serialize(JSHeapBroker* broker);
@@ -574,6 +661,7 @@ class NativeContextData : public ContextData {
BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
ZoneVector<MapData*> function_maps_;
+ ScopeInfoData* scope_info_ = nullptr;
};
class NameData : public HeapObjectData {
@@ -674,14 +762,15 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
DCHECK_GE(max_depth, 0);
DCHECK_GE(*max_properties, 0);
+ Isolate* const isolate = boilerplate->GetIsolate();
+
// Make sure the boilerplate map is not deprecated.
- if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+ if (!JSObject::TryMigrateInstance(isolate, boilerplate)) return false;
// Check for too deep nesting.
if (max_depth == 0) return false;
// Check the elements.
- Isolate* const isolate = boilerplate->GetIsolate();
Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
if (elements->length() > 0 &&
elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
@@ -780,6 +869,18 @@ class AllocationSiteData : public HeapObjectData {
bool serialized_boilerplate_ = false;
};
+class BigIntData : public HeapObjectData {
+ public:
+ BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object)
+ : HeapObjectData(broker, storage, object),
+ as_uint64_(object->AsUint64(nullptr)) {}
+
+ uint64_t AsUint64() const { return as_uint64_; }
+
+ private:
+ const uint64_t as_uint64_;
+};
+
// Only used in JSNativeContextSpecialization.
class ScriptContextTableData : public HeapObjectData {
public:
@@ -1215,7 +1316,8 @@ JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object)
: HeapObjectData(broker, storage, object),
inobject_fields_(broker->zone()),
- own_constant_elements_(broker->zone()) {}
+ own_constant_elements_(broker->zone()),
+ own_properties_(broker->zone()) {}
FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<FixedArray> object)
@@ -1282,18 +1384,106 @@ class BytecodeArrayData : public FixedArrayBaseData {
return incoming_new_target_or_generator_register_;
}
+ uint8_t get(int index) const {
+ DCHECK(is_serialized_for_compilation_);
+ return bytecodes_[index];
+ }
+
+ Address GetFirstBytecodeAddress() const {
+ return reinterpret_cast<Address>(bytecodes_.data());
+ }
+
+ Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const {
+ return constant_pool_[index]->object();
+ }
+
+ bool IsConstantAtIndexSmi(int index) const {
+ return constant_pool_[index]->is_smi();
+ }
+
+ Smi GetConstantAtIndexAsSmi(int index) const {
+ return *(Handle<Smi>::cast(constant_pool_[index]->object()));
+ }
+
+ bool IsSerializedForCompilation() const {
+ return is_serialized_for_compilation_;
+ }
+
+ void SerializeForCompilation(JSHeapBroker* broker) {
+ if (is_serialized_for_compilation_) return;
+
+ Handle<BytecodeArray> bytecode_array =
+ Handle<BytecodeArray>::cast(object());
+
+ DCHECK(bytecodes_.empty());
+ bytecodes_.reserve(bytecode_array->length());
+ for (int i = 0; i < bytecode_array->length(); i++) {
+ bytecodes_.push_back(bytecode_array->get(i));
+ }
+
+ DCHECK(constant_pool_.empty());
+ Handle<FixedArray> constant_pool(bytecode_array->constant_pool(),
+ broker->isolate());
+ constant_pool_.reserve(constant_pool->length());
+ for (int i = 0; i < constant_pool->length(); i++) {
+ constant_pool_.push_back(broker->GetOrCreateData(constant_pool->get(i)));
+ }
+
+ Handle<ByteArray> source_position_table(
+ bytecode_array->SourcePositionTableIfCollected(), broker->isolate());
+ source_positions_.reserve(source_position_table->length());
+ for (int i = 0; i < source_position_table->length(); i++) {
+ source_positions_.push_back(source_position_table->get(i));
+ }
+
+ Handle<ByteArray> handlers(bytecode_array->handler_table(),
+ broker->isolate());
+ handler_table_.reserve(handlers->length());
+ for (int i = 0; i < handlers->length(); i++) {
+ handler_table_.push_back(handlers->get(i));
+ }
+
+ is_serialized_for_compilation_ = true;
+ }
+
+ const byte* source_positions_address() const {
+ return source_positions_.data();
+ }
+
+ size_t source_positions_size() const { return source_positions_.size(); }
+
+ Address handler_table_address() const {
+ CHECK(is_serialized_for_compilation_);
+ return reinterpret_cast<Address>(handler_table_.data());
+ }
+
+ int handler_table_size() const {
+ CHECK(is_serialized_for_compilation_);
+ return static_cast<int>(handler_table_.size());
+ }
+
BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<BytecodeArray> object)
: FixedArrayBaseData(broker, storage, object),
register_count_(object->register_count()),
parameter_count_(object->parameter_count()),
incoming_new_target_or_generator_register_(
- object->incoming_new_target_or_generator_register()) {}
+ object->incoming_new_target_or_generator_register()),
+ bytecodes_(broker->zone()),
+ source_positions_(broker->zone()),
+ handler_table_(broker->zone()),
+ constant_pool_(broker->zone()) {}
private:
int const register_count_;
int const parameter_count_;
interpreter::Register const incoming_new_target_or_generator_register_;
+
+ bool is_serialized_for_compilation_ = false;
+ ZoneVector<uint8_t> bytecodes_;
+ ZoneVector<uint8_t> source_positions_;
+ ZoneVector<uint8_t> handler_table_;
+ ZoneVector<ObjectData*> constant_pool_;
};
class JSArrayData : public JSObjectData {
@@ -1377,6 +1567,22 @@ class SharedFunctionInfoData : public HeapObjectData {
void SetSerializedForCompilation(JSHeapBroker* broker,
FeedbackVectorRef feedback);
bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
+ void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
+ FunctionTemplateInfoData* function_template_info() const {
+ return function_template_info_;
+ }
+ JSArrayData* GetTemplateObject(FeedbackSlot slot) const {
+ auto lookup_it = template_objects_.find(slot.ToInt());
+ if (lookup_it != template_objects_.cend()) {
+ return lookup_it->second;
+ }
+ return nullptr;
+ }
+ void SetTemplateObject(FeedbackSlot slot, JSArrayData* object) {
+ CHECK(
+ template_objects_.insert(std::make_pair(slot.ToInt(), object)).second);
+ }
+
#define DECL_ACCESSOR(type, name) \
type name() const { return name##_; }
BROKER_SFI_FIELDS(DECL_ACCESSOR)
@@ -1391,6 +1597,8 @@ class SharedFunctionInfoData : public HeapObjectData {
#define DECL_MEMBER(type, name) type const name##_;
BROKER_SFI_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
+ FunctionTemplateInfoData* function_template_info_;
+ ZoneMap<int, JSArrayData*> template_objects_;
};
SharedFunctionInfoData::SharedFunctionInfoData(
@@ -1408,7 +1616,9 @@ SharedFunctionInfoData::SharedFunctionInfoData(
#define INIT_MEMBER(type, name) , name##_(object->name())
BROKER_SFI_FIELDS(INIT_MEMBER)
#undef INIT_MEMBER
-{
+ ,
+ function_template_info_(nullptr),
+ template_objects_(broker->zone()) {
DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
}
@@ -1420,15 +1630,28 @@ void SharedFunctionInfoData::SetSerializedForCompilation(
<< " as serialized for compilation");
}
+void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
+ JSHeapBroker* broker) {
+ if (function_template_info_) return;
+
+ function_template_info_ =
+ broker
+ ->GetOrCreateData(handle(
+ Handle<SharedFunctionInfo>::cast(object())->function_data(),
+ broker->isolate()))
+ ->AsFunctionTemplateInfo();
+}
+
bool SharedFunctionInfoData::IsSerializedForCompilation(
FeedbackVectorRef feedback) const {
return serialized_for_compilation_.find(feedback.object()) !=
serialized_for_compilation_.end();
}
-class ModuleData : public HeapObjectData {
+class SourceTextModuleData : public HeapObjectData {
public:
- ModuleData(JSHeapBroker* broker, ObjectData** storage, Handle<Module> object);
+ SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<SourceTextModule> object);
void Serialize(JSHeapBroker* broker);
CellData* GetCell(int cell_index) const;
@@ -1439,35 +1662,36 @@ class ModuleData : public HeapObjectData {
ZoneVector<CellData*> exports_;
};
-ModuleData::ModuleData(JSHeapBroker* broker, ObjectData** storage,
- Handle<Module> object)
+SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<SourceTextModule> object)
: HeapObjectData(broker, storage, object),
imports_(broker->zone()),
exports_(broker->zone()) {}
-CellData* ModuleData::GetCell(int cell_index) const {
+CellData* SourceTextModuleData::GetCell(int cell_index) const {
CHECK(serialized_);
CellData* cell;
- switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
- case ModuleDescriptor::kImport:
- cell = imports_.at(Module::ImportIndex(cell_index));
+ switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
+ case SourceTextModuleDescriptor::kImport:
+ cell = imports_.at(SourceTextModule::ImportIndex(cell_index));
break;
- case ModuleDescriptor::kExport:
- cell = exports_.at(Module::ExportIndex(cell_index));
+ case SourceTextModuleDescriptor::kExport:
+ cell = exports_.at(SourceTextModule::ExportIndex(cell_index));
break;
- case ModuleDescriptor::kInvalid:
+ case SourceTextModuleDescriptor::kInvalid:
UNREACHABLE();
}
CHECK_NOT_NULL(cell);
return cell;
}
-void ModuleData::Serialize(JSHeapBroker* broker) {
+void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
if (serialized_) return;
serialized_ = true;
- TraceScope tracer(broker, this, "ModuleData::Serialize");
- Handle<Module> module = Handle<Module>::cast(object());
+ TraceScope tracer(broker, this, "SourceTextModuleData::Serialize");
+ Handle<SourceTextModule> module = Handle<SourceTextModule>::cast(object());
// TODO(neis): We could be smarter and only serialize the cells we care about.
// TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector.
@@ -1614,7 +1838,7 @@ bool JSObjectData::cow_or_empty_elements_tenured() const {
FixedArrayBaseData* JSObjectData::elements() const { return elements_; }
void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
- SerializeRecursive(broker, kMaxFastLiteralDepth);
+ SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
}
void JSObjectData::SerializeElements(JSHeapBroker* broker) {
@@ -1717,11 +1941,13 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
<< contents.size() << " total)");
}
-void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
+void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
+ int depth) {
if (serialized_as_boilerplate_) return;
serialized_as_boilerplate_ = true;
- TraceScope tracer(broker, this, "JSObjectData::SerializeRecursive");
+ TraceScope tracer(broker, this,
+ "JSObjectData::SerializeRecursiveAsBoilerplate");
Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
// We only serialize boilerplates that pass the IsInlinableFastLiteral
@@ -1767,7 +1993,8 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
Handle<Object> value(fast_elements->get(i), isolate);
if (value->IsJSObject()) {
ObjectData* value_data = broker->GetOrCreateData(value);
- value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
}
}
} else {
@@ -1802,9 +2029,22 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
} else {
Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
isolate);
+ // In case of unboxed double fields we use a sentinel NaN value to mark
+ // uninitialized fields. A boilerplate value with such a field may migrate
+ // from its unboxed double to a tagged representation. In the process the
+ // raw double is converted to a heap number. The sentinel value carries no
+ // special meaning when it occurs in a heap number, so we would like to
+ // recover the uninitialized value.
+ // We check for the sentinel here, specifically, since migrations might
+ // have been triggered as part of boilerplate serialization.
+ if (value->IsHeapNumber() &&
+ HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
+ value = isolate->factory()->uninitialized_value();
+ }
ObjectData* value_data = broker->GetOrCreateData(value);
if (value->IsJSObject()) {
- value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
}
inobject_fields_.push_back(JSObjectField{value_data});
}
@@ -1839,35 +2079,50 @@ bool ObjectRef::equals(const ObjectRef& other) const {
Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
-ContextRef ContextRef::previous() const {
+ContextRef ContextRef::previous(size_t* depth, bool serialize) const {
+ DCHECK_NOT_NULL(depth);
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- return ContextRef(broker(),
- handle(object()->previous(), broker()->isolate()));
+ Context current = *object();
+ while (*depth != 0 && current.unchecked_previous().IsContext()) {
+ current = Context::cast(current.unchecked_previous());
+ (*depth)--;
+ }
+ return ContextRef(broker(), handle(current, broker()->isolate()));
}
- return ContextRef(broker(), data()->AsContext()->previous());
+ ContextData* current = this->data()->AsContext();
+ return ContextRef(broker(), current->previous(broker(), depth, serialize));
}
-// Not needed for TypedLowering.
-ObjectRef ContextRef::get(int index) const {
+base::Optional<ObjectRef> ContextRef::get(int index, bool serialize) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
Handle<Object> value(object()->get(index), broker()->isolate());
return ObjectRef(broker(), value);
}
- return ObjectRef(broker(), data()->AsContext()->GetSlot(index));
+ ObjectData* optional_slot =
+ data()->AsContext()->GetSlot(broker(), index, serialize);
+ if (optional_slot != nullptr) {
+ return ObjectRef(broker(), optional_slot);
+ }
+ return base::nullopt;
}
-JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone)
+JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
+ bool tracing_enabled)
: isolate_(isolate),
broker_zone_(broker_zone),
current_zone_(broker_zone),
refs_(new (zone())
RefsMap(kMinimalRefsBucketCount, AddressMatcher(), zone())),
array_and_object_prototypes_(zone()),
- feedback_(zone()) {
+ tracing_enabled_(tracing_enabled),
+ feedback_(zone()),
+ bytecode_analyses_(zone()),
+ ais_for_loading_then_(zone()),
+ ais_for_loading_exec_(zone()) {
// Note that this initialization of the refs_ pointer with the minimal
// initial capacity is redundant in the normal use case (concurrent
// compilation enabled, standard objects to be serialized), as the map
@@ -1939,7 +2194,9 @@ void JSHeapBroker::SerializeShareableObjects() {
{
Builtins::Name builtins[] = {
Builtins::kAllocateInYoungGeneration,
+ Builtins::kAllocateRegularInYoungGeneration,
Builtins::kAllocateInOldGeneration,
+ Builtins::kAllocateRegularInOldGeneration,
Builtins::kArgumentsAdaptorTrampoline,
Builtins::kArrayConstructorImpl,
Builtins::kCallFunctionForwardVarargs,
@@ -2400,6 +2657,11 @@ bool AllocationSiteRef::IsFastLiteral() const {
return data()->AsAllocationSite()->IsFastLiteral();
}
+void JSObjectRef::SerializeElements() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSObject()->SerializeElements(broker());
+}
+
void JSObjectRef::EnsureElementsTenured() {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation allow_handle_allocation;
@@ -2553,6 +2815,95 @@ double FixedDoubleArrayRef::get_scalar(int i) const {
return data()->AsFixedDoubleArray()->Get(i).get_scalar();
}
+uint8_t BytecodeArrayRef::get(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->get(index);
+ }
+ return data()->AsBytecodeArray()->get(index);
+}
+
+Address BytecodeArrayRef::GetFirstBytecodeAddress() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->GetFirstBytecodeAddress();
+ }
+ return data()->AsBytecodeArray()->GetFirstBytecodeAddress();
+}
+
+Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return handle(object()->constant_pool().get(index), broker()->isolate());
+ }
+ return data()->AsBytecodeArray()->GetConstantAtIndex(index,
+ broker()->isolate());
+}
+
+bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->constant_pool().get(index).IsSmi();
+ }
+ return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index);
+}
+
+Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return Smi::cast(object()->constant_pool().get(index));
+ }
+ return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
+}
+
+bool BytecodeArrayRef::IsSerializedForCompilation() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return true;
+ return data()->AsBytecodeArray()->IsSerializedForCompilation();
+}
+
+void BytecodeArrayRef::SerializeForCompilation() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ data()->AsBytecodeArray()->SerializeForCompilation(broker());
+}
+
+const byte* BytecodeArrayRef::source_positions_address() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->SourcePositionTableIfCollected().GetDataStartAddress();
+ }
+ return data()->AsBytecodeArray()->source_positions_address();
+}
+
+int BytecodeArrayRef::source_positions_size() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->SourcePositionTableIfCollected().length();
+ }
+ return static_cast<int>(data()->AsBytecodeArray()->source_positions_size());
+}
+
+Address BytecodeArrayRef::handler_table_address() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return reinterpret_cast<Address>(
+ object()->handler_table().GetDataStartAddress());
+ }
+ return data()->AsBytecodeArray()->handler_table_address();
+}
+
+int BytecodeArrayRef::handler_table_size() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->handler_table().length();
+ }
+ return data()->AsBytecodeArray()->handler_table_size();
+}
+
#define IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name) \
if (broker()->mode() == JSHeapBroker::kDisabled) { \
AllowHandleAllocation handle_allocation; \
@@ -2630,15 +2981,13 @@ BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
-BIMODAL_ACCESSOR_B(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
-BIMODAL_ACCESSOR_B(Map, bit_field2, has_hidden_prototype,
- Map::HasHiddenPrototypeBit)
-BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
Map::NumberOfOwnDescriptorsBits)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_migration_target,
Map::IsMigrationTargetBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_extensible, Map::IsExtensibleBit)
BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_access_check_needed,
Map::IsAccessCheckNeededBit)
@@ -2663,7 +3012,109 @@ BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
BIMODAL_ACCESSOR(PropertyCell, Object, value)
BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
-BIMODAL_ACCESSOR(FunctionTemplateInfo, Object, call_code)
+base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return CallHandlerInfoRef(
+ broker(), handle(object()->call_code(), broker()->isolate()));
+ }
+ CallHandlerInfoData* call_code =
+ data()->AsFunctionTemplateInfo()->call_code();
+ if (!call_code) return base::nullopt;
+ return CallHandlerInfoRef(broker(), call_code);
+}
+
+bool FunctionTemplateInfoRef::is_signature_undefined() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ return object()->signature().IsUndefined(broker()->isolate());
+ }
+ return data()->AsFunctionTemplateInfo()->is_signature_undefined();
+}
+
+bool FunctionTemplateInfoRef::has_call_code() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ CallOptimization call_optimization(broker()->isolate(), object());
+ return call_optimization.is_simple_api_call();
+ }
+ return data()->AsFunctionTemplateInfo()->has_call_code();
+}
+
+BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
+
+HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
+ MapRef receiver_map, bool serialize) {
+ const HolderLookupResult not_found;
+
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ CallOptimization call_optimization(broker()->isolate(), object());
+ Handle<Map> receiver_map_ref(receiver_map.object());
+ if (!receiver_map_ref->IsJSReceiverMap() ||
+ (receiver_map_ref->is_access_check_needed() &&
+ !object()->accept_any_receiver())) {
+ return not_found;
+ }
+
+ HolderLookupResult result;
+ Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
+ receiver_map_ref, &result.lookup);
+
+ switch (result.lookup) {
+ case CallOptimization::kHolderFound:
+ result.holder = JSObjectRef(broker(), holder);
+ break;
+ default:
+ DCHECK_EQ(result.holder, base::nullopt);
+ break;
+ }
+ return result;
+ }
+
+ FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo();
+ KnownReceiversMap::iterator lookup_it =
+ fti_data->known_receivers().find(receiver_map.data()->AsMap());
+ if (lookup_it != fti_data->known_receivers().cend()) {
+ return lookup_it->second;
+ }
+ if (!serialize) {
+ TRACE_BROKER_MISSING(broker(),
+ "holder for receiver with map " << receiver_map);
+ return not_found;
+ }
+ if (!receiver_map.IsJSReceiverMap() ||
+ (receiver_map.is_access_check_needed() && !accept_any_receiver())) {
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), not_found});
+ return not_found;
+ }
+
+ HolderLookupResult result;
+ CallOptimization call_optimization(broker()->isolate(), object());
+ Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
+ receiver_map.object(), &result.lookup);
+
+ switch (result.lookup) {
+ case CallOptimization::kHolderFound: {
+ result.holder = JSObjectRef(broker(), holder);
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), result});
+ break;
+ }
+ default: {
+ DCHECK_EQ(result.holder, base::nullopt);
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), result});
+ }
+ }
+ return result;
+}
BIMODAL_ACCESSOR(CallHandlerInfo, Object, data)
@@ -2746,11 +3197,21 @@ bool StringRef::IsSeqString() const {
return data()->AsString()->is_seq_string();
}
+ScopeInfoRef NativeContextRef::scope_info() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return ScopeInfoRef(broker(),
+ handle(object()->scope_info(), broker()->isolate()));
+ }
+ return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info());
+}
+
MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
if (broker()->mode() == JSHeapBroker::kDisabled) {
- return get(index).AsMap();
+ return get(index).value().AsMap();
}
return MapRef(broker(), data()->AsNativeContext()->function_maps().at(
index - Context::FIRST_FUNCTION_MAP_INDEX));
@@ -2853,6 +3314,19 @@ base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
return ObjectRef(broker(), element);
}
+base::Optional<ObjectRef> JSObjectRef::GetOwnProperty(
+ Representation field_representation, FieldIndex index,
+ bool serialize) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return GetOwnPropertyFromHeap(broker(), Handle<JSObject>::cast(object()),
+ field_representation, index);
+ }
+ ObjectData* property = data()->AsJSObject()->GetOwnProperty(
+ broker(), field_representation, index, serialize);
+ if (property == nullptr) return base::nullopt;
+ return ObjectRef(broker(), property);
+}
+
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(uint32_t index,
bool serialize) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
@@ -2884,14 +3358,19 @@ double MutableHeapNumberRef::value() const {
return data()->AsMutableHeapNumber()->value();
}
-CellRef ModuleRef::GetCell(int cell_index) const {
+uint64_t BigIntRef::AsUint64() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(BigInt, AsUint64);
+ return data()->AsBigInt()->AsUint64();
+}
+
+CellRef SourceTextModuleRef::GetCell(int cell_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
return CellRef(broker(),
handle(object()->GetCell(cell_index), broker()->isolate()));
}
- return CellRef(broker(), data()->AsModule()->GetCell(cell_index));
+ return CellRef(broker(), data()->AsSourceTextModule()->GetCell(cell_index));
}
ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
@@ -3108,6 +3587,8 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
for (int i = first; i <= last; ++i) {
function_maps_.push_back(broker->GetOrCreateData(context->get(i))->AsMap());
}
+
+ scope_info_ = broker->GetOrCreateData(context->scope_info())->AsScopeInfo();
}
void JSFunctionRef::Serialize() {
@@ -3133,6 +3614,46 @@ bool JSFunctionRef::IsSerializedForCompilation() const {
shared().IsSerializedForCompilation(feedback_vector());
}
+JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description,
+ FeedbackVectorRef vector,
+ FeedbackSlot slot,
+ bool serialize) {
+ // Look in the feedback vector for the array. A Smi indicates that it's
+ // not yet cached here.
+ ObjectRef candidate = vector.get(slot);
+ if (!candidate.IsSmi()) {
+ return candidate.AsJSArray();
+ }
+
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<TemplateObjectDescription> tod =
+ Handle<TemplateObjectDescription>::cast(description.object());
+ Handle<JSArray> template_object =
+ TemplateObjectDescription::GetTemplateObject(
+ broker()->isolate(), broker()->native_context().object(), tod,
+ object(), slot.ToInt());
+ return JSArrayRef(broker(), template_object);
+ }
+
+ JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot);
+ if (array != nullptr) return JSArrayRef(broker(), array);
+
+ CHECK(serialize);
+ CHECK(broker()->SerializingAllowed());
+
+ Handle<TemplateObjectDescription> tod =
+ Handle<TemplateObjectDescription>::cast(description.object());
+ Handle<JSArray> template_object =
+ TemplateObjectDescription::GetTemplateObject(
+ broker()->isolate(), broker()->native_context().object(), tod,
+ object(), slot.ToInt());
+ array = broker()->GetOrCreateData(template_object)->AsJSArray();
+ data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array);
+ return JSArrayRef(broker(), array);
+}
+
void SharedFunctionInfoRef::SetSerializedForCompilation(
FeedbackVectorRef feedback) {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -3140,9 +3661,27 @@ void SharedFunctionInfoRef::SetSerializedForCompilation(
feedback);
}
+void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+
+ data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
+}
+
+base::Optional<FunctionTemplateInfoRef>
+SharedFunctionInfoRef::function_template_info() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return FunctionTemplateInfoRef(
+ broker(), handle(object()->function_data(), broker()->isolate()));
+ }
+ FunctionTemplateInfoData* function_template_info =
+ data()->AsSharedFunctionInfo()->function_template_info();
+ if (!function_template_info) return base::nullopt;
+ return FunctionTemplateInfoRef(broker(), function_template_info);
+}
+
bool SharedFunctionInfoRef::IsSerializedForCompilation(
FeedbackVectorRef feedback) const {
- CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
+ if (broker()->mode() == JSHeapBroker::kDisabled) return true;
return data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback);
}
@@ -3181,22 +3720,10 @@ bool MapRef::serialized_prototype() const {
return data()->AsMap()->serialized_prototype();
}
-void ModuleRef::Serialize() {
+void SourceTextModuleRef::Serialize() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsModule()->Serialize(broker());
-}
-
-void ContextRef::SerializeContextChain() {
- if (broker()->mode() == JSHeapBroker::kDisabled) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsContext()->SerializeContextChain(broker());
-}
-
-void ContextRef::SerializeSlot(int index) {
- if (broker()->mode() == JSHeapBroker::kDisabled) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsContext()->SerializeSlot(broker(), index);
+ data()->AsSourceTextModule()->Serialize(broker());
}
void NativeContextRef::Serialize() {
@@ -3228,10 +3755,10 @@ void PropertyCellRef::Serialize() {
data()->AsPropertyCell()->Serialize(broker());
}
-void FunctionTemplateInfoRef::Serialize() {
+void FunctionTemplateInfoRef::SerializeCallCode() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsFunctionTemplateInfo()->Serialize(broker());
+ data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
}
base::Optional<PropertyCellRef> JSGlobalProxyRef::GetPropertyCell(
@@ -3307,10 +3834,67 @@ base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
return {};
}
-ElementAccessFeedback::ElementAccessFeedback(Zone* zone)
+KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) {
+ if (IsKeyedLoadICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kLoad, nexus.GetKeyedAccessLoadMode());
+ }
+ if (IsKeyedHasICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode());
+ }
+ if (IsKeyedStoreICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode());
+ }
+ if (IsStoreInArrayLiteralICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kStoreInLiteral,
+ nexus.GetKeyedAccessStoreMode());
+ }
+ UNREACHABLE();
+}
+
+AccessMode KeyedAccessMode::access_mode() const { return access_mode_; }
+
+bool KeyedAccessMode::IsLoad() const {
+ return access_mode_ == AccessMode::kLoad || access_mode_ == AccessMode::kHas;
+}
+bool KeyedAccessMode::IsStore() const {
+ return access_mode_ == AccessMode::kStore ||
+ access_mode_ == AccessMode::kStoreInLiteral;
+}
+
+KeyedAccessLoadMode KeyedAccessMode::load_mode() const {
+ CHECK(IsLoad());
+ return load_store_mode_.load_mode;
+}
+
+KeyedAccessStoreMode KeyedAccessMode::store_mode() const {
+ CHECK(IsStore());
+ return load_store_mode_.store_mode;
+}
+
+KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessLoadMode load_mode)
+ : load_mode(load_mode) {}
+KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessStoreMode store_mode)
+ : store_mode(store_mode) {}
+
+KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
+ KeyedAccessLoadMode load_mode)
+ : access_mode_(access_mode), load_store_mode_(load_mode) {
+ CHECK(!IsStore());
+ CHECK(IsLoad());
+}
+KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
+ KeyedAccessStoreMode store_mode)
+ : access_mode_(access_mode), load_store_mode_(store_mode) {
+ CHECK(!IsLoad());
+ CHECK(IsStore());
+}
+
+ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
+ KeyedAccessMode const& keyed_mode)
: ProcessedFeedback(kElementAccess),
receiver_maps(zone),
- transitions(zone) {}
+ transitions(zone),
+ keyed_mode(keyed_mode) {}
ElementAccessFeedback::MapIterator::MapIterator(
ElementAccessFeedback const& processed, JSHeapBroker* broker)
@@ -3383,7 +3967,7 @@ GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback(
}
ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
- MapHandles const& maps) {
+ MapHandles const& maps, KeyedAccessMode const& keyed_mode) {
DCHECK(!maps.empty());
// Collect possible transition targets.
@@ -3397,7 +3981,8 @@ ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
}
}
- ElementAccessFeedback* result = new (zone()) ElementAccessFeedback(zone());
+ ElementAccessFeedback* result =
+ new (zone()) ElementAccessFeedback(zone(), keyed_mode);
// Separate the actual receiver maps and the possible transition sources.
for (Handle<Map> map : maps) {
@@ -3464,7 +4049,7 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess(
}
ContextRef context_ref(this, context);
if (immutable) {
- context_ref.SerializeSlot(context_slot_index);
+ context_ref.get(context_slot_index, true);
}
return new (zone())
GlobalAccessFeedback(context_ref, context_slot_index, immutable);
@@ -3489,6 +4074,54 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
return NameRef(this, handle(raw_name, isolate()));
}
+PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingThen(MapRef map) {
+ auto access_info = ais_for_loading_then_.find(map);
+ if (access_info == ais_for_loading_then_.end()) {
+ TRACE_BROKER_MISSING(
+ this, "access info for reducing JSResolvePromise with map " << map);
+ return PropertyAccessInfo::Invalid(zone());
+ }
+ return access_info->second;
+}
+
+void JSHeapBroker::CreateAccessInfoForLoadingThen(
+ MapRef map, CompilationDependencies* dependencies) {
+ auto access_info = ais_for_loading_then_.find(map);
+ if (access_info == ais_for_loading_then_.end()) {
+ AccessInfoFactory access_info_factory(this, dependencies, zone());
+ Handle<Name> then_string = isolate()->factory()->then_string();
+ ais_for_loading_then_.insert(
+ std::make_pair(map, access_info_factory.ComputePropertyAccessInfo(
+ map.object(), then_string, AccessMode::kLoad)));
+ }
+}
+
+PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingExec(MapRef map) {
+ auto access_info = ais_for_loading_exec_.find(map);
+ if (access_info == ais_for_loading_exec_.end()) {
+ TRACE_BROKER_MISSING(this,
+ "access info for property 'exec' on map " << map);
+ return PropertyAccessInfo::Invalid(zone());
+ }
+ return access_info->second;
+}
+
+PropertyAccessInfo const& JSHeapBroker::CreateAccessInfoForLoadingExec(
+ MapRef map, CompilationDependencies* dependencies) {
+ auto access_info = ais_for_loading_exec_.find(map);
+ if (access_info != ais_for_loading_exec_.end()) {
+ return access_info->second;
+ }
+
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ AccessInfoFactory access_info_factory(this, dependencies, zone());
+ PropertyAccessInfo ai_exec = access_info_factory.ComputePropertyAccessInfo(
+ map.object(), isolate()->factory()->exec_string(), AccessMode::kLoad);
+
+ auto inserted_ai = ais_for_loading_exec_.insert(std::make_pair(map, ai_exec));
+ return inserted_ai.first->second;
+}
+
ElementAccessFeedback const* ProcessedFeedback::AsElementAccess() const {
CHECK_EQ(kElementAccess, kind());
return static_cast<ElementAccessFeedback const*>(this);
@@ -3499,6 +4132,66 @@ NamedAccessFeedback const* ProcessedFeedback::AsNamedAccess() const {
return static_cast<NamedAccessFeedback const*>(this);
}
+BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
+ Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id,
+ bool analyze_liveness, bool serialize) {
+ ObjectData* bytecode_array_data = GetData(bytecode_array);
+ CHECK_NOT_NULL(bytecode_array_data);
+
+ auto it = bytecode_analyses_.find(bytecode_array_data);
+ if (it != bytecode_analyses_.end()) {
+ // Bytecode analysis can be run for OSR or for non-OSR. In the rare case
+ // where we optimize for OSR and consider the top-level function itself for
+ // inlining (because of recursion), we need both the OSR and the non-OSR
+ // analysis. Fortunately, the only difference between the two lies in
+ // whether the OSR entry offset gets computed (from the OSR bailout id).
+ // Hence it's okay to reuse the OSR-version when asked for the non-OSR
+ // version, such that we need to store at most one analysis result per
+ // bytecode array.
+ CHECK_IMPLIES(osr_bailout_id != it->second->osr_bailout_id(),
+ osr_bailout_id.IsNone());
+ CHECK_EQ(analyze_liveness, it->second->liveness_analyzed());
+ return *it->second;
+ }
+
+ CHECK(serialize);
+ BytecodeAnalysis* analysis = new (zone()) BytecodeAnalysis(
+ bytecode_array, zone(), osr_bailout_id, analyze_liveness);
+ DCHECK_EQ(analysis->osr_bailout_id(), osr_bailout_id);
+ bytecode_analyses_[bytecode_array_data] = analysis;
+ return *analysis;
+}
+
+OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
+ : array_(bytecode_array) {}
+
+int OffHeapBytecodeArray::length() const { return array_.length(); }
+
+int OffHeapBytecodeArray::parameter_count() const {
+ return array_.parameter_count();
+}
+
+uint8_t OffHeapBytecodeArray::get(int index) const { return array_.get(index); }
+
+void OffHeapBytecodeArray::set(int index, uint8_t value) { UNREACHABLE(); }
+
+Address OffHeapBytecodeArray::GetFirstBytecodeAddress() const {
+ return array_.GetFirstBytecodeAddress();
+}
+
+Handle<Object> OffHeapBytecodeArray::GetConstantAtIndex(
+ int index, Isolate* isolate) const {
+ return array_.GetConstantAtIndex(index);
+}
+
+bool OffHeapBytecodeArray::IsConstantAtIndexSmi(int index) const {
+ return array_.IsConstantAtIndexSmi(index);
+}
+
+Smi OffHeapBytecodeArray::GetConstantAtIndexAsSmi(int index) const {
+ return array_.GetConstantAtIndexAsSmi(index);
+}
+
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 2c4cc766bc..ffc10d2b93 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -8,796 +8,24 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/compiler/access-info.h"
#include "src/compiler/refs-map.h"
#include "src/handles/handles.h"
+#include "src/interpreter/bytecode-array-accessor.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/function-kind.h"
-#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
-
-class BytecodeArray;
-class CallHandlerInfo;
-class FixedDoubleArray;
-class FunctionTemplateInfo;
-class HeapNumber;
-class InternalizedString;
-class JSBoundFunction;
-class JSDataView;
-class JSGlobalProxy;
-class JSRegExp;
-class JSTypedArray;
-class NativeContext;
-class ScriptContextTable;
-class VectorSlotPair;
-
namespace compiler {
-// Whether we are loading a property or storing to a property.
-// For a store during literal creation, do not walk up the prototype chain.
-enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
-
-enum class OddballType : uint8_t {
- kNone, // Not an Oddball.
- kBoolean, // True or False.
- kUndefined,
- kNull,
- kHole,
- kUninitialized,
- kOther // Oddball, but none of the above.
-};
-
-// This list is sorted such that subtypes appear before their supertypes.
-// DO NOT VIOLATE THIS PROPERTY!
-#define HEAP_BROKER_OBJECT_LIST(V) \
- /* Subtypes of JSObject */ \
- V(JSArray) \
- V(JSBoundFunction) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSGlobalProxy) \
- V(JSRegExp) \
- V(JSTypedArray) \
- /* Subtypes of Context */ \
- V(NativeContext) \
- /* Subtypes of FixedArray */ \
- V(Context) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- /* Subtypes of FixedArrayBase */ \
- V(BytecodeArray) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- /* Subtypes of Name */ \
- V(InternalizedString) \
- V(String) \
- V(Symbol) \
- /* Subtypes of HeapObject */ \
- V(AllocationSite) \
- V(CallHandlerInfo) \
- V(Cell) \
- V(Code) \
- V(DescriptorArray) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArrayBase) \
- V(FunctionTemplateInfo) \
- V(HeapNumber) \
- V(JSObject) \
- V(Map) \
- V(Module) \
- V(MutableHeapNumber) \
- V(Name) \
- V(PropertyCell) \
- V(SharedFunctionInfo) \
- /* Subtypes of Object */ \
- V(HeapObject)
-
-class CompilationDependencies;
-class JSHeapBroker;
-class ObjectData;
-class PerIsolateCompilerCache;
-class PropertyAccessInfo;
-#define FORWARD_DECL(Name) class Name##Ref;
-HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
-#undef FORWARD_DECL
-
-class V8_EXPORT_PRIVATE ObjectRef {
- public:
- ObjectRef(JSHeapBroker* broker, Handle<Object> object);
- ObjectRef(JSHeapBroker* broker, ObjectData* data)
- : data_(data), broker_(broker) {
- CHECK_NOT_NULL(data_);
- }
-
- Handle<Object> object() const;
-
- bool equals(const ObjectRef& other) const;
-
- bool IsSmi() const;
- int AsSmi() const;
-
-#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
- HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
-#undef HEAP_IS_METHOD_DECL
-
-#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
- HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
-#undef HEAP_AS_METHOD_DECL
-
- bool IsNullOrUndefined() const;
-
- bool BooleanValue() const;
- Maybe<double> OddballToNumber() const;
-
- // Return the element at key {index} if {index} is known to be an own data
- // property of the object that is non-writable and non-configurable.
- base::Optional<ObjectRef> GetOwnConstantElement(uint32_t index,
- bool serialize = false) const;
-
- Isolate* isolate() const;
-
- protected:
- JSHeapBroker* broker() const;
- ObjectData* data() const;
- ObjectData* data_; // Should be used only by object() getters.
-
- private:
- friend class JSArrayData;
- friend class JSGlobalProxyRef;
- friend class JSGlobalProxyData;
- friend class JSObjectData;
- friend class StringData;
-
- friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
-
- JSHeapBroker* broker_;
-};
-
+class BytecodeAnalysis;
+class ObjectRef;
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
-// Temporary class that carries information from a Map. We'd like to remove
-// this class and use MapRef instead, but we can't as long as we support the
-// kDisabled broker mode. That's because obtaining the MapRef via
-// HeapObjectRef::map() requires a HandleScope when the broker is disabled.
-// During OptimizeGraph we generally don't have a HandleScope, however. There
-// are two places where we therefore use GetHeapObjectType() instead. Both that
-// function and this class should eventually be removed.
-class HeapObjectType {
- public:
- enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
-
- using Flags = base::Flags<Flag>;
-
- HeapObjectType(InstanceType instance_type, Flags flags,
- OddballType oddball_type)
- : instance_type_(instance_type),
- oddball_type_(oddball_type),
- flags_(flags) {
- DCHECK_EQ(instance_type == ODDBALL_TYPE,
- oddball_type != OddballType::kNone);
- }
-
- OddballType oddball_type() const { return oddball_type_; }
- InstanceType instance_type() const { return instance_type_; }
- Flags flags() const { return flags_; }
-
- bool is_callable() const { return flags_ & kCallable; }
- bool is_undetectable() const { return flags_ & kUndetectable; }
-
- private:
- InstanceType const instance_type_;
- OddballType const oddball_type_;
- Flags const flags_;
-};
-
-class HeapObjectRef : public ObjectRef {
- public:
- using ObjectRef::ObjectRef;
- Handle<HeapObject> object() const;
-
- MapRef map() const;
-
- // See the comment on the HeapObjectType class.
- HeapObjectType GetHeapObjectType() const;
-};
-
-class PropertyCellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<PropertyCell> object() const;
-
- PropertyDetails property_details() const;
-
- void Serialize();
- ObjectRef value() const;
-};
-
-class JSObjectRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<JSObject> object() const;
-
- uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
- double RawFastDoublePropertyAt(FieldIndex index) const;
- ObjectRef RawFastPropertyAt(FieldIndex index) const;
-
- FixedArrayBaseRef elements() const;
- void EnsureElementsTenured();
- ElementsKind GetElementsKind() const;
-
- void SerializeObjectCreateMap();
- base::Optional<MapRef> GetObjectCreateMap() const;
-};
-
-class JSDataViewRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSDataView> object() const;
-
- size_t byte_length() const;
- size_t byte_offset() const;
-};
-
-class JSBoundFunctionRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSBoundFunction> object() const;
-
- void Serialize();
-
- // The following are available only after calling Serialize().
- ObjectRef bound_target_function() const;
- ObjectRef bound_this() const;
- FixedArrayRef bound_arguments() const;
-};
-
-class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSFunction> object() const;
-
- bool has_feedback_vector() const;
- bool has_initial_map() const;
- bool has_prototype() const;
- bool PrototypeRequiresRuntimeLookup() const;
-
- void Serialize();
- bool serialized() const;
-
- // The following are available only after calling Serialize().
- ObjectRef prototype() const;
- MapRef initial_map() const;
- ContextRef context() const;
- NativeContextRef native_context() const;
- SharedFunctionInfoRef shared() const;
- FeedbackVectorRef feedback_vector() const;
- int InitialMapInstanceSizeWithMinSlack() const;
-
- bool IsSerializedForCompilation() const;
-};
-
-class JSRegExpRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSRegExp> object() const;
-
- ObjectRef raw_properties_or_hash() const;
- ObjectRef data() const;
- ObjectRef source() const;
- ObjectRef flags() const;
- ObjectRef last_index() const;
-};
-
-class HeapNumberRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<HeapNumber> object() const;
-
- double value() const;
-};
-
-class MutableHeapNumberRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<MutableHeapNumber> object() const;
-
- double value() const;
-};
-
-class ContextRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Context> object() const;
-
- void SerializeContextChain();
- ContextRef previous() const;
-
- void SerializeSlot(int index);
- ObjectRef get(int index) const;
-};
-
-#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- V(JSFunction, array_function) \
- V(JSFunction, boolean_function) \
- V(JSFunction, bigint_function) \
- V(JSFunction, number_function) \
- V(JSFunction, object_function) \
- V(JSFunction, promise_function) \
- V(JSFunction, promise_then) \
- V(JSFunction, string_function) \
- V(JSFunction, symbol_function) \
- V(JSGlobalProxy, global_proxy_object) \
- V(JSObject, promise_prototype) \
- V(Map, bound_function_with_constructor_map) \
- V(Map, bound_function_without_constructor_map) \
- V(Map, fast_aliased_arguments_map) \
- V(Map, initial_array_iterator_map) \
- V(Map, initial_string_iterator_map) \
- V(Map, iterator_result_map) \
- V(Map, js_array_holey_double_elements_map) \
- V(Map, js_array_holey_elements_map) \
- V(Map, js_array_holey_smi_elements_map) \
- V(Map, js_array_packed_double_elements_map) \
- V(Map, js_array_packed_elements_map) \
- V(Map, js_array_packed_smi_elements_map) \
- V(Map, sloppy_arguments_map) \
- V(Map, slow_object_with_null_prototype_map) \
- V(Map, strict_arguments_map) \
- V(ScriptContextTable, script_context_table) \
- V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \
- V(SharedFunctionInfo, promise_catch_finally_shared_fun) \
- V(SharedFunctionInfo, promise_then_finally_shared_fun) \
- V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun)
-
-// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
-// happened when Turbofan is invoked via --always-opt.
-#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
- V(Map, async_function_object_map) \
- V(Map, map_key_iterator_map) \
- V(Map, map_key_value_iterator_map) \
- V(Map, map_value_iterator_map) \
- V(Map, set_key_value_iterator_map) \
- V(Map, set_value_iterator_map)
-
-#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
-
-class NativeContextRef : public ContextRef {
- public:
- using ContextRef::ContextRef;
- Handle<NativeContext> object() const;
-
- void Serialize();
-
-#define DECL_ACCESSOR(type, name) type##Ref name() const;
- BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCESSOR
-
- MapRef GetFunctionMapFromIndex(int index) const;
- MapRef GetInitialJSArrayMap(ElementsKind kind) const;
- base::Optional<JSFunctionRef> GetConstructorFunction(const MapRef& map) const;
-};
-
-class NameRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Name> object() const;
-
- bool IsUniqueName() const;
-};
-
-class ScriptContextTableRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<ScriptContextTable> object() const;
-
- struct LookupResult {
- ContextRef context;
- bool immutable;
- int index;
- };
-
- base::Optional<LookupResult> lookup(const NameRef& name) const;
-};
-
-class DescriptorArrayRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<DescriptorArray> object() const;
-};
-
-class FeedbackCellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FeedbackCell> object() const;
-
- HeapObjectRef value() const;
-};
-
-class FeedbackVectorRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FeedbackVector> object() const;
-
- ObjectRef get(FeedbackSlot slot) const;
-
- void SerializeSlots();
-};
-
-class FunctionTemplateInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FunctionTemplateInfo> object() const;
-
- void Serialize();
- ObjectRef call_code() const;
-};
-
-class CallHandlerInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<CallHandlerInfo> object() const;
-
- Address callback() const;
-
- void Serialize();
- ObjectRef data() const;
-};
-
-class AllocationSiteRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<AllocationSite> object() const;
-
- bool PointsToLiteral() const;
- AllocationType GetAllocationType() const;
- ObjectRef nested_site() const;
-
- // {IsFastLiteral} determines whether the given array or object literal
- // boilerplate satisfies all limits to be considered for fast deep-copying
- // and computes the total size of all objects that are part of the graph.
- //
- // If PointsToLiteral() is false, then IsFastLiteral() is also false.
- bool IsFastLiteral() const;
- // We only serialize boilerplate if IsFastLiteral is true.
- base::Optional<JSObjectRef> boilerplate() const;
-
- ElementsKind GetElementsKind() const;
- bool CanInlineCall() const;
-};
-
-class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Map> object() const;
-
- int instance_size() const;
- InstanceType instance_type() const;
- int GetInObjectProperties() const;
- int GetInObjectPropertiesStartInWords() const;
- int NumberOfOwnDescriptors() const;
- int GetInObjectPropertyOffset(int index) const;
- int constructor_function_index() const;
- int NextFreePropertyIndex() const;
- int UnusedPropertyFields() const;
- ElementsKind elements_kind() const;
- bool is_stable() const;
- bool is_extensible() const;
- bool is_constructor() const;
- bool has_prototype_slot() const;
- bool is_access_check_needed() const;
- bool is_deprecated() const;
- bool CanBeDeprecated() const;
- bool CanTransition() const;
- bool IsInobjectSlackTrackingInProgress() const;
- bool is_dictionary_map() const;
- bool IsFixedCowArrayMap() const;
- bool IsPrimitiveMap() const;
- bool is_undetectable() const;
- bool is_callable() const;
- bool has_indexed_interceptor() const;
- bool has_hidden_prototype() const;
- bool is_migration_target() const;
- bool supports_fast_array_iteration() const;
- bool supports_fast_array_resize() const;
- bool IsMapOfCurrentGlobalProxy() const;
-
- OddballType oddball_type() const;
-
-#define DEF_TESTER(Type, ...) bool Is##Type##Map() const;
- INSTANCE_TYPE_CHECKERS(DEF_TESTER)
-#undef DEF_TESTER
-
- void SerializeBackPointer();
- HeapObjectRef GetBackPointer() const;
-
- void SerializePrototype();
- bool serialized_prototype() const;
- HeapObjectRef prototype() const;
-
- void SerializeForElementLoad();
-
- void SerializeForElementStore();
- bool HasOnlyStablePrototypesWithFastElements(
- ZoneVector<MapRef>* prototype_maps);
-
- // Concerning the underlying instance_descriptors:
- void SerializeOwnDescriptors();
- void SerializeOwnDescriptor(int descriptor_index);
- MapRef FindFieldOwner(int descriptor_index) const;
- PropertyDetails GetPropertyDetails(int descriptor_index) const;
- NameRef GetPropertyKey(int descriptor_index) const;
- FieldIndex GetFieldIndexFor(int descriptor_index) const;
- ObjectRef GetFieldType(int descriptor_index) const;
- bool IsUnboxedDoubleField(int descriptor_index) const;
-
- // Available after calling JSFunctionRef::Serialize on a function that has
- // this map as initial map.
- ObjectRef GetConstructor() const;
- base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
-};
-
-class FixedArrayBaseRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FixedArrayBase> object() const;
-
- int length() const;
-};
-
-class FixedArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<FixedArray> object() const;
-
- ObjectRef get(int i) const;
-};
-
-class FixedDoubleArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<FixedDoubleArray> object() const;
-
- double get_scalar(int i) const;
- bool is_the_hole(int i) const;
-};
-
-class BytecodeArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<BytecodeArray> object() const;
-
- int register_count() const;
- int parameter_count() const;
- interpreter::Register incoming_new_target_or_generator_register() const;
-};
-
-class JSArrayRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSArray> object() const;
-
- ObjectRef length() const;
-
- // Return the element at key {index} if the array has a copy-on-write elements
- // storage and {index} is known to be an own data property.
- base::Optional<ObjectRef> GetOwnCowElement(uint32_t index,
- bool serialize = false) const;
-};
-
-class ScopeInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<ScopeInfo> object() const;
-
- int ContextLength() const;
-};
-
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(bool, is_safe_to_skip_arguments_adaptor) \
- V(bool, IsInlineable) \
- V(bool, is_compiled)
-
-class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<SharedFunctionInfo> object() const;
-
- int builtin_id() const;
- BytecodeArrayRef GetBytecodeArray() const;
-
-#define DECL_ACCESSOR(type, name) type name() const;
- BROKER_SFI_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCESSOR
-
- bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
- void SetSerializedForCompilation(FeedbackVectorRef feedback);
-};
-
-class StringRef : public NameRef {
- public:
- using NameRef::NameRef;
- Handle<String> object() const;
-
- int length() const;
- uint16_t GetFirstChar();
- base::Optional<double> ToNumber();
- bool IsSeqString() const;
- bool IsExternalString() const;
-};
-
-class SymbolRef : public NameRef {
- public:
- using NameRef::NameRef;
- Handle<Symbol> object() const;
-};
-
-class JSTypedArrayRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSTypedArray> object() const;
-
- bool is_on_heap() const;
- size_t length() const;
- void* external_pointer() const;
-
- void Serialize();
- bool serialized() const;
-
- HeapObjectRef buffer() const;
-};
-
-class ModuleRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Module> object() const;
-
- void Serialize();
-
- CellRef GetCell(int cell_index) const;
-};
-
-class CellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Cell> object() const;
-
- ObjectRef value() const;
-};
-
-class JSGlobalProxyRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSGlobalProxy> object() const;
-
- // If {serialize} is false:
- // If the property is known to exist as a property cell (on the global
- // object), return that property cell. Otherwise (not known to exist as a
- // property cell or known not to exist as a property cell) return nothing.
- // If {serialize} is true:
- // Like above but potentially access the heap and serialize the necessary
- // information.
- base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name,
- bool serialize = false) const;
-};
-
-class CodeRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Code> object() const;
-};
-
-class InternalizedStringRef : public StringRef {
- public:
- using StringRef::StringRef;
- Handle<InternalizedString> object() const;
-};
-
-class ElementAccessFeedback;
-class NamedAccessFeedback;
-
-class ProcessedFeedback : public ZoneObject {
- public:
- enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
- Kind kind() const { return kind_; }
-
- ElementAccessFeedback const* AsElementAccess() const;
- NamedAccessFeedback const* AsNamedAccess() const;
-
- protected:
- explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
-
- private:
- Kind const kind_;
-};
-
-class InsufficientFeedback final : public ProcessedFeedback {
- public:
- InsufficientFeedback();
-};
-
-class GlobalAccessFeedback : public ProcessedFeedback {
- public:
- explicit GlobalAccessFeedback(PropertyCellRef cell);
- GlobalAccessFeedback(ContextRef script_context, int slot_index,
- bool immutable);
-
- bool IsPropertyCell() const;
- PropertyCellRef property_cell() const;
-
- bool IsScriptContextSlot() const { return !IsPropertyCell(); }
- ContextRef script_context() const;
- int slot_index() const;
- bool immutable() const;
-
- base::Optional<ObjectRef> GetConstantHint() const;
-
- private:
- ObjectRef const cell_or_context_;
- int const index_and_immutable_;
-};
-
-class ElementAccessFeedback : public ProcessedFeedback {
- public:
- explicit ElementAccessFeedback(Zone* zone);
-
- // No transition sources appear in {receiver_maps}.
- // All transition targets appear in {receiver_maps}.
- ZoneVector<Handle<Map>> receiver_maps;
- ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
-
- class MapIterator {
- public:
- bool done() const;
- void advance();
- MapRef current() const;
-
- private:
- friend class ElementAccessFeedback;
-
- explicit MapIterator(ElementAccessFeedback const& processed,
- JSHeapBroker* broker);
-
- ElementAccessFeedback const& processed_;
- JSHeapBroker* const broker_;
- size_t index_ = 0;
- };
-
- // Iterator over all maps: first {receiver_maps}, then transition sources.
- MapIterator all_maps(JSHeapBroker* broker) const;
-};
-
-class NamedAccessFeedback : public ProcessedFeedback {
- public:
- NamedAccessFeedback(NameRef const& name,
- ZoneVector<PropertyAccessInfo> const& access_infos);
-
- NameRef const& name() const { return name_; }
- ZoneVector<PropertyAccessInfo> const& access_infos() const {
- return access_infos_;
- }
-
- private:
- NameRef const name_;
- ZoneVector<PropertyAccessInfo> const access_infos_;
-};
-
struct FeedbackSource {
FeedbackSource(Handle<FeedbackVector> vector_, FeedbackSlot slot_)
: vector(vector_), slot(slot_) {}
@@ -821,26 +49,28 @@ struct FeedbackSource {
};
};
-#define TRACE_BROKER(broker, x) \
- do { \
- if (FLAG_trace_heap_broker_verbose) broker->Trace() << x << '\n'; \
+#define TRACE_BROKER(broker, x) \
+ do { \
+ if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \
+ broker->Trace() << x << '\n'; \
} while (false)
#define TRACE_BROKER_MISSING(broker, x) \
do { \
- if (FLAG_trace_heap_broker) \
+ if (broker->tracing_enabled()) \
broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \
} while (false)
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
- JSHeapBroker(Isolate* isolate, Zone* broker_zone);
+ JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled);
void SetNativeContextRef();
void SerializeStandardObjects();
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return current_zone_; }
+ bool tracing_enabled() const { return tracing_enabled_; }
NativeContextRef native_context() const { return native_context_.value(); }
PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
@@ -875,12 +105,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
- MapHandles const& maps);
+ MapHandles const& maps, KeyedAccessMode const& keyed_mode);
GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(
FeedbackSource const& source);
+ BytecodeAnalysis const& GetBytecodeAnalysis(
+ Handle<BytecodeArray> bytecode_array, BailoutId osr_offset,
+ bool analyze_liveness, bool serialize);
+
base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
+ // If there is no result stored for {map}, we return an Invalid
+ // PropertyAccessInfo.
+ PropertyAccessInfo GetAccessInfoForLoadingThen(MapRef map);
+ void CreateAccessInfoForLoadingThen(MapRef map,
+ CompilationDependencies* dependencies);
+ PropertyAccessInfo GetAccessInfoForLoadingExec(MapRef map);
+ PropertyAccessInfo const& CreateAccessInfoForLoadingExec(
+ MapRef map, CompilationDependencies* dependencies);
+
std::ostream& Trace();
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -902,12 +145,19 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Handle<JSObject>::equal_to>
array_and_object_prototypes_;
BrokerMode mode_ = kDisabled;
+ bool const tracing_enabled_;
StdoutStream trace_out_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
FeedbackSource::Hash, FeedbackSource::Equal>
feedback_;
+ ZoneUnorderedMap<ObjectData*, BytecodeAnalysis*> bytecode_analyses_;
+ typedef ZoneUnorderedMap<MapRef, PropertyAccessInfo, ObjectRef::Hash,
+ ObjectRef::Equal>
+ MapToAccessInfos;
+ MapToAccessInfos ais_for_loading_then_;
+ MapToAccessInfos ais_for_loading_exec_;
static const size_t kMinimalRefsBucketCount = 8; // must be power of 2
static const size_t kInitialRefsBucketCount = 1024; // must be power of 2
@@ -948,6 +198,23 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
// compilation is finished.
bool CanInlineElementAccess(MapRef const& map);
+class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray {
+ public:
+ explicit OffHeapBytecodeArray(BytecodeArrayRef bytecode_array);
+
+ int length() const override;
+ int parameter_count() const override;
+ uint8_t get(int index) const override;
+ void set(int index, uint8_t value) override;
+ Address GetFirstBytecodeAddress() const override;
+ Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const override;
+ bool IsConstantAtIndexSmi(int index) const override;
+ Smi GetConstantAtIndexAsSmi(int index) const override;
+
+ private:
+ BytecodeArrayRef array_;
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index cc48ae80cb..7e7c9e3a0e 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -30,8 +30,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
ObjectRef object(broker(), HeapConstantOf(node->op()));
if (object.IsJSFunction()) object.AsJSFunction().Serialize();
if (object.IsJSObject()) object.AsJSObject().SerializeObjectCreateMap();
- if (object.IsModule()) object.AsModule().Serialize();
- if (object.IsContext()) object.AsContext().SerializeContextChain();
+ if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize();
break;
}
case IrOpcode::kJSCreateArray: {
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index f78635b139..e11d6b59a3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -7,6 +7,7 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects/objects-inl.h"
@@ -21,15 +22,9 @@ namespace compiler {
} while (false)
namespace {
-
-bool IsSmallInlineFunction(BytecodeArrayRef bytecode) {
- // Forcibly inline small functions.
- if (bytecode.length() <= FLAG_max_inlined_bytecode_size_small) {
- return true;
- }
- return false;
+bool IsSmall(BytecodeArrayRef bytecode) {
+ return bytecode.length() <= FLAG_max_inlined_bytecode_size_small;
}
-
} // namespace
JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
@@ -65,7 +60,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.functions[n] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[n].value();
if (function.IsSerializedForCompilation()) {
- out.bytecode[n] = function.shared().GetBytecodeArray(), isolate();
+ out.bytecode[n] = function.shared().GetBytecodeArray();
}
}
out.num_functions = value_input_count;
@@ -91,6 +86,11 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+ if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute &&
+ mode_ != kStressInlining) {
+ return NoChange();
+ }
+
// Check if we already saw that {node} before, and if so, just skip it.
if (seen_.find(node->id()) != seen_.end()) return NoChange();
seen_.insert(node->id());
@@ -107,7 +107,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
return NoChange();
}
- bool can_inline = false, force_inline_small = true;
+ bool can_inline_candidate = false, candidate_is_small = true;
candidate.total_size = 0;
Node* frame_state = NodeProperties::GetFrameStateInput(node);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
@@ -155,15 +155,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// serialized.
BytecodeArrayRef bytecode = candidate.bytecode[i].value();
if (candidate.can_inline_function[i]) {
- can_inline = true;
+ can_inline_candidate = true;
candidate.total_size += bytecode.length();
}
- // We don't force inline small functions if any of them is not inlineable.
- if (!IsSmallInlineFunction(bytecode)) {
- force_inline_small = false;
- }
+ candidate_is_small = candidate_is_small && IsSmall(bytecode);
}
- if (!can_inline) return NoChange();
+ if (!can_inline_candidate) return NoChange();
// Gather feedback on how often this call site has been hit before.
if (node->opcode() == IrOpcode::kJSCall) {
@@ -195,9 +192,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
// Forcibly inline small functions here. In the case of polymorphic inlining
- // force_inline_small is set only when all functions are small.
- if (force_inline_small &&
- cumulative_count_ < FLAG_max_inlined_bytecode_size_absolute) {
+ // candidate_is_small is set only when all functions are small.
+ if (candidate_is_small) {
TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(),
node->op()->mnemonic());
return InlineCandidate(candidate, true);
@@ -221,21 +217,24 @@ void JSInliningHeuristic::Finalize() {
Candidate candidate = *i;
candidates_.erase(i);
+ // Make sure we don't try to inline dead candidate nodes.
+ if (candidate.node->IsDead()) {
+ continue;
+ }
+
// Make sure we have some extra budget left, so that any small functions
// exposed by this function would be given a chance to inline.
double size_of_candidate =
candidate.total_size * FLAG_reserve_inline_budget_scale_factor;
- int total_size = cumulative_count_ + static_cast<int>(size_of_candidate);
+ int total_size =
+ total_inlined_bytecode_size_ + static_cast<int>(size_of_candidate);
if (total_size > FLAG_max_inlined_bytecode_size_cumulative) {
// Try if any smaller functions are available to inline.
continue;
}
- // Make sure we don't try to inline dead candidate nodes.
- if (!candidate.node->IsDead()) {
- Reduction const reduction = InlineCandidate(candidate, false);
- if (reduction.Changed()) return;
- }
+ Reduction const reduction = InlineCandidate(candidate, false);
+ if (reduction.Changed()) return;
}
}
@@ -630,7 +629,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
if (num_calls == 1) {
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
- cumulative_count_ += candidate.bytecode[0].value().length();
+ total_inlined_bytecode_size_ += candidate.bytecode[0].value().length();
}
return reduction;
}
@@ -688,20 +687,19 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
ReplaceWithValue(node, value, effect, control);
// Inline the individual, cloned call sites.
- for (int i = 0; i < num_calls; ++i) {
- Node* node = calls[i];
+ for (int i = 0; i < num_calls && total_inlined_bytecode_size_ <
+ FLAG_max_inlined_bytecode_size_absolute;
+ ++i) {
if (candidate.can_inline_function[i] &&
- (small_function ||
- cumulative_count_ < FLAG_max_inlined_bytecode_size_cumulative)) {
+ (small_function || total_inlined_bytecode_size_ <
+ FLAG_max_inlined_bytecode_size_cumulative)) {
+ Node* node = calls[i];
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
+ total_inlined_bytecode_size_ += candidate.bytecode[i]->length();
// Killing the call node is not strictly necessary, but it is safer to
// make sure we do not resurrect the node.
node->Kill();
- // Small functions don't count towards the budget.
- if (!small_function) {
- cumulative_count_ += candidate.bytecode[i]->length();
- }
}
}
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 99ad258c31..b143e9b67f 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -97,7 +97,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
SourcePositionTable* source_positions_;
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
- int cumulative_count_ = 0;
+ int total_inlined_bytecode_size_ = 0;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index e43e710da7..91cbea2346 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -7,11 +7,13 @@
#include "src/ast/ast.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -466,14 +468,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
AllowHandleAllocation allow_handle_alloc;
AllowHeapAllocation allow_heap_alloc;
AllowCodeDependencyChange allow_code_dep_change;
- Handle<Context> native_context =
- handle(info_->native_context(), isolate());
-
- BuildGraphFromBytecode(broker(), zone(), bytecode_array.object(),
- shared_info.value().object(),
- feedback_vector.object(), BailoutId::None(),
- jsgraph(), call.frequency(), source_positions_,
- native_context, inlining_id, flags);
+ CallFrequency frequency = call.frequency();
+ Handle<NativeContext> native_context(info_->native_context(), isolate());
+ BuildGraphFromBytecode(
+ broker(), zone(), bytecode_array.object(),
+ shared_info.value().object(), feedback_vector.object(),
+ BailoutId::None(), jsgraph(), frequency, source_positions_,
+ native_context, inlining_id, flags, &info_->tick_counter());
}
// Extract the inlinee start/end nodes.
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 94a9e71b2e..f50f7b591d 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -59,7 +59,8 @@ class JSInliner final : public AdvancedReducer {
SourcePositionTable* const source_positions_;
base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
- FeedbackVectorRef DetermineCallContext(Node* node, Node*& context_out);
+ FeedbackVectorRef DetermineCallContext(
+ Node* node, Node*& context_out); // NOLINT(runtime/references)
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 312ab38f51..7d742a5f32 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -33,12 +33,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-// This is needed for gc_mole which will compile this file without the full set
-// of GN defined macros.
-#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64
-#endif
-
namespace {
bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) {
@@ -513,8 +507,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
Node* receiver, Node* effect, Handle<HeapObject> prototype) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
// Try to determine either that all of the {receiver_maps} have the given
@@ -686,6 +680,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// ES section #sec-promise-resolve-functions
Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode());
Node* promise = NodeProperties::GetValueInput(node, 0);
Node* resolution = NodeProperties::GetValueInput(node, 1);
@@ -702,9 +697,17 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- access_info_factory.ComputePropertyAccessInfos(
- resolution_maps, factory()->then_string(), AccessMode::kLoad,
- &access_infos);
+ if (!FLAG_concurrent_inlining) {
+ access_info_factory.ComputePropertyAccessInfos(
+ resolution_maps, factory()->then_string(), AccessMode::kLoad,
+ &access_infos);
+ } else {
+ // Obtain pre-computed access infos from the broker.
+ for (auto map : resolution_maps) {
+ MapRef map_ref(broker(), map);
+ access_infos.push_back(broker()->GetAccessInfoForLoadingThen(map_ref));
+ }
+ }
PropertyAccessInfo access_info =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
@@ -975,9 +978,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
}
Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
-
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
LoadGlobalParameters const& p = LoadGlobalParametersOf(node->op());
if (!p.feedback().IsValid()) return NoChange();
FeedbackSource source(p.feedback());
@@ -1007,9 +1009,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
}
Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
-
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
StoreGlobalParameters const& p = StoreGlobalParametersOf(node->op());
@@ -1298,7 +1299,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
- Node* node, Node* value, FeedbackNexus const& nexus, NameRef const& name,
+ Node* node, Node* value, FeedbackSource const& source, NameRef const& name,
AccessMode access_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
@@ -1312,11 +1313,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
}
- return ReducePropertyAccessUsingProcessedFeedback(node, nullptr, name, value,
- nexus, access_mode);
+ return ReducePropertyAccess(node, nullptr, name, value, source, access_mode);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
Node* const receiver = NodeProperties::GetValueInput(node, 0);
@@ -1355,56 +1356,47 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
}
}
- // Extract receiver maps from the load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), nexus, name,
+ return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(),
+ FeedbackSource(p.feedback()), name,
AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(
- node, value, nexus, NameRef(broker(), p.name()), AccessMode::kStore);
+ return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
+ NameRef(broker(), p.name()),
+ AccessMode::kStore);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the creation of a named property based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(node, value, nexus,
+ return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
NameRef(broker(), p.name()),
AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
- Node* node, Node* index, Node* value, AccessMode access_mode,
- KeyedAccessLoadMode load_mode) {
+ Node* node, Node* index, Node* value, KeyedAccessMode const& keyed_mode) {
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Strings are immutable in JavaScript.
- if (access_mode == AccessMode::kStore) return NoChange();
+ if (keyed_mode.access_mode() == AccessMode::kStore) return NoChange();
// `in` cannot be used on strings.
- if (access_mode == AccessMode::kHas) return NoChange();
+ if (keyed_mode.access_mode() == AccessMode::kHas) return NoChange();
// Ensure that the {receiver} is actually a String.
receiver = effect = graph()->NewNode(
@@ -1416,7 +1408,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
// Load the single character string from {receiver} or yield undefined
// if the {index} is out of bounds (depending on the {load_mode}).
value = BuildIndexedStringLoad(receiver, index, length, &effect, &control,
- load_mode);
+ keyed_mode.load_mode());
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1437,24 +1429,31 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* node, Node* index, Node* value,
- ElementAccessFeedback const& processed, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ ElementAccessFeedback const& processed) {
DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
-
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
node->opcode() == IrOpcode::kJSHasProperty);
+
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state =
NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
+ AccessMode access_mode = processed.keyed_mode.access_mode();
+ if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
+ receiver->opcode() == IrOpcode::kHeapConstant) {
+ Reduction reduction = ReduceKeyedLoadFromHeapConstant(
+ node, index, access_mode, processed.keyed_mode.load_mode());
+ if (reduction.Changed()) return reduction;
+ }
+
if (HasOnlyStringMaps(broker(), processed.receiver_maps)) {
DCHECK(processed.transitions.empty());
- return ReduceElementAccessOnString(node, index, value, access_mode,
- load_mode);
+ return ReduceElementAccessOnString(node, index, value,
+ processed.keyed_mode);
}
// Compute element access infos for the receiver maps.
@@ -1485,7 +1484,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// then we need to check that all prototypes have stable maps with
// fast elements (and we need to guard against changes to that below).
if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
- IsGrowStoreMode(store_mode)) &&
+ IsGrowStoreMode(processed.keyed_mode.store_mode())) &&
!receiver_map.HasOnlyStablePrototypesWithFastElements(
&prototype_maps)) {
return NoChange();
@@ -1558,7 +1557,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Access the actual element.
ValueEffectControl continuation =
BuildElementAccess(receiver, index, value, effect, control, access_info,
- access_mode, load_mode, store_mode);
+ processed.keyed_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1591,7 +1590,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
? ElementsTransition::kFastTransition
: ElementsTransition::kSlowTransition,
transition_source.object(), transition_target.object())),
- receiver, effect, control);
+ receiver, this_effect, this_control);
}
// Perform map check(s) on {receiver}.
@@ -1623,9 +1622,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Access the actual element.
- ValueEffectControl continuation = BuildElementAccess(
- this_receiver, this_index, this_value, this_effect, this_control,
- access_info, access_mode, load_mode, store_mode);
+ ValueEffectControl continuation =
+ BuildElementAccess(this_receiver, this_index, this_value, this_effect,
+ this_control, access_info, processed.keyed_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -1659,7 +1658,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
- Node* node, Node* key, FeedbackNexus const& nexus, AccessMode access_mode,
+ Node* node, Node* key, AccessMode access_mode,
KeyedAccessLoadMode load_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSHasProperty);
@@ -1715,54 +1714,24 @@ Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
// accesses using the known length, which doesn't change.
if (receiver_ref.IsString()) {
DCHECK_NE(access_mode, AccessMode::kHas);
- // We can only assume that the {index} is a valid array index if the
- // IC is in element access mode and not MEGAMORPHIC, otherwise there's
- // no guard for the bounds check below.
- if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
- // Ensure that {key} is less than {receiver} length.
- Node* length = jsgraph()->Constant(receiver_ref.AsString().length());
-
- // Load the single character string from {receiver} or yield
- // undefined if the {key} is out of bounds (depending on the
- // {load_mode}).
- Node* value = BuildIndexedStringLoad(receiver, key, length, &effect,
- &control, load_mode);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
+ // Ensure that {key} is less than {receiver} length.
+ Node* length = jsgraph()->Constant(receiver_ref.AsString().length());
- return NoChange();
-}
-
-Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
- Node* node, Node* key, Node* value, FeedbackNexus const& nexus,
- AccessMode access_mode, KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty ||
- node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
- node->opcode() == IrOpcode::kJSHasProperty);
-
- Node* receiver = NodeProperties::GetValueInput(node, 0);
-
- if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
- receiver->opcode() == IrOpcode::kHeapConstant) {
- Reduction reduction = ReduceKeyedLoadFromHeapConstant(
- node, key, nexus, access_mode, load_mode);
- if (reduction.Changed()) return reduction;
+ // Load the single character string from {receiver} or yield
+ // undefined if the {key} is out of bounds (depending on the
+ // {load_mode}).
+ Node* value = BuildIndexedStringLoad(receiver, key, length, &effect,
+ &control, load_mode);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
- return ReducePropertyAccessUsingProcessedFeedback(node, key, base::nullopt,
- value, nexus, access_mode,
- load_mode, store_mode);
+ return NoChange();
}
-Reduction
-JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
+Reduction JSNativeContextSpecialization::ReducePropertyAccess(
Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
- FeedbackNexus const& nexus, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ FeedbackSource const& source, AccessMode access_mode) {
DCHECK_EQ(key == nullptr, static_name.has_value());
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
@@ -1777,11 +1746,12 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
ProcessedFeedback const* processed = nullptr;
if (FLAG_concurrent_inlining) {
- processed = broker()->GetFeedback(FeedbackSource(nexus));
+ processed = broker()->GetFeedback(source);
// TODO(neis): Infer maps from the graph and consolidate with feedback/hints
// and filter impossible candidates based on inferred root map.
} else {
// TODO(neis): Try to unify this with the similar code in the serializer.
+ FeedbackNexus nexus(source.vector, source.slot);
if (nexus.ic_state() == UNINITIALIZED) {
processed = new (zone()) InsufficientFeedback();
} else {
@@ -1801,8 +1771,8 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
processed = new (zone()) NamedAccessFeedback(*name, access_infos);
} else if (nexus.GetKeyType() == ELEMENT &&
MEGAMORPHIC != nexus.ic_state()) {
- processed =
- broker()->ProcessFeedbackMapsForElementAccess(receiver_maps);
+ processed = broker()->ProcessFeedbackMapsForElementAccess(
+ receiver_maps, KeyedAccessMode::FromNexus(nexus));
}
}
}
@@ -1818,9 +1788,10 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
return ReduceNamedAccess(node, value, *processed->AsNamedAccess(),
access_mode, key);
case ProcessedFeedback::kElementAccess:
+ CHECK_EQ(processed->AsElementAccess()->keyed_mode.access_mode(),
+ access_mode);
return ReduceElementAccess(node, key, value,
- *processed->AsElementAccess(), access_mode,
- load_mode, store_mode);
+ *processed->AsElementAccess());
case ProcessedFeedback::kGlobalAccess:
UNREACHABLE();
}
@@ -1846,21 +1817,15 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
}
Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* key = NodeProperties::GetValueInput(node, 1);
Node* value = jsgraph()->Dead();
- // Extract receiver maps from the has property IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access load mode from the keyed load IC.
- KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
-
- // Try to lower the keyed access based on the {nexus}.
- return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kHas, load_mode,
- STANDARD_STORE);
+ return ReducePropertyAccess(node, key, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kHas);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
@@ -1970,6 +1935,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
}
Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* name = NodeProperties::GetValueInput(node, 1);
@@ -1979,62 +1945,49 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
if (reduction.Changed()) return reduction;
}
- // Extract receiver maps from the keyed load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access load mode from the keyed load IC.
- KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
-
- // Try to lower the keyed access based on the {nexus}.
Node* value = jsgraph()->Dead();
- return ReduceKeyedAccess(node, name, value, nexus, AccessMode::kLoad,
- load_mode, STANDARD_STORE);
+ return ReducePropertyAccess(node, name, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* const key = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access store mode from the keyed store IC.
- KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
-
- // Try to lower the keyed access based on the {nexus}.
- return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kStore,
- STANDARD_LOAD, store_mode);
+ return ReducePropertyAccess(node, key, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kStore);
}
Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
Node* receiver, Node* context, Node* frame_state, Node** effect,
Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
- Node* target = jsgraph()->Constant(access_info.constant());
+ ObjectRef constant(broker(), access_info.constant());
+ Node* target = jsgraph()->Constant(constant);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
// Introduce the call to the getter function.
Node* value;
- ObjectRef constant(broker(), access_info.constant());
if (constant.IsJSFunction()) {
value = *effect = *control = graph()->NewNode(
jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state, *effect, *control);
} else {
- auto function_template_info = constant.AsFunctionTemplateInfo();
- function_template_info.Serialize();
- Node* holder =
- access_info.holder().is_null()
- ? receiver
- : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ Node* holder = access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(ObjectRef(
+ broker(), access_info.holder().ToHandleChecked()));
SharedFunctionInfoRef shared_info(
broker(), frame_info.shared_info().ToHandleChecked());
- value = InlineApiCall(receiver, holder, frame_state, nullptr, effect,
- control, shared_info, function_template_info);
+
+ value =
+ InlineApiCall(receiver, holder, frame_state, nullptr, effect, control,
+ shared_info, constant.AsFunctionTemplateInfo());
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -2052,26 +2005,24 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* receiver, Node* value, Node* context, Node* frame_state,
Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
- Node* target = jsgraph()->Constant(access_info.constant());
+ ObjectRef constant(broker(), access_info.constant());
+ Node* target = jsgraph()->Constant(constant);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
// Introduce the call to the setter function.
- ObjectRef constant(broker(), access_info.constant());
if (constant.IsJSFunction()) {
*effect = *control = graph()->NewNode(
jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state, *effect, *control);
} else {
- auto function_template_info = constant.AsFunctionTemplateInfo();
- function_template_info.Serialize();
- Node* holder =
- access_info.holder().is_null()
- ? receiver
- : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ Node* holder = access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(ObjectRef(
+ broker(), access_info.holder().ToHandleChecked()));
SharedFunctionInfoRef shared_info(
broker(), frame_info.shared_info().ToHandleChecked());
InlineApiCall(receiver, holder, frame_state, value, effect, control,
- shared_info, function_template_info);
+ shared_info, constant.AsFunctionTemplateInfo());
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -2088,8 +2039,16 @@ Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
Node** control, SharedFunctionInfoRef const& shared_info,
FunctionTemplateInfoRef const& function_template_info) {
- auto call_handler_info =
- function_template_info.call_code().AsCallHandlerInfo();
+ if (!function_template_info.has_call_code()) {
+ return nullptr;
+ }
+
+ if (!function_template_info.call_code().has_value()) {
+ TRACE_BROKER_MISSING(broker(), "call code for function template info "
+ << function_template_info);
+ return nullptr;
+ }
+ CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
// Only setters have a value.
int const argc = value == nullptr ? 0 : 1;
@@ -2151,7 +2110,8 @@ JSNativeContextSpecialization::BuildPropertyLoad(
value = InlinePropertyGetterCall(receiver, context, frame_state, &effect,
&control, if_exceptions, access_info);
} else if (access_info.IsModuleExport()) {
- Node* cell = jsgraph()->Constant(access_info.export_cell());
+ Node* cell = jsgraph()->Constant(
+ ObjectRef(broker(), access_info.constant()).AsCell());
value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
@@ -2382,7 +2342,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
// Check if we need to grow the properties backing store
// with this transitioning store.
MapRef transition_map_ref(broker(), transition_map);
- transition_map_ref.SerializeBackPointer();
MapRef original_map = transition_map_ref.GetBackPointer().AsMap();
if (original_map.UnusedPropertyFields() == 0) {
DCHECK(!field_index.is_inobject());
@@ -2404,7 +2363,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
common()->BeginRegion(RegionObservability::kObservable), effect);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForMap()), receiver,
- jsgraph()->Constant(transition_map), effect, control);
+ jsgraph()->Constant(transition_map_ref), effect, control);
effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
value, effect, control);
effect = graph()->NewNode(common()->FinishRegion(),
@@ -2495,21 +2454,16 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
Node* const index = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access store mode from the keyed store IC.
- KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
-
- return ReduceKeyedAccess(node, index, value, nexus,
- AccessMode::kStoreInLiteral, STANDARD_LOAD,
- store_mode);
+ return ReducePropertyAccess(node, index, base::nullopt, value,
+ FeedbackSource(p.feedback()),
+ AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
@@ -2546,8 +2500,7 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
- ElementAccessInfo const& access_info, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ ElementAccessInfo const& access_info, KeyedAccessMode const& keyed_mode) {
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
@@ -2583,7 +2536,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// for Chrome. Node and Electron both set this limit to 0. Setting
// the base to Smi zero here allows the EffectControlLinearizer to
// optimize away the tricky part of the access later.
- if (V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP == 0) {
+ if (JSTypedArray::kMaxSizeInHeap == 0) {
base_pointer = jsgraph()->ZeroConstant();
} else {
base_pointer = effect =
@@ -2629,8 +2582,10 @@ JSNativeContextSpecialization::BuildElementAccess(
buffer_or_receiver = buffer;
}
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
+ if ((keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) ||
+ (keyed_mode.IsStore() &&
+ keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) {
// Only check that the {index} is in SignedSmall range. We do the actual
// bounds check below and just skip the property access if it's out of
// bounds for the {receiver}.
@@ -2651,10 +2606,10 @@ JSNativeContextSpecialization::BuildElementAccess(
// Access the actual element.
ExternalArrayType external_array_type =
GetArrayTypeFromElementsKind(elements_kind);
- switch (access_mode) {
+ switch (keyed_mode.access_mode()) {
case AccessMode::kLoad: {
// Check if we can return undefined for out-of-bounds loads.
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) {
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(
@@ -2716,7 +2671,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Check if we can skip the out-of-bounds store.
- if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
+ if (keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -2766,9 +2721,9 @@ JSNativeContextSpecialization::BuildElementAccess(
// Don't try to store to a copy-on-write backing store (unless supported by
// the store mode).
- if (access_mode == AccessMode::kStore &&
+ if (keyed_mode.access_mode() == AccessMode::kStore &&
IsSmiOrObjectElementsKind(elements_kind) &&
- !IsCOWHandlingStoreMode(store_mode)) {
+ !IsCOWHandlingStoreMode(keyed_mode.store_mode())) {
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
@@ -2791,11 +2746,10 @@ JSNativeContextSpecialization::BuildElementAccess(
elements, effect, control);
// Check if we might need to grow the {elements} backing store.
- if (IsGrowStoreMode(store_mode)) {
+ if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) {
// For growing stores we validate the {index} below.
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
- } else if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ } else if (keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
CanTreatHoleAsUndefined(receiver_maps)) {
// Check that the {index} is a valid array index, we do the actual
// bounds check below and just skip the store below if it's out of
@@ -2826,7 +2780,7 @@ JSNativeContextSpecialization::BuildElementAccess(
kFullWriteBarrier, LoadSensitivity::kCritical};
// Access the actual element.
- if (access_mode == AccessMode::kLoad) {
+ if (keyed_mode.access_mode() == AccessMode::kLoad) {
// Compute the real element access type, which includes the hole in case
// of holey backing stores.
if (IsHoleyElementsKind(elements_kind)) {
@@ -2839,7 +2793,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Check if we can return undefined for out-of-bounds loads.
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
CanTreatHoleAsUndefined(receiver_maps)) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
@@ -2923,7 +2877,7 @@ JSNativeContextSpecialization::BuildElementAccess(
effect, control);
}
}
- } else if (access_mode == AccessMode::kHas) {
+ } else if (keyed_mode.access_mode() == AccessMode::kHas) {
// For packed arrays with NoElementsProctector valid, a bound check
// is equivalent to HasProperty.
value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan(
@@ -2996,8 +2950,9 @@ JSNativeContextSpecialization::BuildElementAccess(
vtrue, vfalse, control);
}
} else {
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
+ DCHECK(keyed_mode.access_mode() == AccessMode::kStore ||
+ keyed_mode.access_mode() == AccessMode::kStoreInLiteral);
+
if (IsSmiElementsKind(elements_kind)) {
value = effect = graph()->NewNode(
simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
@@ -3011,11 +2966,11 @@ JSNativeContextSpecialization::BuildElementAccess(
// Ensure that copy-on-write backing store is writable.
if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode == STORE_HANDLE_COW) {
+ keyed_mode.store_mode() == STORE_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
- } else if (IsGrowStoreMode(store_mode)) {
+ } else if (IsGrowStoreMode(keyed_mode.store_mode())) {
// Determine the length of the {elements} backing store.
Node* elements_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
@@ -3053,7 +3008,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// If we didn't grow {elements}, it might still be COW, in which case we
// copy it now.
if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode == STORE_AND_GROW_HANDLE_COW) {
+ keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
@@ -3295,7 +3250,8 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
Node* receiver, Node* effect, MapHandles* receiver_maps) {
ZoneHandleSet<Map> maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect, &maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
+ &maps);
if (result == NodeProperties::kReliableReceiverMaps) {
for (size_t i = 0; i < maps.size(); ++i) {
receiver_maps->push_back(maps[i]);
@@ -3357,8 +3313,6 @@ SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
return jsgraph()->simplified();
}
-#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 7de2639966..8510c76bfc 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/objects/map.h"
@@ -93,24 +94,15 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
- ElementAccessFeedback const& processed,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode);
+ ElementAccessFeedback const& processed);
// In the case of non-keyed (named) accesses, pass the name as {static_name}
// and use {nullptr} for {key} (load/store modes are irrelevant).
- Reduction ReducePropertyAccessUsingProcessedFeedback(
- Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
- FeedbackNexus const& nexus, AccessMode access_mode,
- KeyedAccessLoadMode load_mode = STANDARD_LOAD,
- KeyedAccessStoreMode store_mode = STANDARD_STORE);
- Reduction ReduceKeyedAccess(Node* node, Node* key, Node* value,
- FeedbackNexus const& nexus,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode);
+ Reduction ReducePropertyAccess(Node* node, Node* key,
+ base::Optional<NameRef> static_name,
+ Node* value, FeedbackSource const& source,
+ AccessMode access_mode);
Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
- FeedbackNexus const& nexus,
+ FeedbackSource const& source,
NameRef const& name,
AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
@@ -123,12 +115,10 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
NameRef const& name, AccessMode access_mode,
Node* key, PropertyCellRef const& property_cell);
Reduction ReduceKeyedLoadFromHeapConstant(Node* node, Node* key,
- FeedbackNexus const& nexus,
AccessMode access_mode,
KeyedAccessLoadMode load_mode);
Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode);
+ KeyedAccessMode const& keyed_mode);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
Reduction ReduceJSToString(Node* node);
@@ -197,10 +187,11 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
FunctionTemplateInfoRef const& function_template_info);
// Construct the appropriate subgraph for element access.
- ValueEffectControl BuildElementAccess(
- Node* receiver, Node* index, Node* value, Node* effect, Node* control,
- ElementAccessInfo const& access_info, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode);
+ ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
+ Node* value, Node* effect,
+ Node* control,
+ ElementAccessInfo const& access_info,
+ KeyedAccessMode const& keyed_mode);
// Construct appropriate subgraph to load from a String.
Node* BuildIndexedStringLoad(Node* receiver, Node* index, Node* length,
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index a779790b8d..e0f97922b2 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-std::ostream& operator<<(std::ostream& os, CallFrequency f) {
+std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {
if (f.IsUnknown()) return os << "unknown";
return os << f.value();
}
@@ -28,7 +28,6 @@ CallFrequency CallFrequencyOf(Operator const* op) {
return OpParameter<CallFrequency>(op);
}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
return os << p.arity() << ", " << p.start_index();
@@ -843,7 +842,8 @@ const Operator* JSOperatorBuilder::Call(size_t arity,
parameters); // parameter
}
-const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
+const Operator* JSOperatorBuilder::CallWithArrayLike(
+ CallFrequency const& frequency) {
return new (zone()) Operator1<CallFrequency>( // --
IrOpcode::kJSCallWithArrayLike, Operator::kNoProperties, // opcode
"JSCallWithArrayLike", // name
@@ -899,8 +899,10 @@ const Operator* JSOperatorBuilder::ConstructForwardVarargs(
parameters); // parameter
}
+// Note: frequency is taken by reference to work around a GCC bug
+// on AIX (v8:8193).
const Operator* JSOperatorBuilder::Construct(uint32_t arity,
- CallFrequency frequency,
+ CallFrequency const& frequency,
VectorSlotPair const& feedback) {
ConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<ConstructParameters>( // --
@@ -911,7 +913,7 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity,
}
const Operator* JSOperatorBuilder::ConstructWithArrayLike(
- CallFrequency frequency) {
+ CallFrequency const& frequency) {
return new (zone()) Operator1<CallFrequency>( // --
IrOpcode::kJSConstructWithArrayLike, // opcode
Operator::kNoProperties, // properties
@@ -921,7 +923,8 @@ const Operator* JSOperatorBuilder::ConstructWithArrayLike(
}
const Operator* JSOperatorBuilder::ConstructWithSpread(
- uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback) {
+ uint32_t arity, CallFrequency const& frequency,
+ VectorSlotPair const& feedback) {
ConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<ConstructParameters>( // --
IrOpcode::kJSConstructWithSpread, Operator::kNoProperties, // opcode
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 0f315b1cb5..e7d9acb152 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -48,7 +48,7 @@ class CallFrequency final {
}
bool operator!=(CallFrequency const& that) const { return !(*this == that); }
- friend size_t hash_value(CallFrequency f) {
+ friend size_t hash_value(CallFrequency const& f) {
return bit_cast<uint32_t>(f.value_);
}
@@ -58,7 +58,7 @@ class CallFrequency final {
float value_;
};
-std::ostream& operator<<(std::ostream&, CallFrequency);
+std::ostream& operator<<(std::ostream&, CallFrequency const&);
CallFrequency CallFrequencyOf(Operator const* op) V8_WARN_UNUSED_RESULT;
@@ -101,7 +101,7 @@ ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
// used as a parameter by JSConstruct and JSConstructWithSpread operators.
class ConstructParameters final {
public:
- ConstructParameters(uint32_t arity, CallFrequency frequency,
+ ConstructParameters(uint32_t arity, CallFrequency const& frequency,
VectorSlotPair const& feedback)
: arity_(arity), frequency_(frequency), feedback_(feedback) {}
@@ -757,7 +757,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
- const Operator* CallWithArrayLike(CallFrequency frequency);
+ const Operator* CallWithArrayLike(CallFrequency const& frequency);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
@@ -768,11 +768,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Construct(uint32_t arity,
- CallFrequency frequency = CallFrequency(),
+ CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
- const Operator* ConstructWithArrayLike(CallFrequency frequency);
+ const Operator* ConstructWithArrayLike(CallFrequency const& frequency);
const Operator* ConstructWithSpread(
- uint32_t arity, CallFrequency frequency = CallFrequency(),
+ uint32_t arity, CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
const Operator* LoadProperty(VectorSlotPair const& feedback);
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 9d882e8238..f3696bcc48 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -44,6 +44,25 @@ bool BinaryOperationHintToNumberOperationHint(
return false;
}
+bool BinaryOperationHintToBigIntOperationHint(
+ BinaryOperationHint binop_hint, BigIntOperationHint* bigint_hint) {
+ switch (binop_hint) {
+ case BinaryOperationHint::kSignedSmall:
+ case BinaryOperationHint::kSignedSmallInputs:
+ case BinaryOperationHint::kSigned32:
+ case BinaryOperationHint::kNumber:
+ case BinaryOperationHint::kNumberOrOddball:
+ case BinaryOperationHint::kAny:
+ case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kString:
+ return false;
+ case BinaryOperationHint::kBigInt:
+ *bigint_hint = BigIntOperationHint::kBigInt;
+ return true;
+ }
+ UNREACHABLE();
+}
+
} // namespace
class JSSpeculativeBinopBuilder final {
@@ -74,6 +93,11 @@ class JSSpeculativeBinopBuilder final {
hint);
}
+ bool GetBinaryBigIntOperationHint(BigIntOperationHint* hint) {
+ return BinaryOperationHintToBigIntOperationHint(GetBinaryOperationHint(),
+ hint);
+ }
+
bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
switch (GetCompareOperationHint()) {
case CompareOperationHint::kSignedSmall:
@@ -138,6 +162,16 @@ class JSSpeculativeBinopBuilder final {
UNREACHABLE();
}
+ const Operator* SpeculativeBigIntOp(BigIntOperationHint hint) {
+ switch (op_->opcode()) {
+ case IrOpcode::kJSAdd:
+ return simplified()->SpeculativeBigIntAdd(hint);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ }
+
const Operator* SpeculativeCompareOp(NumberOperationHint hint) {
switch (op_->opcode()) {
case IrOpcode::kJSEqual:
@@ -179,6 +213,16 @@ class JSSpeculativeBinopBuilder final {
return nullptr;
}
+ Node* TryBuildBigIntBinop() {
+ BigIntOperationHint hint;
+ if (GetBinaryBigIntOperationHint(&hint)) {
+ const Operator* op = SpeculativeBigIntOp(hint);
+ Node* node = BuildSpeculativeOperation(op);
+ return node;
+ }
+ return nullptr;
+ }
+
Node* TryBuildNumberCompare() {
NumberOperationHint hint;
if (GetCompareNumberOperationHint(&hint)) {
@@ -264,6 +308,15 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
operand, jsgraph()->SmiConstant(-1), effect,
control, slot);
node = b.TryBuildNumberBinop();
+ if (!node) {
+ FeedbackNexus nexus(feedback_vector(), slot);
+ if (nexus.GetBinaryOperationFeedback() ==
+ BinaryOperationHint::kBigInt) {
+ const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate(
+ BigIntOperationHint::kBigInt);
+ node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ }
+ }
break;
}
default:
@@ -345,6 +398,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
if (Node* node = b.TryBuildNumberBinop()) {
return LoweringResult::SideEffectFree(node, node, control);
}
+ if (op->opcode() == IrOpcode::kJSAdd) {
+ if (Node* node = b.TryBuildBigIntBinop()) {
+ return LoweringResult::SideEffectFree(node, node, control);
+ }
+ }
break;
}
case IrOpcode::kJSExponentiate: {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 7164a0b708..a74c019355 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -153,7 +153,8 @@ class JSTypeHintLowering {
private:
friend class JSSpeculativeBinopBuilder;
- Node* TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect, Node* control,
+ Node* TryBuildSoftDeopt(FeedbackNexus& nexus, // NOLINT(runtime/references)
+ Node* effect, Node* control,
DeoptimizeReason reson) const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index ba50b75792..3190fc9930 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -10,6 +10,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -1364,20 +1365,21 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
Type module_type = NodeProperties::GetType(module);
if (module_type.IsHeapConstant()) {
- ModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsModule();
+ SourceTextModuleRef module_constant =
+ module_type.AsHeapConstant()->Ref().AsSourceTextModule();
CellRef cell_constant = module_constant.GetCell(cell_index);
return jsgraph()->Constant(cell_constant);
}
FieldAccess field_access;
int index;
- if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
- ModuleDescriptor::kExport) {
+ if (SourceTextModuleDescriptor::GetCellIndexKind(cell_index) ==
+ SourceTextModuleDescriptor::kExport) {
field_access = AccessBuilder::ForModuleRegularExports();
index = cell_index - 1;
} else {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kImport);
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kImport);
field_access = AccessBuilder::ForModuleRegularImports();
index = -cell_index - 1;
}
@@ -1408,9 +1410,9 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* value = NodeProperties::GetValueInput(node, 1);
- DCHECK_EQ(
- ModuleDescriptor::GetCellIndexKind(OpParameter<int32_t>(node->op())),
- ModuleDescriptor::kExport);
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(
+ OpParameter<int32_t>(node->op())),
+ SourceTextModuleDescriptor::kExport);
Node* cell = BuildGetModuleCell(node);
if (cell->op()->EffectOutputCount() > 0) effect = cell;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 8bb47b43e9..1d88a27a5f 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -137,13 +137,19 @@ bool CallDescriptor::CanTailCall(const Node* node) const {
return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
}
-int CallDescriptor::CalculateFixedFrameSize() const {
+// TODO(jkummerow, sigurds): Arguably frame size calculation should be
+// keyed on code/frame type, not on CallDescriptor kind. Think about a
+// good way to organize this logic.
+int CallDescriptor::CalculateFixedFrameSize(Code::Kind code_kind) const {
switch (kind_) {
case kCallJSFunction:
return PushArgumentCount()
? OptimizedBuiltinFrameConstants::kFixedSlotCount
: StandardFrameConstants::kFixedSlotCount;
case kCallAddress:
+ if (code_kind == Code::C_WASM_ENTRY) {
+ return CWasmEntryFrameConstants::kFixedSlotCount;
+ }
return CommonFrameConstants::kFixedSlotCountAboveFp +
CommonFrameConstants::kCPSlotCount;
case kCallCodeObject:
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index e4fa6f9f20..05eb0e7d11 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -325,7 +325,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool CanTailCall(const Node* call) const;
- int CalculateFixedFrameSize() const;
+ int CalculateFixedFrameSize(Code::Kind code_kind) const;
RegList AllocatableRegisters() const { return allocatable_registers_; }
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index c42bfd839a..f9998723f3 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -419,14 +419,15 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
}
void LoadElimination::AbstractState::FieldsMerge(
- AbstractFields& this_fields, AbstractFields const& that_fields,
+ AbstractFields* this_fields, AbstractFields const& that_fields,
Zone* zone) {
- for (size_t i = 0; i < this_fields.size(); ++i) {
- if (this_fields[i]) {
+ for (size_t i = 0; i < this_fields->size(); ++i) {
+ AbstractField const*& this_field = (*this_fields)[i];
+ if (this_field) {
if (that_fields[i]) {
- this_fields[i] = this_fields[i]->Merge(that_fields[i], zone);
+ this_field = this_field->Merge(that_fields[i], zone);
} else {
- this_fields[i] = nullptr;
+ this_field = nullptr;
}
}
}
@@ -442,8 +443,8 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
}
// Merge the information we have about the fields.
- FieldsMerge(this->fields_, that->fields_, zone);
- FieldsMerge(this->const_fields_, that->const_fields_, zone);
+ FieldsMerge(&this->fields_, that->fields_, zone);
+ FieldsMerge(&this->const_fields_, that->const_fields_, zone);
// Merge the information we have about the maps.
if (this->maps_) {
@@ -923,20 +924,23 @@ Reduction LoadElimination::ReduceStoreField(Node* node,
FieldInfo const* lookup_result =
state->LookupField(object, field_index, constness);
- if (lookup_result && constness == PropertyConstness::kMutable) {
+ if (lookup_result && (constness == PropertyConstness::kMutable ||
+ V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) {
// At runtime, we should never encounter
// - any store replacing existing info with a different, incompatible
// representation, nor
// - two consecutive const stores.
// However, we may see such code statically, so we guard against
// executing it by emitting Unreachable.
- // TODO(gsps): Re-enable the double const store check once we have
- // identified other FieldAccesses that should be marked mutable
- // instead of const (cf. JSCreateLowering::AllocateFastLiteral).
+ // TODO(gsps): Re-enable the double const store check even for
+ // non-debug builds once we have identified other FieldAccesses
+ // that should be marked mutable instead of const
+ // (cf. JSCreateLowering::AllocateFastLiteral).
bool incompatible_representation =
!lookup_result->name.is_null() &&
!IsCompatible(representation, lookup_result->representation);
- if (incompatible_representation) {
+ if (incompatible_representation ||
+ constness == PropertyConstness::kConst) {
Node* control = NodeProperties::GetControlInput(node);
Node* unreachable =
graph()->NewNode(common()->Unreachable(), effect, control);
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 7658d01365..4ad1fa64a2 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -233,7 +233,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
bool FieldsEquals(AbstractFields const& this_fields,
AbstractFields const& that_fields) const;
- void FieldsMerge(AbstractFields& this_fields,
+ void FieldsMerge(AbstractFields* this_fields,
AbstractFields const& that_fields, Zone* zone);
AbstractElements const* elements_ = nullptr;
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index d6b88b13f5..41d50549b3 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -4,6 +4,7 @@
#include "src/compiler/loop-analysis.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
@@ -12,6 +13,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
#define OFFSET(x) ((x)&0x1F)
@@ -51,7 +55,8 @@ struct TempLoopInfo {
// marks on edges into/out-of the loop header nodes.
class LoopFinderImpl {
public:
- LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone)
+ LoopFinderImpl(Graph* graph, LoopTree* loop_tree, TickCounter* tick_counter,
+ Zone* zone)
: zone_(zone),
end_(graph->end()),
queue_(zone),
@@ -63,7 +68,8 @@ class LoopFinderImpl {
loops_found_(0),
width_(0),
backward_(nullptr),
- forward_(nullptr) {}
+ forward_(nullptr),
+ tick_counter_(tick_counter) {}
void Run() {
PropagateBackward();
@@ -116,6 +122,7 @@ class LoopFinderImpl {
int width_;
uint32_t* backward_;
uint32_t* forward_;
+ TickCounter* const tick_counter_;
int num_nodes() {
return static_cast<int>(loop_tree_->node_to_loop_num_.size());
@@ -183,6 +190,7 @@ class LoopFinderImpl {
Queue(end_);
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
info(node);
queue_.pop_front();
@@ -301,6 +309,7 @@ class LoopFinderImpl {
}
// Propagate forward on paths that were backward reachable from backedges.
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop_front();
queued_.Set(node, false);
@@ -512,11 +521,11 @@ class LoopFinderImpl {
}
};
-
-LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
+LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
+ Zone* zone) {
LoopTree* loop_tree =
new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone());
- LoopFinderImpl finder(graph, loop_tree, zone);
+ LoopFinderImpl finder(graph, loop_tree, tick_counter, zone);
finder.Run();
if (FLAG_trace_turbo_loop) {
finder.Print();
@@ -524,7 +533,6 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
return loop_tree;
}
-
Node* LoopTree::HeaderNode(Loop* loop) {
Node* first = *HeaderNodes(loop).begin();
if (first->opcode() == IrOpcode::kLoop) return first;
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 620a9554e0..043833a54c 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -13,6 +13,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// TODO(titzer): don't assume entry edges have a particular index.
@@ -156,7 +159,8 @@ class LoopTree : public ZoneObject {
class V8_EXPORT_PRIVATE LoopFinder {
public:
// Build a loop tree for the entire graph.
- static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
+ static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter,
+ Zone* temp_zone);
};
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index f8e78b2169..80205f80b6 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -240,6 +240,7 @@ class MachineRepresentationInferrer {
MachineType::PointerRepresentation();
break;
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -428,6 +429,7 @@ class MachineRepresentationChecker {
MachineRepresentation::kWord64);
break;
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
case IrOpcode::kTaggedPoisonOnSpeculation:
CheckValueInputIsTagged(node, 0);
break;
@@ -556,7 +558,7 @@ class MachineRepresentationChecker {
case IrOpcode::kParameter:
case IrOpcode::kProjection:
break;
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
@@ -700,6 +702,7 @@ class MachineRepresentationChecker {
case IrOpcode::kThrow:
case IrOpcode::kTypedStateValues:
case IrOpcode::kFrameState:
+ case IrOpcode::kStaticAssert:
break;
default:
if (node->op()->ValueInputCount() != 0) {
@@ -748,6 +751,11 @@ class MachineRepresentationChecker {
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressedSigned:
return;
+ case MachineRepresentation::kNone:
+ if (input->opcode() == IrOpcode::kCompressedHeapConstant) {
+ return;
+ }
+ break;
default:
break;
}
@@ -851,6 +859,9 @@ class MachineRepresentationChecker {
case MachineRepresentation::kCompressedPointer:
return;
case MachineRepresentation::kNone: {
+ if (input->opcode() == IrOpcode::kCompressedHeapConstant) {
+ return;
+ }
std::ostringstream str;
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index a6a8e87cf4..f720c29084 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -710,7 +710,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceFloat64Compare(node);
case IrOpcode::kFloat64RoundDown:
return ReduceFloat64RoundDown(node);
- case IrOpcode::kBitcastTaggedToWord: {
+ case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord: {
NodeMatcher m(node->InputAt(0));
if (m.IsBitcastWordToTaggedSigned()) {
RelaxEffectsAndControls(node);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index d2ddedc8fa..f447861aca 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -140,6 +140,7 @@ MachineType AtomicOpType(Operator const* op) {
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -244,6 +245,13 @@ MachineType AtomicOpType(Operator const* op) {
V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
+ V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Eq, Operator::kCommutative, 2, 0, 1) \
+ V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \
+ V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \
+ V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -261,6 +269,17 @@ MachineType AtomicOpType(Operator const* op) {
V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \
V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2Add, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GeU, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
@@ -338,6 +357,8 @@ MachineType AtomicOpType(Operator const* op) {
V(S128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
+ V(S1x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
@@ -439,12 +460,15 @@ MachineType AtomicOpType(Operator const* op) {
V(Exchange)
#define SIMD_LANE_OP_LIST(V) \
+ V(F64x2, 2) \
V(F32x4, 4) \
+ V(I64x2, 2) \
V(I32x4, 4) \
V(I16x8, 8) \
V(I8x16, 16)
#define SIMD_FORMAT_LIST(V) \
+ V(64x2, 64) \
V(32x4, 32) \
V(16x8, 16) \
V(8x16, 8)
@@ -754,6 +778,14 @@ struct MachineOperatorGlobalCache {
};
Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
+ struct MemoryBarrierOperator : public Operator {
+ MemoryBarrierOperator()
+ : Operator(IrOpcode::kMemoryBarrier,
+ Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0,
+ 1, 1, 0, 1, 0) {}
+ };
+ MemoryBarrierOperator kMemoryBarrier;
+
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
@@ -807,12 +839,12 @@ struct MachineOperatorGlobalCache {
};
Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
- struct DebugAbortOperator : public Operator {
- DebugAbortOperator()
- : Operator(IrOpcode::kDebugAbort, Operator::kNoThrow, "DebugAbort", 1,
- 1, 1, 0, 1, 0) {}
+ struct AbortCSAAssertOperator : public Operator {
+ AbortCSAAssertOperator()
+ : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
+ "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
};
- DebugAbortOperator kDebugAbort;
+ AbortCSAAssertOperator kAbortCSAAssert;
struct DebugBreakOperator : public Operator {
DebugBreakOperator()
@@ -1005,8 +1037,8 @@ const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
return &cache_.kBitcastMaybeObjectToWord;
}
-const Operator* MachineOperatorBuilder::DebugAbort() {
- return &cache_.kDebugAbort;
+const Operator* MachineOperatorBuilder::AbortCSAAssert() {
+ return &cache_.kAbortCSAAssert;
}
const Operator* MachineOperatorBuilder::DebugBreak() {
@@ -1017,6 +1049,10 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return new (zone_) CommentOperator(msg);
}
+const Operator* MachineOperatorBuilder::MemBarrier() {
+ return &cache_.kMemoryBarrier;
+}
+
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
LoadRepresentation rep) {
#define LOAD(Type) \
@@ -1300,6 +1336,11 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
2, 0, 0, 1, 0, 0, array);
}
+const uint8_t* S8x16ShuffleOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kS8x16Shuffle, op->opcode());
+ return OpParameter<uint8_t*>(op);
+}
+
#undef PURE_BINARY_OP_LIST_32
#undef PURE_BINARY_OP_LIST_64
#undef MACHINE_PURE_OP_LIST
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 8b1250dd30..0f81301206 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -112,6 +112,9 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE const uint8_t* S8x16ShuffleOf(Operator const* op)
+ V8_WARN_UNUSED_RESULT;
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -216,7 +219,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
AlignmentRequirements::FullUnalignedAccessSupport());
const Operator* Comment(const char* msg);
- const Operator* DebugAbort();
+ const Operator* AbortCSAAssert();
const Operator* DebugBreak();
const Operator* UnsafePointerAdd();
@@ -295,9 +298,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
- // This operator reinterprets the bits of a tagged pointer as word.
+ // This operator reinterprets the bits of a tagged pointer as a word.
const Operator* BitcastTaggedToWord();
+ // This operator reinterprets the bits of a Smi as a word.
+ const Operator* BitcastTaggedSignedToWord();
+
// This operator reinterprets the bits of a tagged MaybeObject pointer as
// word.
const Operator* BitcastMaybeObjectToWord();
@@ -462,6 +468,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Float64SilenceNaN();
// SIMD operators.
+ const Operator* F64x2Splat();
+ const Operator* F64x2Abs();
+ const Operator* F64x2Neg();
+ const Operator* F64x2ExtractLane(int32_t);
+ const Operator* F64x2ReplaceLane(int32_t);
+ const Operator* F64x2Eq();
+ const Operator* F64x2Ne();
+ const Operator* F64x2Lt();
+ const Operator* F64x2Le();
+
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
const Operator* F32x4ReplaceLane(int32_t);
@@ -483,6 +499,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Lt();
const Operator* F32x4Le();
+ const Operator* I64x2Splat();
+ const Operator* I64x2ExtractLane(int32_t);
+ const Operator* I64x2ReplaceLane(int32_t);
+ const Operator* I64x2Neg();
+ const Operator* I64x2Shl(int32_t);
+ const Operator* I64x2ShrS(int32_t);
+ const Operator* I64x2Add();
+ const Operator* I64x2Sub();
+ const Operator* I64x2Mul();
+ const Operator* I64x2Eq();
+ const Operator* I64x2Ne();
+ const Operator* I64x2GtS();
+ const Operator* I64x2GeS();
+ const Operator* I64x2ShrU(int32_t);
+ const Operator* I64x2GtU();
+ const Operator* I64x2GeU();
+
const Operator* I32x4Splat();
const Operator* I32x4ExtractLane(int32_t);
const Operator* I32x4ReplaceLane(int32_t);
@@ -585,6 +618,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
+ const Operator* S1x2AnyTrue();
+ const Operator* S1x2AllTrue();
const Operator* S1x4AnyTrue();
const Operator* S1x4AllTrue();
const Operator* S1x8AnyTrue();
@@ -620,6 +655,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
+ // Memory barrier.
+ const Operator* MemBarrier();
+
// atomic-load [base + index]
const Operator* Word32AtomicLoad(LoadRepresentation rep);
// atomic-load [base + index]
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index f43ba0d155..07ac95b4f7 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -19,7 +19,7 @@ MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect)
: broker_(broker), object_(object) {
ZoneHandleSet<Map> maps;
auto result =
- NodeProperties::InferReceiverMaps(broker_, object_, effect, &maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker_, object_, effect, &maps);
maps_.insert(maps_.end(), maps.begin(), maps.end());
maps_state_ = (result == NodeProperties::kUnreliableReceiverMaps)
? kUnreliableDontNeedGuard
@@ -65,21 +65,25 @@ bool MapInference::AllOfInstanceTypes(std::function<bool(InstanceType)> f) {
bool MapInference::AllOfInstanceTypesUnsafe(
std::function<bool(InstanceType)> f) const {
- // TODO(neis): Brokerize the MapInference.
- AllowHandleDereference allow_handle_deref;
CHECK(HaveMaps());
- return std::all_of(maps_.begin(), maps_.end(),
- [f](Handle<Map> map) { return f(map->instance_type()); });
+ auto instance_type = [this, f](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return f(map_ref.instance_type());
+ };
+ return std::all_of(maps_.begin(), maps_.end(), instance_type);
}
bool MapInference::AnyOfInstanceTypesUnsafe(
std::function<bool(InstanceType)> f) const {
- AllowHandleDereference allow_handle_deref;
CHECK(HaveMaps());
- return std::any_of(maps_.begin(), maps_.end(),
- [f](Handle<Map> map) { return f(map->instance_type()); });
+ auto instance_type = [this, f](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return f(map_ref.instance_type());
+ };
+
+ return std::any_of(maps_.begin(), maps_.end(), instance_type);
}
MapHandles const& MapInference::GetMaps() {
@@ -122,7 +126,10 @@ bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies,
const VectorSlotPair& feedback) {
if (Safe()) return true;
- auto is_stable = [](Handle<Map> map) { return map->is_stable(); };
+ auto is_stable = [this](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return map_ref.is_stable();
+ };
if (dependencies != nullptr &&
std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) {
for (Handle<Map> map : maps_) {
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 29cbb4d26c..368c060c1d 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/memory-optimizer.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -20,7 +21,8 @@ namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
- const char* function_debug_name)
+ const char* function_debug_name,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
@@ -29,7 +31,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
graph_assembler_(jsgraph, nullptr, nullptr, zone),
poisoning_level_(poisoning_level),
allocation_folding_(allocation_folding),
- function_debug_name_(function_debug_name) {}
+ function_debug_name_(function_debug_name),
+ tick_counter_(tick_counter) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -99,7 +102,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kComment:
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
@@ -108,6 +111,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoad:
case IrOpcode::kLoadElement:
case IrOpcode::kLoadField:
+ case IrOpcode::kLoadFromObject:
case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
@@ -118,6 +122,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStore:
case IrOpcode::kStoreElement:
case IrOpcode::kStoreField:
+ case IrOpcode::kStoreToObject:
case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
@@ -214,6 +219,7 @@ Node* EffectPhiForPhi(Node* phi) {
} // namespace
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
+ tick_counter_->DoTick();
DCHECK(!node->IsDead());
DCHECK_LT(0, node->op()->EffectInputCount());
switch (node->opcode()) {
@@ -296,6 +302,21 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
+ Node* allocate_builtin;
+ if (allocation_type == AllocationType::kYoung) {
+ if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInYoungGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
+ }
+ } else {
+ if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInOldGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
+ }
+ }
+
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
@@ -371,11 +392,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Bind(&call_runtime);
{
- Node* target = allocation_type == AllocationType::kYoung
- ? __
- AllocateInYoungGenerationStubConstant()
- : __
- AllocateInOldGenerationStubConstant();
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -384,7 +400,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ BitcastTaggedToWord(
- __ Call(allocate_operator_.get(), target, size));
+ __ Call(allocate_operator_.get(), allocate_builtin, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
@@ -434,11 +450,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
- Node* target = allocation_type == AllocationType::kYoung
- ? __
- AllocateInYoungGenerationStubConstant()
- : __
- AllocateInOldGenerationStubConstant();
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -446,7 +457,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
- __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
+ __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
__ Bind(&done);
value = done.PhiAt(0);
@@ -483,8 +494,6 @@ void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
- Node* offset = node->InputAt(1);
- node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
EnqueueUses(node, state);
}
@@ -494,9 +503,7 @@ void MemoryOptimizer::VisitStoreToObject(Node* node,
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
- Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
- node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index cbefcb67de..71f33fa3d7 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -10,6 +10,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -36,7 +39,7 @@ class MemoryOptimizer final {
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
- const char* function_debug_name);
+ const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
void Optimize();
@@ -158,6 +161,7 @@ class MemoryOptimizer final {
PoisoningMitigationLevel poisoning_level_;
AllocationFolding allocation_folding_;
const char* function_debug_name_;
+ TickCounter* const tick_counter_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index d6528c553a..1e00ec00f4 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -5,6 +5,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/map-inference.h"
@@ -392,7 +393,7 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
}
// static
-NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
+NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 4a23b6781d..a660fe7022 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -151,7 +151,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
kReliableReceiverMaps, // Receiver maps can be trusted.
kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
};
- static InferReceiverMapsResult InferReceiverMaps(
+ // DO NOT USE InferReceiverMapsUnsafe IN NEW CODE. Use MapInference instead.
+ static InferReceiverMapsResult InferReceiverMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return);
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 50cfdf6248..7688379e9f 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -303,7 +303,13 @@ void Node::Print() const {
void Node::Print(std::ostream& os) const {
os << *this << std::endl;
for (Node* input : this->inputs()) {
- os << " " << *input << std::endl;
+ os << " ";
+ if (input) {
+ os << *input;
+ } else {
+ os << "(NULL)";
+ }
+ os << std::endl;
}
}
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 9ac8ec581f..d621e23e3a 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -45,6 +45,7 @@
V(NumberConstant) \
V(PointerConstant) \
V(HeapConstant) \
+ V(CompressedHeapConstant) \
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
@@ -231,6 +232,7 @@
// Opcodes for VirtuaMachine-level operators.
#define SIMPLIFIED_CHANGE_OP_LIST(V) \
+ V(ChangeCompressedSignedToInt32) \
V(ChangeTaggedSignedToInt32) \
V(ChangeTaggedSignedToInt64) \
V(ChangeTaggedToInt32) \
@@ -240,6 +242,7 @@
V(ChangeTaggedToTaggedSigned) \
V(ChangeCompressedToTaggedSigned) \
V(ChangeTaggedToCompressedSigned) \
+ V(ChangeInt31ToCompressedSigned) \
V(ChangeInt31ToTaggedSigned) \
V(ChangeInt32ToTagged) \
V(ChangeInt64ToTagged) \
@@ -249,6 +252,8 @@
V(ChangeFloat64ToTaggedPointer) \
V(ChangeTaggedToBit) \
V(ChangeBitToTagged) \
+ V(ChangeUint64ToBigInt) \
+ V(TruncateBigIntToUint64) \
V(TruncateTaggedToWord32) \
V(TruncateTaggedToFloat64) \
V(TruncateTaggedToBit) \
@@ -262,6 +267,7 @@
V(CheckedUint32Div) \
V(CheckedUint32Mod) \
V(CheckedInt32Mul) \
+ V(CheckedInt32ToCompressedSigned) \
V(CheckedInt32ToTaggedSigned) \
V(CheckedInt64ToInt32) \
V(CheckedInt64ToTaggedSigned) \
@@ -318,6 +324,8 @@
V(NumberMin) \
V(NumberPow)
+#define SIMPLIFIED_BIGINT_BINOP_LIST(V) V(BigIntAdd)
+
#define SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
V(SpeculativeNumberAdd) \
V(SpeculativeNumberSubtract) \
@@ -369,6 +377,11 @@
V(NumberToUint8Clamped) \
V(NumberSilenceNaN)
+#define SIMPLIFIED_BIGINT_UNOP_LIST(V) \
+ V(BigIntAsUintN) \
+ V(BigIntNegate) \
+ V(CheckBigInt)
+
#define SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) V(SpeculativeToNumber)
#define SIMPLIFIED_OTHER_OP_LIST(V) \
@@ -382,6 +395,7 @@
V(StringCodePointAt) \
V(StringFromSingleCharCode) \
V(StringFromSingleCodePoint) \
+ V(StringFromCodePointAt) \
V(StringIndexOf) \
V(StringLength) \
V(StringToLowerCaseIntl) \
@@ -461,16 +475,24 @@
V(FindOrderedHashMapEntryForInt32Key) \
V(PoisonIndex) \
V(RuntimeAbort) \
+ V(AssertType) \
V(DateNow)
+#define SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) V(SpeculativeBigIntAdd)
+#define SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) V(SpeculativeBigIntNegate)
+
#define SIMPLIFIED_OP_LIST(V) \
SIMPLIFIED_CHANGE_OP_LIST(V) \
SIMPLIFIED_CHECKED_OP_LIST(V) \
SIMPLIFIED_COMPARE_BINOP_LIST(V) \
SIMPLIFIED_NUMBER_BINOP_LIST(V) \
+ SIMPLIFIED_BIGINT_BINOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
SIMPLIFIED_NUMBER_UNOP_LIST(V) \
+ SIMPLIFIED_BIGINT_UNOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) \
SIMPLIFIED_OTHER_OP_LIST(V)
// Opcodes for Machine-level operators.
@@ -616,7 +638,7 @@
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
MACHINE_WORD64_ATOMIC_OP_LIST(V) \
- V(DebugAbort) \
+ V(AbortCSAAssert) \
V(DebugBreak) \
V(Comment) \
V(Load) \
@@ -631,6 +653,7 @@
V(Word64ReverseBytes) \
V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
+ V(BitcastTaggedSignedToWord) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
V(TruncateFloat64ToWord32) \
@@ -692,6 +715,7 @@
V(Word32PairSar) \
V(ProtectedLoad) \
V(ProtectedStore) \
+ V(MemoryBarrier) \
V(Word32AtomicLoad) \
V(Word32AtomicStore) \
V(Word32AtomicExchange) \
@@ -718,6 +742,15 @@
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
+ V(F64x2Splat) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -739,6 +772,22 @@
V(F32x4Le) \
V(F32x4Gt) \
V(F32x4Ge) \
+ V(I64x2Splat) \
+ V(I64x2ExtractLane) \
+ V(I64x2ReplaceLane) \
+ V(I64x2Neg) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I64x2ShrU) \
+ V(I64x2GtU) \
+ V(I64x2GeU) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
V(I32x4ReplaceLane) \
@@ -844,6 +893,8 @@
V(S128Xor) \
V(S128Select) \
V(S8x16Shuffle) \
+ V(S1x2AnyTrue) \
+ V(S1x2AllTrue) \
V(S1x4AnyTrue) \
V(S1x4AllTrue) \
V(S1x8AnyTrue) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 475623f76b..8cb991ceb7 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/operation-typer.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
#include "src/execution/isolate.h"
@@ -259,7 +260,8 @@ Type OperationTyper::ConvertReceiver(Type type) {
type = Type::Intersect(type, Type::Receiver(), zone());
if (maybe_primitive) {
// ConvertReceiver maps null and undefined to the JSGlobalProxy of the
- // target function, and all other primitives are wrapped into a JSValue.
+ // target function, and all other primitives are wrapped into a
+ // JSPrimitiveWrapper.
type = Type::Union(type, Type::OtherObject(), zone());
}
return type;
@@ -577,6 +579,13 @@ Type OperationTyper::NumberSilenceNaN(Type type) {
return type;
}
+Type OperationTyper::BigIntAsUintN(Type type) {
+ DCHECK(type.Is(Type::BigInt()));
+ return Type::BigInt();
+}
+
+Type OperationTyper::CheckBigInt(Type type) { return Type::BigInt(); }
+
Type OperationTyper::NumberAdd(Type lhs, Type rhs) {
DCHECK(lhs.Is(Type::Number()));
DCHECK(rhs.Is(Type::Number()));
@@ -1111,6 +1120,26 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRight)
SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
#undef SPECULATIVE_NUMBER_BINOP
+Type OperationTyper::BigIntAdd(Type lhs, Type rhs) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ return Type::BigInt();
+}
+
+Type OperationTyper::BigIntNegate(Type type) {
+ if (type.IsNone()) return type;
+ return Type::BigInt();
+}
+
+Type OperationTyper::SpeculativeBigIntAdd(Type lhs, Type rhs) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ return Type::BigInt();
+}
+
+Type OperationTyper::SpeculativeBigIntNegate(Type type) {
+ if (type.IsNone()) return type;
+ return Type::BigInt();
+}
+
Type OperationTyper::SpeculativeToNumber(Type type) {
return ToNumber(Type::Intersect(type, Type::NumberOrOddball(), zone()));
}
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index a905662ad1..728e297a1b 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -43,14 +43,18 @@ class V8_EXPORT_PRIVATE OperationTyper {
// Unary operators.
#define DECLARE_METHOD(Name) Type Name(Type type);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
DECLARE_METHOD(ConvertReceiver)
#undef DECLARE_METHOD
-// Number binary operators.
+// Numeric binary operators.
#define DECLARE_METHOD(Name) Type Name(Type lhs, Type rhs);
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
// Comparison operators.
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index e771cef123..eb060b71e1 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -16,6 +16,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
+#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
@@ -34,6 +35,7 @@
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
+#include "src/compiler/csa-load-elimination.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/decompression-elimination.h"
#include "src/compiler/effect-control-linearizer.h"
@@ -114,7 +116,8 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, ZONE_NAME),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(isolate_, info_->zone())),
+ broker_(new JSHeapBroker(isolate_, info_->zone(),
+ info_->trace_heap_broker_enabled())),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {
@@ -266,7 +269,7 @@ class PipelineData {
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
MachineGraph* mcgraph() const { return mcgraph_; }
- Handle<Context> native_context() const {
+ Handle<NativeContext> native_context() const {
return handle(info()->native_context(), isolate());
}
Handle<JSGlobalObject> global_object() const {
@@ -324,7 +327,8 @@ class PipelineData {
Typer* CreateTyper() {
DCHECK_NULL(typer_);
- typer_ = new Typer(broker(), typer_flags_, graph());
+ typer_ =
+ new Typer(broker(), typer_flags_, graph(), &info()->tick_counter());
return typer_;
}
@@ -397,7 +401,8 @@ class PipelineData {
DCHECK_NULL(frame_);
int fixed_frame_size = 0;
if (call_descriptor != nullptr) {
- fixed_frame_size = call_descriptor->CalculateFixedFrameSize();
+ fixed_frame_size =
+ call_descriptor->CalculateFixedFrameSize(info()->code_kind());
}
frame_ = new (codegen_zone()) Frame(fixed_frame_size);
}
@@ -408,7 +413,8 @@ class PipelineData {
DCHECK_NULL(register_allocation_data_);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
- sequence(), flags, debug_name());
+ sequence(), flags, &info()->tick_counter(),
+ debug_name());
}
void InitializeOsrHelper() {
@@ -1040,6 +1046,119 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
code->set_can_have_weak_objects(true);
}
+class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
+ public:
+ WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph,
+ Code::Kind kind,
+ std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions)
+ // Note that the OptimizedCompilationInfo is not initialized at the time
+ // we pass it to the CompilationJob constructor, but it is not
+ // dereferenced there.
+ : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_,
+ "TurboFan"),
+ debug_name_(std::move(debug_name)),
+ info_(CStrVector(debug_name_.get()), graph->zone(), kind),
+ call_descriptor_(call_descriptor),
+ zone_stats_(isolate->allocator()),
+ zone_(std::move(zone)),
+ graph_(graph),
+ data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions,
+ new (zone_.get()) NodeOriginTable(graph_), nullptr, options),
+ pipeline_(&data_) {}
+
+ ~WasmHeapStubCompilationJob() = default;
+
+ protected:
+ Status PrepareJobImpl(Isolate* isolate) final;
+ Status ExecuteJobImpl() final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
+
+ private:
+ std::unique_ptr<char[]> debug_name_;
+ OptimizedCompilationInfo info_;
+ CallDescriptor* call_descriptor_;
+ ZoneStats zone_stats_;
+ std::unique_ptr<Zone> zone_;
+ Graph* graph_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
+};
+
+// static
+std::unique_ptr<OptimizedCompilationJob>
+Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone,
+ Graph* graph, Code::Kind kind,
+ std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions) {
+ return base::make_unique<WasmHeapStubCompilationJob>(
+ isolate, call_descriptor, std::move(zone), graph, kind,
+ std::move(debug_name), options, source_positions);
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
+ Isolate* isolate) {
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(
+ &info_, isolate->GetTurboStatistics(), &zone_stats_));
+ pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
+ }
+ if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data_.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling method " << info_.GetDebugName().get()
+ << " using TurboFan" << std::endl;
+ }
+ if (info_.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ StdoutStream{} << "-- wasm stub " << Code::Kind2String(info_.code_kind())
+ << " graph -- " << std::endl
+ << AsRPO(*data_.graph());
+ }
+
+ if (info_.trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(&info_, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info_.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+ pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
+ return CompilationJob::SUCCEEDED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
+ pipeline_.ComputeScheduledGraph();
+ if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
+ return CompilationJob::SUCCEEDED;
+ }
+ return CompilationJob::FAILED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
+ Isolate* isolate) {
+ Handle<Code> code;
+ if (pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code) &&
+ pipeline_.CommitDependencies(code)) {
+ info_.SetCode(code);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code) {
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ code->Disassemble(compilation_info()->GetDebugName().get(), os);
+ }
+#endif
+ return SUCCEEDED;
+ }
+ return FAILED;
+}
+
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name());
@@ -1065,7 +1184,7 @@ struct GraphBuilderPhase {
handle(data->info()->closure()->feedback_vector(), data->isolate()),
data->info()->osr_offset(), data->jsgraph(), frequency,
data->source_positions(), data->native_context(),
- SourcePosition::kNotInlined, flags);
+ SourcePosition::kNotInlined, flags, &data->info()->tick_counter());
}
};
@@ -1102,7 +1221,7 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
Isolate* isolate = data->isolate();
OptimizedCompilationInfo* info = data->info();
- GraphReducer graph_reducer(temp_zone, data->graph(),
+ GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1196,6 +1315,7 @@ struct UntyperPhase {
}
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
RemoveTypeReducer remove_type_reducer;
AddReducer(data, &graph_reducer, &remove_type_reducer);
@@ -1216,6 +1336,7 @@ struct CopyMetadataForConcurrentCompilePhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
@@ -1242,13 +1363,13 @@ struct SerializationPhase {
if (data->info()->is_source_positions_enabled()) {
flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
}
- if (data->info()->is_osr()) {
- flags |= SerializerForBackgroundCompilationFlag::kOsr;
+ if (data->info()->is_analyze_environment_liveness()) {
+ flags |=
+ SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
- SerializerForBackgroundCompilation serializer(
- data->broker(), data->dependencies(), temp_zone,
- data->info()->closure(), flags);
- serializer.Run();
+ RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(),
+ temp_zone, data->info()->closure(),
+ flags, data->info()->osr_offset());
}
};
@@ -1257,6 +1378,7 @@ struct TypedLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1292,9 +1414,12 @@ struct EscapeAnalysisPhase {
static const char* phase_name() { return "V8.TFEscapeAnalysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
+ EscapeAnalysis escape_analysis(data->jsgraph(),
+ &data->info()->tick_counter(), temp_zone);
escape_analysis.ReduceGraph();
- GraphReducer reducer(temp_zone, data->graph(), data->jsgraph()->Dead());
+ GraphReducer reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
escape_analysis.analysis_result(),
temp_zone);
@@ -1305,13 +1430,28 @@ struct EscapeAnalysisPhase {
}
};
+struct TypeAssertionsPhase {
+ static const char* phase_name() { return "V8.TFTypeAssertions"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ AddReducer(data, &graph_reducer, &type_assertions);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct SimplifiedLoweringPhase {
static const char* phase_name() { return "V8.TFSimplifiedLowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel());
+ data->info()->GetPoisoningMitigationLevel(),
+ &data->info()->tick_counter());
lowering.LowerAllNodes();
}
};
@@ -1325,8 +1465,8 @@ struct LoopPeelingPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
- LoopTree* loop_tree =
- LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
+ LoopTree* loop_tree = LoopFinder::BuildLoopTree(
+ data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
data->source_positions(), data->node_origins())
.PeelInnerLoopsOfTree();
@@ -1346,6 +1486,7 @@ struct GenericLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer);
AddReducer(data, &graph_reducer, &generic_lowering);
@@ -1358,6 +1499,7 @@ struct EarlyOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1384,7 +1526,8 @@ struct ControlFlowOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
ControlFlowOptimizer optimizer(data->graph(), data->common(),
- data->machine(), temp_zone);
+ data->machine(),
+ &data->info()->tick_counter(), temp_zone);
optimizer.Optimize();
}
};
@@ -1406,8 +1549,9 @@ struct EffectControlLinearizationPhase {
// fix the effect and control flow for nodes with low-level side
// effects (such as changing representation to tagged or
// 'floating' allocation regions.)
- Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kTempSchedule);
+ Schedule* schedule = Scheduler::ComputeSchedule(
+ temp_zone, data->graph(), Scheduler::kTempSchedule,
+ &data->info()->tick_counter());
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
TraceSchedule(data->info(), data, schedule,
"effect linearization schedule");
@@ -1433,6 +1577,7 @@ struct EffectControlLinearizationPhase {
// doing a common operator reducer and dead code elimination just before
// it, to eliminate conditional deopts with a constant condition.
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1455,7 +1600,8 @@ struct StoreStoreEliminationPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
- StoreStoreElimination::Run(data->jsgraph(), temp_zone);
+ StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
+ temp_zone);
}
};
@@ -1464,6 +1610,7 @@ struct LoadEliminationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1513,7 +1660,7 @@ struct MemoryOptimizationPhase {
data->info()->is_allocation_folding_enabled()
? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
: MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
- data->debug_name());
+ data->debug_name(), &data->info()->tick_counter());
optimizer.Optimize();
}
};
@@ -1523,6 +1670,7 @@ struct LateOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1555,6 +1703,7 @@ struct MachineOperatorOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
@@ -1565,11 +1714,38 @@ struct MachineOperatorOptimizationPhase {
}
};
+struct CsaEarlyOptimizationPhase {
+ static const char* phase_name() { return "V8.CSAEarlyOptimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->broker(), data->common(),
+ data->machine(), temp_zone);
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
+ CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &load_elimination);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct CsaOptimizationPhase {
static const char* phase_name() { return "V8.CSAOptimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1621,9 +1797,10 @@ struct ComputeSchedulePhase {
void Run(PipelineData* data, Zone* temp_zone) {
Schedule* schedule = Scheduler::ComputeSchedule(
- temp_zone, data->graph(), data->info()->is_splitting_enabled()
- ? Scheduler::kSplitNodes
- : Scheduler::kNoFlags);
+ temp_zone, data->graph(),
+ data->info()->is_splitting_enabled() ? Scheduler::kSplitNodes
+ : Scheduler::kNoFlags,
+ &data->info()->tick_counter());
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
data->set_schedule(schedule);
}
@@ -1671,6 +1848,7 @@ struct InstructionSelectionPhase {
data->info()->switch_jump_table_enabled()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
+ &data->info()->tick_counter(),
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
@@ -1920,7 +2098,8 @@ struct PrintGraphPhase {
Schedule* schedule = data->schedule();
if (schedule == nullptr) {
schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kNoFlags);
+ Scheduler::kNoFlags,
+ &info->tick_counter());
}
AllowHandleDereference allow_deref;
@@ -2089,6 +2268,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
}
+ if (FLAG_assert_types) {
+ Run<TypeAssertionsPhase>();
+ RunPrintAndVerify(TypeAssertionsPhase::phase_name());
+ }
+
// Perform simplified lowering. This has to run w/o the Typer decorator,
// because we cannot compute meaningful types anyways, and the computed types
// might even conflict with the representation/truncation logic.
@@ -2201,6 +2385,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
}
+ pipeline.Run<CsaEarlyOptimizationPhase>();
+ pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
+
// Optimize memory access and allocation operations.
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
@@ -2331,58 +2518,6 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
}
// static
-MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Code::Kind kind, const char* debug_name, const AssemblerOptions& options,
- SourcePositionTable* source_positions) {
- OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
- // Construct a pipeline for scheduling and code generation.
- ZoneStats zone_stats(isolate->allocator());
- NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph);
- PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
- source_positions, node_positions, nullptr, options);
- std::unique_ptr<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(
- &info, isolate->GetTurboStatistics(), &zone_stats));
- pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
- }
-
- PipelineImpl pipeline(&data);
-
- if (info.trace_turbo_json_enabled() ||
- info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info.GetDebugName().get()
- << " using TurboFan" << std::endl;
- }
-
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
- << std::endl
- << AsRPO(*graph);
- }
-
- if (info.trace_turbo_json_enabled()) {
- TurboJsonFile json_of(&info, std::ios_base::trunc);
- json_of << "{\"function\":\"" << info.GetDebugName().get()
- << "\", \"source\":\"\",\n\"phases\":[";
- }
-
- pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
- pipeline.ComputeScheduledGraph();
-
- Handle<Code> code;
- if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
- pipeline.CommitDependencies(code)) {
- return code;
- }
- return MaybeHandle<Code>();
-}
-
-// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
std::unique_ptr<JSHeapBroker>* out_broker) {
@@ -2449,11 +2584,11 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
}
// static
-OptimizedCompilationJob* Pipeline::NewCompilationJob(
+std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, bool has_script) {
Handle<SharedFunctionInfo> shared =
handle(function->shared(), function->GetIsolate());
- return new PipelineCompilationJob(isolate, shared, function);
+ return base::make_unique<PipelineCompilationJob>(isolate, shared, function);
}
// static
@@ -2490,13 +2625,14 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
data.BeginPhaseKind("V8.WasmOptimization");
- const bool is_asm_js = module->origin == wasm::kAsmJsOrigin;
+ const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
data.info()->MarkAsSplittingEnabled();
}
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization");
GraphReducer graph_reducer(scope.zone(), data.graph(),
+ &data.info()->tick_counter(),
data.mcgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
data.common(), scope.zone());
@@ -2515,6 +2651,7 @@ void Pipeline::GenerateCodeForWasmFunction(
} else {
PipelineRunScope scope(&data, "V8.WasmBaseOptimization");
GraphReducer graph_reducer(scope.zone(), data.graph(),
+ &data.info()->tick_counter(),
data.mcgraph()->Dead());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
AddReducer(&data, &graph_reducer, &value_numbering);
@@ -2870,8 +3007,9 @@ bool PipelineImpl::SelectInstructionsAndAssemble(
}
MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
- if (!SelectInstructionsAndAssemble(call_descriptor))
+ if (!SelectInstructionsAndAssemble(call_descriptor)) {
return MaybeHandle<Code>();
+ }
return FinalizeCode();
}
@@ -2928,6 +3066,9 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (data->info()->is_turbo_preprocess_ranges()) {
flags |= RegisterAllocationFlag::kTurboPreprocessRanges;
}
+ if (data->info()->trace_turbo_allocation_enabled()) {
+ flags |= RegisterAllocationFlag::kTraceAllocation;
+ }
data->InitializeRegisterAllocationData(config, call_descriptor, flags);
if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 7f9a242d98..6898faaad0 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -41,9 +41,8 @@ class SourcePositionTable;
class Pipeline : public AllStatic {
public:
// Returns a new compilation job for the given JavaScript function.
- static OptimizedCompilationJob* NewCompilationJob(Isolate* isolate,
- Handle<JSFunction> function,
- bool has_script);
+ static std::unique_ptr<OptimizedCompilationJob> NewCompilationJob(
+ Isolate* isolate, Handle<JSFunction> function, bool has_script);
// Run the pipeline for the WebAssembly compilation info.
static void GenerateCodeForWasmFunction(
@@ -60,11 +59,11 @@ class Pipeline : public AllStatic {
const char* debug_name, const AssemblerOptions& assembler_options,
SourcePositionTable* source_positions = nullptr);
- // Run the pipeline on a machine graph and generate code.
- static MaybeHandle<Code> GenerateCodeForWasmHeapStub(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Code::Kind kind, const char* debug_name,
- const AssemblerOptions& assembler_options,
+ // Returns a new compilation job for a wasm heap stub.
+ static std::unique_ptr<OptimizedCompilationJob> NewWasmHeapStubCompilationJob(
+ Isolate* isolate, CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind,
+ std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
SourcePositionTable* source_positions = nullptr);
// Run the pipeline on a machine graph and generate code.
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index dafd481797..99a06ef874 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -127,7 +127,7 @@ Node* PropertyAccessBuilder::ResolveHolder(
PropertyAccessInfo const& access_info, Node* receiver) {
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- return jsgraph()->Constant(holder);
+ return jsgraph()->Constant(ObjectRef(broker(), holder));
}
return receiver;
}
@@ -151,7 +151,16 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
Node* receiver) {
+ // TODO(neis): Eliminate FastPropertyAt call below by doing the lookup during
+ // acccess info computation. Requires extra care in the case where the
+ // receiver is the holder.
+ AllowCodeDependencyChange dependency_change_;
+ AllowHandleAllocation handle_allocation_;
+ AllowHandleDereference handle_dereference_;
+ AllowHeapAllocation heap_allocation_;
+
if (!access_info.IsDataConstant()) return nullptr;
+
// First, determine if we have a constant holder to load from.
Handle<JSObject> holder;
// If {access_info} has a holder, just use it.
@@ -165,7 +174,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
MapRef receiver_map = m.Ref(broker()).map();
if (std::find_if(access_info.receiver_maps().begin(),
access_info.receiver_maps().end(), [&](Handle<Map> map) {
- return map.address() == receiver_map.object().address();
+ return map.equals(receiver_map.object());
}) == access_info.receiver_maps().end()) {
// The map of the receiver is not in the feedback, let us bail out.
return nullptr;
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index dc1edc710d..277c89c932 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -556,8 +556,8 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
current_block_ = nullptr;
}
-void RawMachineAssembler::DebugAbort(Node* message) {
- AddNode(machine()->DebugAbort(), message);
+void RawMachineAssembler::AbortCSAAssert(Node* message) {
+ AddNode(machine()->AbortCSAAssert(), message);
}
void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 67326ac730..890c38c551 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -732,6 +732,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* BitcastTaggedToWord(Node* a) {
return AddNode(machine()->BitcastTaggedToWord(), a);
}
+ Node* BitcastTaggedSignedToWord(Node* a) {
+ return AddNode(machine()->BitcastTaggedSignedToWord(), a);
+ }
Node* BitcastMaybeObjectToWord(Node* a) {
return AddNode(machine()->BitcastMaybeObjectToWord(), a);
}
@@ -1016,7 +1019,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
- void DebugAbort(Node* message);
+ void AbortCSAAssert(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const std::string& msg);
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 0822e47bba..9b401bcf43 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -19,6 +19,7 @@ RedundancyElimination::~RedundancyElimination() = default;
Reduction RedundancyElimination::Reduce(Node* node) {
if (node_checks_.Get(node)) return NoChange();
switch (node->opcode()) {
+ case IrOpcode::kCheckBigInt:
case IrOpcode::kCheckBounds:
case IrOpcode::kCheckEqualsInternalizedString:
case IrOpcode::kCheckEqualsSymbol:
@@ -147,7 +148,9 @@ bool CheckSubsumes(Node const* a, Node const* b) {
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckNumber:
+ case IrOpcode::kCheckBigInt:
break;
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedInt64ToInt32:
case IrOpcode::kCheckedInt64ToTaggedSigned:
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index cebd87e73d..7a4577b799 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/type-cache.h"
@@ -25,12 +26,14 @@ const char* Truncation::description() const {
return "truncate-to-bool";
case TruncationKind::kWord32:
return "truncate-to-word32";
- case TruncationKind::kFloat64:
+ case TruncationKind::kWord64:
+ return "truncate-to-word64";
+ case TruncationKind::kOddballAndBigIntToNumber:
switch (identify_zeros()) {
case kIdentifyZeros:
- return "truncate-to-float64 (identify zeros)";
+ return "truncate-oddball&bigint-to-number (identify zeros)";
case kDistinguishZeros:
- return "truncate-to-float64 (distinguish zeros)";
+ return "truncate-oddball&bigint-to-number (distinguish zeros)";
}
case TruncationKind::kAny:
switch (identify_zeros()) {
@@ -45,22 +48,25 @@ const char* Truncation::description() const {
// Partial order for truncations:
//
-// kAny <-------+
-// ^ |
-// | |
-// kFloat64 |
-// ^ |
-// / |
-// kWord32 kBool
-// ^ ^
-// \ /
-// \ /
-// \ /
-// \ /
-// \ /
-// kNone
+// kAny <-------+
+// ^ |
+// | |
+// kOddballAndBigIntToNumber |
+// ^ |
+// / |
+// kWord64 |
+// ^ |
+// | |
+// kWord32 kBool
+// ^ ^
+// \ /
+// \ /
+// \ /
+// \ /
+// \ /
+// kNone
//
-// TODO(jarin) We might consider making kBool < kFloat64.
+// TODO(jarin) We might consider making kBool < kOddballAndBigIntToNumber.
// static
Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
@@ -68,9 +74,9 @@ Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
if (LessGeneral(rep1, rep2)) return rep2;
if (LessGeneral(rep2, rep1)) return rep1;
// Handle the generalization of float64-representable values.
- if (LessGeneral(rep1, TruncationKind::kFloat64) &&
- LessGeneral(rep2, TruncationKind::kFloat64)) {
- return TruncationKind::kFloat64;
+ if (LessGeneral(rep1, TruncationKind::kOddballAndBigIntToNumber) &&
+ LessGeneral(rep2, TruncationKind::kOddballAndBigIntToNumber)) {
+ return TruncationKind::kOddballAndBigIntToNumber;
}
// Handle the generalization of any-representable values.
if (LessGeneral(rep1, TruncationKind::kAny) &&
@@ -101,9 +107,16 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny;
case TruncationKind::kWord32:
return rep2 == TruncationKind::kWord32 ||
- rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
- case TruncationKind::kFloat64:
- return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord64:
+ return rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
+ case TruncationKind::kOddballAndBigIntToNumber:
+ return rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
case TruncationKind::kAny:
return rep2 == TruncationKind::kAny;
}
@@ -125,10 +138,11 @@ bool IsWord(MachineRepresentation rep) {
} // namespace
-RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
+RepresentationChanger::RepresentationChanger(JSGraph* jsgraph,
+ JSHeapBroker* broker)
: cache_(TypeCache::Get()),
jsgraph_(jsgraph),
- isolate_(isolate),
+ broker_(broker),
testing_type_errors_(false),
type_error_(false) {}
@@ -169,7 +183,8 @@ Node* RepresentationChanger::GetRepresentationFor(
use_node, use_info);
case MachineRepresentation::kTaggedPointer:
DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
- use_info.type_check() == TypeCheckKind::kHeapObject);
+ use_info.type_check() == TypeCheckKind::kHeapObject ||
+ use_info.type_check() == TypeCheckKind::kBigInt);
return GetTaggedPointerRepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kTagged:
@@ -207,7 +222,8 @@ Node* RepresentationChanger::GetRepresentationFor(
use_info);
case MachineRepresentation::kWord64:
DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
- use_info.type_check() == TypeCheckKind::kSigned64);
+ use_info.type_check() == TypeCheckKind::kSigned64 ||
+ use_info.type_check() == TypeCheckKind::kBigInt);
return GetWord64RepresentationFor(node, output_rep, output_type, use_node,
use_info);
case MachineRepresentation::kSimd128:
@@ -418,6 +434,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else if (output_type.Is(Type::BigInt())) {
+ op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -447,16 +465,37 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for TaggedSigned output representation.
op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
+ } else if (IsAnyTagged(output_rep) &&
+ (use_info.type_check() == TypeCheckKind::kBigInt ||
+ output_type.Is(Type::BigInt()))) {
+ if (output_type.Is(Type::BigInt())) {
+ return node;
+ }
+ op = simplified()->CheckBigInt(use_info.feedback());
} else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ if (use_info.type_check() == TypeCheckKind::kBigInt &&
+ !output_type.Is(Type::BigInt())) {
+ node = InsertChangeCompressedToTagged(node);
+ op = simplified()->CheckBigInt(use_info.feedback());
+ } else {
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ }
+ } else if (output_rep == MachineRepresentation::kCompressed &&
+ output_type.Is(Type::BigInt())) {
op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kCompressed &&
+ use_info.type_check() == TypeCheckKind::kBigInt) {
+ node = InsertChangeCompressedToTagged(node);
+ op = simplified()->CheckBigInt(use_info.feedback());
} else if (CanBeCompressedSigned(output_rep) &&
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else {
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts
+ // for CompressedSigned output representation.
+ op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback());
}
- // TODO(turbofan): Consider adding a Bailout operator that just deopts
- // for CompressedSigned output representation.
- op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -535,6 +574,9 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (output_type.Is(cache_->kSafeInteger)) {
// int64 -> tagged
op = simplified()->ChangeInt64ToTagged();
+ } else if (output_type.Is(Type::BigInt())) {
+ // uint64 -> BigInt
+ op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
@@ -560,7 +602,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
op = simplified()->ChangeUint32ToTagged();
} else if (output_type.Is(Type::Number()) ||
(output_type.Is(Type::NumberOrOddball()) &&
- truncation.IsUsedAsFloat64())) {
+ truncation.TruncatesOddballAndBigIntToNumber())) {
op = simplified()->ChangeFloat64ToTagged(
output_type.Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
@@ -569,7 +611,11 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
}
- } else if (IsAnyCompressed(output_rep)) {
+ } else if (output_rep == MachineRepresentation::kCompressedSigned) {
+ op = machine()->ChangeCompressedSignedToTaggedSigned();
+ } else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kCompressed) {
op = machine()->ChangeCompressedToTagged();
} else {
return TypeError(node, output_rep, output_type,
@@ -606,9 +652,20 @@ Node* RepresentationChanger::GetCompressedSignedRepresentationFor(
use_node, use_info);
op = machine()->ChangeTaggedSignedToCompressedSigned();
} else if (IsWord(output_rep)) {
- node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
- use_node, use_info);
- op = machine()->ChangeTaggedSignedToCompressedSigned();
+ if (output_type.Is(Type::Signed31())) {
+ op = simplified()->ChangeInt31ToCompressedSigned();
+ } else if (output_type.Is(Type::Signed32())) {
+ if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback());
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kCompressedSigned);
+ }
+ } else {
+ node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = machine()->ChangeTaggedSignedToCompressedSigned();
+ }
} else if (output_rep == MachineRepresentation::kWord64) {
node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
use_node, use_info);
@@ -645,10 +702,11 @@ Node* RepresentationChanger::GetCompressedPointerRepresentationFor(
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
op = machine()->ChangeTaggedPointerToCompressedPointer();
+ } else {
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts
+ // for TaggedSigned output representation.
+ op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback());
}
- // TODO(turbofan): Consider adding a Bailout operator that just deopts
- // for TaggedSigned output representation.
- op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback());
} else if (output_rep == MachineRepresentation::kBit) {
// TODO(v8:8977): specialize here and below
node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
@@ -810,11 +868,14 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* use_node, UseInfo use_info) {
NumberMatcher m(node);
if (m.HasValue()) {
+ // BigInts are not used as number constants.
+ DCHECK(use_info.type_check() != TypeCheckKind::kBigInt);
switch (use_info.type_check()) {
case TypeCheckKind::kNone:
case TypeCheckKind::kNumber:
case TypeCheckKind::kNumberOrOddball:
return jsgraph()->Float64Constant(m.Value());
+ case TypeCheckKind::kBigInt:
case TypeCheckKind::kHeapObject:
case TypeCheckKind::kSigned32:
case TypeCheckKind::kSigned64:
@@ -843,9 +904,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kBit) {
CHECK(output_type.Is(Type::Boolean()));
- // TODO(tebbi): TypeCheckKind::kNumberOrOddball should imply Float64
- // truncation, since this exactly means that we treat Oddballs as Numbers.
- if (use_info.truncation().IsUsedAsFloat64() ||
+ if (use_info.truncation().TruncatesOddballAndBigIntToNumber() ||
use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = machine()->ChangeUint32ToFloat64();
} else {
@@ -867,7 +926,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
} else if (output_type.Is(Type::Number())) {
op = simplified()->ChangeTaggedToFloat64();
} else if ((output_type.Is(Type::NumberOrOddball()) &&
- use_info.truncation().IsUsedAsFloat64()) ||
+ use_info.truncation().TruncatesOddballAndBigIntToNumber()) ||
output_type.Is(Type::NumberOrHole())) {
// JavaScript 'null' is an Oddball that results in +0 when truncated to
// Number. In a context like -0 == null, which must evaluate to false,
@@ -1063,11 +1122,15 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
output_type, use_node, use_info);
} else if (output_rep == MachineRepresentation::kCompressedSigned) {
// TODO(v8:8977): Specialise here
- op = machine()->ChangeCompressedSignedToTaggedSigned();
- node = jsgraph()->graph()->NewNode(op, node);
- return GetWord32RepresentationFor(node,
- MachineRepresentation::kTaggedSigned,
- output_type, use_node, use_info);
+ if (output_type.Is(Type::SignedSmall())) {
+ op = simplified()->ChangeCompressedSignedToInt32();
+ } else {
+ op = machine()->ChangeCompressedSignedToTaggedSigned();
+ node = jsgraph()->graph()->NewNode(op, node);
+ return GetWord32RepresentationFor(node,
+ MachineRepresentation::kTaggedSigned,
+ output_type, use_node, use_info);
+ }
} else if (output_rep == MachineRepresentation::kCompressedPointer) {
// TODO(v8:8977): Specialise here
op = machine()->ChangeCompressedPointerToTaggedPointer();
@@ -1252,6 +1315,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
}
break;
}
+ case IrOpcode::kHeapConstant: {
+ HeapObjectMatcher m(node);
+ if (m.HasValue() && m.Ref(broker_).IsBigInt()) {
+ auto bigint = m.Ref(broker_).AsBigInt();
+ return jsgraph()->Int64Constant(
+ static_cast<int64_t>(bigint.AsUint64()));
+ }
+ break;
+ }
default:
break;
}
@@ -1272,9 +1344,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
unreachable);
} else if (IsWord(output_rep)) {
- if (output_type.Is(Type::Unsigned32())) {
+ if (output_type.Is(Type::Unsigned32OrMinusZero())) {
+ // uint32 -> uint64
+ CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()),
+ use_info.truncation().IdentifiesZeroAndMinusZero());
op = machine()->ChangeUint32ToUint64();
- } else if (output_type.Is(Type::Signed32())) {
+ } else if (output_type.Is(Type::Signed32OrMinusZero())) {
+ // int32 -> int64
+ CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()),
+ use_info.truncation().IdentifiesZeroAndMinusZero());
op = machine()->ChangeInt32ToInt64();
} else {
return TypeError(node, output_rep, output_type,
@@ -1323,6 +1401,13 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
+ } else if (IsAnyTagged(output_rep) &&
+ use_info.truncation().IsUsedAsWord64() &&
+ (use_info.type_check() == TypeCheckKind::kBigInt ||
+ output_type.Is(Type::BigInt()))) {
+ node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = simplified()->TruncateBigIntToUint64();
} else if (CanBeTaggedPointer(output_rep)) {
if (output_type.Is(cache_->kInt64)) {
op = simplified()->ChangeTaggedToInt64();
@@ -1656,6 +1741,13 @@ Node* RepresentationChanger::InsertTruncateInt64ToInt32(Node* node) {
return jsgraph()->graph()->NewNode(machine()->TruncateInt64ToInt32(), node);
}
+Node* RepresentationChanger::InsertChangeCompressedToTagged(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeCompressedToTagged(),
+ node);
+}
+
+Isolate* RepresentationChanger::isolate() const { return broker_->isolate(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index e8bb3f12ac..d338667603 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -29,8 +29,13 @@ class Truncation final {
static Truncation Word32() {
return Truncation(TruncationKind::kWord32, kIdentifyZeros);
}
- static Truncation Float64(IdentifyZeros identify_zeros = kDistinguishZeros) {
- return Truncation(TruncationKind::kFloat64, identify_zeros);
+ static Truncation Word64() {
+ return Truncation(TruncationKind::kWord64, kIdentifyZeros);
+ }
+ static Truncation OddballAndBigIntToNumber(
+ IdentifyZeros identify_zeros = kDistinguishZeros) {
+ return Truncation(TruncationKind::kOddballAndBigIntToNumber,
+ identify_zeros);
}
static Truncation Any(IdentifyZeros identify_zeros = kDistinguishZeros) {
return Truncation(TruncationKind::kAny, identify_zeros);
@@ -50,8 +55,11 @@ class Truncation final {
bool IsUsedAsWord32() const {
return LessGeneral(kind_, TruncationKind::kWord32);
}
- bool IsUsedAsFloat64() const {
- return LessGeneral(kind_, TruncationKind::kFloat64);
+ bool IsUsedAsWord64() const {
+ return LessGeneral(kind_, TruncationKind::kWord64);
+ }
+ bool TruncatesOddballAndBigIntToNumber() const {
+ return LessGeneral(kind_, TruncationKind::kOddballAndBigIntToNumber);
}
bool IdentifiesUndefinedAndZero() {
return LessGeneral(kind_, TruncationKind::kWord32) ||
@@ -81,13 +89,15 @@ class Truncation final {
kNone,
kBool,
kWord32,
- kFloat64,
+ kWord64,
+ kOddballAndBigIntToNumber,
kAny
};
explicit Truncation(TruncationKind kind, IdentifyZeros identify_zeros)
: kind_(kind), identify_zeros_(identify_zeros) {
- DCHECK(kind == TruncationKind::kAny || kind == TruncationKind::kFloat64 ||
+ DCHECK(kind == TruncationKind::kAny ||
+ kind == TruncationKind::kOddballAndBigIntToNumber ||
identify_zeros == kIdentifyZeros);
}
TruncationKind kind() const { return kind_; }
@@ -109,7 +119,8 @@ enum class TypeCheckKind : uint8_t {
kSigned64,
kNumber,
kNumberOrOddball,
- kHeapObject
+ kHeapObject,
+ kBigInt,
};
inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
@@ -128,6 +139,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "NumberOrOddball";
case TypeCheckKind::kHeapObject:
return os << "HeapObject";
+ case TypeCheckKind::kBigInt:
+ return os << "BigInt";
}
UNREACHABLE();
}
@@ -160,6 +173,13 @@ class UseInfo {
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
+ static UseInfo CheckedBigIntTruncatingWord64(const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64(),
+ TypeCheckKind::kBigInt, feedback);
+ }
static UseInfo Word64() {
return UseInfo(MachineRepresentation::kWord64, Truncation::Any());
}
@@ -175,7 +195,7 @@ class UseInfo {
static UseInfo TruncatingFloat64(
IdentifyZeros identify_zeros = kDistinguishZeros) {
return UseInfo(MachineRepresentation::kFloat64,
- Truncation::Float64(identify_zeros));
+ Truncation::OddballAndBigIntToNumber(identify_zeros));
}
static UseInfo AnyTagged() {
return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
@@ -203,6 +223,12 @@ class UseInfo {
return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
TypeCheckKind::kHeapObject, feedback);
}
+
+ static UseInfo CheckedBigIntAsTaggedPointer(const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
+ TypeCheckKind::kBigInt, feedback);
+ }
+
static UseInfo CheckedSignedSmallAsTaggedSigned(
const VectorSlotPair& feedback,
IdentifyZeros identify_zeros = kDistinguishZeros) {
@@ -240,8 +266,6 @@ class UseInfo {
}
static UseInfo CheckedNumberOrOddballAsFloat64(
IdentifyZeros identify_zeros, const VectorSlotPair& feedback) {
- // TODO(tebbi): We should use Float64 truncation here, since this exactly
- // means that we treat Oddballs as Numbers.
return UseInfo(MachineRepresentation::kFloat64,
Truncation::Any(identify_zeros),
TypeCheckKind::kNumberOrOddball, feedback);
@@ -287,7 +311,7 @@ class UseInfo {
// Eagerly folds any representation changes for constants.
class V8_EXPORT_PRIVATE RepresentationChanger final {
public:
- RepresentationChanger(JSGraph* jsgraph, Isolate* isolate);
+ RepresentationChanger(JSGraph* jsgraph, JSHeapBroker* broker);
// Changes representation from {output_type} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
@@ -317,7 +341,7 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
private:
TypeCache const* cache_;
JSGraph* jsgraph_;
- Isolate* isolate_;
+ JSHeapBroker* broker_;
friend class RepresentationChangerTester; // accesses the below fields.
@@ -371,12 +395,13 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
Node* InsertChangeTaggedSignedToInt32(Node* node);
Node* InsertChangeTaggedToFloat64(Node* node);
Node* InsertChangeUint32ToFloat64(Node* node);
+ Node* InsertChangeCompressedToTagged(Node* node);
Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
Node* InsertTruncateInt64ToInt32(Node* node);
Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason);
JSGraph* jsgraph() const { return jsgraph_; }
- Isolate* isolate() const { return isolate_; }
+ Isolate* isolate() const;
Factory* factory() const { return isolate()->factory(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index b57162f7f5..25919bb3b3 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/adapters.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
@@ -26,7 +27,7 @@ namespace compiler {
} while (false)
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
- size_t node_count_hint)
+ size_t node_count_hint, TickCounter* tick_counter)
: zone_(zone),
graph_(graph),
schedule_(schedule),
@@ -34,12 +35,14 @@ Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
schedule_queue_(zone),
- node_data_(zone) {
+ node_data_(zone),
+ tick_counter_(tick_counter) {
node_data_.reserve(node_count_hint);
node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
}
-Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags,
+ TickCounter* tick_counter) {
Zone* schedule_zone =
(flags & Scheduler::kTempSchedule) ? zone : graph->zone();
@@ -50,7 +53,8 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
Schedule* schedule =
new (schedule_zone) Schedule(schedule_zone, node_count_hint);
- Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);
+ Scheduler scheduler(zone, graph, schedule, flags, node_count_hint,
+ tick_counter);
scheduler.BuildCFG();
scheduler.ComputeSpecialRPONumbering();
@@ -65,7 +69,6 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
return schedule;
}
-
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
SchedulerData def = {schedule_->start(), 0, kUnknown};
return def;
@@ -258,6 +261,7 @@ class CFGBuilder : public ZoneObject {
Queue(scheduler_->graph_->end());
while (!queue_.empty()) { // Breadth-first backwards traversal.
+ scheduler_->tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
int max = NodeProperties::PastControlIndex(node);
@@ -283,6 +287,7 @@ class CFGBuilder : public ZoneObject {
component_end_ = schedule_->block(exit);
scheduler_->equivalence_->Run(exit);
while (!queue_.empty()) { // Breadth-first backwards traversal.
+ scheduler_->tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
@@ -728,11 +733,10 @@ class SpecialRPONumberer : public ZoneObject {
}
};
- int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
- BasicBlock* child, int unvisited) {
+ int Push(int depth, BasicBlock* child, int unvisited) {
if (child->rpo_number() == unvisited) {
- stack[depth].block = child;
- stack[depth].index = 0;
+ stack_[depth].block = child;
+ stack_[depth].index = 0;
child->set_rpo_number(kBlockOnStack);
return depth + 1;
}
@@ -780,7 +784,7 @@ class SpecialRPONumberer : public ZoneObject {
DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
previous_block_count_ = schedule_->BasicBlockCount();
- int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
+ int stack_depth = Push(0, entry, kBlockUnvisited1);
int num_loops = static_cast<int>(loops_.size());
while (stack_depth > 0) {
@@ -802,7 +806,7 @@ class SpecialRPONumberer : public ZoneObject {
} else {
// Push the successor onto the stack.
DCHECK_EQ(kBlockUnvisited1, succ->rpo_number());
- stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
+ stack_depth = Push(stack_depth, succ, kBlockUnvisited1);
}
} else {
// Finished with all successors; pop the stack and add the block.
@@ -827,7 +831,7 @@ class SpecialRPONumberer : public ZoneObject {
// edges that lead out of loops. Visits each block once, but linking loop
// sections together is linear in the loop size, so overall is
// O(|B| + max(loop_depth) * max(|loop|))
- stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
+ stack_depth = Push(0, entry, kBlockUnvisited2);
while (stack_depth > 0) {
SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
BasicBlock* block = frame->block;
@@ -874,7 +878,7 @@ class SpecialRPONumberer : public ZoneObject {
loop->AddOutgoing(zone_, succ);
} else {
// Push the successor onto the stack.
- stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
+ stack_depth = Push(stack_depth, succ, kBlockUnvisited2);
if (HasLoopNumber(succ)) {
// Push the inner loop onto the loop stack.
DCHECK(GetLoopNumber(succ) < num_loops);
@@ -958,8 +962,9 @@ class SpecialRPONumberer : public ZoneObject {
}
// Computes loop membership from the backedges of the control flow graph.
- void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
- size_t num_loops, ZoneVector<Backedge>* backedges) {
+ void ComputeLoopInfo(
+ ZoneVector<SpecialRPOStackFrame>& queue, // NOLINT(runtime/references)
+ size_t num_loops, ZoneVector<Backedge>* backedges) {
// Extend existing loop membership vectors.
for (LoopInfo& loop : loops_) {
loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
@@ -1234,6 +1239,7 @@ void Scheduler::PrepareUses() {
visited[node->id()] = true;
stack.push(node->input_edges().begin());
while (!stack.empty()) {
+ tick_counter_->DoTick();
Edge edge = *stack.top();
Node* node = edge.to();
if (visited[node->id()]) {
@@ -1262,6 +1268,7 @@ class ScheduleEarlyNodeVisitor {
for (Node* const root : *roots) {
queue_.push(root);
while (!queue_.empty()) {
+ scheduler_->tick_counter_->DoTick();
VisitNode(queue_.front());
queue_.pop();
}
@@ -1388,6 +1395,7 @@ class ScheduleLateNodeVisitor {
queue->push(node);
do {
+ scheduler_->tick_counter_->DoTick();
Node* const node = queue->front();
queue->pop();
VisitNode(node);
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index bd2f2780dd..3d1fa40025 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -15,6 +15,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -23,7 +26,6 @@ class ControlEquivalence;
class Graph;
class SpecialRPONumberer;
-
// Computes a schedule from a graph, placing nodes into basic blocks and
// ordering the basic blocks in the special RPO order.
class V8_EXPORT_PRIVATE Scheduler {
@@ -34,7 +36,8 @@ class V8_EXPORT_PRIVATE Scheduler {
// The complete scheduling algorithm. Creates a new schedule and places all
// nodes from the graph into it.
- static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags);
+ static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags,
+ TickCounter* tick_counter);
// Compute the RPO of blocks in an existing schedule.
static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule);
@@ -78,9 +81,10 @@ class V8_EXPORT_PRIVATE Scheduler {
CFGBuilder* control_flow_builder_; // Builds basic blocks for controls.
SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
ControlEquivalence* equivalence_; // Control dependence equivalence.
+ TickCounter* const tick_counter_;
Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
- size_t node_count_hint_);
+ size_t node_count_hint_, TickCounter* tick_counter);
inline SchedulerData DefaultSchedulerData();
inline SchedulerData* GetData(Node* node);
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index ecbd9cc030..5597850b06 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -6,30 +6,495 @@
#include <sstream>
+#include "src/base/optional.h"
+#include "src/compiler/access-info.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/vector-slot-pair.h"
#include "src/handles/handles-inl.h"
+#include "src/ic/call-optimization.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects/code.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/objects/shared-function-info-inl.h"
+#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
+#define CLEAR_ENVIRONMENT_LIST(V) \
+ V(CallRuntimeForPair) \
+ V(Debugger) \
+ V(ResumeGenerator) \
+ V(SuspendGenerator)
+
+#define KILL_ENVIRONMENT_LIST(V) \
+ V(Abort) \
+ V(ReThrow) \
+ V(Throw)
+
+#define CLEAR_ACCUMULATOR_LIST(V) \
+ V(Add) \
+ V(AddSmi) \
+ V(BitwiseAnd) \
+ V(BitwiseAndSmi) \
+ V(BitwiseNot) \
+ V(BitwiseOr) \
+ V(BitwiseOrSmi) \
+ V(BitwiseXor) \
+ V(BitwiseXorSmi) \
+ V(CallRuntime) \
+ V(CloneObject) \
+ V(CreateArrayFromIterable) \
+ V(CreateArrayLiteral) \
+ V(CreateEmptyArrayLiteral) \
+ V(CreateEmptyObjectLiteral) \
+ V(CreateMappedArguments) \
+ V(CreateObjectLiteral) \
+ V(CreateRegExpLiteral) \
+ V(CreateRestParameter) \
+ V(CreateUnmappedArguments) \
+ V(Dec) \
+ V(DeletePropertySloppy) \
+ V(DeletePropertyStrict) \
+ V(Div) \
+ V(DivSmi) \
+ V(Exp) \
+ V(ExpSmi) \
+ V(ForInContinue) \
+ V(ForInEnumerate) \
+ V(ForInNext) \
+ V(ForInStep) \
+ V(Inc) \
+ V(LdaLookupSlot) \
+ V(LdaLookupSlotInsideTypeof) \
+ V(LogicalNot) \
+ V(Mod) \
+ V(ModSmi) \
+ V(Mul) \
+ V(MulSmi) \
+ V(Negate) \
+ V(SetPendingMessage) \
+ V(ShiftLeft) \
+ V(ShiftLeftSmi) \
+ V(ShiftRight) \
+ V(ShiftRightLogical) \
+ V(ShiftRightLogicalSmi) \
+ V(ShiftRightSmi) \
+ V(StaLookupSlot) \
+ V(Sub) \
+ V(SubSmi) \
+ V(TestEqual) \
+ V(TestEqualStrict) \
+ V(TestGreaterThan) \
+ V(TestGreaterThanOrEqual) \
+ V(TestInstanceOf) \
+ V(TestLessThan) \
+ V(TestLessThanOrEqual) \
+ V(TestNull) \
+ V(TestReferenceEqual) \
+ V(TestTypeOf) \
+ V(TestUndefined) \
+ V(TestUndetectable) \
+ V(ToBooleanLogicalNot) \
+ V(ToName) \
+ V(ToNumber) \
+ V(ToNumeric) \
+ V(ToString) \
+ V(TypeOf)
+
+#define UNCONDITIONAL_JUMPS_LIST(V) \
+ V(Jump) \
+ V(JumpConstant) \
+ V(JumpLoop)
+
+#define CONDITIONAL_JUMPS_LIST(V) \
+ V(JumpIfFalse) \
+ V(JumpIfFalseConstant) \
+ V(JumpIfJSReceiver) \
+ V(JumpIfJSReceiverConstant) \
+ V(JumpIfNotNull) \
+ V(JumpIfNotNullConstant) \
+ V(JumpIfNotUndefined) \
+ V(JumpIfNotUndefinedConstant) \
+ V(JumpIfNull) \
+ V(JumpIfNullConstant) \
+ V(JumpIfToBooleanFalse) \
+ V(JumpIfToBooleanFalseConstant) \
+ V(JumpIfToBooleanTrue) \
+ V(JumpIfToBooleanTrueConstant) \
+ V(JumpIfTrue) \
+ V(JumpIfTrueConstant) \
+ V(JumpIfUndefined) \
+ V(JumpIfUndefinedConstant)
+
+#define IGNORED_BYTECODE_LIST(V) \
+ V(CallNoFeedback) \
+ V(IncBlockCounter) \
+ V(LdaNamedPropertyNoFeedback) \
+ V(StackCheck) \
+ V(StaNamedPropertyNoFeedback) \
+ V(ThrowReferenceErrorIfHole) \
+ V(ThrowSuperAlreadyCalledIfNotHole) \
+ V(ThrowSuperNotCalledIfHole)
+
+#define UNREACHABLE_BYTECODE_LIST(V) \
+ V(ExtraWide) \
+ V(Illegal) \
+ V(Wide)
+
+#define SUPPORTED_BYTECODE_LIST(V) \
+ V(CallAnyReceiver) \
+ V(CallJSRuntime) \
+ V(CallProperty) \
+ V(CallProperty0) \
+ V(CallProperty1) \
+ V(CallProperty2) \
+ V(CallUndefinedReceiver) \
+ V(CallUndefinedReceiver0) \
+ V(CallUndefinedReceiver1) \
+ V(CallUndefinedReceiver2) \
+ V(CallWithSpread) \
+ V(Construct) \
+ V(ConstructWithSpread) \
+ V(CreateBlockContext) \
+ V(CreateCatchContext) \
+ V(CreateClosure) \
+ V(CreateEvalContext) \
+ V(CreateFunctionContext) \
+ V(CreateWithContext) \
+ V(GetSuperConstructor) \
+ V(GetTemplateObject) \
+ V(InvokeIntrinsic) \
+ V(LdaConstant) \
+ V(LdaContextSlot) \
+ V(LdaCurrentContextSlot) \
+ V(LdaImmutableContextSlot) \
+ V(LdaImmutableCurrentContextSlot) \
+ V(LdaModuleVariable) \
+ V(LdaFalse) \
+ V(LdaGlobal) \
+ V(LdaGlobalInsideTypeof) \
+ V(LdaKeyedProperty) \
+ V(LdaLookupContextSlot) \
+ V(LdaLookupContextSlotInsideTypeof) \
+ V(LdaLookupGlobalSlot) \
+ V(LdaLookupGlobalSlotInsideTypeof) \
+ V(LdaNamedProperty) \
+ V(LdaNull) \
+ V(Ldar) \
+ V(LdaSmi) \
+ V(LdaTheHole) \
+ V(LdaTrue) \
+ V(LdaUndefined) \
+ V(LdaZero) \
+ V(Mov) \
+ V(PopContext) \
+ V(PushContext) \
+ V(Return) \
+ V(StaContextSlot) \
+ V(StaCurrentContextSlot) \
+ V(StaGlobal) \
+ V(StaInArrayLiteral) \
+ V(StaKeyedProperty) \
+ V(StaModuleVariable) \
+ V(StaNamedOwnProperty) \
+ V(StaNamedProperty) \
+ V(Star) \
+ V(SwitchOnGeneratorState) \
+ V(SwitchOnSmiNoFeedback) \
+ V(TestIn) \
+ CLEAR_ACCUMULATOR_LIST(V) \
+ CLEAR_ENVIRONMENT_LIST(V) \
+ CONDITIONAL_JUMPS_LIST(V) \
+ IGNORED_BYTECODE_LIST(V) \
+ KILL_ENVIRONMENT_LIST(V) \
+ UNCONDITIONAL_JUMPS_LIST(V) \
+ UNREACHABLE_BYTECODE_LIST(V)
+
+template <typename T>
+struct HandleComparator {
+ bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
+ return lhs.address() < rhs.address();
+ }
+};
+
+struct VirtualContext {
+ unsigned int distance;
+ Handle<Context> context;
+
+ VirtualContext(unsigned int distance_in, Handle<Context> context_in)
+ : distance(distance_in), context(context_in) {
+ CHECK_GT(distance, 0);
+ }
+ bool operator<(const VirtualContext& other) const {
+ return HandleComparator<Context>()(context, other.context) &&
+ distance < other.distance;
+ }
+};
+
+class FunctionBlueprint;
+using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
+using VirtualContextsSet = ZoneSet<VirtualContext>;
+using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
+using BlueprintsSet = ZoneSet<FunctionBlueprint>;
+
+class Hints {
+ public:
+ explicit Hints(Zone* zone);
+
+ const ConstantsSet& constants() const;
+ const MapsSet& maps() const;
+ const BlueprintsSet& function_blueprints() const;
+ const VirtualContextsSet& virtual_contexts() const;
+
+ void AddConstant(Handle<Object> constant);
+ void AddMap(Handle<Map> map);
+ void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
+ void AddVirtualContext(VirtualContext virtual_context);
+
+ void Add(const Hints& other);
+
+ void Clear();
+ bool IsEmpty() const;
+
+#ifdef ENABLE_SLOW_DCHECKS
+ bool Includes(Hints const& other) const;
+ bool Equals(Hints const& other) const;
+#endif
+
+ private:
+ VirtualContextsSet virtual_contexts_;
+ ConstantsSet constants_;
+ MapsSet maps_;
+ BlueprintsSet function_blueprints_;
+};
+
+using HintsVector = ZoneVector<Hints>;
+
+class FunctionBlueprint {
+ public:
+ FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone);
+
+ FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ const Hints& context_hints);
+
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
+ const Hints& context_hints() const { return context_hints_; }
+
+ bool operator<(const FunctionBlueprint& other) const {
+ // A feedback vector is never used for more than one SFI, so it can
+ // be used for strict ordering of blueprints.
+ DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_),
+ shared_.equals(other.shared_));
+ return HandleComparator<FeedbackVector>()(feedback_vector_,
+ other.feedback_vector_);
+ }
+
+ private:
+ Handle<SharedFunctionInfo> shared_;
+ Handle<FeedbackVector> feedback_vector_;
+ Hints context_hints_;
+};
+
+class CompilationSubject {
+ public:
+ explicit CompilationSubject(FunctionBlueprint blueprint)
+ : blueprint_(blueprint) {}
+
+ // The zone parameter is to correctly initialize the blueprint,
+ // which contains zone-allocated context information.
+ CompilationSubject(Handle<JSFunction> closure, Isolate* isolate, Zone* zone);
+
+ const FunctionBlueprint& blueprint() const { return blueprint_; }
+ MaybeHandle<JSFunction> closure() const { return closure_; }
+
+ private:
+ FunctionBlueprint blueprint_;
+ MaybeHandle<JSFunction> closure_;
+};
+
+// The SerializerForBackgroundCompilation makes sure that the relevant function
+// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
+// optimizations in the compiler, is copied to the heap broker.
+class SerializerForBackgroundCompilation {
+ public:
+ SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset);
+ Hints Run(); // NOTE: Returns empty for an already-serialized function.
+
+ class Environment;
+
+ private:
+ SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ CompilationSubject function, base::Optional<Hints> new_target,
+ const HintsVector& arguments,
+ SerializerForBackgroundCompilationFlags flags);
+
+ bool BailoutOnUninitialized(FeedbackSlot slot);
+
+ void TraverseBytecode();
+
+#define DECLARE_VISIT_BYTECODE(name, ...) \
+ void Visit##name(interpreter::BytecodeArrayIterator* iterator);
+ SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
+#undef DECLARE_VISIT_BYTECODE
+
+ void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, FeedbackSlot slot,
+ bool with_spread = false);
+ void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
+ ConvertReceiverMode receiver_mode,
+ bool with_spread = false);
+ void ProcessApiCall(Handle<SharedFunctionInfo> target,
+ const HintsVector& arguments);
+ void ProcessReceiverMapForApiCall(
+ FunctionTemplateInfoRef& target, // NOLINT(runtime/references)
+ Handle<Map> receiver);
+ void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
+ const HintsVector& arguments);
+
+ void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
+
+ void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
+ FeedbackSlot slot, AccessMode mode);
+ void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator,
+ AccessMode mode);
+ void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
+ FeedbackSlot slot, AccessMode mode);
+ void ProcessMapHintsForPromises(Hints const& receiver_hints);
+ void ProcessHintsForPromiseResolve(Hints const& resolution_hints);
+ void ProcessHintsForRegExpTest(Hints const& regexp_hints);
+ PropertyAccessInfo ProcessMapForRegExpTest(MapRef map);
+ void ProcessHintsForFunctionCall(Hints const& target_hints);
+
+ GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot);
+ NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess(
+ const MapHandles& maps, AccessMode mode, NameRef const& name);
+ ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
+ const MapHandles& maps, AccessMode mode,
+ KeyedAccessMode const& keyed_mode);
+ void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode,
+ base::Optional<NameRef> static_name);
+ void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name);
+
+ void ProcessCreateContext();
+ enum ContextProcessingMode {
+ kIgnoreSlot,
+ kSerializeSlot,
+ kSerializeSlotAndAddToAccumulator
+ };
+
+ void ProcessContextAccess(const Hints& context_hints, int slot, int depth,
+ ContextProcessingMode mode);
+ void ProcessImmutableLoad(ContextRef& context, // NOLINT(runtime/references)
+ int slot, ContextProcessingMode mode);
+ void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator);
+ void ProcessLdaLookupContextSlot(
+ interpreter::BytecodeArrayIterator* iterator);
+
+ // Performs extension lookups for [0, depth) like
+ // BytecodeGraphBuilder::CheckContextExtensions().
+ void ProcessCheckContextExtensions(int depth);
+
+ Hints RunChildSerializer(CompilationSubject function,
+ base::Optional<Hints> new_target,
+ const HintsVector& arguments, bool with_spread);
+
+ // When (forward-)branching bytecodes are encountered, e.g. a conditional
+ // jump, we call ContributeToJumpTargetEnvironment to "remember" the current
+ // environment, associated with the jump target offset. When serialization
+ // eventually reaches that offset, we call IncorporateJumpTargetEnvironment to
+ // merge that environment back into whatever is the current environment then.
+ // Note: Since there may be multiple jumps to the same target,
+ // ContributeToJumpTargetEnvironment may actually do a merge as well.
+ void ContributeToJumpTargetEnvironment(int target_offset);
+ void IncorporateJumpTargetEnvironment(int target_offset);
+
+ Handle<BytecodeArray> bytecode_array() const;
+ BytecodeAnalysis const& GetBytecodeAnalysis(bool serialize);
+
+ JSHeapBroker* broker() const { return broker_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Zone* zone() const { return zone_; }
+ Environment* environment() const { return environment_; }
+ SerializerForBackgroundCompilationFlags flags() const { return flags_; }
+ BailoutId osr_offset() const { return osr_offset_; }
+
+ JSHeapBroker* const broker_;
+ CompilationDependencies* const dependencies_;
+ Zone* const zone_;
+ Environment* const environment_;
+ ZoneUnorderedMap<int, Environment*> jump_target_environments_;
+ SerializerForBackgroundCompilationFlags const flags_;
+ BailoutId const osr_offset_;
+};
+
+void RunSerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset) {
+ SerializerForBackgroundCompilation serializer(broker, dependencies, zone,
+ closure, flags, osr_offset);
+ serializer.Run();
+}
+
using BytecodeArrayIterator = interpreter::BytecodeArrayIterator;
+FunctionBlueprint::FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ const Hints& context_hints)
+ : shared_(shared),
+ feedback_vector_(feedback_vector),
+ context_hints_(context_hints) {}
+
+FunctionBlueprint::FunctionBlueprint(Handle<JSFunction> function,
+ Isolate* isolate, Zone* zone)
+ : shared_(handle(function->shared(), isolate)),
+ feedback_vector_(handle(function->feedback_vector(), isolate)),
+ context_hints_(zone) {
+ context_hints_.AddConstant(handle(function->context(), isolate));
+}
+
CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
- Isolate* isolate)
- : blueprint_{handle(closure->shared(), isolate),
- handle(closure->feedback_vector(), isolate)},
- closure_(closure) {
+ Isolate* isolate, Zone* zone)
+ : blueprint_(closure, isolate, zone), closure_(closure) {
CHECK(closure->has_feedback_vector());
}
Hints::Hints(Zone* zone)
- : constants_(zone), maps_(zone), function_blueprints_(zone) {}
+ : virtual_contexts_(zone),
+ constants_(zone),
+ maps_(zone),
+ function_blueprints_(zone) {}
+
+#ifdef ENABLE_SLOW_DCHECKS
+namespace {
+template <typename K, typename Compare>
+bool SetIncludes(ZoneSet<K, Compare> const& lhs,
+ ZoneSet<K, Compare> const& rhs) {
+ return std::all_of(rhs.cbegin(), rhs.cend(),
+ [&](K const& x) { return lhs.find(x) != lhs.cend(); });
+}
+} // namespace
+bool Hints::Includes(Hints const& other) const {
+ return SetIncludes(constants(), other.constants()) &&
+ SetIncludes(function_blueprints(), other.function_blueprints()) &&
+ SetIncludes(maps(), other.maps());
+}
+bool Hints::Equals(Hints const& other) const {
+ return this->Includes(other) && other.Includes(*this);
+}
+#endif
const ConstantsSet& Hints::constants() const { return constants_; }
@@ -39,6 +504,14 @@ const BlueprintsSet& Hints::function_blueprints() const {
return function_blueprints_;
}
+const VirtualContextsSet& Hints::virtual_contexts() const {
+ return virtual_contexts_;
+}
+
+void Hints::AddVirtualContext(VirtualContext virtual_context) {
+ virtual_contexts_.insert(virtual_context);
+}
+
void Hints::AddConstant(Handle<Object> constant) {
constants_.insert(constant);
}
@@ -53,16 +526,29 @@ void Hints::Add(const Hints& other) {
for (auto x : other.constants()) AddConstant(x);
for (auto x : other.maps()) AddMap(x);
for (auto x : other.function_blueprints()) AddFunctionBlueprint(x);
+ for (auto x : other.virtual_contexts()) AddVirtualContext(x);
}
bool Hints::IsEmpty() const {
- return constants().empty() && maps().empty() && function_blueprints().empty();
+ return constants().empty() && maps().empty() &&
+ function_blueprints().empty() && virtual_contexts().empty();
}
std::ostream& operator<<(std::ostream& out,
+ const VirtualContext& virtual_context) {
+ out << "Distance " << virtual_context.distance << " from "
+ << Brief(*virtual_context.context) << std::endl;
+ return out;
+}
+
+std::ostream& operator<<(std::ostream& out, const Hints& hints);
+
+std::ostream& operator<<(std::ostream& out,
const FunctionBlueprint& blueprint) {
- out << Brief(*blueprint.shared) << std::endl;
- out << Brief(*blueprint.feedback_vector) << std::endl;
+ out << Brief(*blueprint.shared()) << std::endl;
+ out << Brief(*blueprint.feedback_vector()) << std::endl;
+ !blueprint.context_hints().IsEmpty() && out << blueprint.context_hints()
+ << "):" << std::endl;
return out;
}
@@ -76,10 +562,14 @@ std::ostream& operator<<(std::ostream& out, const Hints& hints) {
for (FunctionBlueprint const& blueprint : hints.function_blueprints()) {
out << " blueprint " << blueprint << std::endl;
}
+ for (VirtualContext const& virtual_context : hints.virtual_contexts()) {
+ out << " virtual context " << virtual_context << std::endl;
+ }
return out;
}
void Hints::Clear() {
+ virtual_contexts_.clear();
constants_.clear();
maps_.clear();
function_blueprints_.clear();
@@ -92,50 +582,53 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
base::Optional<Hints> new_target, const HintsVector& arguments);
- bool IsDead() const { return environment_hints_.empty(); }
+ bool IsDead() const { return ephemeral_hints_.empty(); }
void Kill() {
DCHECK(!IsDead());
- environment_hints_.clear();
+ ephemeral_hints_.clear();
DCHECK(IsDead());
}
void Revive() {
DCHECK(IsDead());
- environment_hints_.resize(environment_hints_size(), Hints(zone()));
+ ephemeral_hints_.resize(ephemeral_hints_size(), Hints(zone()));
DCHECK(!IsDead());
}
- // When control flow bytecodes are encountered, e.g. a conditional jump,
- // the current environment needs to be stashed together with the target jump
- // address. Later, when this target bytecode is handled, the stashed
- // environment will be merged into the current one.
+ // Merge {other} into {this} environment (leaving {other} unmodified).
void Merge(Environment* other);
FunctionBlueprint function() const { return function_; }
+ Hints const& closure_hints() const { return closure_hints_; }
+ Hints const& current_context_hints() const { return current_context_hints_; }
+ Hints& current_context_hints() { return current_context_hints_; }
+ Hints const& return_value_hints() const { return return_value_hints_; }
+ Hints& return_value_hints() { return return_value_hints_; }
+
Hints& accumulator_hints() {
- CHECK_LT(accumulator_index(), environment_hints_.size());
- return environment_hints_[accumulator_index()];
+ CHECK_LT(accumulator_index(), ephemeral_hints_.size());
+ return ephemeral_hints_[accumulator_index()];
}
+
Hints& register_hints(interpreter::Register reg) {
+ if (reg.is_function_closure()) return closure_hints_;
+ if (reg.is_current_context()) return current_context_hints_;
int local_index = RegisterToLocalIndex(reg);
- CHECK_LT(local_index, environment_hints_.size());
- return environment_hints_[local_index];
+ CHECK_LT(local_index, ephemeral_hints_.size());
+ return ephemeral_hints_[local_index];
}
- Hints& return_value_hints() { return return_value_hints_; }
- // Clears all hints except those for the return value and the closure.
+ // Clears all hints except those for the context, return value, and the
+ // closure.
void ClearEphemeralHints() {
- DCHECK_EQ(environment_hints_.size(), function_closure_index() + 1);
- for (int i = 0; i < function_closure_index(); ++i) {
- environment_hints_[i].Clear();
- }
+ for (auto& hints : ephemeral_hints_) hints.Clear();
}
// Appends the hints for the given register range to {dst} (in order).
void ExportRegisterHints(interpreter::Register first, size_t count,
- HintsVector& dst);
+ HintsVector& dst); // NOLINT(runtime/references)
private:
friend std::ostream& operator<<(std::ostream& out, const Environment& env);
@@ -153,34 +646,39 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
int const parameter_count_;
int const register_count_;
- // environment_hints_ contains hints for the contents of the registers,
+ Hints closure_hints_;
+ Hints current_context_hints_;
+ Hints return_value_hints_;
+
+ // ephemeral_hints_ contains hints for the contents of the registers,
// the accumulator and the parameters. The layout is as follows:
- // [ parameters | registers | accumulator | context | closure ]
+ // [ parameters | registers | accumulator ]
// The first parameter is the receiver.
- HintsVector environment_hints_;
+ HintsVector ephemeral_hints_;
int accumulator_index() const { return parameter_count() + register_count(); }
- int current_context_index() const { return accumulator_index() + 1; }
- int function_closure_index() const { return current_context_index() + 1; }
- int environment_hints_size() const { return function_closure_index() + 1; }
-
- Hints return_value_hints_;
+ int ephemeral_hints_size() const { return accumulator_index() + 1; }
};
SerializerForBackgroundCompilation::Environment::Environment(
Zone* zone, CompilationSubject function)
: zone_(zone),
function_(function.blueprint()),
- parameter_count_(function_.shared->GetBytecodeArray().parameter_count()),
- register_count_(function_.shared->GetBytecodeArray().register_count()),
- environment_hints_(environment_hints_size(), Hints(zone), zone),
- return_value_hints_(zone) {
+ parameter_count_(
+ function_.shared()->GetBytecodeArray().parameter_count()),
+ register_count_(function_.shared()->GetBytecodeArray().register_count()),
+ closure_hints_(zone),
+ current_context_hints_(zone),
+ return_value_hints_(zone),
+ ephemeral_hints_(ephemeral_hints_size(), Hints(zone), zone) {
Handle<JSFunction> closure;
if (function.closure().ToHandle(&closure)) {
- environment_hints_[function_closure_index()].AddConstant(closure);
+ closure_hints_.AddConstant(closure);
} else {
- environment_hints_[function_closure_index()].AddFunctionBlueprint(
- function.blueprint());
+ closure_hints_.AddFunctionBlueprint(function.blueprint());
}
+
+ // Consume blueprint context hint information.
+ current_context_hints().Add(function.blueprint().context_hints());
}
SerializerForBackgroundCompilation::Environment::Environment(
@@ -191,18 +689,19 @@ SerializerForBackgroundCompilation::Environment::Environment(
// the parameter_count.
size_t param_count = static_cast<size_t>(parameter_count());
for (size_t i = 0; i < std::min(arguments.size(), param_count); ++i) {
- environment_hints_[i] = arguments[i];
+ ephemeral_hints_[i] = arguments[i];
}
// Pad the rest with "undefined".
Hints undefined_hint(zone);
undefined_hint.AddConstant(isolate->factory()->undefined_value());
for (size_t i = arguments.size(); i < param_count; ++i) {
- environment_hints_[i] = undefined_hint;
+ ephemeral_hints_[i] = undefined_hint;
}
interpreter::Register new_target_reg =
- function_.shared->GetBytecodeArray()
+ function_.shared()
+ ->GetBytecodeArray()
.incoming_new_target_or_generator_register();
if (new_target_reg.is_valid()) {
DCHECK(register_hints(new_target_reg).IsEmpty());
@@ -219,16 +718,20 @@ void SerializerForBackgroundCompilation::Environment::Merge(
CHECK_EQ(parameter_count(), other->parameter_count());
CHECK_EQ(register_count(), other->register_count());
+ SLOW_DCHECK(closure_hints_.Equals(other->closure_hints_));
+
if (IsDead()) {
- environment_hints_ = other->environment_hints_;
+ ephemeral_hints_ = other->ephemeral_hints_;
+ SLOW_DCHECK(return_value_hints_.Includes(other->return_value_hints_));
CHECK(!IsDead());
return;
}
- CHECK_EQ(environment_hints_.size(), other->environment_hints_.size());
- for (size_t i = 0; i < environment_hints_.size(); ++i) {
- environment_hints_[i].Add(other->environment_hints_[i]);
+ CHECK_EQ(ephemeral_hints_.size(), other->ephemeral_hints_.size());
+ for (size_t i = 0; i < ephemeral_hints_.size(); ++i) {
+ ephemeral_hints_[i].Add(other->ephemeral_hints_[i]);
}
+
return_value_hints_.Add(other->return_value_hints_);
}
@@ -236,42 +739,39 @@ std::ostream& operator<<(
std::ostream& out,
const SerializerForBackgroundCompilation::Environment& env) {
std::ostringstream output_stream;
+ output_stream << "Function ";
+ env.function_.shared()->Name().Print(output_stream);
- for (size_t i = 0; i << env.parameter_count(); ++i) {
- Hints const& hints = env.environment_hints_[i];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for a" << i << ":\n" << hints;
- }
- }
- for (size_t i = 0; i << env.register_count(); ++i) {
- Hints const& hints = env.environment_hints_[env.parameter_count() + i];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for r" << i << ":\n" << hints;
- }
- }
- {
- Hints const& hints = env.environment_hints_[env.accumulator_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <accumulator>:\n" << hints;
+ if (env.IsDead()) {
+ output_stream << "dead\n";
+ } else {
+ output_stream << "alive\n";
+ for (int i = 0; i < static_cast<int>(env.ephemeral_hints_.size()); ++i) {
+ Hints const& hints = env.ephemeral_hints_[i];
+ if (!hints.IsEmpty()) {
+ if (i < env.parameter_count()) {
+ output_stream << "Hints for a" << i << ":\n";
+ } else if (i < env.parameter_count() + env.register_count()) {
+ int local_register = i - env.parameter_count();
+ output_stream << "Hints for r" << local_register << ":\n";
+ } else if (i == env.accumulator_index()) {
+ output_stream << "Hints for <accumulator>:\n";
+ } else {
+ UNREACHABLE();
+ }
+ output_stream << hints;
+ }
}
}
- {
- Hints const& hints = env.environment_hints_[env.function_closure_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <closure>:\n" << hints;
- }
+
+ if (!env.closure_hints().IsEmpty()) {
+ output_stream << "Hints for <closure>:\n" << env.closure_hints();
}
- {
- Hints const& hints = env.environment_hints_[env.current_context_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <context>:\n" << hints;
- }
+ if (!env.current_context_hints().IsEmpty()) {
+ output_stream << "Hints for <context>:\n" << env.current_context_hints();
}
- {
- Hints const& hints = env.return_value_hints_;
- if (!hints.IsEmpty()) {
- output_stream << "Hints for {return value}:\n" << hints;
- }
+ if (!env.return_value_hints().IsEmpty()) {
+ output_stream << "Hints for {return value}:\n" << env.return_value_hints();
}
out << output_stream.str();
@@ -280,25 +780,26 @@ std::ostream& operator<<(
int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex(
interpreter::Register reg) const {
- // TODO(mslekova): We also want to gather hints for the context.
- if (reg.is_current_context()) return current_context_index();
- if (reg.is_function_closure()) return function_closure_index();
if (reg.is_parameter()) {
return reg.ToParameterIndex(parameter_count());
} else {
+ DCHECK(!reg.is_function_closure());
return parameter_count() + reg.index();
}
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags)
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset)
: broker_(broker),
dependencies_(dependencies),
zone_(zone),
- environment_(new (zone) Environment(zone, {closure, broker_->isolate()})),
- stashed_environments_(zone),
- flags_(flags) {
+ environment_(new (zone) Environment(
+ zone, CompilationSubject(closure, broker_->isolate(), zone))),
+ jump_target_environments_(zone),
+ flags_(flags),
+ osr_offset_(osr_offset) {
JSFunctionRef(broker, closure).Serialize();
}
@@ -311,9 +812,9 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
zone_(zone),
environment_(new (zone) Environment(zone, broker_->isolate(), function,
new_target, arguments)),
- stashed_environments_(zone),
- flags_(flags) {
- DCHECK(!(flags_ & SerializerForBackgroundCompilationFlag::kOsr));
+ jump_target_environments_(zone),
+ flags_(flags),
+ osr_offset_(BailoutId::None()) {
TraceScope tracer(
broker_, this,
"SerializerForBackgroundCompilation::SerializerForBackgroundCompilation");
@@ -331,12 +832,12 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) {
return false;
}
- if (flags() & SerializerForBackgroundCompilationFlag::kOsr) {
+ if (!osr_offset().IsNone()) {
// Exclude OSR from this optimization because we might end up skipping the
// OSR entry point. TODO(neis): Support OSR?
return false;
}
- FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
if (!slot.IsInvalid() && nexus.IsUninitialized()) {
FeedbackSource source(nexus);
if (broker()->HasFeedback(source)) {
@@ -354,9 +855,9 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
Hints SerializerForBackgroundCompilation::Run() {
TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
- SharedFunctionInfoRef shared(broker(), environment()->function().shared);
- FeedbackVectorRef feedback_vector(broker(),
- environment()->function().feedback_vector);
+ SharedFunctionInfoRef shared(broker(), environment()->function().shared());
+ FeedbackVectorRef feedback_vector(
+ broker(), environment()->function().feedback_vector());
if (shared.IsSerializedForCompilation(feedback_vector)) {
TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo "
<< Brief(*shared.object())
@@ -382,9 +883,10 @@ Hints SerializerForBackgroundCompilation::Run() {
class ExceptionHandlerMatcher {
public:
explicit ExceptionHandlerMatcher(
- BytecodeArrayIterator const& bytecode_iterator)
+ BytecodeArrayIterator const& bytecode_iterator,
+ Handle<BytecodeArray> bytecode_array)
: bytecode_iterator_(bytecode_iterator) {
- HandlerTable table(*bytecode_iterator_.bytecode_array());
+ HandlerTable table(*bytecode_array);
for (int i = 0, n = table.NumberOfRangeEntries(); i < n; ++i) {
handlers_.insert(table.GetRangeHandler(i));
}
@@ -407,30 +909,53 @@ class ExceptionHandlerMatcher {
std::set<int>::const_iterator handlers_iterator_;
};
+Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
+ const {
+ return handle(environment()->function().shared()->GetBytecodeArray(),
+ broker()->isolate());
+}
+
+BytecodeAnalysis const& SerializerForBackgroundCompilation::GetBytecodeAnalysis(
+ bool serialize) {
+ return broker()->GetBytecodeAnalysis(
+ bytecode_array(), osr_offset(),
+ flags() &
+ SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness,
+ serialize);
+}
+
void SerializerForBackgroundCompilation::TraverseBytecode() {
- BytecodeArrayRef bytecode_array(
- broker(), handle(environment()->function().shared->GetBytecodeArray(),
- broker()->isolate()));
- BytecodeArrayIterator iterator(bytecode_array.object());
- ExceptionHandlerMatcher handler_matcher(iterator);
+ BytecodeAnalysis const& bytecode_analysis = GetBytecodeAnalysis(true);
+ BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation();
+
+ BytecodeArrayIterator iterator(bytecode_array());
+ ExceptionHandlerMatcher handler_matcher(iterator, bytecode_array());
for (; !iterator.done(); iterator.Advance()) {
- MergeAfterJump(&iterator);
+ int const current_offset = iterator.current_offset();
+ IncorporateJumpTargetEnvironment(current_offset);
+
+ TRACE_BROKER(broker(),
+ "Handling bytecode: " << current_offset << " "
+ << iterator.current_bytecode());
+ TRACE_BROKER(broker(), "Current environment: " << *environment());
if (environment()->IsDead()) {
- if (iterator.current_bytecode() ==
- interpreter::Bytecode::kResumeGenerator ||
- handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
+ if (handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
environment()->Revive();
} else {
continue; // Skip this bytecode since TF won't generate code for it.
}
}
- TRACE_BROKER(broker(),
- "Handling bytecode: " << iterator.current_offset() << " "
- << iterator.current_bytecode());
- TRACE_BROKER(broker(), "Current environment:\n" << *environment());
+ if (bytecode_analysis.IsLoopHeader(current_offset)) {
+ // Graph builder might insert jumps to resume targets in the loop body.
+ LoopInfo const& loop_info =
+ bytecode_analysis.GetLoopInfoFor(current_offset);
+ for (const auto& target : loop_info.resume_jump_targets()) {
+ ContributeToJumpTargetEnvironment(target.target_offset());
+ }
+ }
switch (iterator.current_bytecode()) {
#define DEFINE_BYTECODE_CASE(name) \
@@ -447,21 +972,6 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
}
}
-void SerializerForBackgroundCompilation::VisitIllegal(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
-void SerializerForBackgroundCompilation::VisitWide(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
-void SerializerForBackgroundCompilation::VisitExtraWide(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
BytecodeArrayIterator* iterator) {
interpreter::Register dst = iterator->GetRegisterOperand(0);
@@ -480,6 +990,20 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
}
}
+void SerializerForBackgroundCompilation::VisitGetTemplateObject(
+ BytecodeArrayIterator* iterator) {
+ ObjectRef description(
+ broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackVectorRef feedback_vector(
+ broker(), environment()->function().feedback_vector());
+ SharedFunctionInfoRef shared(broker(), environment()->function().shared());
+ JSArrayRef template_object =
+ shared.GetTemplateObject(description, feedback_vector, slot, true);
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().AddConstant(template_object.object());
+}
+
void SerializerForBackgroundCompilation::VisitLdaTrue(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
@@ -529,11 +1053,171 @@ void SerializerForBackgroundCompilation::VisitLdaSmi(
Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate()));
}
+void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
+ BytecodeArrayIterator* iterator) {
+ Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0);
+ // For JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (functionId == Runtime::kInlineAsyncFunctionResolve) {
+ interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+ size_t reg_count = iterator->GetRegisterCountOperand(2);
+ CHECK_EQ(reg_count, 3);
+ HintsVector arguments(zone());
+ environment()->ExportRegisterHints(first_reg, reg_count, arguments);
+ Hints const& resolution_hints = arguments[1]; // The resolution object.
+ ProcessHintsForPromiseResolve(resolution_hints);
+ environment()->accumulator_hints().Clear();
+ return;
+ }
+ environment()->ClearEphemeralHints();
+}
+
void SerializerForBackgroundCompilation::VisitLdaConstant(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- handle(iterator->GetConstantForIndexOperand(0), broker()->isolate()));
+ iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+}
+
+void SerializerForBackgroundCompilation::VisitPushContext(
+ BytecodeArrayIterator* iterator) {
+ // Transfer current context hints to the destination register hints.
+ Hints& current_context_hints = environment()->current_context_hints();
+ Hints& saved_context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ saved_context_hints.Clear();
+ saved_context_hints.Add(current_context_hints);
+
+ // New Context is in the accumulator. Put those hints into the current context
+ // register hints.
+ current_context_hints.Clear();
+ current_context_hints.Add(environment()->accumulator_hints());
+}
+
+void SerializerForBackgroundCompilation::VisitPopContext(
+ BytecodeArrayIterator* iterator) {
+ // Replace current context hints with hints given in the argument register.
+ Hints& new_context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ environment()->current_context_hints().Clear();
+ environment()->current_context_hints().Add(new_context_hints);
+}
+
+void SerializerForBackgroundCompilation::ProcessImmutableLoad(
+ ContextRef& context_ref, int slot, ContextProcessingMode mode) {
+ DCHECK(mode == kSerializeSlot || mode == kSerializeSlotAndAddToAccumulator);
+ base::Optional<ObjectRef> slot_value = context_ref.get(slot, true);
+
+ // Also, put the object into the constant hints for the accumulator.
+ if (mode == kSerializeSlotAndAddToAccumulator && slot_value.has_value()) {
+ environment()->accumulator_hints().AddConstant(slot_value.value().object());
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessContextAccess(
+ const Hints& context_hints, int slot, int depth,
+ ContextProcessingMode mode) {
+ // This function is for JSContextSpecialization::ReduceJSLoadContext and
+ // ReduceJSStoreContext. Those reductions attempt to eliminate as many
+ // loads as possible by making use of constant Context objects. In the
+ // case of an immutable load, ReduceJSLoadContext even attempts to load
+ // the value at {slot}, replacing the load with a constant.
+ for (auto x : context_hints.constants()) {
+ if (x->IsContext()) {
+ // Walk this context to the given depth and serialize the slot found.
+ ContextRef context_ref(broker(), x);
+ size_t remaining_depth = depth;
+ context_ref = context_ref.previous(&remaining_depth, true);
+ if (remaining_depth == 0 && mode != kIgnoreSlot) {
+ ProcessImmutableLoad(context_ref, slot, mode);
+ }
+ }
+ }
+ for (auto x : context_hints.virtual_contexts()) {
+ if (x.distance <= static_cast<unsigned int>(depth)) {
+ ContextRef context_ref(broker(), x.context);
+ size_t remaining_depth = depth - x.distance;
+ context_ref = context_ref.previous(&remaining_depth, true);
+ if (remaining_depth == 0 && mode != kIgnoreSlot) {
+ ProcessImmutableLoad(context_ref, slot, mode);
+ }
+ }
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitLdaContextSlot(
+ BytecodeArrayIterator* iterator) {
+ Hints& context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ Hints& context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth,
+ kSerializeSlotAndAddToAccumulator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth,
+ kSerializeSlotAndAddToAccumulator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaModuleVariable(
+ BytecodeArrayIterator* iterator) {
+ const int depth = iterator->GetUnsignedImmediateOperand(1);
+
+ // TODO(mvstanton): If we have a constant module, should we serialize the
+ // cell as well? Then we could put the value in the accumulator.
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(environment()->current_context_hints(),
+ Context::EXTENSION_INDEX, depth, kSerializeSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaModuleVariable(
+ BytecodeArrayIterator* iterator) {
+ const int depth = iterator->GetUnsignedImmediateOperand(1);
+ ProcessContextAccess(environment()->current_context_hints(),
+ Context::EXTENSION_INDEX, depth, kSerializeSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ Hints& register_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ ProcessContextAccess(register_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
}
void SerializerForBackgroundCompilation::VisitLdar(
@@ -558,14 +1242,60 @@ void SerializerForBackgroundCompilation::VisitMov(
environment()->register_hints(dst).Add(environment()->register_hints(src));
}
+void SerializerForBackgroundCompilation::VisitCreateFunctionContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateBlockContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateEvalContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateWithContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateCatchContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::ProcessCreateContext() {
+ Hints& accumulator_hints = environment()->accumulator_hints();
+ accumulator_hints.Clear();
+ Hints& current_context_hints = environment()->current_context_hints();
+
+ // For each constant context, we must create a virtual context from
+ // it of distance one.
+ for (auto x : current_context_hints.constants()) {
+ if (x->IsContext()) {
+ Handle<Context> as_context(Handle<Context>::cast(x));
+ accumulator_hints.AddVirtualContext(VirtualContext(1, as_context));
+ }
+ }
+
+ // For each virtual context, we must create a virtual context from
+ // it of distance {existing distance} + 1.
+ for (auto x : current_context_hints.virtual_contexts()) {
+ accumulator_hints.AddVirtualContext(
+ VirtualContext(x.distance + 1, x.context));
+ }
+}
+
void SerializerForBackgroundCompilation::VisitCreateClosure(
BytecodeArrayIterator* iterator) {
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(iterator->GetConstantForIndexOperand(0)),
- broker()->isolate());
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(
+ iterator->GetConstantForIndexOperand(0, broker()->isolate()));
Handle<FeedbackCell> feedback_cell =
- environment()->function().feedback_vector->GetClosureFeedbackCell(
+ environment()->function().feedback_vector()->GetClosureFeedbackCell(
iterator->GetIndexOperand(1));
FeedbackCellRef feedback_cell_ref(broker(), feedback_cell);
Handle<Object> cell_value(feedback_cell->value(), broker()->isolate());
@@ -573,8 +1303,13 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
environment()->accumulator_hints().Clear();
if (cell_value->IsFeedbackVector()) {
- environment()->accumulator_hints().AddFunctionBlueprint(
- {shared, Handle<FeedbackVector>::cast(cell_value)});
+ // Gather the context hints from the current context register hint
+ // structure.
+ FunctionBlueprint blueprint(shared,
+ Handle<FeedbackVector>::cast(cell_value),
+ environment()->current_context_hints());
+
+ environment()->accumulator_hints().AddFunctionBlueprint(blueprint);
}
}
@@ -685,6 +1420,16 @@ void SerializerForBackgroundCompilation::VisitCallWithSpread(
ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny, true);
}
+void SerializerForBackgroundCompilation::VisitCallJSRuntime(
+ BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+
+ // BytecodeGraphBuilder::VisitCallJSRuntime needs the {runtime_index}
+ // slot in the native context to be serialized.
+ const int runtime_index = iterator->GetNativeContextIndexOperand(0);
+ broker()->native_context().get(runtime_index, true);
+}
+
Hints SerializerForBackgroundCompilation::RunChildSerializer(
CompilationSubject function, base::Optional<Hints> new_target,
const HintsVector& arguments, bool with_spread) {
@@ -700,14 +1445,14 @@ Hints SerializerForBackgroundCompilation::RunChildSerializer(
padded.pop_back(); // Remove the spread element.
// Fill the rest with empty hints.
padded.resize(
- function.blueprint().shared->GetBytecodeArray().parameter_count(),
+ function.blueprint().shared()->GetBytecodeArray().parameter_count(),
Hints(zone()));
return RunChildSerializer(function, new_target, padded, false);
}
SerializerForBackgroundCompilation child_serializer(
broker(), dependencies(), zone(), function, new_target, arguments,
- flags().without(SerializerForBackgroundCompilationFlag::kOsr));
+ flags());
return child_serializer.Run();
}
@@ -734,7 +1479,7 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
// Incorporate feedback into hints.
base::Optional<HeapObjectRef> feedback = GetHeapObjectFeedback(
- broker(), environment()->function().feedback_vector, slot);
+ broker(), environment()->function().feedback_vector(), slot);
if (feedback.has_value() && feedback->map().is_callable()) {
if (new_target.has_value()) {
// Construct; feedback is new_target, which often is also the callee.
@@ -752,15 +1497,37 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
if (!hint->IsJSFunction()) continue;
Handle<JSFunction> function = Handle<JSFunction>::cast(hint);
- if (!function->shared().IsInlineable() || !function->has_feedback_vector())
- continue;
+ JSFunctionRef(broker(), function).Serialize();
+
+ Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
+
+ if (shared->IsApiFunction()) {
+ ProcessApiCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ } else if (shared->HasBuiltinId()) {
+ ProcessBuiltinCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ }
+
+ if (!shared->IsInlineable() || !function->has_feedback_vector()) continue;
environment()->accumulator_hints().Add(RunChildSerializer(
- {function, broker()->isolate()}, new_target, arguments, with_spread));
+ CompilationSubject(function, broker()->isolate(), zone()), new_target,
+ arguments, with_spread));
}
for (auto hint : callee.function_blueprints()) {
- if (!hint.shared->IsInlineable()) continue;
+ Handle<SharedFunctionInfo> shared = hint.shared();
+
+ if (shared->IsApiFunction()) {
+ ProcessApiCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ } else if (shared->HasBuiltinId()) {
+ ProcessBuiltinCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ }
+
+ if (!shared->IsInlineable()) continue;
environment()->accumulator_hints().Add(RunChildSerializer(
CompilationSubject(hint), new_target, arguments, with_spread));
}
@@ -788,22 +1555,222 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs(
ProcessCallOrConstruct(callee, base::nullopt, arguments, slot);
}
-void SerializerForBackgroundCompilation::ProcessJump(
- interpreter::BytecodeArrayIterator* iterator) {
- int jump_target = iterator->GetJumpTargetOffset();
- int current_offset = iterator->current_offset();
- if (current_offset >= jump_target) return;
+void SerializerForBackgroundCompilation::ProcessApiCall(
+ Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+ FunctionTemplateInfoRef target_template_info(
+ broker(), handle(target->function_data(), broker()->isolate()));
+ if (!target_template_info.has_call_code()) return;
+
+ target_template_info.SerializeCallCode();
+
+ SharedFunctionInfoRef target_ref(broker(), target);
+ target_ref.SerializeFunctionTemplateInfo();
+
+ if (target_template_info.accept_any_receiver() &&
+ target_template_info.is_signature_undefined())
+ return;
- stashed_environments_[jump_target] = new (zone()) Environment(*environment());
+ CHECK_GE(arguments.size(), 1);
+ Hints const& receiver_hints = arguments[0];
+ for (auto hint : receiver_hints.constants()) {
+ if (hint->IsUndefined()) {
+ // The receiver is the global proxy.
+ Handle<JSGlobalProxy> global_proxy =
+ broker()->native_context().global_proxy_object().object();
+ ProcessReceiverMapForApiCall(
+ target_template_info,
+ handle(global_proxy->map(), broker()->isolate()));
+ continue;
+ }
+
+ if (!hint->IsJSReceiver()) continue;
+ Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
+
+ ProcessReceiverMapForApiCall(target_template_info,
+ handle(receiver->map(), broker()->isolate()));
+ }
+
+ for (auto receiver_map : receiver_hints.maps()) {
+ ProcessReceiverMapForApiCall(target_template_info, receiver_map);
+ }
}
-void SerializerForBackgroundCompilation::MergeAfterJump(
+void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall(
+ FunctionTemplateInfoRef& target, Handle<Map> receiver) {
+ if (receiver->is_access_check_needed()) {
+ return;
+ }
+
+ MapRef receiver_map(broker(), receiver);
+ TRACE_BROKER(broker(), "Serializing holder for target:" << target);
+
+ target.LookupHolderOfExpectedType(receiver_map, true);
+}
+
+void SerializerForBackgroundCompilation::ProcessBuiltinCall(
+ Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+ DCHECK(target->HasBuiltinId());
+ const int builtin_id = target->builtin_id();
+ const char* name = Builtins::name(builtin_id);
+ TRACE_BROKER(broker(), "Serializing for call to builtin " << name);
+ switch (builtin_id) {
+ case Builtins::kPromisePrototypeCatch: {
+ // For JSCallReducer::ReducePromisePrototypeCatch.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromisePrototypeFinally: {
+ // For JSCallReducer::ReducePromisePrototypeFinally.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromisePrototypeThen: {
+ // For JSCallReducer::ReducePromisePrototypeThen.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromiseResolveTrampoline:
+ // For JSCallReducer::ReducePromiseInternalResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (arguments.size() >= 2) {
+ Hints const& resolution_hints = arguments[1];
+ ProcessHintsForPromiseResolve(resolution_hints);
+ }
+ break;
+ case Builtins::kPromiseInternalResolve:
+ // For JSCallReducer::ReducePromiseInternalResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (arguments.size() >= 3) {
+ Hints const& resolution_hints = arguments[2];
+ ProcessHintsForPromiseResolve(resolution_hints);
+ }
+ break;
+ case Builtins::kRegExpPrototypeTest: {
+ // For JSCallReducer::ReduceRegExpPrototypeTest.
+ if (arguments.size() >= 1) {
+ Hints const& regexp_hints = arguments[0];
+ ProcessHintsForRegExpTest(regexp_hints);
+ }
+ break;
+ }
+ case Builtins::kFunctionPrototypeCall:
+ if (arguments.size() >= 1) {
+ Hints const& target_hints = arguments[0];
+ ProcessHintsForFunctionCall(target_hints);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve(
+ Hints const& resolution_hints) {
+ auto processMap = [&](Handle<Map> map) {
+ broker()->CreateAccessInfoForLoadingThen(MapRef(broker(), map),
+ dependencies());
+ };
+
+ for (auto hint : resolution_hints.constants()) {
+ if (!hint->IsJSReceiver()) continue;
+ Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
+ processMap(handle(receiver->map(), broker()->isolate()));
+ }
+ for (auto map_hint : resolution_hints.maps()) {
+ processMap(map_hint);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessMapHintsForPromises(
+ Hints const& receiver_hints) {
+ // We need to serialize the prototypes on each receiver map.
+ for (auto constant : receiver_hints.constants()) {
+ if (!constant->IsJSPromise()) continue;
+ Handle<Map> map(Handle<HeapObject>::cast(constant)->map(),
+ broker()->isolate());
+ MapRef(broker(), map).SerializePrototype();
+ }
+ for (auto map : receiver_hints.maps()) {
+ if (!map->IsJSPromiseMap()) continue;
+ MapRef(broker(), map).SerializePrototype();
+ }
+}
+
+PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest(
+ MapRef map) {
+ PropertyAccessInfo ai_exec =
+ broker()->CreateAccessInfoForLoadingExec(map, dependencies());
+
+ Handle<JSObject> holder;
+ if (ai_exec.IsDataConstant() && ai_exec.holder().ToHandle(&holder)) {
+ // The property is on the prototype chain.
+ JSObjectRef holder_ref(broker(), holder);
+ holder_ref.GetOwnProperty(ai_exec.field_representation(),
+ ai_exec.field_index(), true);
+ }
+ return ai_exec;
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
+ Hints const& regexp_hints) {
+ for (auto hint : regexp_hints.constants()) {
+ if (!hint->IsJSRegExp()) continue;
+ Handle<JSRegExp> regexp(Handle<JSRegExp>::cast(hint));
+ Handle<Map> regexp_map(regexp->map(), broker()->isolate());
+ PropertyAccessInfo ai_exec =
+ ProcessMapForRegExpTest(MapRef(broker(), regexp_map));
+ Handle<JSObject> holder;
+ if (ai_exec.IsDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
+ // The property is on the object itself.
+ JSObjectRef holder_ref(broker(), regexp);
+ holder_ref.GetOwnProperty(ai_exec.field_representation(),
+ ai_exec.field_index(), true);
+ }
+ }
+
+ for (auto map : regexp_hints.maps()) {
+ if (!map->IsJSRegExpMap()) continue;
+ ProcessMapForRegExpTest(MapRef(broker(), map));
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall(
+ Hints const& target_hints) {
+ for (auto constant : target_hints.constants()) {
+ if (!constant->IsJSFunction()) continue;
+ JSFunctionRef func(broker(), constant);
+ func.Serialize();
+ }
+}
+
+void SerializerForBackgroundCompilation::ContributeToJumpTargetEnvironment(
+ int target_offset) {
+ auto it = jump_target_environments_.find(target_offset);
+ if (it == jump_target_environments_.end()) {
+ jump_target_environments_[target_offset] =
+ new (zone()) Environment(*environment());
+ } else {
+ it->second->Merge(environment());
+ }
+}
+
+void SerializerForBackgroundCompilation::IncorporateJumpTargetEnvironment(
+ int target_offset) {
+ auto it = jump_target_environments_.find(target_offset);
+ if (it != jump_target_environments_.end()) {
+ environment()->Merge(it->second);
+ jump_target_environments_.erase(it);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessJump(
interpreter::BytecodeArrayIterator* iterator) {
- int current_offset = iterator->current_offset();
- auto stash = stashed_environments_.find(current_offset);
- if (stash != stashed_environments_.end()) {
- environment()->Merge(stash->second);
- stashed_environments_.erase(stash);
+ int jump_target = iterator->GetJumpTargetOffset();
+ if (iterator->current_offset() < jump_target) {
+ ContributeToJumpTargetEnvironment(jump_target);
}
}
@@ -813,10 +1780,25 @@ void SerializerForBackgroundCompilation::VisitReturn(
environment()->ClearEphemeralHints();
}
+void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback(
+ interpreter::BytecodeArrayIterator* iterator) {
+ interpreter::JumpTableTargetOffsets targets =
+ iterator->GetJumpTableTargetOffsets();
+ for (const auto& target : targets) {
+ ContributeToJumpTargetEnvironment(target.target_offset);
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState(
+ interpreter::BytecodeArrayIterator* iterator) {
+ for (const auto& target : GetBytecodeAnalysis(false).resume_jump_targets()) {
+ ContributeToJumpTargetEnvironment(target.target_offset());
+ }
+}
+
void SerializerForBackgroundCompilation::Environment::ExportRegisterHints(
interpreter::Register first, size_t count, HintsVector& dst) {
- dst.resize(dst.size() + count, Hints(zone()));
- int reg_base = first.index();
+ const int reg_base = first.index();
for (int i = 0; i < static_cast<int>(count); ++i) {
dst.push_back(register_hints(interpreter::Register(reg_base + i)));
}
@@ -856,8 +1838,8 @@ GlobalAccessFeedback const*
SerializerForBackgroundCompilation::ProcessFeedbackForGlobalAccess(
FeedbackSlot slot) {
if (slot.IsInvalid()) return nullptr;
- if (environment()->function().feedback_vector.is_null()) return nullptr;
- FeedbackSource source(environment()->function().feedback_vector, slot);
+ if (environment()->function().feedback_vector().is_null()) return nullptr;
+ FeedbackSource source(environment()->function().feedback_vector(), slot);
if (broker()->HasFeedback(source)) {
return broker()->GetGlobalAccessFeedback(source);
@@ -889,14 +1871,31 @@ void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof(
VisitLdaGlobal(iterator);
}
-void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+void SerializerForBackgroundCompilation::ProcessCheckContextExtensions(
+ int depth) {
+ // for BytecodeGraphBuilder::CheckContextExtensions.
+ Hints& context_hints = environment()->current_context_hints();
+ for (int i = 0; i < depth; i++) {
+ ProcessContextAccess(context_hints, Context::EXTENSION_INDEX, i,
+ kSerializeSlot);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessLdaLookupGlobalSlot(
BytecodeArrayIterator* iterator) {
+ ProcessCheckContextExtensions(iterator->GetUnsignedImmediateOperand(2));
+ // TODO(neis): BytecodeGraphBilder may insert a JSLoadGlobal.
VisitLdaGlobal(iterator);
}
+void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupGlobalSlot(iterator);
+}
+
void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof(
BytecodeArrayIterator* iterator) {
- VisitLdaGlobal(iterator);
+ ProcessLdaLookupGlobalSlot(iterator);
}
void SerializerForBackgroundCompilation::VisitStaGlobal(
@@ -905,6 +1904,26 @@ void SerializerForBackgroundCompilation::VisitStaGlobal(
ProcessFeedbackForGlobalAccess(slot);
}
+void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot_index = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ ProcessCheckContextExtensions(depth);
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot_index, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupContextSlot(iterator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupContextSlot(iterator);
+}
+
namespace {
template <class MapContainer>
MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
@@ -922,9 +1941,10 @@ MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
ElementAccessFeedback const*
SerializerForBackgroundCompilation::ProcessFeedbackMapsForElementAccess(
- const MapHandles& maps, AccessMode mode) {
+ const MapHandles& maps, AccessMode mode,
+ KeyedAccessMode const& keyed_mode) {
ElementAccessFeedback const* result =
- broker()->ProcessFeedbackMapsForElementAccess(maps);
+ broker()->ProcessFeedbackMapsForElementAccess(maps, keyed_mode);
for (ElementAccessFeedback::MapIterator it = result->all_maps(broker());
!it.done(); it.advance()) {
switch (mode) {
@@ -952,9 +1972,34 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
ProcessMapForNamedPropertyAccess(map_ref, name);
AccessInfoFactory access_info_factory(broker(), dependencies(),
broker()->zone());
- access_infos.push_back(access_info_factory.ComputePropertyAccessInfo(
+ PropertyAccessInfo info(access_info_factory.ComputePropertyAccessInfo(
map, name.object(), mode));
+ access_infos.push_back(info);
+
+ // TODO(turbofan): We want to take receiver hints into account as well,
+ // not only the feedback maps.
+ // For JSNativeContextSpecialization::InlinePropertySetterCall
+ // and InlinePropertyGetterCall.
+ if (info.IsAccessorConstant() && !info.constant().is_null()) {
+ if (info.constant()->IsJSFunction()) {
+ // For JSCallReducer::ReduceCallApiFunction.
+ Handle<SharedFunctionInfo> sfi(
+ handle(Handle<JSFunction>::cast(info.constant())->shared(),
+ broker()->isolate()));
+ if (sfi->IsApiFunction()) {
+ FunctionTemplateInfoRef fti_ref(
+ broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
+ if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+ ProcessReceiverMapForApiCall(fti_ref, map);
+ }
+ } else {
+ FunctionTemplateInfoRef fti_ref(
+ broker(), Handle<FunctionTemplateInfo>::cast(info.constant()));
+ if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+ }
+ }
}
+
DCHECK(!access_infos.empty());
return new (broker()->zone()) NamedAccessFeedback(name, access_infos);
}
@@ -962,9 +2007,9 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
FeedbackSlot slot, AccessMode mode, base::Optional<NameRef> static_name) {
if (slot.IsInvalid()) return;
- if (environment()->function().feedback_vector.is_null()) return;
+ if (environment()->function().feedback_vector().is_null()) return;
- FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
FeedbackSource source(nexus);
if (broker()->HasFeedback(source)) return;
@@ -992,8 +2037,10 @@ void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
static_name.has_value() ? static_name : broker()->GetNameFeedback(nexus);
if (name.has_value()) {
processed = ProcessFeedbackMapsForNamedAccess(maps, mode, *name);
- } else if (nexus.GetKeyType() == ELEMENT && nexus.ic_state() != MEGAMORPHIC) {
- processed = ProcessFeedbackMapsForElementAccess(maps, mode);
+ } else if (nexus.GetKeyType() == ELEMENT) {
+ DCHECK_NE(nexus.ic_state(), MEGAMORPHIC);
+ processed = ProcessFeedbackMapsForElementAccess(
+ maps, mode, KeyedAccessMode::FromNexus(nexus));
}
broker()->SetFeedback(source, processed);
}
@@ -1087,8 +2134,8 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
BytecodeArrayIterator* iterator, AccessMode mode) {
Hints const& receiver =
environment()->register_hints(iterator->GetRegisterOperand(0));
- Handle<Name> name(Name::cast(iterator->GetConstantForIndexOperand(1)),
- broker()->isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ iterator->GetConstantForIndexOperand(1, broker()->isolate()));
FeedbackSlot slot = iterator->GetSlotOperand(2);
ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode);
}
@@ -1176,6 +2223,31 @@ UNCONDITIONAL_JUMPS_LIST(DEFINE_UNCONDITIONAL_JUMP)
IGNORED_BYTECODE_LIST(DEFINE_IGNORE)
#undef DEFINE_IGNORE
+#define DEFINE_UNREACHABLE(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) { \
+ UNREACHABLE(); \
+ }
+UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE)
+#undef DEFINE_UNREACHABLE
+
+#define DEFINE_KILL(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) { \
+ environment()->Kill(); \
+ }
+KILL_ENVIRONMENT_LIST(DEFINE_KILL)
+#undef DEFINE_KILL
+
+#undef CLEAR_ENVIRONMENT_LIST
+#undef KILL_ENVIRONMENT_LIST
+#undef CLEAR_ACCUMULATOR_LIST
+#undef UNCONDITIONAL_JUMPS_LIST
+#undef CONDITIONAL_JUMPS_LIST
+#undef IGNORED_BYTECODE_LIST
+#undef UNREACHABLE_BYTECODE_LIST
+#undef SUPPORTED_BYTECODE_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index 0ee37ef280..881ed61a55 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -5,346 +5,31 @@
#ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
#define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
-#include "src/base/optional.h"
-#include "src/compiler/access-info.h"
-#include "src/utils/utils.h"
#include "src/handles/handles.h"
-#include "src/handles/maybe-handles.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
-namespace interpreter {
-class BytecodeArrayIterator;
-} // namespace interpreter
-
-class BytecodeArray;
-class FeedbackVector;
-class LookupIterator;
-class NativeContext;
-class ScriptContextTable;
-class SharedFunctionInfo;
-class SourcePositionTableIterator;
+class BailoutId;
class Zone;
namespace compiler {
-#define CLEAR_ENVIRONMENT_LIST(V) \
- V(Abort) \
- V(CallRuntime) \
- V(CallRuntimeForPair) \
- V(CreateBlockContext) \
- V(CreateEvalContext) \
- V(CreateFunctionContext) \
- V(Debugger) \
- V(PopContext) \
- V(PushContext) \
- V(ResumeGenerator) \
- V(ReThrow) \
- V(StaContextSlot) \
- V(StaCurrentContextSlot) \
- V(SuspendGenerator) \
- V(SwitchOnGeneratorState) \
- V(Throw)
-
-#define CLEAR_ACCUMULATOR_LIST(V) \
- V(Add) \
- V(AddSmi) \
- V(BitwiseAnd) \
- V(BitwiseAndSmi) \
- V(BitwiseNot) \
- V(BitwiseOr) \
- V(BitwiseOrSmi) \
- V(BitwiseXor) \
- V(BitwiseXorSmi) \
- V(CloneObject) \
- V(CreateArrayFromIterable) \
- V(CreateArrayLiteral) \
- V(CreateEmptyArrayLiteral) \
- V(CreateEmptyObjectLiteral) \
- V(CreateMappedArguments) \
- V(CreateObjectLiteral) \
- V(CreateRestParameter) \
- V(CreateUnmappedArguments) \
- V(Dec) \
- V(DeletePropertySloppy) \
- V(DeletePropertyStrict) \
- V(Div) \
- V(DivSmi) \
- V(Exp) \
- V(ExpSmi) \
- V(ForInContinue) \
- V(ForInEnumerate) \
- V(ForInNext) \
- V(ForInStep) \
- V(GetTemplateObject) \
- V(Inc) \
- V(LdaContextSlot) \
- V(LdaCurrentContextSlot) \
- V(LdaImmutableContextSlot) \
- V(LdaImmutableCurrentContextSlot) \
- V(LogicalNot) \
- V(Mod) \
- V(ModSmi) \
- V(Mul) \
- V(MulSmi) \
- V(Negate) \
- V(SetPendingMessage) \
- V(ShiftLeft) \
- V(ShiftLeftSmi) \
- V(ShiftRight) \
- V(ShiftRightLogical) \
- V(ShiftRightLogicalSmi) \
- V(ShiftRightSmi) \
- V(Sub) \
- V(SubSmi) \
- V(TestEqual) \
- V(TestEqualStrict) \
- V(TestGreaterThan) \
- V(TestGreaterThanOrEqual) \
- V(TestInstanceOf) \
- V(TestLessThan) \
- V(TestLessThanOrEqual) \
- V(TestNull) \
- V(TestReferenceEqual) \
- V(TestTypeOf) \
- V(TestUndefined) \
- V(TestUndetectable) \
- V(ToBooleanLogicalNot) \
- V(ToName) \
- V(ToNumber) \
- V(ToNumeric) \
- V(ToString) \
- V(TypeOf)
-
-#define UNCONDITIONAL_JUMPS_LIST(V) \
- V(Jump) \
- V(JumpConstant) \
- V(JumpLoop)
-
-#define CONDITIONAL_JUMPS_LIST(V) \
- V(JumpIfFalse) \
- V(JumpIfFalseConstant) \
- V(JumpIfJSReceiver) \
- V(JumpIfJSReceiverConstant) \
- V(JumpIfNotNull) \
- V(JumpIfNotNullConstant) \
- V(JumpIfNotUndefined) \
- V(JumpIfNotUndefinedConstant) \
- V(JumpIfNull) \
- V(JumpIfNullConstant) \
- V(JumpIfToBooleanFalse) \
- V(JumpIfToBooleanFalseConstant) \
- V(JumpIfToBooleanTrue) \
- V(JumpIfToBooleanTrueConstant) \
- V(JumpIfTrue) \
- V(JumpIfTrueConstant) \
- V(JumpIfUndefined) \
- V(JumpIfUndefinedConstant)
-
-#define IGNORED_BYTECODE_LIST(V) \
- V(CallNoFeedback) \
- V(LdaNamedPropertyNoFeedback) \
- V(StackCheck) \
- V(StaNamedPropertyNoFeedback) \
- V(ThrowReferenceErrorIfHole) \
- V(ThrowSuperAlreadyCalledIfNotHole) \
- V(ThrowSuperNotCalledIfHole)
-
-#define SUPPORTED_BYTECODE_LIST(V) \
- V(CallAnyReceiver) \
- V(CallProperty) \
- V(CallProperty0) \
- V(CallProperty1) \
- V(CallProperty2) \
- V(CallUndefinedReceiver) \
- V(CallUndefinedReceiver0) \
- V(CallUndefinedReceiver1) \
- V(CallUndefinedReceiver2) \
- V(CallWithSpread) \
- V(Construct) \
- V(ConstructWithSpread) \
- V(CreateClosure) \
- V(ExtraWide) \
- V(GetSuperConstructor) \
- V(Illegal) \
- V(LdaConstant) \
- V(LdaFalse) \
- V(LdaGlobal) \
- V(LdaGlobalInsideTypeof) \
- V(LdaKeyedProperty) \
- V(LdaLookupGlobalSlot) \
- V(LdaLookupGlobalSlotInsideTypeof) \
- V(LdaNamedProperty) \
- V(LdaNull) \
- V(Ldar) \
- V(LdaSmi) \
- V(LdaTheHole) \
- V(LdaTrue) \
- V(LdaUndefined) \
- V(LdaZero) \
- V(Mov) \
- V(Return) \
- V(StaGlobal) \
- V(StaInArrayLiteral) \
- V(StaKeyedProperty) \
- V(StaNamedOwnProperty) \
- V(StaNamedProperty) \
- V(Star) \
- V(TestIn) \
- V(Wide) \
- CLEAR_ENVIRONMENT_LIST(V) \
- CLEAR_ACCUMULATOR_LIST(V) \
- CONDITIONAL_JUMPS_LIST(V) \
- UNCONDITIONAL_JUMPS_LIST(V) \
- IGNORED_BYTECODE_LIST(V)
-
+class CompilationDependencies;
class JSHeapBroker;
-template <typename T>
-struct HandleComparator {
- bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
- return lhs.address() < rhs.address();
- }
-};
-
-struct FunctionBlueprint {
- Handle<SharedFunctionInfo> shared;
- Handle<FeedbackVector> feedback_vector;
-
- bool operator<(const FunctionBlueprint& other) const {
- // A feedback vector is never used for more than one SFI, so it can
- // be used for strict ordering of blueprints.
- DCHECK_IMPLIES(feedback_vector.equals(other.feedback_vector),
- shared.equals(other.shared));
- return HandleComparator<FeedbackVector>()(feedback_vector,
- other.feedback_vector);
- }
-};
-
-class CompilationSubject {
- public:
- explicit CompilationSubject(FunctionBlueprint blueprint)
- : blueprint_(blueprint) {}
- CompilationSubject(Handle<JSFunction> closure, Isolate* isolate);
-
- FunctionBlueprint blueprint() const { return blueprint_; }
- MaybeHandle<JSFunction> closure() const { return closure_; }
-
- private:
- FunctionBlueprint blueprint_;
- MaybeHandle<JSFunction> closure_;
-};
-
-using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
-using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
-using BlueprintsSet = ZoneSet<FunctionBlueprint>;
-
-class Hints {
- public:
- explicit Hints(Zone* zone);
-
- const ConstantsSet& constants() const;
- const MapsSet& maps() const;
- const BlueprintsSet& function_blueprints() const;
-
- void AddConstant(Handle<Object> constant);
- void AddMap(Handle<Map> map);
- void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
-
- void Add(const Hints& other);
-
- void Clear();
- bool IsEmpty() const;
-
- private:
- ConstantsSet constants_;
- MapsSet maps_;
- BlueprintsSet function_blueprints_;
-};
-using HintsVector = ZoneVector<Hints>;
-
enum class SerializerForBackgroundCompilationFlag : uint8_t {
kBailoutOnUninitialized = 1 << 0,
kCollectSourcePositions = 1 << 1,
- kOsr = 1 << 2,
+ kAnalyzeEnvironmentLiveness = 1 << 2,
};
using SerializerForBackgroundCompilationFlags =
base::Flags<SerializerForBackgroundCompilationFlag>;
-// The SerializerForBackgroundCompilation makes sure that the relevant function
-// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
-// optimizations in the compiler, is copied to the heap broker.
-class SerializerForBackgroundCompilation {
- public:
- SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags);
- Hints Run(); // NOTE: Returns empty for an already-serialized function.
-
- class Environment;
-
- private:
- SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments,
- SerializerForBackgroundCompilationFlags flags);
-
- bool BailoutOnUninitialized(FeedbackSlot slot);
-
- void TraverseBytecode();
-
-#define DECLARE_VISIT_BYTECODE(name, ...) \
- void Visit##name(interpreter::BytecodeArrayIterator* iterator);
- SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
-#undef DECLARE_VISIT_BYTECODE
-
- void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
- const HintsVector& arguments, FeedbackSlot slot,
- bool with_spread = false);
- void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
- ConvertReceiverMode receiver_mode,
- bool with_spread = false);
-
- void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
- void MergeAfterJump(interpreter::BytecodeArrayIterator* iterator);
-
- void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
- FeedbackSlot slot, AccessMode mode);
- void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator,
- AccessMode mode);
- void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
- FeedbackSlot slot, AccessMode mode);
-
- GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot);
- NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess(
- const MapHandles& maps, AccessMode mode, NameRef const& name);
- ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
- const MapHandles& maps, AccessMode mode);
- void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode,
- base::Optional<NameRef> static_name);
- void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name);
-
- Hints RunChildSerializer(CompilationSubject function,
- base::Optional<Hints> new_target,
- const HintsVector& arguments, bool with_spread);
-
- JSHeapBroker* broker() const { return broker_; }
- CompilationDependencies* dependencies() const { return dependencies_; }
- Zone* zone() const { return zone_; }
- Environment* environment() const { return environment_; }
- SerializerForBackgroundCompilationFlags flags() const { return flags_; }
-
- JSHeapBroker* const broker_;
- CompilationDependencies* const dependencies_;
- Zone* const zone_;
- Environment* const environment_;
- ZoneUnorderedMap<int, Environment*> stashed_environments_;
- SerializerForBackgroundCompilationFlags const flags_;
-};
+void RunSerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index cab398c160..6deba2b002 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -16,6 +16,7 @@ namespace internal {
namespace compiler {
namespace {
+static const int kNumLanes64 = 2;
static const int kNumLanes32 = 4;
static const int kNumLanes16 = 8;
static const int kNumLanes8 = 16;
@@ -76,6 +77,8 @@ void SimdScalarLowering::LowerGraph() {
}
}
+#define FOREACH_INT64X2_OPCODE(V) V(I64x2Splat)
+
#define FOREACH_INT32X4_OPCODE(V) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
@@ -119,6 +122,8 @@ void SimdScalarLowering::LowerGraph() {
V(S1x16AnyTrue) \
V(S1x16AllTrue)
+#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat)
+
#define FOREACH_FLOAT32X4_OPCODE(V) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
@@ -208,8 +213,12 @@ void SimdScalarLowering::LowerGraph() {
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
switch (simdType) {
+ case SimdType::kFloat64x2:
+ return MachineType::Float64();
case SimdType::kFloat32x4:
return MachineType::Float32();
+ case SimdType::kInt64x2:
+ return MachineType::Int64();
case SimdType::kInt32x4:
return MachineType::Int32();
case SimdType::kInt16x8:
@@ -223,6 +232,14 @@ MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
#define CASE_STMT(name) case IrOpcode::k##name:
+ FOREACH_FLOAT64X2_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kFloat64x2;
+ break;
+ }
+ FOREACH_INT64X2_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt64x2;
+ break;
+ }
FOREACH_INT32X4_OPCODE(CASE_STMT)
case IrOpcode::kReturn:
case IrOpcode::kParameter:
@@ -326,7 +343,9 @@ static int GetReturnCountAfterLoweringSimd128(
int SimdScalarLowering::NumLanes(SimdType type) {
int num_lanes = 0;
- if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
+ if (type == SimdType::kFloat64x2 || type == SimdType::kInt64x2) {
+ num_lanes = kNumLanes64;
+ } else if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
num_lanes = kNumLanes32;
} else if (type == SimdType::kInt16x8) {
num_lanes = kNumLanes16;
@@ -1198,7 +1217,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
F32X4_UNOP_CASE(Abs)
F32X4_UNOP_CASE(Neg)
-#undef F32x4_UNOP_CASE
+#undef F32X4_UNOP_CASE
case IrOpcode::kF32x4RecipApprox:
case IrOpcode::kF32x4RecipSqrtApprox: {
DCHECK_EQ(1, node->InputCount());
@@ -1223,8 +1242,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
break;
}
- case IrOpcode::kI32x4Splat:
+ case IrOpcode::kF64x2Splat:
case IrOpcode::kF32x4Splat:
+ case IrOpcode::kI64x2Splat:
+ case IrOpcode::kI32x4Splat:
case IrOpcode::kI16x8Splat:
case IrOpcode::kI8x16Splat: {
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
@@ -1347,7 +1368,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
case IrOpcode::kS8x16Shuffle: {
DCHECK_EQ(2, node->InputCount());
- const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
+ const uint8_t* shuffle = S8x16ShuffleOf(node->op());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_node = zone()->NewArray<Node*>(16);
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 01ea195bdc..76723fcc77 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -32,7 +32,14 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
- enum class SimdType : uint8_t { kFloat32x4, kInt32x4, kInt16x8, kInt8x16 };
+ enum class SimdType : uint8_t {
+ kFloat64x2,
+ kFloat32x4,
+ kInt64x2,
+ kInt32x4,
+ kInt16x8,
+ kInt8x16
+ };
#if defined(V8_TARGET_BIG_ENDIAN)
static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 8bc0e7af7b..b028a76bb0 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -22,8 +23,8 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/numbers/conversions-inl.h"
-#include "src/utils/address-map.h"
#include "src/objects/objects.h"
+#include "src/utils/address-map.h"
namespace v8 {
namespace internal {
@@ -279,7 +280,8 @@ class RepresentationSelector {
RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
RepresentationChanger* changer,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins)
+ NodeOriginTable* node_origins,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
zone_(zone),
count_(jsgraph->graph()->NodeCount()),
@@ -296,7 +298,8 @@ class RepresentationSelector {
source_positions_(source_positions),
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
- op_typer_(broker, graph_zone()) {
+ op_typer_(broker, graph_zone()),
+ tick_counter_(tick_counter) {
}
// Forward propagation of types from type feedback.
@@ -444,6 +447,7 @@ class RepresentationSelector {
break; \
}
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(Name) \
@@ -747,21 +751,32 @@ class RepresentationSelector {
!GetUpperBound(node->InputAt(1)).Maybe(type);
}
+ void ChangeToDeadValue(Node* node, Node* effect, Node* control) {
+ DCHECK(TypeOf(node).IsNone());
+ // If the node is unreachable, insert an Unreachable node and mark the
+ // value dead.
+ // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
+ // InsertUnreachableIfNecessary.
+ Node* unreachable = effect =
+ graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control);
+ const Operator* dead_value =
+ jsgraph_->common()->DeadValue(GetInfo(node)->representation());
+ node->ReplaceInput(0, unreachable);
+ node->TrimInputCount(dead_value->ValueInputCount());
+ ReplaceEffectControlUses(node, effect, control);
+ NodeProperties::ChangeOp(node, dead_value);
+ }
+
void ChangeToPureOp(Node* node, const Operator* new_op) {
DCHECK(new_op->HasProperty(Operator::kPure));
+ DCHECK_EQ(new_op->ValueInputCount(), node->op()->ValueInputCount());
if (node->op()->EffectInputCount() > 0) {
DCHECK_LT(0, node->op()->ControlInputCount());
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
if (TypeOf(node).IsNone()) {
- // If the node is unreachable, insert an Unreachable node and mark the
- // value dead.
- // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
- // InsertUnreachableIfNecessary.
- Node* unreachable = effect = graph()->NewNode(
- jsgraph_->common()->Unreachable(), effect, control);
- new_op = jsgraph_->common()->DeadValue(GetInfo(node)->representation());
- node->ReplaceInput(0, unreachable);
+ ChangeToDeadValue(node, effect, control);
+ return;
}
// Rewire the effect and control chains.
node->TrimInputCount(new_op->ValueInputCount());
@@ -772,6 +787,30 @@ class RepresentationSelector {
NodeProperties::ChangeOp(node, new_op);
}
+ void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op,
+ int new_input_index, Node* new_input) {
+ DCHECK(new_op->HasProperty(Operator::kPure));
+ DCHECK_EQ(new_op->ValueInputCount(), 2);
+ DCHECK_EQ(node->op()->ValueInputCount(), 1);
+ DCHECK_LE(0, new_input_index);
+ DCHECK_LE(new_input_index, 1);
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ if (TypeOf(node).IsNone()) {
+ ChangeToDeadValue(node, effect, control);
+ return;
+ }
+ node->TrimInputCount(node->op()->ValueInputCount());
+ ReplaceEffectControlUses(node, effect, control);
+ } else {
+ DCHECK_EQ(0, node->op()->ControlInputCount());
+ }
+ node->InsertInput(jsgraph_->zone(), new_input_index, new_input);
+ NodeProperties::ChangeOp(node, new_op);
+ }
+
// Converts input {index} of {node} according to given UseInfo {use},
// assuming the type of the input is {input_type}. If {input_type} is null,
// it takes the input from the input node {TypeOf(node->InputAt(index))}.
@@ -804,6 +843,10 @@ class RepresentationSelector {
}
void ProcessInput(Node* node, int index, UseInfo use) {
+ DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
+ !node->op()->HasProperty(Operator::kNoDeopt) &&
+ node->op()->EffectInputCount() > 0);
+
switch (phase_) {
case PROPAGATE:
EnqueueInput(node, index, use);
@@ -958,7 +1001,8 @@ class RepresentationSelector {
return MachineRepresentation::kWord32;
} else if (type.Is(Type::Boolean())) {
return MachineRepresentation::kBit;
- } else if (type.Is(Type::NumberOrOddball()) && use.IsUsedAsFloat64()) {
+ } else if (type.Is(Type::NumberOrOddball()) &&
+ use.TruncatesOddballAndBigIntToNumber()) {
return MachineRepresentation::kFloat64;
} else if (type.Is(Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) {
// TODO(turbofan): For Phis that return either NaN or some Smi, it's
@@ -968,6 +1012,8 @@ class RepresentationSelector {
return MachineRepresentation::kTagged;
} else if (type.Is(Type::Number())) {
return MachineRepresentation::kFloat64;
+ } else if (type.Is(Type::BigInt()) && use.IsUsedAsWord64()) {
+ return MachineRepresentation::kWord64;
} else if (type.Is(Type::ExternalPointer())) {
return MachineType::PointerRepresentation();
}
@@ -1109,8 +1155,11 @@ class RepresentationSelector {
if (IsAnyCompressed(rep)) {
return MachineType::AnyCompressed();
}
- // Word64 representation is only valid for safe integer values.
if (rep == MachineRepresentation::kWord64) {
+ if (type.Is(Type::BigInt())) {
+ return MachineType::AnyTagged();
+ }
+
DCHECK(type.Is(TypeCache::Get()->kSafeInteger));
return MachineType(rep, MachineSemantic::kInt64);
}
@@ -1126,7 +1175,17 @@ class RepresentationSelector {
void VisitStateValues(Node* node) {
if (propagate()) {
for (int i = 0; i < node->InputCount(); i++) {
- EnqueueInput(node, i, UseInfo::Any());
+ // When lowering 64 bit BigInts to Word64 representation, we have to
+ // make sure they are rematerialized before deoptimization. By
+ // propagating a AnyTagged use, the RepresentationChanger is going to
+ // insert the necessary conversions.
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) {
+ EnqueueInput(node, i, UseInfo::AnyTagged());
+ } else {
+ EnqueueInput(node, i, UseInfo::Any());
+ }
}
} else if (lower()) {
Zone* zone = jsgraph_->zone();
@@ -1135,6 +1194,12 @@ class RepresentationSelector {
ZoneVector<MachineType>(node->InputCount(), zone);
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(input).Is(Type::BigInt())) {
+ ProcessInput(node, i, UseInfo::AnyTagged());
+ }
+
(*types)[i] =
DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
}
@@ -1621,6 +1686,8 @@ class RepresentationSelector {
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
+ tick_counter_->DoTick();
+
// Unconditionally eliminate unused pure nodes (only relevant if there's
// a pure operation in between two effectful ones, where the last one
// is unused).
@@ -1715,13 +1782,15 @@ class RepresentationSelector {
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric: {
+ DCHECK(NodeProperties::GetType(node).Is(Type::Union(
+ Type::BigInt(), Type::NumberOrOddball(), graph()->zone())));
VisitInputs(node);
// TODO(bmeurer): Optimize somewhat based on input type?
if (truncation.IsUsedAsWord32()) {
SetOutput(node, MachineRepresentation::kWord32);
if (lower())
lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this);
- } else if (truncation.IsUsedAsFloat64()) {
+ } else if (truncation.TruncatesOddballAndBigIntToNumber()) {
SetOutput(node, MachineRepresentation::kFloat64);
if (lower())
lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this);
@@ -2461,6 +2530,20 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kCheckBigInt: {
+ if (InputIs(node, Type::BigInt())) {
+ VisitNoop(node, truncation);
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
+ case IrOpcode::kBigIntAsUintN: {
+ ProcessInput(node, 0, UseInfo::TruncatingWord64());
+ SetOutput(node, MachineRepresentation::kWord64, Type::BigInt());
+ return;
+ }
case IrOpcode::kNumberAcos:
case IrOpcode::kNumberAcosh:
case IrOpcode::kNumberAsin:
@@ -2621,6 +2704,43 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kSpeculativeBigIntAdd: {
+ if (truncation.IsUsedAsWord64()) {
+ VisitBinop(node,
+ UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}),
+ MachineRepresentation::kWord64);
+ if (lower()) {
+ ChangeToPureOp(node, lowering->machine()->Int64Add());
+ }
+ } else {
+ VisitBinop(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) {
+ NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd());
+ }
+ }
+ return;
+ }
+ case IrOpcode::kSpeculativeBigIntNegate: {
+ if (truncation.IsUsedAsWord64()) {
+ VisitUnop(node,
+ UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}),
+ MachineRepresentation::kWord64);
+ if (lower()) {
+ ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0,
+ jsgraph_->Int64Constant(0));
+ }
+ } else {
+ VisitUnop(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) {
+ ChangeToPureOp(node, lowering->simplified()->BigIntNegate());
+ }
+ }
+ return;
+ }
case IrOpcode::kStringConcat: {
// TODO(turbofan): We currently depend on having this first length input
// to make sure that the overflow check is properly scheduled before the
@@ -2657,6 +2777,10 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kStringFromCodePointAt: {
+ return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(),
+ MachineRepresentation::kTaggedPointer);
+ }
case IrOpcode::kStringIndexOf: {
ProcessInput(node, 0, UseInfo::AnyTagged());
ProcessInput(node, 1, UseInfo::AnyTagged());
@@ -2983,7 +3107,7 @@ class RepresentationSelector {
simplified()->PlainPrimitiveToWord32());
}
}
- } else if (truncation.IsUsedAsFloat64()) {
+ } else if (truncation.TruncatesOddballAndBigIntToNumber()) {
if (InputIs(node, Type::NumberOrOddball())) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
@@ -3236,7 +3360,7 @@ class RepresentationSelector {
// identifies NaN and undefined, we can just pass along
// the {truncation} and completely wipe the {node}.
if (truncation.IsUnused()) return VisitUnused(node);
- if (truncation.IsUsedAsFloat64()) {
+ if (truncation.TruncatesOddballAndBigIntToNumber()) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -3263,7 +3387,7 @@ class RepresentationSelector {
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else if (InputIs(node, Type::NumberOrOddball()) &&
- truncation.IsUsedAsFloat64()) {
+ truncation.TruncatesOddballAndBigIntToNumber()) {
// Propagate the Float64 truncation.
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
@@ -3431,6 +3555,9 @@ class RepresentationSelector {
return SetOutput(node, MachineRepresentation::kNone);
case IrOpcode::kStaticAssert:
return VisitUnop(node, UseInfo::Any(), MachineRepresentation::kTagged);
+ case IrOpcode::kAssertType:
+ return VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
default:
FATAL(
"Representation inference: unsupported opcode %i (%s), node #%i\n.",
@@ -3534,6 +3661,7 @@ class RepresentationSelector {
NodeOriginTable* node_origins_;
TypeCache const* type_cache_;
OperationTyper op_typer_; // helper for the feedback typer
+ TickCounter* const tick_counter_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() < count_);
@@ -3547,19 +3675,22 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
Zone* zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level)
+ PoisoningMitigationLevel poisoning_level,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
node_origins_(node_origins),
- poisoning_level_(poisoning_level) {}
+ poisoning_level_(poisoning_level),
+ tick_counter_(tick_counter) {}
void SimplifiedLowering::LowerAllNodes() {
- RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
+ RepresentationChanger changer(jsgraph(), broker_);
RepresentationSelector selector(jsgraph(), broker_, zone_, &changer,
- source_positions_, node_origins_);
+ source_positions_, node_origins_,
+ tick_counter_);
selector.Run(this);
}
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index e434af9d4f..414e3588d7 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -12,6 +12,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -26,7 +29,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level);
+ PoisoningMitigationLevel poisoning_level,
+ TickCounter* tick_counter);
~SimplifiedLowering() = default;
void LowerAllNodes();
@@ -67,6 +71,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
PoisoningMitigationLevel poisoning_level_;
+ TickCounter* const tick_counter_;
+
Node* Float64Round(Node* const node);
Node* Float64Sign(Node* const node);
Node* Int32Abs(Node* const node);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index ed3cfa8617..4f83635422 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -492,6 +492,18 @@ Handle<Map> FastMapParameterOf(const Operator* op) {
return Handle<Map>::null();
}
+std::ostream& operator<<(std::ostream& os, BigIntOperationHint hint) {
+ switch (hint) {
+ case BigIntOperationHint::kBigInt:
+ return os << "BigInt";
+ }
+ UNREACHABLE();
+}
+
+size_t hash_value(BigIntOperationHint hint) {
+ return static_cast<uint8_t>(hint);
+}
+
std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
@@ -585,12 +597,6 @@ Type AllocateTypeOf(const Operator* op) {
return AllocateParametersOf(op).type();
}
-UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kStringFromSingleCodePoint ||
- op->opcode() == IrOpcode::kStringCodePointAt);
- return OpParameter<UnicodeEncoding>(op);
-}
-
AbortReason AbortReasonOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode());
return static_cast<AbortReason>(OpParameter<int>(op));
@@ -702,9 +708,11 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(BigIntNegate, Operator::kNoProperties, 1, 0) \
V(StringConcat, Operator::kNoProperties, 3, 0) \
V(StringToNumber, Operator::kNoProperties, 1, 0) \
V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
@@ -713,6 +721,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeCompressedSignedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
@@ -723,6 +732,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(ChangeCompressedToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToCompressedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToCompressedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
@@ -730,6 +740,8 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
@@ -769,9 +781,12 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NewConsString, Operator::kNoProperties, 3, 0) \
V(PoisonIndex, Operator::kNoProperties, 1, 0)
-#define EFFECT_DEPENDENT_OP_LIST(V) \
- V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
- V(StringSubstring, Operator::kNoProperties, 3, 1) \
+#define EFFECT_DEPENDENT_OP_LIST(V) \
+ V(BigIntAdd, Operator::kNoProperties, 2, 1) \
+ V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(StringFromCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(StringSubstring, Operator::kNoProperties, 3, 1) \
V(DateNow, Operator::kNoProperties, 0, 1)
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
@@ -801,6 +816,8 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckNumber, 1, 1) \
V(CheckSmi, 1, 1) \
V(CheckString, 1, 1) \
+ V(CheckBigInt, 1, 1) \
+ V(CheckedInt32ToCompressedSigned, 1, 1) \
V(CheckedInt32ToTaggedSigned, 1, 1) \
V(CheckedInt64ToInt32, 1, 1) \
V(CheckedInt64ToTaggedSigned, 1, 1) \
@@ -895,32 +912,6 @@ struct SimplifiedOperatorGlobalCache final {
DEOPTIMIZE_REASON_LIST(CHECK_IF)
#undef CHECK_IF
- template <UnicodeEncoding kEncoding>
- struct StringCodePointAtOperator final : public Operator1<UnicodeEncoding> {
- StringCodePointAtOperator()
- : Operator1<UnicodeEncoding>(IrOpcode::kStringCodePointAt,
- Operator::kFoldable | Operator::kNoThrow,
- "StringCodePointAt", 2, 1, 1, 1, 1, 0,
- kEncoding) {}
- };
- StringCodePointAtOperator<UnicodeEncoding::UTF16>
- kStringCodePointAtOperatorUTF16;
- StringCodePointAtOperator<UnicodeEncoding::UTF32>
- kStringCodePointAtOperatorUTF32;
-
- template <UnicodeEncoding kEncoding>
- struct StringFromSingleCodePointOperator final
- : public Operator1<UnicodeEncoding> {
- StringFromSingleCodePointOperator()
- : Operator1<UnicodeEncoding>(
- IrOpcode::kStringFromSingleCodePoint, Operator::kPure,
- "StringFromSingleCodePoint", 1, 0, 0, 1, 0, 0, kEncoding) {}
- };
- StringFromSingleCodePointOperator<UnicodeEncoding::UTF16>
- kStringFromSingleCodePointOperatorUTF16;
- StringFromSingleCodePointOperator<UnicodeEncoding::UTF32>
- kStringFromSingleCodePointOperatorUTF32;
-
struct FindOrderedHashMapEntryOperator final : public Operator {
FindOrderedHashMapEntryOperator()
: Operator(IrOpcode::kFindOrderedHashMapEntry, Operator::kEliminatable,
@@ -1236,6 +1227,20 @@ const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) {
static_cast<int>(reason)); // parameter
}
+const Operator* SimplifiedOperatorBuilder::BigIntAsUintN(int bits) {
+ CHECK(0 <= bits && bits <= 64);
+
+ return new (zone()) Operator1<int>(IrOpcode::kBigIntAsUintN, Operator::kPure,
+ "BigIntAsUintN", 1, 0, 0, 1, 0, 0, bits);
+}
+
+const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
+ DCHECK(type.IsRange());
+ return new (zone()) Operator1<Type>(IrOpcode::kAssertType,
+ Operator::kNoThrow | Operator::kNoDeopt,
+ "AssertType", 1, 0, 0, 1, 0, 0, type);
+}
+
const Operator* SimplifiedOperatorBuilder::CheckIf(
DeoptimizeReason reason, const VectorSlotPair& feedback) {
if (!feedback.IsValid()) {
@@ -1433,6 +1438,21 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
CheckFloat64HoleParameters(mode, feedback));
}
+const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd(
+ BigIntOperationHint hint) {
+ return new (zone()) Operator1<BigIntOperationHint>(
+ IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable | Operator::kNoThrow,
+ "SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint);
+}
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
+ BigIntOperationHint hint) {
+ return new (zone()) Operator1<BigIntOperationHint>(
+ IrOpcode::kSpeculativeBigIntNegate,
+ Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntNegate", 1, 1,
+ 1, 1, 1, 0, hint);
+}
+
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
NumberOperationHint hint, const VectorSlotPair& feedback) {
if (!feedback.IsValid()) {
@@ -1655,28 +1675,6 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw(
AllocateParameters(type, allocation, allow_large_objects));
}
-const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
- UnicodeEncoding encoding) {
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- return &cache_.kStringCodePointAtOperatorUTF16;
- case UnicodeEncoding::UTF32:
- return &cache_.kStringCodePointAtOperatorUTF32;
- }
- UNREACHABLE();
-}
-
-const Operator* SimplifiedOperatorBuilder::StringFromSingleCodePoint(
- UnicodeEncoding encoding) {
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- return &cache_.kStringFromSingleCodePointOperatorUTF16;
- case UnicodeEncoding::UTF32:
- return &cache_.kStringFromSingleCodePointOperatorUTF32;
- }
- UNREACHABLE();
-}
-
#define SPECULATIVE_NUMBER_BINOP(Name) \
const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \
switch (hint) { \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index d93544c5cd..bdac796adf 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -475,10 +475,15 @@ enum class NumberOperationHint : uint8_t {
kNumberOrOddball, // Inputs were Number or Oddball, output was Number.
};
+enum class BigIntOperationHint : uint8_t {
+ kBigInt,
+};
+
size_t hash_value(NumberOperationHint);
+size_t hash_value(BigIntOperationHint);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
-
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BigIntOperationHint);
V8_EXPORT_PRIVATE NumberOperationHint NumberOperationHintOf(const Operator* op)
V8_WARN_UNUSED_RESULT;
@@ -634,6 +639,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberSilenceNaN();
+ const Operator* BigIntAdd();
+ const Operator* BigIntNegate();
+
const Operator* SpeculativeSafeIntegerAdd(NumberOperationHint hint);
const Operator* SpeculativeSafeIntegerSubtract(NumberOperationHint hint);
@@ -653,6 +661,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SpeculativeNumberLessThanOrEqual(NumberOperationHint hint);
const Operator* SpeculativeNumberEqual(NumberOperationHint hint);
+ const Operator* SpeculativeBigIntAdd(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntNegate(BigIntOperationHint hint);
+ const Operator* BigIntAsUintN(int bits);
+
const Operator* ReferenceEqual();
const Operator* SameValue();
const Operator* SameValueNumbersOnly();
@@ -666,9 +678,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
const Operator* StringCharCodeAt();
- const Operator* StringCodePointAt(UnicodeEncoding encoding);
+ const Operator* StringCodePointAt();
const Operator* StringFromSingleCharCode();
- const Operator* StringFromSingleCodePoint(UnicodeEncoding encoding);
+ const Operator* StringFromSingleCodePoint();
+ const Operator* StringFromCodePointAt();
const Operator* StringIndexOf();
const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
@@ -686,6 +699,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* PlainPrimitiveToWord32();
const Operator* PlainPrimitiveToFloat64();
+ const Operator* ChangeCompressedSignedToInt32();
const Operator* ChangeTaggedSignedToInt32();
const Operator* ChangeTaggedSignedToInt64();
const Operator* ChangeTaggedToInt32();
@@ -695,6 +709,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ChangeTaggedToTaggedSigned();
const Operator* ChangeCompressedToTaggedSigned();
const Operator* ChangeTaggedToCompressedSigned();
+ const Operator* ChangeInt31ToCompressedSigned();
const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeInt64ToTagged();
@@ -704,6 +719,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ChangeFloat64ToTaggedPointer();
const Operator* ChangeTaggedToBit();
const Operator* ChangeBitToTagged();
+ const Operator* TruncateBigIntToUint64();
+ const Operator* ChangeUint64ToBigInt();
const Operator* TruncateTaggedToWord32();
const Operator* TruncateTaggedToFloat64();
const Operator* TruncateTaggedToBit();
@@ -740,6 +757,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedInt32Mod();
const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
const Operator* CheckedInt32Sub();
+ const Operator* CheckedInt32ToCompressedSigned(
+ const VectorSlotPair& feedback);
const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* CheckedInt64ToInt32(const VectorSlotPair& feedback);
const Operator* CheckedInt64ToTaggedSigned(const VectorSlotPair& feedback);
@@ -752,6 +771,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckBigInt(const VectorSlotPair& feedback);
const Operator* CheckedCompressedToTaggedPointer(
const VectorSlotPair& feedback);
const Operator* CheckedCompressedToTaggedSigned(
@@ -874,6 +894,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// Abort (for terminating execution on internal error).
const Operator* RuntimeAbort(AbortReason reason);
+ // Abort if the value input does not inhabit the given type
+ const Operator* AssertType(Type type);
+
const Operator* DateNow();
private:
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index c00613c232..2bb5a0a4b5 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -329,9 +329,7 @@ void StateValuesAccess::iterator::Pop() {
current_depth_--;
}
-
-bool StateValuesAccess::iterator::done() { return current_depth_ < 0; }
-
+bool StateValuesAccess::iterator::done() const { return current_depth_ < 0; }
void StateValuesAccess::iterator::Advance() {
Top()->Advance();
@@ -392,14 +390,12 @@ MachineType StateValuesAccess::iterator::type() {
}
}
-
-bool StateValuesAccess::iterator::operator!=(iterator& other) {
+bool StateValuesAccess::iterator::operator!=(iterator const& other) {
// We only allow comparison with end().
CHECK(other.done());
return !done();
}
-
StateValuesAccess::iterator& StateValuesAccess::iterator::operator++() {
Advance();
return *this;
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 00ec3bb351..0ff5d218f1 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
class V8_EXPORT_PRIVATE iterator {
public:
// Bare minimum of operators needed for range iteration.
- bool operator!=(iterator& other);
+ bool operator!=(iterator const& other);
iterator& operator++();
TypedNode operator*();
@@ -104,7 +104,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
Node* node();
MachineType type();
- bool done();
+ bool done() const;
void Advance();
void EnsureValid();
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 13d8199745..b71bcd7e66 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -6,6 +6,7 @@
#include "src/compiler/store-store-elimination.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
@@ -129,7 +130,8 @@ namespace {
class RedundantStoreFinder final {
public:
- RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone);
+ RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone);
void Find();
@@ -157,6 +159,7 @@ class RedundantStoreFinder final {
ZoneSet<Node*>& to_remove() { return to_remove_; }
JSGraph* const jsgraph_;
+ TickCounter* const tick_counter_;
Zone* const temp_zone_;
ZoneStack<Node*> revisit_;
@@ -199,6 +202,7 @@ void RedundantStoreFinder::Find() {
Visit(jsgraph()->graph()->end());
while (!revisit_.empty()) {
+ tick_counter_->DoTick();
Node* next = revisit_.top();
revisit_.pop();
DCHECK_LT(next->id(), in_revisit_.size());
@@ -230,9 +234,10 @@ bool RedundantStoreFinder::HasBeenVisited(Node* node) {
return !unobservable_for_id(node->id()).IsUnvisited();
}
-void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) {
+void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone) {
// Find superfluous nodes
- RedundantStoreFinder finder(js_graph, temp_zone);
+ RedundantStoreFinder finder(js_graph, tick_counter, temp_zone);
finder.Find();
// Remove superfluous nodes
@@ -336,8 +341,11 @@ bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
}
// Initialize unobservable_ with js_graph->graph->NodeCount() empty sets.
-RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone)
+RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph,
+ TickCounter* tick_counter,
+ Zone* temp_zone)
: jsgraph_(js_graph),
+ tick_counter_(tick_counter),
temp_zone_(temp_zone),
revisit_(temp_zone),
in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h
index cda7591fcc..646640a310 100644
--- a/deps/v8/src/compiler/store-store-elimination.h
+++ b/deps/v8/src/compiler/store-store-elimination.h
@@ -11,11 +11,15 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
class StoreStoreElimination final {
public:
- static void Run(JSGraph* js_graph, Zone* temp_zone);
+ static void Run(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 4cf2c38bdb..5dbbad3dcd 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/flags.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
@@ -33,13 +34,15 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph)
+Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph,
+ TickCounter* tick_counter)
: flags_(flags),
graph_(graph),
decorator_(nullptr),
cache_(TypeCache::Get()),
broker_(broker),
- operation_typer_(broker, zone()) {
+ operation_typer_(broker, zone()),
+ tick_counter_(tick_counter) {
singleton_false_ = operation_typer_.singleton_false();
singleton_true_ = operation_typer_.singleton_true();
@@ -47,7 +50,6 @@ Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph)
graph_->AddDecorator(decorator_);
}
-
Typer::~Typer() {
graph_->RemoveDecorator(decorator_);
}
@@ -91,14 +93,18 @@ class Typer::Visitor : public Reducer {
case IrOpcode::k##x: \
return UpdateType(node, TypeBinaryOp(node, x));
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
return UpdateType(node, TypeUnaryOp(node, x));
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
@@ -157,14 +163,18 @@ class Typer::Visitor : public Reducer {
case IrOpcode::k##x: \
return TypeBinaryOp(node, x);
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
return TypeUnaryOp(node, x);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
@@ -276,14 +286,18 @@ class Typer::Visitor : public Reducer {
return t->operation_typer_.Name(type); \
}
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
#define DECLARE_METHOD(Name) \
static Type Name(Type lhs, Type rhs, Typer* t) { \
return t->operation_typer_.Name(lhs, rhs); \
}
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
static Type ObjectIsArrayBufferView(Type, Typer*);
@@ -410,7 +424,7 @@ void Typer::Run(const NodeVector& roots,
induction_vars->ChangeToInductionVariablePhis();
}
Visitor visitor(this, induction_vars);
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter_);
graph_reducer.AddReducer(&visitor);
for (Node* const root : roots) graph_reducer.ReduceNode(root);
graph_reducer.ReduceGraph();
@@ -798,6 +812,8 @@ Type Typer::Visitor::TypeHeapConstant(Node* node) {
return TypeConstant(HeapConstantOf(node->op()));
}
+Type Typer::Visitor::TypeCompressedHeapConstant(Node* node) { UNREACHABLE(); }
+
Type Typer::Visitor::TypeExternalConstant(Node* node) {
return Type::ExternalPointer();
}
@@ -2060,6 +2076,10 @@ Type Typer::Visitor::TypeStringFromSingleCodePoint(Node* node) {
return TypeUnaryOp(node, StringFromSingleCodePointTyper);
}
+Type Typer::Visitor::TypeStringFromCodePointAt(Node* node) {
+ return Type::String();
+}
+
Type Typer::Visitor::TypeStringIndexOf(Node* node) {
return Type::Range(-1.0, String::kMaxLength, zone());
}
@@ -2336,6 +2356,8 @@ Type Typer::Visitor::TypeFindOrderedHashMapEntryForInt32Key(Node* node) {
Type Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeAssertType(Node* node) { UNREACHABLE(); }
+
// Heap constants.
Type Typer::Visitor::TypeConstant(Handle<Object> value) {
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index fa87d81f1e..305470d724 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -11,6 +11,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -25,7 +28,8 @@ class V8_EXPORT_PRIVATE Typer {
};
using Flags = base::Flags<Flag>;
- Typer(JSHeapBroker* broker, Flags flags, Graph* graph);
+ Typer(JSHeapBroker* broker, Flags flags, Graph* graph,
+ TickCounter* tick_counter);
~Typer();
void Run();
@@ -49,6 +53,7 @@ class V8_EXPORT_PRIVATE Typer {
TypeCache const* cache_;
JSHeapBroker* broker_;
OperationTyper operation_typer_;
+ TickCounter* const tick_counter_;
Type singleton_false_;
Type singleton_true_;
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index edf07a4ffd..d4267a75fe 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -6,9 +6,10 @@
#include "src/compiler/types.h"
-#include "src/utils/ostreams.h"
#include "src/handles/handles-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -202,7 +203,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
return kOtherObject;
case JS_ARRAY_TYPE:
return kArray;
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
@@ -312,8 +313,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
- case MODULE_TYPE:
- case MODULE_INFO_ENTRY_TYPE:
+ case SOURCE_TEXT_MODULE_TYPE:
+ case SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE:
+ case SYNTHETIC_MODULE_TYPE:
case CELL_TYPE:
case PREPARSE_DATA_TYPE:
case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
@@ -349,6 +351,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case ENUM_CACHE_TYPE:
case SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE:
case WASM_CAPI_FUNCTION_DATA_TYPE:
+ case WASM_INDIRECT_FUNCTION_TABLE_TYPE:
case WASM_DEBUG_INFO_TYPE:
case WASM_EXCEPTION_TAG_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
@@ -363,6 +366,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
case FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE:
+#define MAKE_TORQUE_CLASS_TYPE(V) case V:
+ TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
+#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
UNREACHABLE();
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 21aaab5036..0dc1aa77b0 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/common/globals.h"
-#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/heap-refs.h"
#include "src/handles/handles.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects.h"
@@ -220,6 +220,7 @@ namespace compiler {
INTERNAL_BITSET_TYPE_LIST(V) \
PROPER_BITSET_TYPE_LIST(V)
+class JSHeapBroker;
class HeapConstantType;
class OtherNumberConstantType;
class TupleType;
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 3f1b2e9f13..d3d4d54ea2 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -431,6 +431,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kHeapConstant:
+ case IrOpcode::kCompressedHeapConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
// Type is anything.
@@ -933,7 +934,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kComment:
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
case IrOpcode::kDebugBreak:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
@@ -975,6 +976,25 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kSpeculativeBigIntAdd:
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kSpeculativeBigIntNegate:
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntAsUintN:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntAdd:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckValueInputIs(node, 1, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntNegate:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract:
case IrOpcode::kNumberMultiply:
@@ -1156,6 +1176,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::String());
break;
+ case IrOpcode::kStringFromCodePointAt:
+ // (String, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kStringIndexOf:
// (String, String, SignedSmall) -> SignedSmall
CheckValueInputIs(node, 0, Type::String());
@@ -1306,6 +1332,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckNotTyped(node);
break;
+ case IrOpcode::kChangeCompressedSignedToInt32:
case IrOpcode::kChangeTaggedSignedToInt32: {
// Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1360,6 +1387,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeInt31ToCompressedSigned:
case IrOpcode::kChangeInt31ToTaggedSigned: {
// Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1429,6 +1457,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kTruncateBigIntToUint64:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kChangeUint64ToBigInt:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kTruncateTaggedToBit:
case IrOpcode::kTruncateTaggedPointerToBit:
break;
@@ -1498,6 +1534,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedUint32Div:
case IrOpcode::kCheckedUint32Mod:
case IrOpcode::kCheckedInt32Mul:
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedInt64ToInt32:
case IrOpcode::kCheckedInt64ToTaggedSigned:
@@ -1520,6 +1557,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedTaggedToCompressedSigned:
case IrOpcode::kCheckedTaggedToCompressedPointer:
case IrOpcode::kCheckedTruncateTaggedToWord32:
+ case IrOpcode::kAssertType:
break;
case IrOpcode::kCheckFloat64Hole:
@@ -1619,6 +1657,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK_EQ(0, value_count);
CheckTypeIs(node, Type::Number());
break;
+ case IrOpcode::kCheckBigInt:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::BigInt());
+ break;
// Machine operators
// -----------------------
@@ -1755,6 +1797,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kChangeInt32ToInt64:
@@ -1800,6 +1843,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
+ case IrOpcode::kMemoryBarrier:
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicExchange:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 3396214e58..2da7177ece 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -14,6 +14,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/assembler.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/compiler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
@@ -276,8 +277,9 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
}
Node* WasmGraphBuilder::RefNull() {
- return LOAD_INSTANCE_FIELD(NullValue,
- MachineType::TypeCompressedTaggedPointer());
+ Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ return LOAD_TAGGED_POINTER(
+ isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue));
}
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
@@ -2195,8 +2197,8 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
@@ -2334,8 +2336,8 @@ Node** WasmGraphBuilder::GetExceptionValues(
BuildDecodeException32BitValue(values_array, &index));
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
@@ -2853,25 +2855,69 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
Node** args, Node*** rets,
wasm::WasmCodePosition position) {
- if (table_index == 0) {
- return BuildIndirectCall(sig_index, args, rets, position, kCallContinues);
- }
return BuildIndirectCall(table_index, sig_index, args, rets, position,
kCallContinues);
}
-Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
+void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
+ Node** ift_size,
+ Node** ift_sig_ids,
+ Node** ift_targets,
+ Node** ift_instances) {
+ if (table_index == 0) {
+ *ift_size =
+ LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
+ *ift_sig_ids = LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds,
+ MachineType::Pointer());
+ *ift_targets = LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets,
+ MachineType::Pointer());
+ *ift_instances = LOAD_INSTANCE_FIELD(
+ IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer());
+ return;
+ }
+
+ Node* ift_tables = LOAD_INSTANCE_FIELD(
+ IndirectFunctionTables, MachineType::TypeCompressedTaggedPointer());
+ Node* ift_table = LOAD_FIXED_ARRAY_SLOT_ANY(ift_tables, table_index);
+
+ *ift_size = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset),
+ MachineType::Int32());
+
+ *ift_sig_ids = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset),
+ MachineType::Pointer());
+
+ *ift_targets = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset),
+ MachineType::Pointer());
+
+ *ift_instances = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset),
+ MachineType::TypeCompressedTaggedPointer());
+}
+
+Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
+ uint32_t sig_index, Node** args,
Node*** rets,
wasm::WasmCodePosition position,
IsReturnCall continuation) {
DCHECK_NOT_NULL(args[0]);
DCHECK_NOT_NULL(env_);
- // Assume only one table for now.
- wasm::FunctionSig* sig = env_->module->signatures[sig_index];
+ // First we have to load the table.
+ Node* ift_size;
+ Node* ift_sig_ids;
+ Node* ift_targets;
+ Node* ift_instances;
+ LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets,
+ &ift_instances);
- Node* ift_size =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
+ wasm::FunctionSig* sig = env_->module->signatures[sig_index];
MachineOperatorBuilder* machine = mcgraph()->machine();
Node* key = args[0];
@@ -2894,9 +2940,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
}
// Load signature from the table and check.
- Node* ift_sig_ids =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer());
-
int32_t expected_sig_id = env_->module->signature_ids[sig_index];
Node* int32_scaled_key = Uint32ToUintptr(
graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
@@ -2909,11 +2952,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- Node* ift_targets =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer());
- Node* ift_instances = LOAD_INSTANCE_FIELD(
- IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer());
-
Node* tagged_scaled_key;
if (kTaggedSize == kInt32Size) {
tagged_scaled_key = int32_scaled_key;
@@ -2955,48 +2993,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
}
}
-Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
- uint32_t sig_index, Node** args,
- Node*** rets,
- wasm::WasmCodePosition position,
- IsReturnCall continuation) {
- DCHECK_NOT_NULL(args[0]);
- Node* entry_index = args[0];
- DCHECK_NOT_NULL(env_);
- BoundsCheckTable(table_index, entry_index, position, wasm::kTrapFuncInvalid,
- nullptr);
-
- DCHECK(Smi::IsValid(table_index));
- DCHECK(Smi::IsValid(sig_index));
- Node* runtime_args[]{
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
- BuildChangeUint31ToSmi(entry_index),
- graph()->NewNode(mcgraph()->common()->NumberConstant(sig_index))};
-
- Node* target_instance = BuildCallToRuntime(
- Runtime::kWasmIndirectCallCheckSignatureAndGetTargetInstance,
- runtime_args, arraysize(runtime_args));
-
- // We reuse the runtime_args array here, even though we only need the first
- // two arguments.
- Node* call_target = BuildCallToRuntime(
- Runtime::kWasmIndirectCallGetTargetAddress, runtime_args, 2);
-
- wasm::FunctionSig* sig = env_->module->signatures[sig_index];
- args[0] = call_target;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
-
- switch (continuation) {
- case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, target_instance,
- use_retpoline);
- case kReturnCall:
- return BuildWasmReturnCall(sig, args, position, target_instance,
- use_retpoline);
- }
-}
-
Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
@@ -3019,9 +3015,6 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
uint32_t sig_index, Node** args,
wasm::WasmCodePosition position) {
- if (table_index == 0) {
- return BuildIndirectCall(sig_index, args, nullptr, position, kReturnCall);
- }
return BuildIndirectCall(table_index, sig_index, args, nullptr, position,
kReturnCall);
}
@@ -3324,13 +3317,6 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
return result;
}
-Node* WasmGraphBuilder::BuildLoadBuiltinFromInstance(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
- return LOAD_TAGGED_POINTER(isolate_root,
- IsolateData::builtin_slot_offset(builtin_index));
-}
-
// Only call this function for code which is not reused across instantiations,
// as we do not patch the embedded js_context.
Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(
@@ -3492,7 +3478,7 @@ void WasmGraphBuilder::GetTableBaseAndOffset(uint32_t table_index,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
}
-Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
+Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position) {
if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) {
Node* base = nullptr;
@@ -3501,7 +3487,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
return LOAD_RAW_NODE_OFFSET(base, offset,
MachineType::TypeCompressedTagged());
}
- // We access anyfunc tables through runtime calls.
+ // We access funcref tables through runtime calls.
WasmTableGetDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
@@ -3521,7 +3507,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
Effect(), Control())));
}
-Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val,
+Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position) {
if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) {
Node* base = nullptr;
@@ -3530,7 +3516,7 @@ Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val,
return STORE_RAW_NODE_OFFSET(
base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
} else {
- // We access anyfunc tables through runtime calls.
+ // We access funcref tables through runtime calls.
WasmTableSetDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
@@ -4000,6 +3986,30 @@ Node* WasmGraphBuilder::S128Zero() {
Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF64x2Splat:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]);
+ case wasm::kExprF64x2Abs:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]);
+ case wasm::kExprF64x2Neg:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]);
+ case wasm::kExprF64x2Eq:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Eq(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Ne:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Lt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Le:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Gt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[1],
+ inputs[0]);
+ case wasm::kExprF64x2Ge:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1],
+ inputs[0]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4054,6 +4064,49 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Ge:
return graph()->NewNode(mcgraph()->machine()->F32x4Le(), inputs[1],
inputs[0]);
+ case wasm::kExprI64x2Splat:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
+ case wasm::kExprI64x2Neg:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]);
+ case wasm::kExprI64x2Add:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Add(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Sub:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Mul:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Eq:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Ne:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2LtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2LeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2GtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2GeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2LtU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2LeU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2GtU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2GeU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(mcgraph()->machine()->I32x4Splat(), inputs[0]);
case wasm::kExprI32x4SConvertF32x4:
@@ -4305,6 +4358,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128Select:
return graph()->NewNode(mcgraph()->machine()->S128Select(), inputs[2],
inputs[0], inputs[1]);
+ case wasm::kExprS1x2AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->S1x2AnyTrue(), inputs[0]);
+ case wasm::kExprS1x2AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->S1x2AllTrue(), inputs[0]);
case wasm::kExprS1x4AnyTrue:
return graph()->NewNode(mcgraph()->machine()->S1x4AnyTrue(), inputs[0]);
case wasm::kExprS1x4AllTrue:
@@ -4326,12 +4383,24 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF64x2ExtractLane:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprF64x2ReplaceLane:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprF32x4ExtractLane:
return graph()->NewNode(mcgraph()->machine()->F32x4ExtractLane(lane),
inputs[0]);
case wasm::kExprF32x4ReplaceLane:
return graph()->NewNode(mcgraph()->machine()->F32x4ReplaceLane(lane),
inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtractLane:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprI64x2ReplaceLane:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprI32x4ExtractLane:
return graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
inputs[0]);
@@ -4359,6 +4428,14 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprI64x2Shl:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Shl(shift), inputs[0]);
+ case wasm::kExprI64x2ShrS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(shift),
+ inputs[0]);
+ case wasm::kExprI64x2ShrU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(shift),
+ inputs[0]);
case wasm::kExprI32x4Shl:
return graph()->NewNode(mcgraph()->machine()->I32x4Shl(shift), inputs[0]);
case wasm::kExprI32x4ShrS:
@@ -4612,6 +4689,11 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
return SetEffect(node);
}
+Node* WasmGraphBuilder::AtomicFence() {
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->MemBarrier(),
+ Effect(), Control()));
+}
+
#undef ATOMIC_BINOP_LIST
#undef ATOMIC_CMP_EXCHG_LIST
#undef ATOMIC_LOAD_LIST
@@ -4636,8 +4718,19 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
- Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
auto m = mcgraph()->machine();
+ auto common = mcgraph()->common();
+ Node* size_null_check =
+ graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
+
+ Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
Node* seg_index = Uint32Constant(data_segment_index);
Node* src_fail;
@@ -4679,9 +4772,16 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds,
- graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
- position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds,
+ graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
@@ -4699,16 +4799,19 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
auto m = mcgraph()->machine();
- // The data must be copied backward if the regions overlap and src < dst. The
- // regions overlap if {src + size > dst && dst + size > src}. Since we already
- // test that {src < dst}, we know that {dst + size > src}, so this simplifies
- // to just {src + size > dst}. That sum can overflow, but if we subtract
- // {size} from both sides of the inequality we get the equivalent test
- // {size > dst - src}.
- Node* copy_backward = graph()->NewNode(
- m->Word32And(), graph()->NewNode(m->Uint32LessThan(), src, dst),
- graph()->NewNode(m->Uint32LessThan(),
- graph()->NewNode(m->Int32Sub(), dst, src), size));
+ auto common = mcgraph()->common();
+ // If size == 0, then memory.copy is a no-op.
+ Node* size_null_check =
+ graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
+ // The data must be copied backward if src < dst.
+ Node* copy_backward = graph()->NewNode(m->Uint32LessThan(), src, dst);
Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
@@ -4728,13 +4831,32 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds,
- graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
- position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds,
+ graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
wasm::WasmCodePosition position) {
+ auto machine = mcgraph()->machine();
+ auto common = mcgraph()->common();
+ // If size == 0, then memory.copy is a no-op.
+ Node* size_null_check = graph()->NewNode(machine->Word32Equal(), size,
+ mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
Node* fail = BoundsCheckMemRange(&dst, &size, position);
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_fill()));
@@ -4742,7 +4864,15 @@ Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, value, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped(
@@ -4789,13 +4919,13 @@ Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
mcgraph()->Int32Constant(1), Effect(), Control()));
}
-Node* WasmGraphBuilder::TableCopy(uint32_t table_src_index,
- uint32_t table_dst_index, Node* dst,
+Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
+ uint32_t table_src_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)),
graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)),
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)),
BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size),
BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size),
BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)};
@@ -4878,28 +5008,6 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
}
namespace {
-bool must_record_function_compilation(Isolate* isolate) {
- return isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling();
-}
-
-PRINTF_FORMAT(4, 5)
-void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
- Isolate* isolate, Handle<Code> code,
- const char* format, ...) {
- DCHECK(must_record_function_compilation(isolate));
-
- ScopedVector<char> buffer(128);
- va_list arguments;
- va_start(arguments, format);
- int len = VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
- PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *name_str));
-}
-
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
WasmWrapperGraphBuilder(Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* sig,
@@ -4914,12 +5022,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
- Node* target = (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
- ? mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmAllocateHeapNumber,
- RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(
- BUILTIN_CODE(isolate_, AllocateHeapNumber));
+ Node* target =
+ (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+ ? mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmAllocateHeapNumber,
+ RelocInfo::WASM_STUB_CALL)
+ : BuildLoadBuiltinFromInstance(Builtins::kAllocateHeapNumber);
if (!allocate_heap_number_operator_.is_set()) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0,
@@ -4956,6 +5064,34 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return mcgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
+ Node* BuildLoadUndefinedValueFromInstance() {
+ if (undefined_value_node_ == nullptr) {
+ Node* isolate_root = graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Pointer()),
+ instance_node_.get(),
+ mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(IsolateRoot)),
+ graph()->start(), graph()->start());
+ undefined_value_node_ = InsertDecompressionIfNeeded(
+ MachineType::TypeCompressedTaggedPointer(),
+ graph()->NewNode(
+ mcgraph()->machine()->Load(
+ MachineType::TypeCompressedTaggedPointer()),
+ isolate_root,
+ mcgraph()->Int32Constant(
+ IsolateData::root_slot_offset(RootIndex::kUndefinedValue)),
+ isolate_root, graph()->start()));
+ }
+ return undefined_value_node_.get();
+ }
+
+ Node* BuildLoadBuiltinFromInstance(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ Node* isolate_root =
+ LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ return LOAD_TAGGED_POINTER(isolate_root,
+ IsolateData::builtin_slot_offset(builtin_index));
+ }
+
Node* BuildChangeInt32ToTagged(Node* value) {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
@@ -5096,7 +5232,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, ToNumber));
+ : BuildLoadBuiltinFromInstance(Builtins::kToNumber);
Node* result = SetEffect(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code,
@@ -5126,8 +5262,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetControl(is_heap_object.if_true);
Node* orig_effect = Effect();
- Node* undefined_node = LOAD_INSTANCE_FIELD(
- UndefinedValue, MachineType::TypeCompressedTaggedPointer());
+ Node* undefined_node = BuildLoadUndefinedValueFromInstance();
Node* check_undefined =
graph()->NewNode(machine->WordEqual(), value, undefined_node);
Node* effect_tagged = Effect();
@@ -5173,8 +5308,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kWasmF64:
return BuildChangeFloat64ToTagged(node);
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
return node;
default:
UNREACHABLE();
@@ -5196,7 +5331,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, I64ToBigInt));
+ : BuildLoadBuiltinFromInstance(Builtins::kI64ToBigInt);
return SetEffect(
SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
@@ -5218,7 +5353,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, BigIntToI64));
+ : BuildLoadBuiltinFromInstance(Builtins::kBigIntToI64);
return SetEffect(SetControl(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), target,
@@ -5228,15 +5363,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* FromJS(Node* node, Node* js_context, wasm::ValueType type) {
DCHECK_NE(wasm::kWasmStmt, type);
- // The parameter is of type anyref or except_ref, we take it as is.
- if (type == wasm::kWasmAnyRef || type == wasm::kWasmExceptRef) {
+ // The parameter is of type anyref or exnref, we take it as is.
+ if (type == wasm::kWasmAnyRef || type == wasm::kWasmExnRef) {
return node;
}
- if (type == wasm::kWasmAnyFunc) {
+ if (type == wasm::kWasmFuncRef) {
Node* check =
BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
- Runtime::kWasmIsValidAnyFuncValue, js_context, &node, 1, effect_,
+ Runtime::kWasmIsValidFuncRefValue, js_context, &node, 1, effect_,
Control())));
Diamond type_check(graph(), mcgraph()->common(), check,
@@ -5471,8 +5606,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The callable is passed as the last parameter, after WASM arguments.
Node* callable_node = Param(wasm_count + 1);
- Node* undefined_node = LOAD_INSTANCE_FIELD(
- UndefinedValue, MachineType::TypeCompressedTaggedPointer());
+ Node* undefined_node = BuildLoadUndefinedValueFromInstance();
Node* call = nullptr;
bool sloppy_receiver = true;
@@ -5811,22 +5945,26 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
void BuildCWasmEntry() {
- // Build the start and the JS parameter nodes.
- SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 5)));
+ // +1 offset for first parameter index being -1.
+ SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 1)));
- // Create parameter nodes (offset by 1 for the receiver parameter).
- Node* code_entry = Param(CWasmEntryParameters::kCodeEntry + 1);
- Node* object_ref_node = Param(CWasmEntryParameters::kObjectRef + 1);
- Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
+ Node* code_entry = Param(CWasmEntryParameters::kCodeEntry);
+ Node* object_ref = Param(CWasmEntryParameters::kObjectRef);
+ Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer);
+ Node* c_entry_fp = Param(CWasmEntryParameters::kCEntryFp);
+
+ Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
+ STORE_RAW(fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset,
+ c_entry_fp, MachineType::PointerRepresentation(),
+ kNoWriteBarrier);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
- int arg_count =
- wasm_arg_count + 4; // code, object_ref_node, control, effect
+ int arg_count = wasm_arg_count + 4; // code, object_ref, control, effect
Node** args = Buffer(arg_count);
int pos = 0;
args[pos++] = code_entry;
- args[pos++] = object_ref_node;
+ args[pos++] = object_ref;
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -5847,26 +5985,43 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* call = SetEffect(graph()->NewNode(
mcgraph()->common()->Call(call_descriptor), arg_count, args));
- // Store the return value.
- DCHECK_GE(1, sig_->return_count());
- if (sig_->return_count() == 1) {
+ Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call);
+ Node* if_exception =
+ graph()->NewNode(mcgraph()->common()->IfException(), call, call);
+
+ // Handle exception: return it.
+ SetControl(if_exception);
+ Return(if_exception);
+
+ // Handle success: store the return value(s).
+ SetControl(if_success);
+ pos = 0;
+ offset = 0;
+ for (wasm::ValueType type : sig_->returns()) {
StoreRepresentation store_rep(
- wasm::ValueTypes::MachineRepresentationFor(sig_->GetReturn()),
- kNoWriteBarrier);
+ wasm::ValueTypes::MachineRepresentationFor(type), kNoWriteBarrier);
+ Node* value = sig_->return_count() == 1
+ ? call
+ : graph()->NewNode(mcgraph()->common()->Projection(pos),
+ call, Control());
SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep),
- arg_buffer, Int32Constant(0), call, Effect(),
- Control()));
+ arg_buffer, Int32Constant(offset), value,
+ Effect(), Control()));
+ offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ pos++;
}
+
Return(jsgraph()->SmiConstant(0));
if (mcgraph()->machine()->Is32() && ContainsInt64(sig_)) {
MachineRepresentation sig_reps[] = {
- MachineRepresentation::kWord32, // return value
- MachineRepresentation::kTagged, // receiver
- MachineRepresentation::kTagged, // arg0 (code)
- MachineRepresentation::kTagged // arg1 (buffer)
+ MachineType::PointerRepresentation(), // return value
+ MachineType::PointerRepresentation(), // target
+ MachineRepresentation::kTagged, // object_ref
+ MachineType::PointerRepresentation(), // argv
+ MachineType::PointerRepresentation() // c_entry_fp
};
- Signature<MachineRepresentation> c_entry_sig(1, 2, sig_reps);
+ Signature<MachineRepresentation> c_entry_sig(1, 4, sig_reps);
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(),
mcgraph()->common(), mcgraph()->zone(), &c_entry_sig);
r.LowerGraph();
@@ -5879,6 +6034,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Isolate* const isolate_;
JSGraph* jsgraph_;
StubCallMode stub_mode_;
+ SetOncePointer<Node> undefined_value_node_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
wasm::WasmFeatures enabled_features_;
};
@@ -5901,27 +6057,25 @@ void AppendSignature(char* buffer, size_t max_name_len,
} // namespace
-MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
- wasm::FunctionSig* sig,
- bool is_import) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompileJSToWasmWrapper");
+std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+ Isolate* isolate, wasm::FunctionSig* sig, bool is_import) {
//----------------------------------------------------------------------------
// Create the Graph.
//----------------------------------------------------------------------------
- Zone zone(isolate->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
+ std::unique_ptr<Zone> zone =
+ base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ Graph* graph = new (zone.get()) Graph(zone.get());
+ CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
+ zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
StubCallMode::kCallCodeObject,
wasm::WasmFeaturesFromIsolate(isolate));
builder.set_control_ptr(&control);
@@ -5929,73 +6083,66 @@ MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
builder.BuildJSToWasmWrapper(is_import);
//----------------------------------------------------------------------------
- // Run the compilation pipeline.
+ // Create the compilation job.
//----------------------------------------------------------------------------
static constexpr size_t kMaxNameLen = 128;
- char debug_name[kMaxNameLen] = "js_to_wasm:";
- AppendSignature(debug_name, kMaxNameLen, sig);
+ auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+ memcpy(debug_name.get(), "js_to_wasm:", 12);
+ AppendSignature(debug_name.get(), kMaxNameLen, sig);
- // Schedule and compile to machine code.
int params = static_cast<int>(sig->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
- &zone, false, params + 1, CallDescriptor::kNoFlags);
+ zone.get(), false, params + 1, CallDescriptor::kNoFlags);
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
- isolate, incoming, &graph, Code::JS_TO_WASM_FUNCTION, debug_name,
- WasmAssemblerOptions());
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- return maybe_code;
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(debug_name, os);
- }
-#endif
-
- if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code, "%s",
- debug_name);
- }
-
- return code;
+ return Pipeline::NewWasmHeapStubCompilationJob(
+ isolate, incoming, std::move(zone), graph, Code::JS_TO_WASM_FUNCTION,
+ std::move(debug_name), WasmAssemblerOptions());
}
-WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
- wasm::FunctionSig* expected_sig,
- bool has_bigint_feature) {
- if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
- auto imported_function = WasmExportedFunction::cast(*target);
- auto func_index = imported_function.function_index();
- auto module = imported_function.instance().module();
+std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
+ Handle<JSReceiver> callable, wasm::FunctionSig* expected_sig,
+ bool has_bigint_feature) {
+ if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
+ auto imported_function = Handle<WasmExportedFunction>::cast(callable);
+ auto func_index = imported_function->function_index();
+ auto module = imported_function->instance().module();
wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
if (*imported_sig != *expected_sig) {
- return WasmImportCallKind::kLinkError;
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
- if (static_cast<uint32_t>(func_index) < module->num_imported_functions) {
- // TODO(wasm): this redirects all imported-reexported functions
- // through the call builtin. Fall through to JS function cases below?
- return WasmImportCallKind::kUseCallBuiltin;
+ if (static_cast<uint32_t>(func_index) >= module->num_imported_functions) {
+ return std::make_pair(WasmImportCallKind::kWasmToWasm, callable);
}
- return WasmImportCallKind::kWasmToWasm;
- }
- if (WasmCapiFunction::IsWasmCapiFunction(*target)) {
- WasmCapiFunction capi_function = WasmCapiFunction::cast(*target);
- if (!capi_function.IsSignatureEqual(expected_sig)) {
- return WasmImportCallKind::kLinkError;
+ Isolate* isolate = callable->GetIsolate();
+ // Resolve the short-cut to the underlying callable and continue.
+ Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
+ ImportedFunctionEntry entry(instance, func_index);
+ callable = handle(entry.callable(), isolate);
+ }
+ if (WasmJSFunction::IsWasmJSFunction(*callable)) {
+ auto js_function = Handle<WasmJSFunction>::cast(callable);
+ if (!js_function->MatchesSignature(expected_sig)) {
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
+ }
+ Isolate* isolate = callable->GetIsolate();
+ // Resolve the short-cut to the underlying callable and continue.
+ callable = handle(js_function->GetCallable(), isolate);
+ }
+ if (WasmCapiFunction::IsWasmCapiFunction(*callable)) {
+ auto capi_function = Handle<WasmCapiFunction>::cast(callable);
+ if (!capi_function->IsSignatureEqual(expected_sig)) {
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
- return WasmImportCallKind::kWasmToCapi;
+ return std::make_pair(WasmImportCallKind::kWasmToCapi, callable);
}
// Assuming we are calling to JS, check whether this would be a runtime error.
if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) {
- return WasmImportCallKind::kRuntimeTypeError;
+ return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable);
}
// For JavaScript calls, determine whether the target has an arity match
// and whether it has a sloppy receiver.
- if (target->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+ if (callable->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
SharedFunctionInfo shared = function->shared();
// Check for math intrinsics.
@@ -6004,7 +6151,9 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
wasm::FunctionSig* sig = wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
DCHECK_NOT_NULL(sig); \
- if (*expected_sig == *sig) return WasmImportCallKind::k##name; \
+ if (*expected_sig == *sig) { \
+ return std::make_pair(WasmImportCallKind::k##name, callable); \
+ } \
}
#define COMPARE_SIG_FOR_BUILTIN_F64(name) \
case Builtins::kMath##name: \
@@ -6051,19 +6200,23 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
if (IsClassConstructor(shared.kind())) {
// Class constructor will throw anyway.
- return WasmImportCallKind::kUseCallBuiltin;
+ return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
bool sloppy = is_sloppy(shared.language_mode()) && !shared.native();
if (shared.internal_formal_parameter_count() ==
expected_sig->parameter_count()) {
- return sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
- : WasmImportCallKind::kJSFunctionArityMatch;
+ return std::make_pair(
+ sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
+ : WasmImportCallKind::kJSFunctionArityMatch,
+ callable);
}
- return sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
- : WasmImportCallKind::kJSFunctionArityMismatch;
+ return std::make_pair(
+ sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
+ : WasmImportCallKind::kJSFunctionArityMismatch,
+ callable);
}
// Unknown case. Use the call builtin.
- return WasmImportCallKind::kUseCallBuiltin;
+ return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
@@ -6103,10 +6256,9 @@ wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
#undef CASE
}
-wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
- wasm::NativeModule* native_module,
- WasmImportCallKind kind,
- wasm::FunctionSig* sig) {
+wasm::WasmCompilationResult CompileWasmMathIntrinsic(
+ wasm::WasmEngine* wasm_engine, WasmImportCallKind kind,
+ wasm::FunctionSig* sig) {
DCHECK_EQ(1, sig->return_count());
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6125,7 +6277,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
InstructionSelector::AlignmentRequirements()));
wasm::CompilationEnv env(
- native_module->module(), wasm::UseTrapHandler::kNoTrapHandler,
+ nullptr, wasm::UseTrapHandler::kNoTrapHandler,
wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
wasm::kAllWasmFeatures, wasm::LowerSimd::kNoLowerSimd);
@@ -6167,21 +6319,12 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
wasm_engine, call_descriptor, mcgraph, Code::WASM_FUNCTION,
wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(),
source_positions);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kFunction,
- wasm::ExecutionTier::kNone);
- // TODO(titzer): add counters for math intrinsic code size / allocation
- return native_module->PublishCode(std::move(wasm_code));
+ return result;
}
-wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
- wasm::NativeModule* native_module,
- WasmImportCallKind kind,
- wasm::FunctionSig* sig,
- bool source_positions) {
+wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+ wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
+ WasmImportCallKind kind, wasm::FunctionSig* sig, bool source_positions) {
DCHECK_NE(WasmImportCallKind::kLinkError, kind);
DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
@@ -6189,7 +6332,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
if (FLAG_wasm_math_intrinsics &&
kind >= WasmImportCallKind::kFirstMathIntrinsic &&
kind <= WasmImportCallKind::kLastMathIntrinsic) {
- return CompileWasmMathIntrinsic(wasm_engine, native_module, kind, sig);
+ return CompileWasmMathIntrinsic(wasm_engine, kind, sig);
}
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6214,7 +6357,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_position_table,
StubCallMode::kCallWasmRuntimeStub,
- native_module->enabled_features());
+ env->enabled_features);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmImportCallWrapper(kind);
@@ -6232,13 +6375,8 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
wasm_engine, incoming, &jsgraph, Code::WASM_TO_JS_FUNCTION,
wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
source_position_table);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
- wasm::ExecutionTier::kNone);
- return native_module->PublishCode(std::move(wasm_code));
+ result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
+ return result;
}
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
@@ -6290,9 +6428,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
wasm::WasmCode::kWasmToCapiWrapper, debug_name,
WasmStubAssemblerOptions(), source_positions);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
+ wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
std::move(result.source_positions), wasm::WasmCode::kWasmToCapiWrapper,
wasm::ExecutionTier::kNone);
return native_module->PublishCode(std::move(wasm_code));
@@ -6338,24 +6475,26 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
wasm::WasmCode::kInterpreterEntry, func_name.begin(),
WasmStubAssemblerOptions());
result.result_tier = wasm::ExecutionTier::kInterpreter;
+ result.kind = wasm::WasmCompilationResult::kInterpreterEntry;
return result;
}
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
- Zone zone(isolate->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
+ std::unique_ptr<Zone> zone =
+ base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ Graph* graph = new (zone.get()) Graph(zone.get());
+ CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
+ zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
StubCallMode::kCallCodeObject,
wasm::WasmFeaturesFromIsolate(isolate));
builder.set_control_ptr(&control);
@@ -6363,29 +6502,36 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
builder.BuildCWasmEntry();
// Schedule and compile to machine code.
- CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
- &zone, false, CWasmEntryParameters::kNumParameters + 1,
- CallDescriptor::kNoFlags);
+ MachineType sig_types[] = {MachineType::Pointer(), // return
+ MachineType::Pointer(), // target
+ MachineType::AnyTagged(), // object_ref
+ MachineType::Pointer(), // argv
+ MachineType::Pointer()}; // c_entry_fp
+ MachineSignature incoming_sig(1, 4, sig_types);
+ // Traps need the root register, for TailCallRuntimeWithCEntry to call
+ // Runtime::kThrowWasmError.
+ bool initialize_root_flag = true;
+ CallDescriptor* incoming = Linkage::GetSimplifiedCDescriptor(
+ zone.get(), &incoming_sig, initialize_root_flag);
// Build a name in the form "c-wasm-entry:<params>:<returns>".
static constexpr size_t kMaxNameLen = 128;
- char debug_name[kMaxNameLen] = "c-wasm-entry:";
- AppendSignature(debug_name, kMaxNameLen, sig);
-
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
- isolate, incoming, &graph, Code::C_WASM_ENTRY, debug_name,
- AssemblerOptions::Default(isolate));
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- return maybe_code;
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(debug_name, os);
- }
-#endif
+ auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+ memcpy(debug_name.get(), "c-wasm-entry:", 14);
+ AppendSignature(debug_name.get(), kMaxNameLen, sig);
+
+ // Run the compilation job synchronously.
+ std::unique_ptr<OptimizedCompilationJob> job(
+ Pipeline::NewWasmHeapStubCompilationJob(
+ isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
+ std::move(debug_name), AssemblerOptions::Default(isolate)));
+
+ if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
+ job->ExecuteJob() == CompilationJob::FAILED ||
+ job->FinalizeJob(isolate) == CompilationJob::FAILED) {
+ return {};
+ }
+ Handle<Code> code = job->compilation_info()->code();
return code;
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 460d0d2f1b..315733c396 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_WASM_COMPILER_H_
#include <memory>
+#include <utility>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
@@ -20,6 +21,7 @@
namespace v8 {
namespace internal {
struct AssemblerOptions;
+class OptimizedCompilationJob;
namespace compiler {
// Forward declarations for some compiler data structures.
@@ -103,13 +105,23 @@ enum class WasmImportCallKind : uint8_t {
kUseCallBuiltin
};
-V8_EXPORT_PRIVATE WasmImportCallKind
-GetWasmImportCallKind(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
+// TODO(wasm): There should be only one import kind for sloppy and strict in
+// order to reduce wrapper cache misses. The mode can be checked at runtime
+// instead.
+constexpr WasmImportCallKind kDefaultImportCallKind =
+ WasmImportCallKind::kJSFunctionArityMatchSloppy;
+
+// Resolves which import call wrapper is required for the given JS callable.
+// Returns the kind of wrapper need and the ultimate target callable. Note that
+// some callables (e.g. a {WasmExportedFunction} or {WasmJSFunction}) just wrap
+// another target, which is why the ultimate target is returned as well.
+V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
+ResolveWasmImportCall(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
bool has_bigint_feature);
// Compiles an import call wrapper, which allows WASM to call imports.
-V8_EXPORT_PRIVATE wasm::WasmCode* CompileWasmImportCallWrapper(
- wasm::WasmEngine*, wasm::NativeModule*, WasmImportCallKind,
+V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+ wasm::WasmEngine*, wasm::CompilationEnv* env, WasmImportCallKind,
wasm::FunctionSig*, bool source_positions);
// Compiles a host call wrapper, which allows WASM to call host functions.
@@ -117,11 +129,9 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
wasm::NativeModule*,
wasm::FunctionSig*, Address address);
-// Creates a code object calling a wasm function with the given signature,
-// callable from JS.
-V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(Isolate*,
- wasm::FunctionSig*,
- bool is_import);
+// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
+std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+ Isolate* isolate, wasm::FunctionSig* sig, bool is_import);
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
@@ -133,13 +143,13 @@ enum CWasmEntryParameters {
kCodeEntry,
kObjectRef,
kArgumentsBuffer,
+ kCEntryFp,
// marker:
kNumParameters
};
-// Compiles a stub with JS linkage, taking parameters as described by
-// {CWasmEntryParameters}. It loads the wasm parameters from the argument
-// buffer and calls the wasm function given as first parameter.
+// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
+// which knows how to feed it its parameters.
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
// Values from the instance object are cached between WASM-level function calls.
@@ -280,9 +290,9 @@ class WasmGraphBuilder {
Node* GetGlobal(uint32_t index);
Node* SetGlobal(uint32_t index, Node* val);
- Node* GetTable(uint32_t table_index, Node* index,
+ Node* TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position);
- Node* SetTable(uint32_t table_index, Node* index, Node* val,
+ Node* TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
@@ -377,6 +387,7 @@ class WasmGraphBuilder {
Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
uint32_t alignment, uint32_t offset,
wasm::WasmCodePosition position);
+ Node* AtomicFence();
// Returns a pointer to the dropped_data_segments array. Traps if the data
// segment is active or has been dropped.
@@ -395,7 +406,7 @@ class WasmGraphBuilder {
Node* TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
Node* src, Node* size, wasm::WasmCodePosition position);
Node* ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
- Node* TableCopy(uint32_t table_src_index, uint32_t table_dst_index, Node* dst,
+ Node* TableCopy(uint32_t table_dst_index, uint32_t table_src_index, Node* dst,
Node* src, Node* size, wasm::WasmCodePosition position);
Node* TableGrow(uint32_t table_index, Node* value, Node* delta);
Node* TableSize(uint32_t table_index);
@@ -485,10 +496,10 @@ class WasmGraphBuilder {
Node* BuildCallNode(wasm::FunctionSig* sig, Node** args,
wasm::WasmCodePosition position, Node* instance_node,
const Operator* op);
- // Special implementation for CallIndirect for table 0.
- Node* BuildIndirectCall(uint32_t sig_index, Node** args, Node*** rets,
- wasm::WasmCodePosition position,
- IsReturnCall continuation);
+ // Helper function for {BuildIndirectCall}.
+ void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
+ Node** ift_sig_ids, Node** ift_targets,
+ Node** ift_instances);
Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, Node** args,
Node*** rets, wasm::WasmCodePosition position,
IsReturnCall continuation);
@@ -591,8 +602,6 @@ class WasmGraphBuilder {
return buf;
}
- Node* BuildLoadBuiltinFromInstance(int builtin_index);
-
//-----------------------------------------------------------------------
// Operations involving the CEntry, a dependency we want to remove
// to get off the GC heap.
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index a29c596909..6656ab608d 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -48,6 +48,10 @@
#include "src/utils/utils.h"
#include "src/wasm/wasm-engine.h"
+#ifdef V8_USE_PERFETTO
+#include "perfetto/tracing.h"
+#endif // V8_USE_PERFETTO
+
#ifdef V8_INTL_SUPPORT
#include "unicode/locid.h"
#endif // V8_INTL_SUPPORT
@@ -247,15 +251,7 @@ namespace tracing {
namespace {
-// String options that can be used to initialize TraceOptions.
-const char kRecordUntilFull[] = "record-until-full";
-const char kRecordContinuously[] = "record-continuously";
-const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
-
-const char kRecordModeParam[] = "record_mode";
-const char kEnableSystraceParam[] = "enable_systrace";
-const char kEnableArgumentFilterParam[] = "enable_argument_filter";
-const char kIncludedCategoriesParam[] = "included_categories";
+static constexpr char kIncludedCategoriesParam[] = "included_categories";
class TraceConfigParser {
public:
@@ -273,30 +269,11 @@ class TraceConfigParser {
Local<Value> result = JSON::Parse(context, source).ToLocalChecked();
Local<v8::Object> trace_config_object = Local<v8::Object>::Cast(result);
- trace_config->SetTraceRecordMode(
- GetTraceRecordMode(isolate, context, trace_config_object));
- if (GetBoolean(isolate, context, trace_config_object,
- kEnableSystraceParam)) {
- trace_config->EnableSystrace();
- }
- if (GetBoolean(isolate, context, trace_config_object,
- kEnableArgumentFilterParam)) {
- trace_config->EnableArgumentFilter();
- }
UpdateIncludedCategoriesList(isolate, context, trace_config_object,
trace_config);
}
private:
- static bool GetBoolean(v8::Isolate* isolate, Local<Context> context,
- Local<v8::Object> object, const char* property) {
- Local<Value> value = GetValue(isolate, context, object, property);
- if (value->IsNumber()) {
- return value->BooleanValue(isolate);
- }
- return false;
- }
-
static int UpdateIncludedCategoriesList(
v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object,
platform::tracing::TraceConfig* trace_config) {
@@ -316,23 +293,6 @@ class TraceConfigParser {
}
return 0;
}
-
- static platform::tracing::TraceRecordMode GetTraceRecordMode(
- v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object) {
- Local<Value> value = GetValue(isolate, context, object, kRecordModeParam);
- if (value->IsString()) {
- Local<String> v8_string = value->ToString(context).ToLocalChecked();
- String::Utf8Value str(isolate, v8_string);
- if (strcmp(kRecordUntilFull, *str) == 0) {
- return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
- } else if (strcmp(kRecordContinuously, *str) == 0) {
- return platform::tracing::TraceRecordMode::RECORD_CONTINUOUSLY;
- } else if (strcmp(kRecordAsMuchAsPossible, *str) == 0) {
- return platform::tracing::TraceRecordMode::RECORD_AS_MUCH_AS_POSSIBLE;
- }
- }
- return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
- }
};
} // namespace
@@ -1927,7 +1887,7 @@ static void PrintNonErrorsMessageCallback(Local<Message> message,
auto ToCString = [](const v8::String::Utf8Value& value) {
return *value ? *value : "<string conversion failed>";
};
- Isolate* isolate = Isolate::GetCurrent();
+ Isolate* isolate = message->GetIsolate();
v8::String::Utf8Value msg(isolate, message->Get());
const char* msg_string = ToCString(msg);
// Print (filename):(line number): (message).
@@ -2001,20 +1961,20 @@ int LineFromOffset(Local<debug::Script> script, int offset) {
return location.GetLineNumber();
}
-void WriteLcovDataForRange(std::vector<uint32_t>& lines, int start_line,
+void WriteLcovDataForRange(std::vector<uint32_t>* lines, int start_line,
int end_line, uint32_t count) {
// Ensure space in the array.
- lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()), 0);
+ lines->resize(std::max(static_cast<size_t>(end_line + 1), lines->size()), 0);
// Boundary lines could be shared between two functions with different
// invocation counts. Take the maximum.
- lines[start_line] = std::max(lines[start_line], count);
- lines[end_line] = std::max(lines[end_line], count);
+ (*lines)[start_line] = std::max((*lines)[start_line], count);
+ (*lines)[end_line] = std::max((*lines)[end_line], count);
// Invocation counts for non-boundary lines are overwritten.
- for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
+ for (int k = start_line + 1; k < end_line; k++) (*lines)[k] = count;
}
void WriteLcovDataForNamedRange(std::ostream& sink,
- std::vector<uint32_t>& lines,
+ std::vector<uint32_t>* lines,
const std::string& name, int start_line,
int end_line, uint32_t count) {
WriteLcovDataForRange(lines, start_line, end_line, count);
@@ -2064,7 +2024,7 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
name_stream << start.GetColumnNumber() << ">";
}
- WriteLcovDataForNamedRange(sink, lines, name_stream.str(), start_line,
+ WriteLcovDataForNamedRange(sink, &lines, name_stream.str(), start_line,
end_line, count);
}
@@ -2074,7 +2034,7 @@ void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
int start_line = LineFromOffset(script, block_data.StartOffset());
int end_line = LineFromOffset(script, block_data.EndOffset() - 1);
uint32_t count = block_data.Count();
- WriteLcovDataForRange(lines, start_line, end_line, count);
+ WriteLcovDataForRange(&lines, start_line, end_line, count);
}
}
// Write per-line coverage. LCOV uses 1-based line numbers.
@@ -3350,24 +3310,25 @@ int Shell::Main(int argc, char* argv[]) {
std::unique_ptr<platform::tracing::TracingController> tracing;
std::ofstream trace_file;
-#ifdef V8_USE_PERFETTO
- std::ofstream perfetto_trace_file;
-#endif // V8_USE_PERFETTO
if (options.trace_enabled && !i::FLAG_verify_predictable) {
tracing = base::make_unique<platform::tracing::TracingController>();
-
trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
DCHECK(trace_file.good());
+
+#ifdef V8_USE_PERFETTO
+ // Set up the in-process backend that the tracing controller will connect
+ // to.
+ perfetto::TracingInitArgs init_args;
+ init_args.backends = perfetto::BackendType::kInProcessBackend;
+ perfetto::Tracing::Initialize(init_args);
+
+ tracing->InitializeForPerfetto(&trace_file);
+#else
platform::tracing::TraceBuffer* trace_buffer =
platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
platform::tracing::TraceBuffer::kRingBufferChunks,
platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
tracing->Initialize(trace_buffer);
-
-#ifdef V8_USE_PERFETTO
- perfetto_trace_file.open("v8_perfetto_trace.json");
- DCHECK(trace_file.good());
- tracing->InitializeForPerfetto(&perfetto_trace_file);
#endif // V8_USE_PERFETTO
}
diff --git a/deps/v8/src/date/OWNERS b/deps/v8/src/date/OWNERS
index fc4aa8d5ac..6edeeae0ea 100644
--- a/deps/v8/src/date/OWNERS
+++ b/deps/v8/src/date/OWNERS
@@ -1,3 +1,6 @@
ishell@chromium.org
jshin@chromium.org
ulan@chromium.org
+verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index 46b472480d..220aa1ce26 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
jgruber@chromium.org
mvstanton@chromium.org
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 4021cd5038..5337f98db9 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -68,7 +68,8 @@ bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
return a.start < b.start;
}
-void SortBlockData(std::vector<CoverageBlock>& v) {
+void SortBlockData(
+ std::vector<CoverageBlock>& v) { // NOLINT(runtime/references)
// Sort according to the block nesting structure.
std::sort(v.begin(), v.end(), CompareCoverageBlock);
}
@@ -534,9 +535,9 @@ std::unique_ptr<Coverage> Coverage::Collect(
->feedback_vectors_for_profiling_tools()
->IsArrayList());
DCHECK_EQ(v8::debug::CoverageMode::kBestEffort, collectionMode);
- HeapIterator heap_iterator(isolate->heap());
- for (HeapObject current_obj = heap_iterator.next();
- !current_obj.is_null(); current_obj = heap_iterator.next()) {
+ HeapObjectIterator heap_iterator(isolate->heap());
+ for (HeapObject current_obj = heap_iterator.Next();
+ !current_obj.is_null(); current_obj = heap_iterator.Next()) {
if (!current_obj.IsJSFunction()) continue;
JSFunction func = JSFunction::cast(current_obj);
SharedFunctionInfo shared = func.shared();
@@ -678,9 +679,9 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
std::vector<Handle<JSFunction>> funcs_needing_feedback_vector;
{
- HeapIterator heap_iterator(isolate->heap());
- for (HeapObject o = heap_iterator.next(); !o.is_null();
- o = heap_iterator.next()) {
+ HeapObjectIterator heap_iterator(isolate->heap());
+ for (HeapObject o = heap_iterator.Next(); !o.is_null();
+ o = heap_iterator.Next()) {
if (o.IsJSFunction()) {
JSFunction func = JSFunction::cast(o);
if (func.has_closure_feedback_cell_array()) {
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 65e62f2aac..0d8a7b2c7e 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -51,7 +51,7 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
}
MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
- StackFrame::Id frame_id,
+ StackFrameId frame_id,
int inlined_jsframe_index,
Handle<String> source,
bool throw_on_side_effect) {
@@ -312,6 +312,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ObjectValuesSkipFastPath) \
V(ObjectGetOwnPropertyNames) \
V(ObjectGetOwnPropertyNamesTryFast) \
+ V(ObjectIsExtensible) \
V(RegExpInitializeAndCompile) \
V(StackGuard) \
V(StringAdd) \
@@ -771,6 +772,8 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kStrictPoisonPillThrower:
case Builtins::kAllocateInYoungGeneration:
case Builtins::kAllocateInOldGeneration:
+ case Builtins::kAllocateRegularInYoungGeneration:
+ case Builtins::kAllocateRegularInOldGeneration:
return DebugInfo::kHasNoSideEffect;
// Set builtins.
@@ -904,7 +907,7 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
switch (callee) {
// Transitively called Builtins:
case Builtins::kAbort:
- case Builtins::kAbortJS:
+ case Builtins::kAbortCSAAssert:
case Builtins::kAdaptorWithBuiltinExitFrame:
case Builtins::kArrayConstructorImpl:
case Builtins::kArrayEveryLoopContinuation:
@@ -959,6 +962,8 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtins::Name caller,
case Builtins::kOrdinaryToPrimitive_String:
case Builtins::kParseInt:
case Builtins::kProxyHasProperty:
+ case Builtins::kProxyIsExtensible:
+ case Builtins::kProxyGetPrototypeOf:
case Builtins::kRecordWrite:
case Builtins::kStringAdd_CheckNone:
case Builtins::kStringEqual:
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 50817691d7..7819892050 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -7,8 +7,11 @@
#include <vector>
+#include "src/common/globals.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
+#include "src/debug/debug.h"
+#include "src/execution/frames.h"
#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string-table.h"
@@ -28,7 +31,7 @@ class DebugEvaluate : public AllStatic {
// - Parameters and stack-allocated locals need to be materialized. Altered
// values need to be written back to the stack afterwards.
// - The arguments object needs to materialized.
- static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
+ static MaybeHandle<Object> Local(Isolate* isolate, StackFrameId frame_id,
int inlined_jsframe_index,
Handle<String> source,
bool throw_on_side_effect);
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index a6ee31738d..4fe062b277 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -52,10 +52,13 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
}
}
-// NOLINTNEXTLINE
-FrameInspector::~FrameInspector() {
- // Destructor needs to be defined in the .cc file, because it instantiates
- // std::unique_ptr destructors but the types are not known in the header.
+// Destructor needs to be defined in the .cc file, because it instantiates
+// std::unique_ptr destructors but the types are not known in the header.
+FrameInspector::~FrameInspector() = default;
+
+JavaScriptFrame* FrameInspector::javascript_frame() {
+ return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
+ : JavaScriptFrame::cast(frame_);
}
int FrameInspector::GetParametersCount() {
@@ -90,8 +93,10 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check;
return ScopeInfo::ContextSlotIndex(*info, *parameter_name, &mode, &init_flag,
- &maybe_assigned_flag) != -1;
+ &maybe_assigned_flag,
+ &requires_brand_check) != -1;
}
RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 5ee4f8b61f..274d10030a 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -6,7 +6,6 @@
#define V8_DEBUG_DEBUG_FRAMES_H_
#include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/execution/v8threads.h"
#include "src/objects/objects.h"
@@ -15,12 +14,15 @@
namespace v8 {
namespace internal {
+class JavaScriptFrame;
+class StandardFrame;
+
class FrameInspector {
public:
FrameInspector(StandardFrame* frame, int inlined_frame_index,
Isolate* isolate);
- ~FrameInspector(); // NOLINT (modernize-use-equals-default)
+ ~FrameInspector();
int GetParametersCount();
Handle<JSFunction> GetFunction() const { return function_; }
@@ -37,10 +39,7 @@ class FrameInspector {
bool IsWasm();
bool IsJavaScript();
- inline JavaScriptFrame* javascript_frame() {
- return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
- : JavaScriptFrame::cast(frame_);
- }
+ JavaScriptFrame* javascript_frame();
int inlined_frame_index() const { return inlined_frame_index_; }
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 79222371f9..59bc6d0863 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -164,8 +164,9 @@ class WasmScript : public Script {
uint32_t GetFunctionHash(int function_index);
};
-V8_EXPORT_PRIVATE void GetLoadedScripts(Isolate* isolate,
- PersistentValueVector<Script>& scripts);
+V8_EXPORT_PRIVATE void GetLoadedScripts(
+ Isolate* isolate,
+ PersistentValueVector<Script>& scripts); // NOLINT(runtime/references)
MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
Local<String> source);
diff --git a/deps/v8/src/debug/debug-scope-iterator.h b/deps/v8/src/debug/debug-scope-iterator.h
index 3859e8cb41..44d6c49860 100644
--- a/deps/v8/src/debug/debug-scope-iterator.h
+++ b/deps/v8/src/debug/debug-scope-iterator.h
@@ -8,7 +8,6 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug-interface.h"
#include "src/debug/debug-scopes.h"
-#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 3a58f0b458..1091e3a819 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -13,7 +13,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/objects/js-generator-inl.h"
-#include "src/objects/module.h"
+#include "src/objects/source-text-module.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
@@ -538,11 +538,8 @@ void ScopeIterator::RetrieveScopeChain(DeclarationScope* scope) {
int beg_pos = inner_scope->start_position();
int end_pos = inner_scope->end_position();
DCHECK((beg_pos >= 0 && end_pos >= 0) || inner_scope->is_hidden());
- if (beg_pos <= position && position < end_pos) {
- // Don't walk into inner functions.
- if (!inner_scope->is_function_scope()) {
- current = inner_scope;
- }
+ if (beg_pos < position && position < end_pos) {
+ current = inner_scope;
break;
}
}
@@ -576,7 +573,7 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
int count_index = scope_info->ModuleVariableCountIndex();
int module_variable_count = Smi::cast(scope_info->get(count_index)).value();
- Handle<Module> module(context_->module(), isolate_);
+ Handle<SourceTextModule> module(context_->module(), isolate_);
for (int i = 0; i < module_variable_count; ++i) {
int index;
@@ -587,7 +584,8 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
if (ScopeInfo::VariableIsSynthetic(raw_name)) continue;
name = handle(raw_name, isolate_);
}
- Handle<Object> value = Module::LoadVariable(isolate_, module, index);
+ Handle<Object> value =
+ SourceTextModule::LoadVariable(isolate_, module, index);
// Reflect variables under TDZ as undeclared in scope object.
if (value->IsTheHole(isolate_)) continue;
@@ -614,15 +612,32 @@ bool ScopeIterator::VisitContextLocals(const Visitor& visitor,
bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
if (mode == Mode::STACK && current_scope_->is_declaration_scope() &&
current_scope_->AsDeclarationScope()->has_this_declaration()) {
- Handle<Object> receiver = frame_inspector_ == nullptr
- ? handle(generator_->receiver(), isolate_)
- : frame_inspector_->GetReceiver();
+ // TODO(bmeurer): We should refactor the general variable lookup
+ // around "this", since the current way is rather hacky when the
+ // receiver is context-allocated.
+ auto this_var = current_scope_->AsDeclarationScope()->receiver();
+ Handle<Object> receiver =
+ this_var->location() == VariableLocation::CONTEXT
+ ? handle(context_->get(this_var->index()), isolate_)
+ : frame_inspector_ == nullptr
+ ? handle(generator_->receiver(), isolate_)
+ : frame_inspector_->GetReceiver();
if (receiver->IsOptimizedOut(isolate_) || receiver->IsTheHole(isolate_)) {
receiver = isolate_->factory()->undefined_value();
}
if (visitor(isolate_->factory()->this_string(), receiver)) return true;
}
+ if (current_scope_->is_function_scope()) {
+ Variable* function_var =
+ current_scope_->AsDeclarationScope()->function_var();
+ if (function_var != nullptr) {
+ Handle<JSFunction> function = frame_inspector_->GetFunction();
+ Handle<String> name = function_var->name();
+ if (visitor(name, function)) return true;
+ }
+ }
+
for (Variable* var : *current_scope_->locals()) {
DCHECK(!var->is_this());
if (ScopeInfo::VariableIsSynthetic(*var->name())) continue;
@@ -696,8 +711,8 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
case VariableLocation::MODULE: {
if (mode == Mode::STACK) continue;
// if (var->IsExport()) continue;
- Handle<Module> module(context_->module(), isolate_);
- value = Module::LoadVariable(isolate_, module, var->index());
+ Handle<SourceTextModule> module(context_->module(), isolate_);
+ value = SourceTextModule::LoadVariable(isolate_, module, var->index());
// Reflect variables under TDZ as undeclared in scope object.
if (value->IsTheHole(isolate_)) continue;
break;
@@ -837,8 +852,8 @@ bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
case VariableLocation::MODULE:
if (!var->IsExport()) return false;
- Handle<Module> module(context_->module(), isolate_);
- Module::StoreVariable(module, var->index(), new_value);
+ Handle<SourceTextModule> module(context_->module(), isolate_);
+ SourceTextModule::StoreVariable(module, var->index(), new_value);
return true;
}
UNREACHABLE();
@@ -869,9 +884,10 @@ bool ScopeIterator::SetContextVariableValue(Handle<String> variable_name,
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
- int slot_index =
- ScopeInfo::ContextSlotIndex(context_->scope_info(), *variable_name, &mode,
- &flag, &maybe_assigned_flag);
+ RequiresBrandCheckFlag requires_brand_check;
+ int slot_index = ScopeInfo::ContextSlotIndex(
+ context_->scope_info(), *variable_name, &mode, &flag,
+ &maybe_assigned_flag, &requires_brand_check);
if (slot_index < 0) return false;
context_->set(slot_index, *new_value);
@@ -889,13 +905,13 @@ bool ScopeIterator::SetModuleVariableValue(Handle<String> variable_name,
*variable_name, &mode, &init_flag, &maybe_assigned_flag);
// Setting imports is currently not supported.
- if (ModuleDescriptor::GetCellIndexKind(cell_index) !=
- ModuleDescriptor::kExport) {
+ if (SourceTextModuleDescriptor::GetCellIndexKind(cell_index) !=
+ SourceTextModuleDescriptor::kExport) {
return false;
}
- Handle<Module> module(context_->module(), isolate_);
- Module::StoreVariable(module, cell_index, new_value);
+ Handle<SourceTextModule> module(context_->module(), isolate_);
+ SourceTextModule::StoreVariable(module, cell_index, new_value);
return true;
}
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 6e1c8b27bc..5c3361619a 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -8,11 +8,11 @@
#include <vector>
#include "src/debug/debug-frames.h"
-#include "src/execution/frames.h"
namespace v8 {
namespace internal {
+class JavaScriptFrame;
class ParseInfo;
// Iterate over the actual scopes visible from a stack frame or from a closure.
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 2c2c438727..a0c6fa967c 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -98,9 +98,10 @@ v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check;
int slot_index = ScopeInfo::ContextSlotIndex(
context->scope_info(), ReadOnlyRoots(isolate_->heap()).this_string(),
- &mode, &flag, &maybe_assigned_flag);
+ &mode, &flag, &maybe_assigned_flag, &requires_brand_check);
if (slot_index < 0) return v8::MaybeLocal<v8::Value>();
Handle<Object> value = handle(context->get(slot_index), isolate_);
if (value->IsTheHole(isolate_)) return v8::MaybeLocal<v8::Value>();
@@ -166,7 +167,7 @@ DebugStackTraceIterator::GetScopeIterator() const {
bool DebugStackTraceIterator::Restart() {
DCHECK(!Done());
if (iterator_.is_wasm()) return false;
- return !LiveEdit::RestartFrame(iterator_.javascript_frame());
+ return LiveEdit::RestartFrame(iterator_.javascript_frame());
}
v8::MaybeLocal<v8::Value> DebugStackTraceIterator::Evaluate(
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 5cc200d552..9b5200e343 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -15,6 +15,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/common/globals.h"
+#include "src/common/message-template.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer/deoptimizer.h"
@@ -22,7 +23,6 @@
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/execution/v8threads.h"
#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h" // For NextDebuggingId.
@@ -336,7 +336,7 @@ void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
// Threading support.
void Debug::ThreadInit() {
- thread_local_.break_frame_id_ = StackFrame::NO_ID;
+ thread_local_.break_frame_id_ = StackFrameId::NO_ID;
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_frame_count_ = -1;
@@ -960,9 +960,9 @@ void Debug::PrepareStep(StepAction step_action) {
// any. The debug frame will only be present if execution was stopped due to
// hitting a break point. In other situations (e.g. unhandled exception) the
// debug frame is not present.
- StackFrame::Id frame_id = break_frame_id();
+ StackFrameId frame_id = break_frame_id();
// If there is no JavaScript stack don't do anything.
- if (frame_id == StackFrame::NO_ID) return;
+ if (frame_id == StackFrameId::NO_ID) return;
feature_tracker()->Track(DebugFeatureTracker::kStepping);
@@ -1226,9 +1226,9 @@ void Debug::InstallDebugBreakTrampoline() {
std::vector<Handle<JSFunction>> needs_compile;
std::vector<Handle<AccessorPair>> needs_instantiate;
{
- HeapIterator iterator(isolate_->heap());
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(isolate_->heap());
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (needs_to_clear_ic && obj.IsFeedbackVector()) {
FeedbackVector::cast(obj).ClearSlots(isolate_);
continue;
@@ -1649,7 +1649,7 @@ void Debug::ScheduleFrameRestart(StackFrame* frame) {
// Reset break frame ID to the frame below the restarted frame.
StackTraceFrameIterator it(isolate_);
- thread_local_.break_frame_id_ = StackFrame::NO_ID;
+ thread_local_.break_frame_id_ = StackFrameId::NO_ID;
for (StackTraceFrameIterator it(isolate_); !it.done(); it.Advance()) {
if (it.frame()->fp() > thread_local_.restart_fp_) {
thread_local_.break_frame_id_ = it.frame()->id();
@@ -1913,7 +1913,7 @@ void Debug::ProcessCompileEvent(bool has_compile_error, Handle<Script> script) {
int Debug::CurrentFrameCount() {
StackTraceFrameIterator it(isolate_);
- if (break_frame_id() != StackFrame::NO_ID) {
+ if (break_frame_id() != StackFrameId::NO_ID) {
// Skip to break frame.
DCHECK(in_debug_scope());
while (!it.done() && it.frame()->id() != break_frame_id()) it.Advance();
@@ -2058,7 +2058,7 @@ DebugScope::DebugScope(Debug* debug)
StackTraceFrameIterator it(isolate());
bool has_frames = !it.done();
debug_->thread_local_.break_frame_id_ =
- has_frames ? it.frame()->id() : StackFrame::NO_ID;
+ has_frames ? it.frame()->id() : StackFrameId::NO_ID;
debug_->UpdateState();
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 8ac77e259d..684397400a 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -11,7 +11,7 @@
#include "src/common/globals.h"
#include "src/debug/debug-interface.h"
#include "src/debug/interface-types.h"
-#include "src/execution/frames.h"
+#include "src/execution/interrupts-scope.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/objects/debug-objects.h"
@@ -22,7 +22,10 @@ namespace internal {
// Forward declarations.
class AbstractCode;
class DebugScope;
+class InterpretedFrame;
+class JavaScriptFrame;
class JSGeneratorObject;
+class StackFrame;
// Step actions. NOTE: These values are in macros.py as well.
enum StepAction : int8_t {
@@ -341,7 +344,7 @@ class V8_EXPORT_PRIVATE Debug {
void set_break_points_active(bool v) { break_points_active_ = v; }
bool break_points_active() const { return break_points_active_; }
- StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
+ StackFrameId break_frame_id() { return thread_local_.break_frame_id_; }
Handle<Object> return_value_handle();
Object return_value() { return thread_local_.return_value_; }
@@ -497,7 +500,7 @@ class V8_EXPORT_PRIVATE Debug {
base::AtomicWord current_debug_scope_;
// Frame id for the frame of the current break.
- StackFrame::Id break_frame_id_;
+ StackFrameId break_frame_id_;
// Step action for last step performed.
StepAction last_step_action_;
@@ -564,7 +567,7 @@ class DebugScope {
Debug* debug_;
DebugScope* prev_; // Previous scope if entered recursively.
- StackFrame::Id break_frame_id_; // Previous break frame id.
+ StackFrameId break_frame_id_; // Previous break frame id.
PostponeInterruptsScope no_interrupts_;
};
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 9144e03be4..6e8a349a7d 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -11,6 +11,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/codegen/source-position-table.h"
+#include "src/common/globals.h"
#include "src/debug/debug-interface.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
@@ -826,9 +827,10 @@ class FunctionDataMap : public ThreadVisitor {
void Fill(Isolate* isolate, Address* restart_frame_fp) {
{
- HeapIterator iterator(isolate->heap(), HeapIterator::kFilterUnreachable);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(isolate->heap(),
+ HeapObjectIterator::kFilterUnreachable);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
FunctionData* data = nullptr;
@@ -851,7 +853,7 @@ class FunctionDataMap : public ThreadVisitor {
}
}
FunctionData::StackPosition stack_position =
- isolate->debug()->break_frame_id() == StackFrame::NO_ID
+ isolate->debug()->break_frame_id() == StackFrameId::NO_ID
? FunctionData::PATCHABLE
: FunctionData::ABOVE_BREAK_FRAME;
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
@@ -936,10 +938,10 @@ class FunctionDataMap : public ThreadVisitor {
std::map<FuncId, FunctionData> map_;
};
-bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
- Handle<Script> new_script,
- FunctionDataMap& function_data_map,
- debug::LiveEditResult* result) {
+bool CanPatchScript(
+ const LiteralMap& changed, Handle<Script> script, Handle<Script> new_script,
+ FunctionDataMap& function_data_map, // NOLINT(runtime/references)
+ debug::LiveEditResult* result) {
debug::LiveEditResult::Status status = debug::LiveEditResult::OK;
for (const auto& mapping : changed) {
FunctionData* data = nullptr;
@@ -970,9 +972,10 @@ bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
return true;
}
-bool CanRestartFrame(Isolate* isolate, Address fp,
- FunctionDataMap& function_data_map,
- const LiteralMap& changed, debug::LiveEditResult* result) {
+bool CanRestartFrame(
+ Isolate* isolate, Address fp,
+ FunctionDataMap& function_data_map, // NOLINT(runtime/references)
+ const LiteralMap& changed, debug::LiveEditResult* result) {
DCHECK_GT(fp, 0);
StackFrame* restart_frame = nullptr;
StackFrameIterator it(isolate);
@@ -1118,13 +1121,10 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
UpdatePositions(isolate, sfi, diffs);
sfi->set_script(*new_script);
- if (sfi->HasUncompiledData()) {
- sfi->uncompiled_data().set_function_literal_id(
- mapping.second->function_literal_id());
- }
+ sfi->set_function_literal_id(mapping.second->function_literal_id());
new_script->shared_function_infos().Set(
mapping.second->function_literal_id(), HeapObjectReference::Weak(*sfi));
- DCHECK_EQ(sfi->FunctionLiteralId(isolate),
+ DCHECK_EQ(sfi->function_literal_id(),
mapping.second->function_literal_id());
// Save the new start_position -> id mapping, so that we can recover it when
@@ -1222,7 +1222,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
std::set<int> start_positions;
for (SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
DCHECK_EQ(sfi.script(), *new_script);
- DCHECK_EQ(sfi.FunctionLiteralId(isolate), it.CurrentIndex());
+ DCHECK_EQ(sfi.function_literal_id(), it.CurrentIndex());
// Don't check the start position of the top-level function, as it can
// overlap with a function in the script.
if (sfi.is_toplevel()) {
@@ -1242,7 +1242,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
SharedFunctionInfo::cast(constants.get(i));
DCHECK_EQ(inner_sfi.script(), *new_script);
DCHECK_EQ(inner_sfi, new_script->shared_function_infos()
- .Get(inner_sfi.FunctionLiteralId(isolate))
+ .Get(inner_sfi.function_literal_id())
->GetHeapObject());
}
}
@@ -1273,8 +1273,8 @@ void LiveEdit::InitializeThreadLocal(Debug* debug) {
bool LiveEdit::RestartFrame(JavaScriptFrame* frame) {
if (!LiveEdit::kFrameDropperSupported) return false;
Isolate* isolate = frame->isolate();
- StackFrame::Id break_frame_id = isolate->debug()->break_frame_id();
- bool break_frame_found = break_frame_id == StackFrame::NO_ID;
+ StackFrameId break_frame_id = isolate->debug()->break_frame_id();
+ bool break_frame_found = break_frame_id == StackFrameId::NO_ID;
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
StackFrame* current = it.frame();
break_frame_found = break_frame_found || break_frame_id == current->id();
diff --git a/deps/v8/src/deoptimizer/OWNERS b/deps/v8/src/deoptimizer/OWNERS
index 97a194d7cf..632607a952 100644
--- a/deps/v8/src/deoptimizer/OWNERS
+++ b/deps/v8/src/deoptimizer/OWNERS
@@ -3,3 +3,5 @@ jarin@chromium.org
mstarzinger@chromium.org
sigurds@chromium.org
tebbi@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index 4004dfd90f..89e9988f9e 100644
--- a/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -231,7 +231,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(lr);
__ Jump(scratch);
}
- __ stop("Unreachable.");
+ __ stop();
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
diff --git a/deps/v8/src/deoptimizer/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h
index d556e89927..ac2273460a 100644
--- a/deps/v8/src/deoptimizer/deoptimize-reason.h
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.h
@@ -12,6 +12,7 @@ namespace internal {
#define DEOPTIMIZE_REASON_LIST(V) \
V(ArrayBufferWasDetached, "array buffer was detached") \
+ V(BigIntTooBig, "BigInt too big") \
V(CowArrayElementsChanged, "copy-on-write array's elements changed") \
V(CouldNotGrowElements, "failed to grow elements store") \
V(DeoptimizeNow, "%_DeoptimizeNow") \
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index 91556cfbdc..47c40d373e 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -159,7 +159,7 @@ Code Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_.IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
Isolate* isolate = isolate_;
- Context native_context = function_.context().native_context();
+ NativeContext native_context = function_.context().native_context();
Object element = native_context.DeoptimizedCodeListHead();
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
@@ -270,10 +270,10 @@ class ActivationsFinder : public ThreadVisitor {
// Move marked code from the optimized code list to the deoptimized code list,
// and replace pc on the stack for codes marked for deoptimization.
-void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
+void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
DisallowHeapAllocation no_allocation;
- Isolate* isolate = context.GetIsolate();
+ Isolate* isolate = native_context.GetIsolate();
Code topmost_optimized_code;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
@@ -315,7 +315,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
// Move marked code from the optimized code list to the deoptimized code list.
// Walk over all optimized code objects in this native context.
Code prev;
- Object element = context.OptimizedCodeListHead();
+ Object element = native_context.OptimizedCodeListHead();
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
@@ -329,12 +329,12 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
prev.set_next_code_link(next);
} else {
// There was no previous node, the next node is the new head.
- context.SetOptimizedCodeListHead(next);
+ native_context.SetOptimizedCodeListHead(next);
}
// Move the code to the _deoptimized_ code list.
- code.set_next_code_link(context.DeoptimizedCodeListHead());
- context.SetDeoptimizedCodeListHead(code);
+ code.set_next_code_link(native_context.DeoptimizedCodeListHead());
+ native_context.SetDeoptimizedCodeListHead(code);
} else {
// Not marked; preserve this element.
prev = code;
@@ -373,7 +373,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
// For all contexts, mark all code, then deoptimize.
Object context = isolate->heap()->native_contexts_list();
while (!context.IsUndefined(isolate)) {
- Context native_context = Context::cast(context);
+ NativeContext native_context = NativeContext::cast(context);
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link();
@@ -393,15 +393,15 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
// For all contexts, deoptimize code already marked.
Object context = isolate->heap()->native_contexts_list();
while (!context.IsUndefined(isolate)) {
- Context native_context = Context::cast(context);
+ NativeContext native_context = NativeContext::cast(context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context.next_context_link();
}
}
-void Deoptimizer::MarkAllCodeForContext(Context context) {
- Object element = context.OptimizedCodeListHead();
- Isolate* isolate = context.GetIsolate();
+void Deoptimizer::MarkAllCodeForContext(NativeContext native_context) {
+ Object element = native_context.OptimizedCodeListHead();
+ Isolate* isolate = native_context.GetIsolate();
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
@@ -590,7 +590,7 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
// Count all entries in the deoptimizing code list of every context.
Object context = isolate->heap()->native_contexts_list();
while (!context.IsUndefined(isolate)) {
- Context native_context = Context::cast(context);
+ NativeContext native_context = NativeContext::cast(context);
Object element = native_context.DeoptimizedCodeListHead();
while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
@@ -633,6 +633,12 @@ bool ShouldPadArguments(int arg_count) {
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
+ // When we call this function, the return address of the previous frame has
+ // been removed from the stack by GenerateDeoptimizationEntries() so the stack
+ // is not iterable by the SafeStackFrameIterator.
+#if V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK
+ DCHECK_EQ(0, isolate()->isolate_data()->stack_is_iterable());
+#endif
base::ElapsedTimer timer;
// Determine basic deoptimization information. The optimized frame is
@@ -662,10 +668,6 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
- StackGuard* const stack_guard = isolate()->stack_guard();
- CHECK_GT(static_cast<uintptr_t>(caller_frame_top_),
- stack_guard->real_jslimit());
-
if (trace_scope_ != nullptr) {
timer.Start();
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
@@ -723,7 +725,6 @@ void Deoptimizer::DoComputeOutputFrames() {
// Translate each output frame.
int frame_index = 0; // output_frame_index
- size_t total_output_frame_size = 0;
for (size_t i = 0; i < count; ++i, ++frame_index) {
// Read the ast node id, function, and frame height for this output frame.
TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
@@ -759,7 +760,6 @@ void Deoptimizer::DoComputeOutputFrames() {
FATAL("invalid frame");
break;
}
- total_output_frame_size += output_[frame_index]->GetFrameSize();
}
FrameDescription* topmost = output_[count - 1];
@@ -779,14 +779,6 @@ void Deoptimizer::DoComputeOutputFrames() {
bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
caller_frame_top_, ms);
}
-
- // TODO(jgruber,neis):
- // The situation that the output frames do not fit into the stack space should
- // be prevented by an optimized function's initial stack check: That check
- // must fail if the (interpreter) frames generated upon deoptimization of the
- // function would overflow the stack.
- CHECK_GT(static_cast<uintptr_t>(caller_frame_top_) - total_output_frame_size,
- stack_guard->real_jslimit());
}
void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
@@ -1364,21 +1356,25 @@ bool Deoptimizer::BuiltinContinuationModeIsWithCatch(
UNREACHABLE();
}
-StackFrame::Type Deoptimizer::BuiltinContinuationModeToFrameType(
- BuiltinContinuationMode mode) {
+namespace {
+
+StackFrame::Type BuiltinContinuationModeToFrameType(
+ Deoptimizer::BuiltinContinuationMode mode) {
switch (mode) {
- case BuiltinContinuationMode::STUB:
+ case Deoptimizer::BuiltinContinuationMode::STUB:
return StackFrame::BUILTIN_CONTINUATION;
- case BuiltinContinuationMode::JAVASCRIPT:
+ case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT:
return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION;
- case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+ case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
- case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+ case Deoptimizer::BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
}
UNREACHABLE();
}
+} // namespace
+
Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
BuiltinContinuationMode mode, bool must_handle_result) {
switch (mode) {
@@ -1438,7 +1434,7 @@ Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
// +-------------------------+
// | context |<- this non-standard context slot contains
// +-------------------------+ the context, even for non-JS builtins.
-// | builtin address |
+// | builtin index |
// +-------------------------+
// | builtin input GPR reg0 |<- populated from deopt FrameState using
// +-------------------------+ the builtin's CallInterfaceDescriptor
@@ -1663,7 +1659,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
"builtin JavaScript context\n");
// The builtin to continue to.
- frame_writer.PushRawObject(builtin, "builtin address\n");
+ frame_writer.PushRawObject(Smi::FromInt(builtin.builtin_index()),
+ "builtin index\n");
for (int i = 0; i < allocatable_register_count; ++i) {
int code = config->GetAllocatableGeneralCode(i);
@@ -3037,12 +3034,7 @@ int TranslatedState::CreateNextTranslatedValue(
return translated_value.GetChildrenCount();
}
intptr_t value = registers->GetRegister(input_reg);
-#if defined(V8_COMPRESS_POINTERS)
- Address uncompressed_value = DecompressTaggedAny(
- isolate()->isolate_root(), static_cast<uint32_t>(value));
-#else
- Address uncompressed_value = value;
-#endif
+ Address uncompressed_value = DecompressIfNeeded(value);
if (trace_file != nullptr) {
PrintF(trace_file, V8PRIxPTR_FMT " ; %s ", uncompressed_value,
converter.NameOfCPURegister(input_reg));
@@ -3165,12 +3157,7 @@ int TranslatedState::CreateNextTranslatedValue(
int slot_offset =
OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
-#if defined(V8_COMPRESS_POINTERS)
- Address uncompressed_value = DecompressTaggedAny(
- isolate()->isolate_root(), static_cast<uint32_t>(value));
-#else
- Address uncompressed_value = value;
-#endif
+ Address uncompressed_value = DecompressIfNeeded(value);
if (trace_file != nullptr) {
PrintF(trace_file, V8PRIxPTR_FMT " ; [fp %c %3d] ",
uncompressed_value, slot_offset < 0 ? '-' : '+',
@@ -3284,6 +3271,15 @@ int TranslatedState::CreateNextTranslatedValue(
FATAL("We should never get here - unexpected deopt info.");
}
+Address TranslatedState::DecompressIfNeeded(intptr_t value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ return DecompressTaggedAny(isolate()->isolate_root(),
+ static_cast<uint32_t>(value));
+ } else {
+ return value;
+ }
+}
+
TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationData data =
diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 67e3e54405..a2471247ef 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -16,7 +16,6 @@
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/diagnostics/code-tracer.h"
#include "src/execution/frame-constants.h"
-#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/shared-function-info.h"
@@ -28,8 +27,10 @@ namespace v8 {
namespace internal {
class FrameDescription;
+class JavaScriptFrame;
class TranslationIterator;
class DeoptimizedFrameInfo;
+class TranslatedFrame;
class TranslatedState;
class RegisterValues;
class MacroAssembler;
@@ -340,6 +341,7 @@ class TranslatedState {
int CreateNextTranslatedValue(int frame_index, TranslationIterator* iterator,
FixedArray literal_array, Address fp,
RegisterValues* registers, FILE* trace_file);
+ Address DecompressIfNeeded(intptr_t value);
Address ComputeArgumentsPosition(Address input_frame_pointer,
CreateArgumentsType type, int* length);
void CreateArgumentsElementsTranslatedValues(int frame_index,
@@ -499,6 +501,13 @@ class Deoptimizer : public Malloced {
static const int kMaxNumberOfEntries = 16384;
+ enum class BuiltinContinuationMode {
+ STUB,
+ JAVASCRIPT,
+ JAVASCRIPT_WITH_CATCH,
+ JAVASCRIPT_HANDLE_EXCEPTION
+ };
+
private:
friend class FrameWriter;
void QueueValueForMaterialization(Address output_address, Object obj,
@@ -521,16 +530,8 @@ class Deoptimizer : public Malloced {
void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
int frame_index);
- enum class BuiltinContinuationMode {
- STUB,
- JAVASCRIPT,
- JAVASCRIPT_WITH_CATCH,
- JAVASCRIPT_HANDLE_EXCEPTION
- };
static bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode);
static bool BuiltinContinuationModeIsJavaScript(BuiltinContinuationMode mode);
- static StackFrame::Type BuiltinContinuationModeToFrameType(
- BuiltinContinuationMode mode);
static Builtins::Name TrampolineForBuiltinContinuation(
BuiltinContinuationMode mode, bool must_handle_result);
@@ -549,11 +550,8 @@ class Deoptimizer : public Malloced {
Isolate* isolate,
DeoptimizeKind kind);
- // Marks all the code in the given context for deoptimization.
- static void MarkAllCodeForContext(Context native_context);
-
- // Deoptimizes all code marked in the given context.
- static void DeoptimizeMarkedCodeForContext(Context native_context);
+ static void MarkAllCodeForContext(NativeContext native_context);
+ static void DeoptimizeMarkedCodeForContext(NativeContext native_context);
// Some architectures need to push padding together with the TOS register
// in order to maintain stack alignment.
diff --git a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index 6b01449ba7..f40ff562be 100644
--- a/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -116,6 +116,12 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ __ mov_b(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate), edx),
+ Immediate(0));
+
// Remove the return address and the double registers.
__ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
@@ -194,6 +200,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ push(Operand(esi, offset));
}
+ __ mov_b(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate), edx),
+ Immediate(1));
+
// Restore the registers from the stack.
__ popad();
diff --git a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index a56501660b..07bc9a511b 100644
--- a/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -225,7 +225,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
__ Jump(at);
- __ stop("Unreachable.");
+ __ stop();
}
// Maximum size of a table entry generated below.
diff --git a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index 6869199f1b..f85659c4ab 100644
--- a/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -226,7 +226,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
__ Jump(at);
- __ stop("Unreachable.");
+ __ stop();
}
// Maximum size of a table entry generated below.
diff --git a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index 268660c2ef..41616a5af2 100644
--- a/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -56,11 +56,13 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
-
- __ mov(ip, Operand(ExternalReference::Create(
- IsolateAddressId::kCEntryFPAddress, isolate)));
- __ StoreP(fp, MemOperand(ip));
-
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ mov(scratch, Operand(ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, isolate)));
+ __ StoreP(fp, MemOperand(scratch));
+ }
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
@@ -210,20 +212,28 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ push(r9);
// Restore the registers from the last output frame.
- DCHECK(!(ip.bit() & restored_regs));
- __ mr(ip, r5);
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- if ((restored_regs & (1 << i)) != 0) {
- __ LoadP(ToRegister(i), MemOperand(ip, offset));
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ DCHECK(!(scratch.bit() & restored_regs));
+ __ mr(scratch, r5);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ LoadP(ToRegister(i), MemOperand(scratch, offset));
+ }
}
}
- __ pop(ip); // get continuation, leave pc on stack
- __ pop(r0);
- __ mtlr(r0);
- __ Jump(ip);
- __ stop("Unreachable.");
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ pop(scratch); // get continuation, leave pc on stack
+ __ pop(r0);
+ __ mtlr(r0);
+ __ Jump(scratch);
+ }
+ __ stop();
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
diff --git a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index db2330a8e8..6da740b0e5 100644
--- a/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -228,7 +228,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
- __ stop("Unreachable.");
+ __ stop();
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
diff --git a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index 7654dc965f..cfdd6c9ef1 100644
--- a/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -129,6 +129,12 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ popq(Operand(rbx, dst_offset));
}
+ // Mark the stack as not iterable for the CPU profiler which won't be able to
+ // walk the stack without the return address.
+ __ movb(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate)),
+ Immediate(0));
+
// Remove the return address from the stack.
__ addq(rsp, Immediate(kPCOnStackSize));
@@ -218,6 +224,10 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ popq(r);
}
+ __ movb(__ ExternalReferenceAsOperand(
+ ExternalReference::stack_is_iterable_address(isolate)),
+ Immediate(1));
+
// Return to the continuation point.
__ ret(0);
}
diff --git a/deps/v8/src/diagnostics/DEPS b/deps/v8/src/diagnostics/DEPS
new file mode 100644
index 0000000000..27782f9ecd
--- /dev/null
+++ b/deps/v8/src/diagnostics/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+src/compiler/node.h",
+]
diff --git a/deps/v8/src/diagnostics/eh-frame.cc b/deps/v8/src/diagnostics/eh-frame.cc
index e19e09f332..45d693a476 100644
--- a/deps/v8/src/diagnostics/eh-frame.cc
+++ b/deps/v8/src/diagnostics/eh-frame.cc
@@ -582,7 +582,8 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
void EhFrameDisassembler::DisassembleToStream(std::ostream& stream) { // NOLINT
// The encoded CIE size does not include the size field itself.
const int cie_size =
- ReadUnalignedUInt32(reinterpret_cast<Address>(start_)) + kInt32Size;
+ base::ReadUnalignedValue<uint32_t>(reinterpret_cast<Address>(start_)) +
+ kInt32Size;
const int fde_offset = cie_size;
const byte* cie_directives_start =
@@ -597,12 +598,13 @@ void EhFrameDisassembler::DisassembleToStream(std::ostream& stream) { // NOLINT
reinterpret_cast<Address>(start_) + fde_offset +
EhFrameConstants::kProcedureAddressOffsetInFde;
int32_t procedure_offset =
- ReadUnalignedValue<int32_t>(procedure_offset_address);
+ base::ReadUnalignedValue<int32_t>(procedure_offset_address);
Address procedure_size_address = reinterpret_cast<Address>(start_) +
fde_offset +
EhFrameConstants::kProcedureSizeOffsetInFde;
- uint32_t procedure_size = ReadUnalignedUInt32(procedure_size_address);
+ uint32_t procedure_size =
+ base::ReadUnalignedValue<uint32_t>(procedure_size_address);
const byte* fde_start = start_ + fde_offset;
stream << reinterpret_cast<const void*>(fde_start) << " .eh_frame: FDE\n"
diff --git a/deps/v8/src/diagnostics/eh-frame.h b/deps/v8/src/diagnostics/eh-frame.h
index 8b78b04b16..a9d76a2743 100644
--- a/deps/v8/src/diagnostics/eh-frame.h
+++ b/deps/v8/src/diagnostics/eh-frame.h
@@ -6,9 +6,9 @@
#define V8_DIAGNOSTICS_EH_FRAME_H_
#include "src/base/compiler-specific.h"
+#include "src/base/memory.h"
#include "src/codegen/register-arch.h"
#include "src/common/globals.h"
-#include "src/common/v8memory.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -144,11 +144,11 @@ class V8_EXPORT_PRIVATE EhFrameWriter {
}
void PatchInt32(int base_offset, uint32_t value) {
DCHECK_EQ(
- ReadUnalignedUInt32(reinterpret_cast<Address>(eh_frame_buffer_.data()) +
- base_offset),
+ base::ReadUnalignedValue<uint32_t>(
+ reinterpret_cast<Address>(eh_frame_buffer_.data()) + base_offset),
kInt32Placeholder);
DCHECK_LT(base_offset + kInt32Size, eh_frame_offset());
- WriteUnalignedUInt32(
+ base::WriteUnalignedValue<uint32_t>(
reinterpret_cast<Address>(eh_frame_buffer_.data()) + base_offset,
value);
}
@@ -216,7 +216,9 @@ class V8_EXPORT_PRIVATE EhFrameIterator {
void SkipCie() {
DCHECK_EQ(next_, start_);
- next_ += ReadUnalignedUInt32(reinterpret_cast<Address>(next_)) + kInt32Size;
+ next_ +=
+ base::ReadUnalignedValue<uint32_t>(reinterpret_cast<Address>(next_)) +
+ kInt32Size;
}
void SkipToFdeDirectives() {
@@ -267,7 +269,7 @@ class V8_EXPORT_PRIVATE EhFrameIterator {
T GetNextValue() {
T result;
DCHECK_LE(next_ + sizeof(result), end_);
- result = ReadUnalignedValue<T>(reinterpret_cast<Address>(next_));
+ result = base::ReadUnalignedValue<T>(reinterpret_cast<Address>(next_));
next_ += sizeof(result);
return result;
}
diff --git a/deps/v8/src/diagnostics/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
index 70fd9fb06d..e1290bae4e 100644
--- a/deps/v8/src/diagnostics/gdb-jit.cc
+++ b/deps/v8/src/diagnostics/gdb-jit.cc
@@ -4,6 +4,7 @@
#include "src/diagnostics/gdb-jit.h"
+#include <map>
#include <memory>
#include <vector>
@@ -17,7 +18,6 @@
#include "src/objects/objects.h"
#include "src/snapshot/natives.h"
#include "src/utils/ostreams.h"
-#include "src/utils/splay-tree-inl.h"
#include "src/utils/vector.h"
#include "src/zone/zone-chunk-list.h"
@@ -1822,23 +1822,24 @@ struct AddressRange {
Address end;
};
-struct SplayTreeConfig {
+struct AddressRangeLess {
+ bool operator()(const AddressRange& a, const AddressRange& b) const {
+ if (a.start == b.start) return a.end < b.end;
+ return a.start < b.start;
+ }
+};
+
+struct CodeMapConfig {
using Key = AddressRange;
using Value = JITCodeEntry*;
- static const AddressRange kNoKey;
- static Value NoValue() { return nullptr; }
- static int Compare(const AddressRange& a, const AddressRange& b) {
- // ptrdiff_t probably doesn't fit in an int.
- if (a.start < b.start) return -1;
- if (a.start == b.start) return 0;
- return 1;
- }
+ using Less = AddressRangeLess;
};
-const AddressRange SplayTreeConfig::kNoKey = {0, 0};
-using CodeMap = SplayTree<SplayTreeConfig>;
+using CodeMap =
+ std::map<CodeMapConfig::Key, CodeMapConfig::Value, CodeMapConfig::Less>;
static CodeMap* GetCodeMap() {
+ // TODO(jgruber): Don't leak.
static CodeMap* code_map = nullptr;
if (code_map == nullptr) code_map = new CodeMap();
return code_map;
@@ -1909,37 +1910,49 @@ static void AddUnwindInfo(CodeDescription* desc) {
static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
-// Remove entries from the splay tree that intersect the given address range,
+// Remove entries from the map that intersect the given address range,
// and deregister them from GDB.
static void RemoveJITCodeEntries(CodeMap* map, const AddressRange& range) {
DCHECK(range.start < range.end);
- CodeMap::Locator cur;
- if (map->FindGreatestLessThan(range, &cur) || map->FindLeast(&cur)) {
- // Skip entries that are entirely less than the range of interest.
- while (cur.key().end <= range.start) {
- // CodeMap::FindLeastGreaterThan succeeds for entries whose key is greater
- // than _or equal to_ the given key, so we have to advance our key to get
- // the next one.
- AddressRange new_key;
- new_key.start = cur.key().end;
- new_key.end = 0;
- if (!map->FindLeastGreaterThan(new_key, &cur)) return;
- }
- // Evict intersecting ranges.
- while (cur.key().start < range.end) {
- AddressRange old_range = cur.key();
- JITCodeEntry* old_entry = cur.value();
- UnregisterCodeEntry(old_entry);
- DestroyCodeEntry(old_entry);
+ if (map->empty()) return;
+
+ // Find the first overlapping entry.
- CHECK(map->Remove(old_range));
- if (!map->FindLeastGreaterThan(old_range, &cur)) return;
+ // If successful, points to the first element not less than `range`. The
+ // returned iterator has the key in `first` and the value in `second`.
+ auto it = map->lower_bound(range);
+ auto start_it = it;
+
+ if (it == map->end()) {
+ start_it = map->begin();
+ } else if (it != map->begin()) {
+ for (--it; it != map->begin(); --it) {
+ if ((*it).first.end <= range.start) break;
+ start_it = it;
}
}
+
+ DCHECK(start_it != map->end());
+
+ // Find the first non-overlapping entry after `range`.
+
+ const auto end_it = map->lower_bound({range.end, 0});
+
+ // Evict intersecting ranges.
+
+ if (std::distance(start_it, end_it) < 1) return; // No overlapping entries.
+
+ for (auto it = start_it; it != end_it; it++) {
+ JITCodeEntry* old_entry = (*it).second;
+ UnregisterCodeEntry(old_entry);
+ DestroyCodeEntry(old_entry);
+ }
+
+ map->erase(start_it, end_it);
}
-// Insert the entry into the splay tree and register it with GDB.
+// Insert the entry into the map and register it with GDB.
static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
JITCodeEntry* entry, bool dump_if_enabled,
const char* name_hint) {
@@ -1956,9 +1969,9 @@ static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
}
#endif
- CodeMap::Locator cur;
- CHECK(map->Insert(range, &cur));
- cur.set_value(entry);
+ auto result = map->emplace(range, entry);
+ DCHECK(result.second); // Insertion happened.
+ USE(result);
RegisterCodeEntry(entry);
}
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index 534898fdf5..e8c9588bbe 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -184,6 +184,24 @@ void InstructionTable::AddJumpConditionalShort() {
}
}
+namespace {
+int8_t Imm8(const uint8_t* data) {
+ return *reinterpret_cast<const int8_t*>(data);
+}
+uint8_t Imm8_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint8_t*>(data);
+}
+int16_t Imm16(const uint8_t* data) {
+ return *reinterpret_cast<const int16_t*>(data);
+}
+uint16_t Imm16_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint16_t*>(data);
+}
+int32_t Imm32(const uint8_t* data) {
+ return *reinterpret_cast<const int32_t*>(data);
+}
+} // namespace
+
// The IA32 disassembler implementation.
class DisassemblerIA32 {
public:
@@ -373,8 +391,7 @@ int DisassemblerIA32::PrintRightOperandHelper(
switch (mod) {
case 0:
if (rm == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
- AppendToBuffer("[0x%x]", disp);
+ AppendToBuffer("[0x%x]", Imm32(modrmp + 1));
return 5;
} else if (rm == esp) {
byte sib = *(modrmp + 1);
@@ -384,7 +401,7 @@ int DisassemblerIA32::PrintRightOperandHelper(
AppendToBuffer("[%s]", (this->*register_name)(rm));
return 2;
} else if (base == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+ int32_t disp = Imm32(modrmp + 2);
AppendToBuffer("[%s*%d%s0x%x]", (this->*register_name)(index),
1 << scale, disp < 0 ? "-" : "+",
disp < 0 ? -disp : disp);
@@ -409,8 +426,7 @@ int DisassemblerIA32::PrintRightOperandHelper(
byte sib = *(modrmp + 1);
int scale, index, base;
get_sib(sib, &scale, &index, &base);
- int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<int8_t*>(modrmp + 2);
+ int disp = mod == 2 ? Imm32(modrmp + 2) : Imm8(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
@@ -422,8 +438,7 @@ int DisassemblerIA32::PrintRightOperandHelper(
return mod == 2 ? 6 : 3;
} else {
// No sib.
- int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<int8_t*>(modrmp + 1);
+ int disp = mod == 2 ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
return mod == 2 ? 5 : 2;
@@ -517,7 +532,7 @@ int DisassemblerIA32::PrintImmediateOp(byte* data) {
AppendToBuffer(",0x%x", *(data + 1 + count));
return 1 + count + 1 /*int8*/;
} else {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
+ AppendToBuffer(",0x%x", Imm32(data + 1 + count));
return 1 + count + 4 /*int32_t*/;
}
}
@@ -557,7 +572,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
AppendToBuffer("%s ", mnem);
int count = PrintRightOperand(data);
if (regop == 0) {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count));
+ AppendToBuffer(",0x%x", Imm32(data + count));
count += 4;
}
return 1 + count;
@@ -627,7 +642,7 @@ int DisassemblerIA32::JumpShort(byte* data) {
int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
DCHECK_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
+ byte* dest = data + Imm32(data + 2) + 6;
const char* mnem = jump_conditional_mnem[cond];
AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
if (comment != nullptr) {
@@ -775,56 +790,53 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(current));
+ AppendToBuffer(",%d", Imm8_U(current));
current++;
break;
case 0x0F:
AppendToBuffer("vpalignr %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(current));
+ AppendToBuffer(",%d", Imm8_U(current));
current++;
break;
case 0x14:
AppendToBuffer("vpextrb ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(current));
current++;
break;
case 0x15:
AppendToBuffer("vpextrw ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(current));
current++;
break;
case 0x16:
AppendToBuffer("vpextrd ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(current));
current++;
break;
case 0x20:
AppendToBuffer("vpinsrb %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x21:
AppendToBuffer("vinsertps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x22:
AppendToBuffer("vpinsrd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
default:
@@ -872,7 +884,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
case 0x70:
AppendToBuffer("vpshuflw %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x7C:
@@ -933,7 +945,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
case 0x70:
AppendToBuffer("vpshufhw %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x7f:
@@ -1173,7 +1185,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
case 0x70:
AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
case 0x71:
@@ -1197,7 +1209,7 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
+ AppendToBuffer(",%d", Imm8(current));
current++;
break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
@@ -1615,8 +1627,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case MOVE_REG_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ byte* addr = reinterpret_cast<byte*>(Imm32(data + 1));
AppendToBuffer("mov %s,%s", NameOfCPURegister(*data & 0x07),
NameOfAddress(addr));
data += 5;
@@ -1624,15 +1635,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+ byte* addr = data + Imm32(data + 1) + 5;
AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
case SHORT_IMMEDIATE_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ byte* addr = reinterpret_cast<byte*>(Imm32(data + 1));
AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
@@ -1656,7 +1666,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (!processed) {
switch (*data) {
case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
+ AppendToBuffer("ret 0x%x", Imm16_U(data + 1));
data += 3;
break;
@@ -1670,7 +1680,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x69: {
data++;
data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
- AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data));
+ AppendToBuffer(",%d", Imm32(data));
data += 4;
} break;
@@ -1860,6 +1870,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
+ } else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xF0) {
+ AppendToBuffer("mfence");
+ data += 3;
} else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xE8) {
AppendToBuffer("lfence");
data += 3;
@@ -1920,8 +1933,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
AppendToBuffer("%s ", "mov");
data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm32(data));
data += 4;
}
} break;
@@ -1980,8 +1992,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
AppendToBuffer("cmpw ");
data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm16(data));
data += 2;
} else if (*data == 0x87) {
data++;
@@ -2005,15 +2016,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
AppendToBuffer("%s ", "mov_w");
data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm16(data));
data += 2;
} else if (*data == 0xF7) {
data++;
AppendToBuffer("%s ", "test_w");
data += PrintRightOperand(data);
- int imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm16(data));
data += 2;
} else if (*data == 0x0F) {
data++;
@@ -2062,7 +2071,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pblendw %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(data));
+ AppendToBuffer(",%d", Imm8_U(data));
data++;
} else if (*data == 0x0F) {
data++;
@@ -2070,7 +2079,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("palignr %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(data));
+ AppendToBuffer(",%d", Imm8_U(data));
data++;
} else if (*data == 0x14) {
data++;
@@ -2078,8 +2087,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pextrb ");
data += PrintRightOperand(data);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(data));
data++;
} else if (*data == 0x15) {
data++;
@@ -2087,8 +2095,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pextrw ");
data += PrintRightOperand(data);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(data));
data++;
} else if (*data == 0x16) {
data++;
@@ -2096,8 +2103,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pextrd ");
data += PrintRightOperand(data);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop),
- *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), Imm8(data));
data++;
} else if (*data == 0x17) {
data++;
@@ -2113,7 +2119,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pinsrb %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0x21) {
data++;
@@ -2121,7 +2127,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("insertps %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0x22) {
data++;
@@ -2129,7 +2135,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pinsrd %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else {
UnimplementedInstruction();
@@ -2193,7 +2199,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pshufd %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0x90) {
data++;
@@ -2257,7 +2263,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pinsrw %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0xE7) {
data++;
@@ -2309,22 +2315,22 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} break;
case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
+ AppendToBuffer("push 0x%x", Imm32(data + 1));
data += 5;
break;
case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ AppendToBuffer("push 0x%x", Imm8(data + 1));
data += 2;
break;
case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
+ AppendToBuffer("test al,0x%x", Imm8_U(data + 1));
data += 2;
break;
case 0xA9:
- AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data + 1));
+ AppendToBuffer("test eax,0x%x", Imm32(data + 1));
data += 5;
break;
@@ -2377,7 +2383,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pshuflw %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else {
const char* mnem = "?";
@@ -2477,7 +2483,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pshufhw %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(data));
+ AppendToBuffer(",%d", Imm8(data));
data++;
} else if (b2 == 0x7F) {
AppendToBuffer("movdqu ");
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index c5219970cb..dc3b3b8091 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -63,10 +63,11 @@
#include "src/objects/struct-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/transitions-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects-inl.h"
#include "torque-generated/class-verifiers-tq.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -127,13 +128,6 @@ void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p) {
}
}
-namespace {
-void VerifyForeignPointer(Isolate* isolate, HeapObject host, Object foreign) {
- host.VerifyPointer(isolate, foreign);
- CHECK(foreign.IsUndefined(isolate) || Foreign::IsNormalized(foreign));
-}
-} // namespace
-
void Smi::SmiVerify(Isolate* isolate) {
CHECK(IsSmi());
CHECK(!IsCallable());
@@ -153,6 +147,10 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
SlicedString::cast(*this).SlicedStringVerify(isolate);
} else if (IsThinString()) {
ThinString::cast(*this).ThinStringVerify(isolate);
+ } else if (IsSeqString()) {
+ SeqString::cast(*this).SeqStringVerify(isolate);
+ } else if (IsExternalString()) {
+ ExternalString::cast(*this).ExternalStringVerify(isolate);
} else {
String::cast(*this).StringVerify(isolate);
}
@@ -293,8 +291,8 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
JSAsyncGeneratorObject::cast(*this).JSAsyncGeneratorObjectVerify(isolate);
break;
- case JS_VALUE_TYPE:
- JSValue::cast(*this).JSValueVerify(isolate);
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ JSPrimitiveWrapper::cast(*this).JSPrimitiveWrapperVerify(isolate);
break;
case JS_DATE_TYPE:
JSDate::cast(*this).JSDateVerify(isolate);
@@ -420,6 +418,12 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
SmallOrderedNameDictionary::cast(*this).SmallOrderedNameDictionaryVerify(
isolate);
break;
+ case SOURCE_TEXT_MODULE_TYPE:
+ SourceTextModule::cast(*this).SourceTextModuleVerify(isolate);
+ break;
+ case SYNTHETIC_MODULE_TYPE:
+ SyntheticModule::cast(*this).SyntheticModuleVerify(isolate);
+ break;
case CODE_DATA_CONTAINER_TYPE:
CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate);
break;
@@ -502,6 +506,11 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
CHECK(IsBytecodeArray());
CHECK(constant_pool().IsFixedArray());
VerifyHeapPointer(isolate, constant_pool());
+ CHECK(source_position_table().IsUndefined() ||
+ source_position_table().IsException() ||
+ source_position_table().IsByteArray() ||
+ source_position_table().IsSourcePositionTableWithFrameCache());
+ CHECK(handler_table().IsByteArray());
}
USE_TORQUE_VERIFIER(FreeSpace)
@@ -515,10 +524,13 @@ void FeedbackVector::FeedbackVectorVerify(Isolate* isolate) {
CHECK(code->IsSmi() || code->IsWeakOrCleared());
}
-bool JSObject::ElementsAreSafeToExamine() const {
+USE_TORQUE_VERIFIER(JSReceiver)
+
+bool JSObject::ElementsAreSafeToExamine(Isolate* isolate) const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- return elements() != GetReadOnlyRoots().one_pointer_filler_map();
+ return elements(isolate) !=
+ GetReadOnlyRoots(isolate).one_pointer_filler_map();
}
namespace {
@@ -624,7 +636,7 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- if (ElementsAreSafeToExamine()) {
+ if (ElementsAreSafeToExamine(isolate)) {
CHECK_EQ((map().has_fast_smi_or_object_elements() ||
map().has_frozen_or_sealed_elements() ||
(elements() == GetReadOnlyRoots().empty_fixed_array()) ||
@@ -699,6 +711,8 @@ void EmbedderDataArray::EmbedderDataArrayVerify(Isolate* isolate) {
}
}
+USE_TORQUE_VERIFIER(FixedArrayBase)
+
USE_TORQUE_VERIFIER(FixedArray)
void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
@@ -709,6 +723,8 @@ void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
}
void WeakArrayList::WeakArrayListVerify(Isolate* isolate) {
+ VerifySmiField(kCapacityOffset);
+ VerifySmiField(kLengthOffset);
for (int i = 0; i < length(); i++) {
MaybeObject::VerifyMaybeObjectPointer(isolate, Get(i));
}
@@ -774,24 +790,27 @@ void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*this, isolate);
for (int i = 0; i < number_of_all_descriptors(); i++) {
- MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToKeyIndex(i)));
- MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToDetailsIndex(i)));
- MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToValueIndex(i)));
+ MaybeObjectSlot slot(GetDescriptorSlot(i));
+ MaybeObject::VerifyMaybeObjectPointer(isolate, *(slot + kEntryKeyIndex));
+ MaybeObject::VerifyMaybeObjectPointer(isolate,
+ *(slot + kEntryDetailsIndex));
+ MaybeObject::VerifyMaybeObjectPointer(isolate, *(slot + kEntryValueIndex));
}
if (number_of_all_descriptors() == 0) {
- Heap* heap = isolate->heap();
- CHECK_EQ(ReadOnlyRoots(heap).empty_descriptor_array(), *this);
+ CHECK_EQ(ReadOnlyRoots(isolate).empty_descriptor_array(), *this);
CHECK_EQ(0, number_of_all_descriptors());
CHECK_EQ(0, number_of_descriptors());
- CHECK_EQ(ReadOnlyRoots(heap).empty_enum_cache(), enum_cache());
+ CHECK_EQ(ReadOnlyRoots(isolate).empty_enum_cache(), enum_cache());
} else {
CHECK_LT(0, number_of_all_descriptors());
CHECK_LE(number_of_descriptors(), number_of_all_descriptors());
- // Check that properties with private symbols names are non-enumerable.
+ // Check that properties with private symbols names are non-enumerable, and
+ // that fields are in order.
+ int expected_field_index = 0;
for (int descriptor = 0; descriptor < number_of_descriptors();
descriptor++) {
- Object key = get(ToKeyIndex(descriptor))->cast<Object>();
+ Object key = *(GetDescriptorSlot(descriptor) + kEntryKeyIndex);
// number_of_descriptors() may be out of sync with the actual descriptors
// written during descriptor array construction.
if (key.IsUndefined(isolate)) continue;
@@ -799,14 +818,16 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
if (Name::cast(key).IsPrivate()) {
CHECK_NE(details.attributes() & DONT_ENUM, 0);
}
- MaybeObject value = get(ToValueIndex(descriptor));
+ MaybeObject value = GetValue(descriptor);
HeapObject heap_object;
if (details.location() == kField) {
+ CHECK_EQ(details.field_index(), expected_field_index);
CHECK(
value == MaybeObject::FromObject(FieldType::None()) ||
value == MaybeObject::FromObject(FieldType::Any()) ||
value->IsCleared() ||
(value->GetHeapObjectIfWeak(&heap_object) && heap_object.IsMap()));
+ expected_field_index += details.field_width_in_words();
} else {
CHECK(!value->IsWeakOrCleared());
CHECK(!value->cast<Object>().IsMap());
@@ -905,8 +926,6 @@ void JSAsyncGeneratorObject::JSAsyncGeneratorObjectVerify(Isolate* isolate) {
queue().HeapObjectVerify(isolate);
}
-USE_TORQUE_VERIFIER(JSValue)
-
void JSDate::JSDateVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSDateVerify(*this, isolate);
@@ -940,13 +959,7 @@ void JSDate::JSDateVerify(Isolate* isolate) {
}
}
-void JSMessageObject::JSMessageObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSMessageObjectVerify(*this, isolate);
- VerifySmiField(kMessageTypeOffset);
- VerifySmiField(kStartPositionOffset);
- VerifySmiField(kEndPositionOffset);
- VerifySmiField(kErrorLevelOffset);
-}
+USE_TORQUE_VERIFIER(JSMessageObject)
void String::StringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::StringVerify(*this, isolate);
@@ -982,19 +995,16 @@ void SlicedString::SlicedStringVerify(Isolate* isolate) {
CHECK_GE(this->length(), SlicedString::kMinLength);
}
+USE_TORQUE_VERIFIER(ExternalString)
+
void JSBoundFunction::JSBoundFunctionVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSBoundFunctionVerify(*this, isolate);
CHECK(IsCallable());
-
- if (!raw_bound_target_function().IsUndefined(isolate)) {
- CHECK(bound_target_function().IsCallable());
- CHECK_EQ(IsConstructor(), bound_target_function().IsConstructor());
- }
+ CHECK_EQ(IsConstructor(), bound_target_function().IsConstructor());
}
void JSFunction::JSFunctionVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSFunctionVerify(*this, isolate);
- CHECK(raw_feedback_cell().IsFeedbackCell());
CHECK(code().IsCode());
CHECK(map().is_callable());
Handle<JSFunction> function(*this, isolate);
@@ -1168,13 +1178,12 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSArrayVerify(*this, isolate);
// If a GC was caused while constructing this array, the elements
// pointer may point to a one pointer filler map.
- if (!ElementsAreSafeToExamine()) return;
+ if (!ElementsAreSafeToExamine(isolate)) return;
if (elements().IsUndefined(isolate)) return;
CHECK(elements().IsFixedArray() || elements().IsFixedDoubleArray());
if (elements().length() == 0) {
CHECK_EQ(elements(), ReadOnlyRoots(isolate).empty_fixed_array());
}
- if (!length().IsNumber()) return;
// Verify that the length and the elements backing store are in sync.
if (length().IsSmi() && (HasFastElements() || HasFrozenOrSealedElements())) {
if (elements().length() > 0) {
@@ -1206,32 +1215,32 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
}
}
+USE_TORQUE_VERIFIER(JSCollection)
+
void JSSet::JSSetVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSSetVerify(*this, isolate);
- VerifyHeapPointer(isolate, table());
CHECK(table().IsOrderedHashSet() || table().IsUndefined(isolate));
// TODO(arv): Verify OrderedHashTable too.
}
void JSMap::JSMapVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSMapVerify(*this, isolate);
- VerifyHeapPointer(isolate, table());
CHECK(table().IsOrderedHashMap() || table().IsUndefined(isolate));
// TODO(arv): Verify OrderedHashTable too.
}
+USE_TORQUE_VERIFIER(JSCollectionIterator)
+
void JSSetIterator::JSSetIteratorVerify(Isolate* isolate) {
CHECK(IsJSSetIterator());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, table());
+ JSCollectionIteratorVerify(isolate);
CHECK(table().IsOrderedHashSet());
CHECK(index().IsSmi());
}
void JSMapIterator::JSMapIteratorVerify(Isolate* isolate) {
CHECK(IsJSMapIterator());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, table());
+ JSCollectionIteratorVerify(isolate);
CHECK(table().IsOrderedHashMap());
CHECK(index().IsSmi());
}
@@ -1257,14 +1266,8 @@ void WeakCell::WeakCellVerify(Isolate* isolate) {
key_list_next().IsUndefined(isolate));
CHECK(key_list_prev().IsWeakCell() || key_list_prev().IsUndefined(isolate));
- if (key_list_prev().IsWeakCell()) {
- CHECK_EQ(WeakCell::cast(key_list_prev()).key_list_next(), *this);
- }
CHECK(key_list_next().IsWeakCell() || key_list_next().IsUndefined(isolate));
- if (key_list_next().IsWeakCell()) {
- CHECK_EQ(WeakCell::cast(key_list_next()).key_list_prev(), *this);
- }
CHECK(finalization_group().IsUndefined(isolate) ||
finalization_group().IsJSFinalizationGroup());
@@ -1288,6 +1291,7 @@ void JSFinalizationGroup::JSFinalizationGroupVerify(Isolate* isolate) {
if (cleared_cells().IsWeakCell()) {
CHECK(WeakCell::cast(cleared_cells()).prev().IsUndefined(isolate));
}
+ CHECK(next().IsUndefined(isolate) || next().IsJSFinalizationGroup());
}
void JSFinalizationGroupCleanupIterator::
@@ -1305,13 +1309,11 @@ void FinalizationGroupCleanupJobTask::FinalizationGroupCleanupJobTaskVerify(
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSWeakMapVerify(*this, isolate);
- VerifyHeapPointer(isolate, table());
CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
}
void JSArrayIterator::JSArrayIteratorVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSArrayIteratorVerify(*this, isolate);
- CHECK(iterated_object().IsJSReceiver());
CHECK_GE(next_index().Number(), 0);
CHECK_LE(next_index().Number(), kMaxSafeInteger);
@@ -1328,17 +1330,16 @@ void JSArrayIterator::JSArrayIteratorVerify(Isolate* isolate) {
void JSStringIterator::JSStringIteratorVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSStringIteratorVerify(*this, isolate);
- CHECK(string().IsString());
-
CHECK_GE(index(), 0);
CHECK_LE(index(), String::kMaxLength);
}
USE_TORQUE_VERIFIER(JSAsyncFromSyncIterator)
+USE_TORQUE_VERIFIER(JSWeakCollection)
+
void JSWeakSet::JSWeakSetVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSWeakSetVerify(*this, isolate);
- VerifyHeapPointer(isolate, table());
CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
}
@@ -1351,11 +1352,7 @@ void CallableTask::CallableTaskVerify(Isolate* isolate) {
USE_TORQUE_VERIFIER(CallbackTask)
-void PromiseReactionJobTask::PromiseReactionJobTaskVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::PromiseReactionJobTaskVerify(*this, isolate);
- VerifyHeapPointer(isolate, handler());
- CHECK(handler().IsUndefined(isolate) || handler().IsCallable());
-}
+USE_TORQUE_VERIFIER(PromiseReactionJobTask)
USE_TORQUE_VERIFIER(PromiseFulfillReactionJobTask)
@@ -1369,7 +1366,6 @@ USE_TORQUE_VERIFIER(PromiseReaction)
void JSPromise::JSPromiseVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSPromiseVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
if (status() == Promise::kPending) {
CHECK(reactions().IsSmi() || reactions().IsPromiseReaction());
}
@@ -1460,7 +1456,7 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
break;
}
case JSRegExp::IRREGEXP: {
- bool is_native = RegExpImpl::UsesNativeRegExp();
+ bool is_native = RegExp::GeneratesNativeCode();
FixedArray arr = FixedArray::cast(data());
Object one_byte_data = arr.get(JSRegExp::kIrregexpLatin1CodeIndex);
@@ -1485,11 +1481,7 @@ void JSRegExp::JSRegExpVerify(Isolate* isolate) {
}
}
-void JSRegExpStringIterator::JSRegExpStringIteratorVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSRegExpStringIteratorVerify(*this, isolate);
- CHECK(iterating_string().IsString());
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSRegExpStringIterator)
void JSProxy::JSProxyVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSProxyVerify(*this, isolate);
@@ -1548,50 +1540,49 @@ void BigInt::BigIntVerify(Isolate* isolate) {
CHECK_IMPLIES(is_zero(), !sign()); // There is no -0n.
}
-void JSModuleNamespace::JSModuleNamespaceVerify(Isolate* isolate) {
- CHECK(IsJSModuleNamespace());
- VerifyPointer(isolate, module());
-}
+USE_TORQUE_VERIFIER(JSModuleNamespace)
-void ModuleInfoEntry::ModuleInfoEntryVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::ModuleInfoEntryVerify(*this, isolate);
+void SourceTextModuleInfoEntry::SourceTextModuleInfoEntryVerify(
+ Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SourceTextModuleInfoEntryVerify(*this,
+ isolate);
CHECK_IMPLIES(import_name().IsString(), module_request() >= 0);
CHECK_IMPLIES(export_name().IsString() && import_name().IsString(),
local_name().IsUndefined(isolate));
}
void Module::ModuleVerify(Isolate* isolate) {
- CHECK(IsModule());
-
- VerifyPointer(isolate, code());
- VerifyPointer(isolate, exports());
- VerifyPointer(isolate, module_namespace());
- VerifyPointer(isolate, requested_modules());
- VerifyPointer(isolate, script());
- VerifyPointer(isolate, import_meta());
- VerifyPointer(isolate, exception());
- VerifySmiField(kHashOffset);
- VerifySmiField(kStatusOffset);
-
- CHECK((status() >= kEvaluating && code().IsModuleInfo()) ||
- (status() == kInstantiated && code().IsJSGeneratorObject()) ||
- (status() == kInstantiating && code().IsJSFunction()) ||
- (code().IsSharedFunctionInfo()));
+ TorqueGeneratedClassVerifiers::ModuleVerify(*this, isolate);
- CHECK_EQ(status() == kErrored, !exception().IsTheHole(isolate));
+ CHECK_EQ(status() == Module::kErrored, !exception().IsTheHole(isolate));
CHECK(module_namespace().IsUndefined(isolate) ||
module_namespace().IsJSModuleNamespace());
if (module_namespace().IsJSModuleNamespace()) {
- CHECK_LE(kInstantiating, status());
+ CHECK_LE(Module::kInstantiating, status());
CHECK_EQ(JSModuleNamespace::cast(module_namespace()).module(), *this);
}
+ CHECK_NE(hash(), 0);
+}
+
+void SourceTextModule::SourceTextModuleVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SourceTextModuleVerify(*this, isolate);
+
+ CHECK((status() >= kEvaluating && code().IsSourceTextModuleInfo()) ||
+ (status() == kInstantiated && code().IsJSGeneratorObject()) ||
+ (status() == kInstantiating && code().IsJSFunction()) ||
+ (code().IsSharedFunctionInfo()));
+
CHECK_EQ(requested_modules().length(), info().module_requests().length());
+}
- CHECK(import_meta().IsTheHole(isolate) || import_meta().IsJSObject());
+void SyntheticModule::SyntheticModuleVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::SyntheticModuleVerify(*this, isolate);
- CHECK_NE(hash(), 0);
+ for (int i = 0; i < export_names().length(); i++) {
+ CHECK(export_names().get(i).IsString());
+ }
}
void PrototypeInfo::PrototypeInfoVerify(Isolate* isolate) {
@@ -1646,8 +1637,6 @@ void EnumCache::EnumCacheVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(SourcePositionTableWithFrameCache)
-
USE_TORQUE_VERIFIER(ClassPositions)
void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
@@ -1686,31 +1675,15 @@ void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
wrapper_code().kind() == Code::C_WASM_ENTRY);
}
-void WasmModuleObject::WasmModuleObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::WasmModuleObjectVerify(*this, isolate);
- CHECK(managed_native_module().IsForeign());
- CHECK(export_wrappers().IsFixedArray());
- CHECK(script().IsScript());
-}
+USE_TORQUE_VERIFIER(WasmModuleObject)
-void WasmTableObject::WasmTableObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::WasmTableObjectVerify(*this, isolate);
- CHECK(elements().IsFixedArray());
- VerifySmiField(kRawTypeOffset);
-}
+USE_TORQUE_VERIFIER(WasmTableObject)
-void WasmMemoryObject::WasmMemoryObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::WasmMemoryObjectVerify(*this, isolate);
- CHECK(array_buffer().IsJSArrayBuffer());
- VerifySmiField(kMaximumPagesOffset);
-}
+USE_TORQUE_VERIFIER(WasmMemoryObject)
USE_TORQUE_VERIFIER(WasmGlobalObject)
-void WasmExceptionObject::WasmExceptionObjectVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::WasmExceptionObjectVerify(*this, isolate);
- CHECK(serialized_signature().IsByteArray());
-}
+USE_TORQUE_VERIFIER(WasmExceptionObject)
void DataHandler::DataHandlerVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::DataHandlerVerify(*this, isolate);
@@ -1738,39 +1711,22 @@ void StoreHandler::StoreHandlerVerify(Isolate* isolate) {
// TODO(ishell): check handler integrity
}
-void AccessorInfo::AccessorInfoVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::AccessorInfoVerify(*this, isolate);
- VerifyForeignPointer(isolate, *this, getter());
- VerifyForeignPointer(isolate, *this, setter());
- VerifyForeignPointer(isolate, *this, js_getter());
-}
+USE_TORQUE_VERIFIER(AccessorInfo)
USE_TORQUE_VERIFIER(AccessorPair)
USE_TORQUE_VERIFIER(AccessCheckInfo)
void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
- CHECK(IsCallHandlerInfo());
+ TorqueGeneratedClassVerifiers::CallHandlerInfoVerify(*this, isolate);
CHECK(map() == ReadOnlyRoots(isolate).side_effect_call_handler_info_map() ||
map() ==
ReadOnlyRoots(isolate).side_effect_free_call_handler_info_map() ||
map() == ReadOnlyRoots(isolate)
.next_call_side_effect_free_call_handler_info_map());
- VerifyPointer(isolate, callback());
- VerifyPointer(isolate, js_callback());
- VerifyPointer(isolate, data());
}
-void InterceptorInfo::InterceptorInfoVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::InterceptorInfoVerify(*this, isolate);
- VerifyForeignPointer(isolate, *this, getter());
- VerifyForeignPointer(isolate, *this, setter());
- VerifyForeignPointer(isolate, *this, query());
- VerifyForeignPointer(isolate, *this, descriptor());
- VerifyForeignPointer(isolate, *this, deleter());
- VerifyForeignPointer(isolate, *this, enumerator());
- VerifyForeignPointer(isolate, *this, definer());
-}
+USE_TORQUE_VERIFIER(InterceptorInfo)
USE_TORQUE_VERIFIER(TemplateInfo)
@@ -1782,6 +1738,8 @@ USE_TORQUE_VERIFIER(WasmCapiFunctionData)
USE_TORQUE_VERIFIER(WasmJSFunctionData)
+USE_TORQUE_VERIFIER(WasmIndirectFunctionTable)
+
USE_TORQUE_VERIFIER(ObjectTemplateInfo)
void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
@@ -1792,11 +1750,7 @@ void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
CHECK(nested_site().IsAllocationSite() || nested_site() == Smi::kZero);
}
-void AllocationMemento::AllocationMementoVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::AllocationMementoVerify(*this, isolate);
- VerifyHeapPointer(isolate, allocation_site());
- CHECK(!IsValid() || GetAllocationSite().IsAllocationSite());
-}
+USE_TORQUE_VERIFIER(AllocationMemento)
void Script::ScriptVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::ScriptVerify(*this, isolate);
@@ -1859,62 +1813,26 @@ void UncompiledDataWithoutPreparseData::UncompiledDataWithoutPreparseDataVerify(
USE_TORQUE_VERIFIER(InterpreterData)
#ifdef V8_INTL_SUPPORT
-void JSV8BreakIterator::JSV8BreakIteratorVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kTypeOffset);
- VerifyObjectField(isolate, kBreakIteratorOffset);
- VerifyObjectField(isolate, kUnicodeStringOffset);
- VerifyObjectField(isolate, kBoundAdoptTextOffset);
- VerifyObjectField(isolate, kBoundFirstOffset);
- VerifyObjectField(isolate, kBoundNextOffset);
- VerifyObjectField(isolate, kBoundCurrentOffset);
- VerifyObjectField(isolate, kBoundBreakTypeOffset);
-}
-
-void JSCollator::JSCollatorVerify(Isolate* isolate) {
- CHECK(IsJSCollator());
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kICUCollatorOffset);
- VerifyObjectField(isolate, kBoundCompareOffset);
-}
-void JSDateTimeFormat::JSDateTimeFormatVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSDateTimeFormatVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSV8BreakIterator)
-void JSListFormat::JSListFormatVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSListFormatVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSCollator)
+
+USE_TORQUE_VERIFIER(JSDateTimeFormat)
+
+USE_TORQUE_VERIFIER(JSListFormat)
USE_TORQUE_VERIFIER(JSLocale)
-void JSNumberFormat::JSNumberFormatVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSNumberFormatVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSNumberFormat)
-void JSPluralRules::JSPluralRulesVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSPluralRulesVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSPluralRules)
-void JSRelativeTimeFormat::JSRelativeTimeFormatVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSRelativeTimeFormatVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSRelativeTimeFormat)
-void JSSegmentIterator::JSSegmentIteratorVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSSegmentIteratorVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSSegmentIterator)
-void JSSegmenter::JSSegmenterVerify(Isolate* isolate) {
- TorqueGeneratedClassVerifiers::JSSegmenterVerify(*this, isolate);
- VerifySmiField(kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(JSSegmenter)
#endif // V8_INTL_SUPPORT
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index e65c0af190..5284208285 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -50,6 +50,7 @@
#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segmenter-inl.h"
#endif // V8_INTL_SUPPORT
+#include "src/compiler/node.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
@@ -60,11 +61,13 @@
#include "src/objects/struct-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/transitions-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/utils/ostreams.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "torque-generated/class-definitions-tq-inl.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -217,8 +220,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_ASYNC_FUNCTION_OBJECT_TYPE:
- case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
// TODO(titzer): debug printing for more wasm objects
@@ -240,6 +241,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case WASM_INSTANCE_TYPE:
WasmInstanceObject::cast(*this).WasmInstanceObjectPrint(os);
break;
+ case JS_ASYNC_FUNCTION_OBJECT_TYPE:
+ case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
JSGeneratorObject::cast(*this).JSGeneratorObjectPrint(os);
break;
@@ -270,8 +273,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_GLOBAL_OBJECT_TYPE:
JSGlobalObject::cast(*this).JSGlobalObjectPrint(os);
break;
- case JS_VALUE_TYPE:
- JSValue::cast(*this).JSValuePrint(os);
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ JSPrimitiveWrapper::cast(*this).JSPrimitiveWrapperPrint(os);
break;
case JS_DATE_TYPE:
JSDate::cast(*this).JSDatePrint(os);
@@ -414,6 +417,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case SCOPE_INFO_TYPE:
ScopeInfo::cast(*this).ScopeInfoPrint(os);
break;
+ case SOURCE_TEXT_MODULE_TYPE:
+ SourceTextModule::cast(*this).SourceTextModulePrint(os);
+ break;
+ case SYNTHETIC_MODULE_TYPE:
+ SyntheticModule::cast(*this).SyntheticModulePrint(os);
+ break;
case FEEDBACK_METADATA_TYPE:
FeedbackMetadata::cast(*this).FeedbackMetadataPrint(os);
break;
@@ -1191,8 +1200,8 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
}
}
-void JSValue::JSValuePrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, *this, "JSValue");
+void JSPrimitiveWrapper::JSPrimitiveWrapperPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, *this, "JSPrimitiveWrapper");
os << "\n - value: " << Brief(value());
JSObjectPrintBody(os, *this);
}
@@ -1730,8 +1739,9 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(
os << "\n";
}
-void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "ModuleInfoEntry");
+void SourceTextModuleInfoEntry::SourceTextModuleInfoEntryPrint(
+ std::ostream& os) { // NOLINT
+ PrintHeader(os, "SourceTextModuleInfoEntry");
os << "\n - export_name: " << Brief(export_name());
os << "\n - local_name: " << Brief(local_name());
os << "\n - import_name: " << Brief(import_name());
@@ -1742,16 +1752,37 @@ void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+static void PrintModuleFields(Module module, std::ostream& os) {
+ os << "\n - exports: " << Brief(module.exports());
+ os << "\n - status: " << module.status();
+ os << "\n - exception: " << Brief(module.exception());
+}
+
void Module::ModulePrint(std::ostream& os) { // NOLINT
- PrintHeader(os, "Module");
+ if (this->IsSourceTextModule()) {
+ SourceTextModule::cast(*this).SourceTextModulePrint(os);
+ } else if (this->IsSyntheticModule()) {
+ SyntheticModule::cast(*this).SyntheticModulePrint(os);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void SourceTextModule::SourceTextModulePrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "SourceTextModule");
+ PrintModuleFields(*this, os);
os << "\n - origin: " << Brief(script().GetNameOrSourceURL());
os << "\n - code: " << Brief(code());
- os << "\n - exports: " << Brief(exports());
os << "\n - requested_modules: " << Brief(requested_modules());
os << "\n - script: " << Brief(script());
os << "\n - import_meta: " << Brief(import_meta());
- os << "\n - status: " << status();
- os << "\n - exception: " << Brief(exception());
+ os << "\n";
+}
+
+void SyntheticModule::SyntheticModulePrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "SyntheticModule");
+ PrintModuleFields(*this, os);
+ os << "\n - export_names: " << Brief(export_names());
os << "\n";
}
@@ -2040,6 +2071,20 @@ void WasmCapiFunctionData::WasmCapiFunctionDataPrint(
os << "\n";
}
+void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
+ std::ostream& os) {
+ PrintHeader(os, "WasmIndirectFunctionTable");
+ os << "\n - size: " << size();
+ os << "\n - sig_ids: " << static_cast<void*>(sig_ids());
+ os << "\n - targets: " << static_cast<void*>(targets());
+ if (has_managed_native_allocations()) {
+ os << "\n - managed_native_allocations: "
+ << Brief(managed_native_allocations());
+ }
+ os << "\n - refs: " << Brief(refs());
+ os << "\n";
+}
+
void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "ObjectTemplateInfo");
os << "\n - tag: " << Brief(tag());
@@ -2167,7 +2212,7 @@ void JSPluralRules::JSPluralRulesPrint(std::ostream& os) { // NOLINT
os << "\n - locale: " << Brief(locale());
os << "\n - type: " << TypeAsString();
os << "\n - icu plural rules: " << Brief(icu_plural_rules());
- os << "\n - icu decimal format: " << Brief(icu_decimal_format());
+ os << "\n - icu_number_formatter: " << Brief(icu_number_formatter());
JSObjectPrintBody(os, *this);
}
@@ -2483,7 +2528,6 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_stable()) os << "\n - stable_map";
if (is_migration_target()) os << "\n - migration_target";
if (is_dictionary_map()) os << "\n - dictionary_map";
- if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
if (has_named_interceptor()) os << "\n - named_interceptor";
if (has_indexed_interceptor()) os << "\n - indexed_interceptor";
if (may_have_interesting_symbols()) os << "\n - may_have_interesting_symbols";
@@ -2511,10 +2555,10 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
layout_descriptor().ShortPrint(os);
}
- Isolate* isolate;
// Read-only maps can't have transitions, which is fortunate because we need
// the isolate to iterate over the transitions.
- if (GetIsolateFromWritableObject(*this, &isolate)) {
+ if (!IsReadOnlyHeapObject(*this)) {
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
DisallowHeapAllocation no_gc;
TransitionsAccessor transitions(isolate, *this, &no_gc);
int nof_transitions = transitions.NumberOfTransitions();
@@ -2812,3 +2856,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_TransitionTree(void* object) {
#endif
}
}
+
+V8_EXPORT_PRIVATE extern void _v8_internal_Node_Print(void* object) {
+ reinterpret_cast<i::compiler::Node*>(object)->Print();
+}
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 096ffa2d48..8fb01dba9a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -10,37 +10,6 @@
#include "src/codegen/x64/assembler-x64.h"
#include "src/utils/allocation.h"
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index ab8ba34d90..493c56996b 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -237,6 +237,30 @@ static const InstructionDesc cmov_instructions[16] = {
{"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}};
+namespace {
+int8_t Imm8(const uint8_t* data) {
+ return *reinterpret_cast<const int8_t*>(data);
+}
+uint8_t Imm8_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint8_t*>(data);
+}
+int16_t Imm16(const uint8_t* data) {
+ return *reinterpret_cast<const int16_t*>(data);
+}
+uint16_t Imm16_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint16_t*>(data);
+}
+int32_t Imm32(const uint8_t* data) {
+ return *reinterpret_cast<const int32_t*>(data);
+}
+uint32_t Imm32_U(const uint8_t* data) {
+ return *reinterpret_cast<const uint32_t*>(data);
+}
+int64_t Imm64(const uint8_t* data) {
+ return *reinterpret_cast<const int64_t*>(data);
+}
+} // namespace
+
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
@@ -458,8 +482,7 @@ int DisassemblerX64::PrintRightOperandHelper(
switch (mod) {
case 0:
if ((rm & 7) == 5) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
- AppendToBuffer("[rip+0x%x]", disp);
+ AppendToBuffer("[rip+0x%x]", Imm32(modrmp + 1));
return 5;
} else if ((rm & 7) == 4) {
// Codes for SIB byte.
@@ -473,7 +496,7 @@ int DisassemblerX64::PrintRightOperandHelper(
return 2;
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+ int32_t disp = Imm32(modrmp + 2);
AppendToBuffer("[%s*%d%s0x%x]", NameOfCPURegister(index), 1 << scale,
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
return 6;
@@ -497,8 +520,7 @@ int DisassemblerX64::PrintRightOperandHelper(
byte sib = *(modrmp + 1);
int scale, index, base;
get_sib(sib, &scale, &index, &base);
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<int8_t*>(modrmp + 2);
+ int disp = (mod == 2) ? Imm32(modrmp + 2) : Imm8(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
AppendToBuffer("[%s%s0x%x]", NameOfCPURegister(base),
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
@@ -510,8 +532,7 @@ int DisassemblerX64::PrintRightOperandHelper(
return mod == 2 ? 6 : 3;
} else {
// No sib.
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<int8_t*>(modrmp + 1);
+ int disp = (mod == 2) ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
AppendToBuffer("[%s%s0x%x]", NameOfCPURegister(rm),
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
if (rm == i::kRootRegister.code()) {
@@ -540,15 +561,15 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
count = 1;
break;
case OPERAND_WORD_SIZE:
- value = *reinterpret_cast<int16_t*>(data);
+ value = Imm16(data);
count = 2;
break;
case OPERAND_DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data);
+ value = Imm32_U(data);
count = 4;
break;
case OPERAND_QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data);
+ value = Imm32(data);
count = 4;
break;
default:
@@ -763,7 +784,7 @@ int DisassemblerX64::JumpShort(byte* data) {
int DisassemblerX64::JumpConditional(byte* data) {
DCHECK_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
+ byte* dest = data + Imm32(data + 2) + 6;
const char* mnem = conditional_code_suffix[cond];
AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
return 6; // includes 0x0F
@@ -1663,6 +1684,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_2_INSTRUCTION_LIST(SSE34_DIS_CASE)
#undef SSE34_DIS_CASE
default:
UnimplementedInstruction();
@@ -1715,13 +1737,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (third_byte == 0x16) {
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pextrd "); // reg/m32, xmm, imm8
+ // reg/m32/reg/m64, xmm, imm8
+ AppendToBuffer("pextr%c ", rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
} else if (third_byte == 0x20) {
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pinsrd "); // xmm, reg/m32, imm8
+ AppendToBuffer("pinsrb "); // xmm, reg/m32, imm8
AppendToBuffer(" %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%d", (*current) & 3);
@@ -1735,7 +1758,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (third_byte == 0x22) {
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("pinsrd "); // xmm, reg/m32, imm8
+ // xmm, reg/m32/reg/m64, imm8
+ AppendToBuffer("pinsr%c ", rex_w() ? 'q' : 'd');
AppendToBuffer(" %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%d", (*current) & 3);
@@ -1871,6 +1895,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "psrlw";
} else if (opcode == 0xD2) {
mnemonic = "psrld";
+ } else if (opcode == 0xD4) {
+ mnemonic = "paddq";
} else if (opcode == 0xD5) {
mnemonic = "pmullw";
} else if (opcode == 0xD7) {
@@ -1880,9 +1906,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0xD9) {
mnemonic = "psubusw";
} else if (opcode == 0xDA) {
- mnemonic = "pand";
- } else if (opcode == 0xDB) {
mnemonic = "pminub";
+ } else if (opcode == 0xDB) {
+ mnemonic = "pand";
} else if (opcode == 0xDC) {
mnemonic = "paddusb";
} else if (opcode == 0xDD) {
@@ -1921,6 +1947,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "psubw";
} else if (opcode == 0xFA) {
mnemonic = "psubd";
+ } else if (opcode == 0xFB) {
+ mnemonic = "psubq";
} else if (opcode == 0xFC) {
mnemonic = "paddb";
} else if (opcode == 0xFD) {
@@ -2262,7 +2290,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = true;
}
current += PrintOperands(mnemonic, OPER_REG_OP_ORDER, current);
- } else if (opcode == 0xAE && (*(data + 2) & 0xF8) == 0xE8) {
+ } else if (opcode == 0xAE && (data[2] & 0xF8) == 0xF0) {
+ AppendToBuffer("mfence");
+ current = data + 3;
+ } else if (opcode == 0xAE && (data[2] & 0xF8) == 0xE8) {
AppendToBuffer("lfence");
current = data + 3;
} else {
@@ -2415,18 +2446,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
byte* addr = nullptr;
switch (operand_size()) {
case OPERAND_WORD_SIZE:
- addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
+ addr = reinterpret_cast<byte*>(Imm16(data + 1));
data += 3;
break;
case OPERAND_DOUBLEWORD_SIZE:
- addr =
- reinterpret_cast<byte*>(*reinterpret_cast<uint32_t*>(data + 1));
+ addr = reinterpret_cast<byte*>(Imm32_U(data + 1));
data += 5;
break;
case OPERAND_QUADWORD_SIZE:
- addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
+ addr = reinterpret_cast<byte*>(Imm64(data + 1));
data += 9;
break;
default:
@@ -2439,7 +2467,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+ byte* addr = data + Imm32(data + 1) + 5;
AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
@@ -2448,10 +2476,10 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
int32_t imm;
if (operand_size() == OPERAND_WORD_SIZE) {
- imm = *reinterpret_cast<int16_t*>(data + 1);
+ imm = Imm16(data + 1);
data += 3;
} else {
- imm = *reinterpret_cast<int32_t*>(data + 1);
+ imm = Imm32(data + 1);
data += 5;
}
AppendToBuffer("%s rax,0x%x", idesc.mnem, imm);
@@ -2472,7 +2500,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (!processed) {
switch (*data) {
case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
+ AppendToBuffer("ret 0x%x", Imm16_U(data + 1));
data += 3;
break;
@@ -2556,12 +2584,10 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov%c ", operand_size_code());
data += PrintRightOperand(data);
if (operand_size() == OPERAND_WORD_SIZE) {
- int16_t imm = *reinterpret_cast<int16_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm16(data));
data += 2;
} else {
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
+ AppendToBuffer(",0x%x", Imm32(data));
data += 4;
}
}
@@ -2657,12 +2683,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
}
case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
+ AppendToBuffer("push 0x%x", Imm32(data + 1));
data += 5;
break;
case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ AppendToBuffer("push 0x%x", Imm8(data + 1));
data += 2;
break;
@@ -2670,8 +2696,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0xA3:
switch (operand_size()) {
case OPERAND_DOUBLEWORD_SIZE: {
- const char* memory_location = NameOfAddress(
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1)));
+ const char* memory_location =
+ NameOfAddress(reinterpret_cast<byte*>(Imm32(data + 1)));
if (*data == 0xA1) { // Opcode 0xA1
AppendToBuffer("movzxlq rax,(%s)", memory_location);
} else { // Opcode 0xA3
@@ -2683,7 +2709,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case OPERAND_QUADWORD_SIZE: {
// New x64 instruction mov rax,(imm_64).
const char* memory_location =
- NameOfAddress(*reinterpret_cast<byte**>(data + 1));
+ NameOfAddress(reinterpret_cast<byte*>(Imm64(data + 1)));
if (*data == 0xA1) { // Opcode 0xA1
AppendToBuffer("movq rax,(%s)", memory_location);
} else { // Opcode 0xA3
@@ -2699,7 +2725,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
+ AppendToBuffer("test al,0x%x", Imm8_U(data + 1));
data += 2;
break;
@@ -2707,15 +2733,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
int64_t value = 0;
switch (operand_size()) {
case OPERAND_WORD_SIZE:
- value = *reinterpret_cast<uint16_t*>(data + 1);
+ value = Imm16_U(data + 1);
data += 3;
break;
case OPERAND_DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data + 1);
+ value = Imm32_U(data + 1);
data += 5;
break;
case OPERAND_QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data + 1);
+ value = Imm32(data + 1);
data += 5;
break;
default:
@@ -2758,7 +2784,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x3C:
- AppendToBuffer("cmp al,0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ AppendToBuffer("cmp al,0x%x", Imm8(data + 1));
data += 2;
break;
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index a62d530e1a..75c1a1b30e 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -1,10 +1,13 @@
-binji@chromium.org
bmeurer@chromium.org
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
mstarzinger@chromium.org
+mythria@chromium.org
+delphick@chromium.org
petermarshall@chromium.org
szuend@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/execution/arm/frame-constants-arm.cc b/deps/v8/src/execution/arm/frame-constants-arm.cc
index af04813301..602242ac97 100644
--- a/deps/v8/src/execution/arm/frame-constants-arm.cc
+++ b/deps/v8/src/execution/arm/frame-constants-arm.cc
@@ -6,9 +6,9 @@
#include "src/execution/arm/frame-constants-arm.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/arm64/frame-constants-arm64.cc b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
index 89a5259e2b..607081a562 100644
--- a/deps/v8/src/execution/arm64/frame-constants-arm64.cc
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
@@ -6,11 +6,11 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/execution/arm64/frame-constants-arm64.h"
+
#include "src/codegen/arm64/assembler-arm64-inl.h"
-#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/assembler.h"
-
-#include "src/execution/arm64/frame-constants-arm64.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 285b4b2134..06c4e3a6cc 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -5,32 +5,15 @@
#include "src/execution/execution.h"
#include "src/api/api-inl.h"
-#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/debug/debug.h"
+#include "src/compiler/wasm-compiler.h" // Only for static asserts.
+#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/vm-state-inl.h"
-#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
-#include "src/wasm/wasm-engine.h"
namespace v8 {
namespace internal {
-void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
- DCHECK_NOT_NULL(isolate_);
- thread_local_.set_jslimit(kInterruptLimit);
- thread_local_.set_climit(kInterruptLimit);
- isolate_->heap()->SetStackLimits();
-}
-
-void StackGuard::reset_limits(const ExecutionAccess& lock) {
- DCHECK_NOT_NULL(isolate_);
- thread_local_.set_jslimit(thread_local_.real_jslimit_);
- thread_local_.set_climit(thread_local_.real_climit_);
- isolate_->heap()->SetStackLimits();
-}
-
namespace {
Handle<Object> NormalizeReceiver(Isolate* isolate, Handle<Object> receiver) {
@@ -235,6 +218,22 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
return isolate->factory()->undefined_value();
}
+ if (params.execution_target == Execution::Target::kCallable) {
+ Handle<Context> context = isolate->native_context();
+ if (!context->script_execution_callback().IsUndefined(isolate)) {
+ v8::Context::AbortScriptExecutionCallback callback =
+ v8::ToCData<v8::Context::AbortScriptExecutionCallback>(
+ context->script_execution_callback());
+ v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ callback(api_isolate, api_context);
+ DCHECK(!isolate->has_scheduled_exception());
+ // Always throw an exception to abort execution, if callback exists.
+ isolate->ThrowIllegalOperation();
+ return MaybeHandle<Object>();
+ }
+ }
+
// Placeholder for return value.
Object value;
@@ -406,271 +405,68 @@ MaybeHandle<Object> Execution::TryRunMicrotasks(
exception_out));
}
-void StackGuard::SetStackLimit(uintptr_t limit) {
- ExecutionAccess access(isolate_);
- // If the current limits are special (e.g. due to a pending interrupt) then
- // leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
- if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
- thread_local_.set_jslimit(jslimit);
- }
- if (thread_local_.climit() == thread_local_.real_climit_) {
- thread_local_.set_climit(limit);
- }
- thread_local_.real_climit_ = limit;
- thread_local_.real_jslimit_ = jslimit;
-}
-
-void StackGuard::AdjustStackLimitForSimulator() {
- ExecutionAccess access(isolate_);
- uintptr_t climit = thread_local_.real_climit_;
- // If the current limits are special (e.g. due to a pending interrupt) then
- // leave them alone.
- uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
- if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
- thread_local_.set_jslimit(jslimit);
- isolate_->heap()->SetStackLimits();
- }
-}
-
-void StackGuard::EnableInterrupts() {
- ExecutionAccess access(isolate_);
- if (has_pending_interrupts(access)) {
- set_interrupt_limits(access);
- }
-}
-
-void StackGuard::DisableInterrupts() {
- ExecutionAccess access(isolate_);
- reset_limits(access);
-}
-
-void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
- ExecutionAccess access(isolate_);
- DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
- if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
- // Intercept already requested interrupts.
- int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
- scope->intercepted_flags_ = intercepted;
- thread_local_.interrupt_flags_ &= ~intercepted;
- } else {
- DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
- // Restore postponed interrupts.
- int restored_flags = 0;
- for (InterruptsScope* current = thread_local_.interrupt_scopes_;
- current != nullptr; current = current->prev_) {
- restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
- current->intercepted_flags_ &= ~scope->intercept_mask_;
- }
- thread_local_.interrupt_flags_ |= restored_flags;
+struct StackHandlerMarker {
+ Address next;
+ Address padding;
+};
+STATIC_ASSERT(offsetof(StackHandlerMarker, next) ==
+ StackHandlerConstants::kNextOffset);
+STATIC_ASSERT(offsetof(StackHandlerMarker, padding) ==
+ StackHandlerConstants::kPaddingOffset);
+STATIC_ASSERT(sizeof(StackHandlerMarker) == StackHandlerConstants::kSize);
+
+void Execution::CallWasm(Isolate* isolate, Handle<Code> wrapper_code,
+ Address wasm_call_target, Handle<Object> object_ref,
+ Address packed_args) {
+ using WasmEntryStub = GeneratedCode<Address(
+ Address target, Address object_ref, Address argv, Address c_entry_fp)>;
+ WasmEntryStub stub_entry =
+ WasmEntryStub::FromAddress(isolate, wrapper_code->InstructionStart());
+
+ // Save and restore context around invocation and block the
+ // allocation of handles without explicit handle scopes.
+ SaveContext save(isolate);
+ SealHandleScope shs(isolate);
+
+ Address saved_c_entry_fp = *isolate->c_entry_fp_address();
+ Address saved_js_entry_sp = *isolate->js_entry_sp_address();
+ if (saved_js_entry_sp == kNullAddress) {
+ *isolate->js_entry_sp_address() = GetCurrentStackPosition();
}
- if (!has_pending_interrupts(access)) reset_limits(access);
- // Add scope to the chain.
- scope->prev_ = thread_local_.interrupt_scopes_;
- thread_local_.interrupt_scopes_ = scope;
-}
+ StackHandlerMarker stack_handler;
+ stack_handler.next = isolate->thread_local_top()->handler_;
+#ifdef V8_USE_ADDRESS_SANITIZER
+ stack_handler.padding = GetCurrentStackPosition();
+#else
+ stack_handler.padding = 0;
+#endif
+ isolate->thread_local_top()->handler_ =
+ reinterpret_cast<Address>(&stack_handler);
+ trap_handler::SetThreadInWasm();
-void StackGuard::PopInterruptsScope() {
- ExecutionAccess access(isolate_);
- InterruptsScope* top = thread_local_.interrupt_scopes_;
- DCHECK_NE(top->mode_, InterruptsScope::kNoop);
- if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
- // Make intercepted interrupts active.
- DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
- thread_local_.interrupt_flags_ |= top->intercepted_flags_;
- } else {
- DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
- // Postpone existing interupts if needed.
- if (top->prev_) {
- for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
- interrupt = interrupt << 1) {
- InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
- if ((thread_local_.interrupt_flags_ & flag) &&
- top->prev_->Intercept(flag)) {
- thread_local_.interrupt_flags_ &= ~flag;
- }
- }
+ {
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kCodeEntry == 0);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kObjectRef == 1);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kArgumentsBuffer == 2);
+ STATIC_ASSERT(compiler::CWasmEntryParameters::kCEntryFp == 3);
+ Address result = stub_entry.Call(wasm_call_target, object_ref->ptr(),
+ packed_args, saved_c_entry_fp);
+ if (result != kNullAddress) {
+ isolate->set_pending_exception(Object(result));
}
}
- if (has_pending_interrupts(access)) set_interrupt_limits(access);
- // Remove scope from chain.
- thread_local_.interrupt_scopes_ = top->prev_;
-}
-
-bool StackGuard::CheckInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- return thread_local_.interrupt_flags_ & flag;
-}
-void StackGuard::RequestInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- // Check the chain of InterruptsScope for interception.
- if (thread_local_.interrupt_scopes_ &&
- thread_local_.interrupt_scopes_->Intercept(flag)) {
- return;
+ // If there was an exception, then the thread-in-wasm flag is cleared
+ // already.
+ if (trap_handler::IsThreadInWasm()) {
+ trap_handler::ClearThreadInWasm();
}
-
- // Not intercepted. Set as active interrupt flag.
- thread_local_.interrupt_flags_ |= flag;
- set_interrupt_limits(access);
-
- // If this isolate is waiting in a futex, notify it to wake up.
- isolate_->futex_wait_list_node()->NotifyWake();
-}
-
-void StackGuard::ClearInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- // Clear the interrupt flag from the chain of InterruptsScope.
- for (InterruptsScope* current = thread_local_.interrupt_scopes_;
- current != nullptr; current = current->prev_) {
- current->intercepted_flags_ &= ~flag;
+ isolate->thread_local_top()->handler_ = stack_handler.next;
+ if (saved_js_entry_sp == kNullAddress) {
+ *isolate->js_entry_sp_address() = saved_js_entry_sp;
}
-
- // Clear the interrupt flag from the active interrupt flags.
- thread_local_.interrupt_flags_ &= ~flag;
- if (!has_pending_interrupts(access)) reset_limits(access);
-}
-
-bool StackGuard::CheckAndClearInterrupt(InterruptFlag flag) {
- ExecutionAccess access(isolate_);
- bool result = (thread_local_.interrupt_flags_ & flag);
- thread_local_.interrupt_flags_ &= ~flag;
- if (!has_pending_interrupts(access)) reset_limits(access);
- return result;
-}
-
-char* StackGuard::ArchiveStackGuard(char* to) {
- ExecutionAccess access(isolate_);
- MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
- ThreadLocal blank;
-
- // Set the stack limits using the old thread_local_.
- // TODO(isolates): This was the old semantics of constructing a ThreadLocal
- // (as the ctor called SetStackLimits, which looked at the
- // current thread_local_ from StackGuard)-- but is this
- // really what was intended?
- isolate_->heap()->SetStackLimits();
- thread_local_ = blank;
-
- return to + sizeof(ThreadLocal);
-}
-
-char* StackGuard::RestoreStackGuard(char* from) {
- ExecutionAccess access(isolate_);
- MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- isolate_->heap()->SetStackLimits();
- return from + sizeof(ThreadLocal);
-}
-
-void StackGuard::FreeThreadResources() {
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- per_thread->set_stack_limit(thread_local_.real_climit_);
-}
-
-void StackGuard::ThreadLocal::Clear() {
- real_jslimit_ = kIllegalLimit;
- set_jslimit(kIllegalLimit);
- real_climit_ = kIllegalLimit;
- set_climit(kIllegalLimit);
- interrupt_scopes_ = nullptr;
- interrupt_flags_ = 0;
-}
-
-bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
- bool should_set_stack_limits = false;
- if (real_climit_ == kIllegalLimit) {
- const uintptr_t kLimitSize = FLAG_stack_size * KB;
- DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
- uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
- real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
- set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
- real_climit_ = limit;
- set_climit(limit);
- should_set_stack_limits = true;
- }
- interrupt_scopes_ = nullptr;
- interrupt_flags_ = 0;
- return should_set_stack_limits;
-}
-
-void StackGuard::ClearThread(const ExecutionAccess& lock) {
- thread_local_.Clear();
- isolate_->heap()->SetStackLimits();
-}
-
-void StackGuard::InitThread(const ExecutionAccess& lock) {
- if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
- Isolate::PerIsolateThreadData* per_thread =
- isolate_->FindOrAllocatePerThreadDataForThisThread();
- uintptr_t stored_limit = per_thread->stack_limit();
- // You should hold the ExecutionAccess lock when you call this.
- if (stored_limit != 0) {
- SetStackLimit(stored_limit);
- }
-}
-
-// --- C a l l s t o n a t i v e s ---
-
-Object StackGuard::HandleInterrupts() {
- TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");
-
- if (FLAG_verify_predictable) {
- // Advance synthetic time by making a time request.
- isolate_->heap()->MonotonicallyIncreasingTimeInMs();
- }
-
- if (CheckAndClearInterrupt(GC_REQUEST)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
- isolate_->heap()->HandleGCRequest();
- }
-
- if (CheckAndClearInterrupt(GROW_SHARED_MEMORY)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "V8.WasmGrowSharedMemory");
- isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
- isolate_);
- }
-
- if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) {
- TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
- return isolate_->TerminateExecution();
- }
-
- if (CheckAndClearInterrupt(DEOPT_MARKED_ALLOCATION_SITES)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "V8.GCDeoptMarkedAllocationSites");
- isolate_->heap()->DeoptMarkedAllocationSites();
- }
-
- if (CheckAndClearInterrupt(INSTALL_CODE)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.InstallOptimizedFunctions");
- DCHECK(isolate_->concurrent_recompilation_enabled());
- isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- }
-
- if (CheckAndClearInterrupt(API_INTERRUPT)) {
- TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
- // Callbacks must be invoked outside of ExecutionAccess lock.
- isolate_->InvokeApiInterruptCallbacks();
- }
-
- if (CheckAndClearInterrupt(LOG_WASM_CODE)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
- isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
- }
-
- if (CheckAndClearInterrupt(WASM_CODE_GC)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
- isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
- }
-
- isolate_->counters()->stack_interrupts()->Increment();
- isolate_->counters()->runtime_profiler_ticks()->Increment();
- isolate_->runtime_profiler()->MarkCandidatesForOptimization();
-
- return ReadOnlyRoots(isolate_).undefined_value();
+ *isolate->c_entry_fp_address() = saved_c_entry_fp;
}
} // namespace internal
diff --git a/deps/v8/src/execution/execution.h b/deps/v8/src/execution/execution.h
index 48a8d64424..3b8ecf038d 100644
--- a/deps/v8/src/execution/execution.h
+++ b/deps/v8/src/execution/execution.h
@@ -5,7 +5,6 @@
#ifndef V8_EXECUTION_EXECUTION_H_
#define V8_EXECUTION_EXECUTION_H_
-#include "src/base/atomicops.h"
#include "src/common/globals.h"
namespace v8 {
@@ -60,174 +59,16 @@ class Execution final : public AllStatic {
static MaybeHandle<Object> TryRunMicrotasks(
Isolate* isolate, MicrotaskQueue* microtask_queue,
MaybeHandle<Object>* exception_out);
-};
-
-class ExecutionAccess;
-class InterruptsScope;
-
-// StackGuard contains the handling of the limits that are used to limit the
-// number of nested invocations of JavaScript and the stack size used in each
-// invocation.
-class V8_EXPORT_PRIVATE StackGuard final {
- public:
- explicit StackGuard(Isolate* isolate) : isolate_(isolate) {}
-
- // Pass the address beyond which the stack should not grow. The stack
- // is assumed to grow downwards.
- void SetStackLimit(uintptr_t limit);
-
- // The simulator uses a separate JS stack. Limits on the JS stack might have
- // to be adjusted in order to reflect overflows of the C stack, because we
- // cannot rely on the interleaving of frames on the simulator.
- void AdjustStackLimitForSimulator();
-
- // Threading support.
- char* ArchiveStackGuard(char* to);
- char* RestoreStackGuard(char* from);
- static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
- void FreeThreadResources();
- // Sets up the default stack guard for this thread if it has not
- // already been set up.
- void InitThread(const ExecutionAccess& lock);
- // Clears the stack guard for this thread so it does not look as if
- // it has been set up.
- void ClearThread(const ExecutionAccess& lock);
-
-#define INTERRUPT_LIST(V) \
- V(TERMINATE_EXECUTION, TerminateExecution, 0) \
- V(GC_REQUEST, GC, 1) \
- V(INSTALL_CODE, InstallCode, 2) \
- V(API_INTERRUPT, ApiInterrupt, 3) \
- V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
- V(GROW_SHARED_MEMORY, GrowSharedMemory, 5) \
- V(LOG_WASM_CODE, LogWasmCode, 6) \
- V(WASM_CODE_GC, WasmCodeGC, 7)
-
-#define V(NAME, Name, id) \
- inline bool Check##Name() { return CheckInterrupt(NAME); } \
- inline bool CheckAndClear##Name() { return CheckAndClearInterrupt(NAME); } \
- inline void Request##Name() { RequestInterrupt(NAME); } \
- inline void Clear##Name() { ClearInterrupt(NAME); }
- INTERRUPT_LIST(V)
-#undef V
-
- // Flag used to set the interrupt causes.
- enum InterruptFlag {
-#define V(NAME, Name, id) NAME = (1 << id),
- INTERRUPT_LIST(V)
-#undef V
-#define V(NAME, Name, id) NAME |
- ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
-#undef V
- };
-
- uintptr_t climit() { return thread_local_.climit(); }
- uintptr_t jslimit() { return thread_local_.jslimit(); }
- // This provides an asynchronous read of the stack limits for the current
- // thread. There are no locks protecting this, but it is assumed that you
- // have the global V8 lock if you are using multiple V8 threads.
- uintptr_t real_climit() { return thread_local_.real_climit_; }
- uintptr_t real_jslimit() { return thread_local_.real_jslimit_; }
- Address address_of_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.jslimit_);
- }
- Address address_of_real_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
- }
-
- // If the stack guard is triggered, but it is not an actual
- // stack overflow, then handle the interruption accordingly.
- Object HandleInterrupts();
-
- private:
- bool CheckInterrupt(InterruptFlag flag);
- void RequestInterrupt(InterruptFlag flag);
- void ClearInterrupt(InterruptFlag flag);
- bool CheckAndClearInterrupt(InterruptFlag flag);
-
- // You should hold the ExecutionAccess lock when calling this method.
- bool has_pending_interrupts(const ExecutionAccess& lock) {
- return thread_local_.interrupt_flags_ != 0;
- }
-
- // You should hold the ExecutionAccess lock when calling this method.
- inline void set_interrupt_limits(const ExecutionAccess& lock);
-
- // Reset limits to actual values. For example after handling interrupt.
- // You should hold the ExecutionAccess lock when calling this method.
- inline void reset_limits(const ExecutionAccess& lock);
-
- // Enable or disable interrupts.
- void EnableInterrupts();
- void DisableInterrupts();
-
-#if V8_TARGET_ARCH_64_BIT
- static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
- static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
-#else
- static const uintptr_t kInterruptLimit = 0xfffffffe;
- static const uintptr_t kIllegalLimit = 0xfffffff8;
-#endif
-
- void PushInterruptsScope(InterruptsScope* scope);
- void PopInterruptsScope();
-
- class ThreadLocal final {
- public:
- ThreadLocal() { Clear(); }
- // You should hold the ExecutionAccess lock when you call Initialize or
- // Clear.
- void Clear();
-
- // Returns true if the heap's stack limits should be set, false if not.
- bool Initialize(Isolate* isolate);
-
- // The stack limit is split into a JavaScript and a C++ stack limit. These
- // two are the same except when running on a simulator where the C++ and
- // JavaScript stacks are separate. Each of the two stack limits have two
- // values. The one eith the real_ prefix is the actual stack limit
- // set for the VM. The one without the real_ prefix has the same value as
- // the actual stack limit except when there is an interruption (e.g. debug
- // break or preemption) in which case it is lowered to make stack checks
- // fail. Both the generated code and the runtime system check against the
- // one without the real_ prefix.
- uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
- uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
-
- // jslimit_ and climit_ can be read without any lock.
- // Writing requires the ExecutionAccess lock.
- base::AtomicWord jslimit_;
- base::AtomicWord climit_;
-
- uintptr_t jslimit() {
- return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
- }
- void set_jslimit(uintptr_t limit) {
- return base::Relaxed_Store(&jslimit_,
- static_cast<base::AtomicWord>(limit));
- }
- uintptr_t climit() {
- return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
- }
- void set_climit(uintptr_t limit) {
- return base::Relaxed_Store(&climit_,
- static_cast<base::AtomicWord>(limit));
- }
-
- InterruptsScope* interrupt_scopes_;
- int interrupt_flags_;
- };
-
- // TODO(isolates): Technically this could be calculated directly from a
- // pointer to StackGuard.
- Isolate* isolate_;
- ThreadLocal thread_local_;
-
- friend class Isolate;
- friend class StackLimitCheck;
- friend class InterruptsScope;
- DISALLOW_COPY_AND_ASSIGN(StackGuard);
+ // Call a Wasm function identified by {wasm_call_target} through the
+ // provided {wrapper_code}, which must match the function's signature.
+ // Upon return, either isolate->has_pending_exception() is true, or
+ // the function's return values are in {packed_args}.
+ V8_EXPORT_PRIVATE static void CallWasm(Isolate* isolate,
+ Handle<Code> wrapper_code,
+ Address wasm_call_target,
+ Handle<Object> object_ref,
+ Address packed_args);
};
} // namespace internal
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 7ddee5689e..a6e5c9522c 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -249,6 +249,13 @@ class ConstructFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(5);
};
+class CWasmEntryFrameConstants : public TypedFrameConstants {
+ public:
+ // FP-relative:
+ static constexpr int kCEntryFPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ DEFINE_TYPED_FRAME_SIZES(1);
+};
+
class WasmCompiledFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
@@ -271,7 +278,7 @@ class BuiltinContinuationFrameConstants : public TypedFrameConstants {
TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
static constexpr int kBuiltinContextOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- static constexpr int kBuiltinOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+ static constexpr int kBuiltinIndexOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
// The argument count is in the first allocatable register, stored below the
// fixed part of the frame and therefore is not part of the fixed frame size.
diff --git a/deps/v8/src/execution/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index aeb43fe0a6..52f38857cc 100644
--- a/deps/v8/src/execution/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_EXECUTION_FRAMES_INL_H_
#define V8_EXECUTION_FRAMES_INL_H_
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
@@ -48,11 +48,11 @@ inline Address StackHandler::address() const {
inline StackHandler* StackHandler::next() const {
const int offset = StackHandlerConstants::kNextOffset;
- return FromAddress(Memory<Address>(address() + offset));
+ return FromAddress(base::Memory<Address>(address() + offset));
}
inline Address StackHandler::next_address() const {
- return Memory<Address>(address() + StackHandlerConstants::kNextOffset);
+ return base::Memory<Address>(address() + StackHandlerConstants::kNextOffset);
}
inline StackHandler* StackHandler::FromAddress(Address address) {
@@ -112,21 +112,22 @@ inline Object BuiltinExitFrame::receiver_slot_object() const {
const int receiverOffset = BuiltinExitFrameConstants::kNewTargetOffset +
(argc - 1) * kSystemPointerSize;
- return Object(Memory<Address>(fp() + receiverOffset));
+ return Object(base::Memory<Address>(fp() + receiverOffset));
}
inline Object BuiltinExitFrame::argc_slot_object() const {
- return Object(Memory<Address>(fp() + BuiltinExitFrameConstants::kArgcOffset));
+ return Object(
+ base::Memory<Address>(fp() + BuiltinExitFrameConstants::kArgcOffset));
}
inline Object BuiltinExitFrame::target_slot_object() const {
return Object(
- Memory<Address>(fp() + BuiltinExitFrameConstants::kTargetOffset));
+ base::Memory<Address>(fp() + BuiltinExitFrameConstants::kTargetOffset));
}
inline Object BuiltinExitFrame::new_target_slot_object() const {
- return Object(
- Memory<Address>(fp() + BuiltinExitFrameConstants::kNewTargetOffset));
+ return Object(base::Memory<Address>(
+ fp() + BuiltinExitFrameConstants::kNewTargetOffset));
}
inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
@@ -134,20 +135,20 @@ inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
}
inline Object StandardFrame::GetExpression(int index) const {
- return Object(Memory<Address>(GetExpressionAddress(index)));
+ return Object(base::Memory<Address>(GetExpressionAddress(index)));
}
inline void StandardFrame::SetExpression(int index, Object value) {
- Memory<Address>(GetExpressionAddress(index)) = value.ptr();
+ base::Memory<Address>(GetExpressionAddress(index)) = value.ptr();
}
inline Address StandardFrame::caller_fp() const {
- return Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
+ return base::Memory<Address>(fp() + StandardFrameConstants::kCallerFPOffset);
}
inline Address StandardFrame::caller_pc() const {
- return Memory<Address>(ComputePCAddress(fp()));
+ return base::Memory<Address>(ComputePCAddress(fp()));
}
@@ -163,14 +164,14 @@ inline Address StandardFrame::ComputeConstantPoolAddress(Address fp) {
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
intptr_t frame_type =
- Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
+ base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
}
inline bool StandardFrame::IsConstructFrame(Address fp) {
intptr_t frame_type =
- Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
+ base::Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
return frame_type == StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
}
@@ -187,7 +188,7 @@ Address JavaScriptFrame::GetParameterSlot(int index) const {
}
inline void JavaScriptFrame::set_receiver(Object value) {
- Memory<Address>(GetParameterSlot(-1)) = value.ptr();
+ base::Memory<Address>(GetParameterSlot(-1)) = value.ptr();
}
inline bool JavaScriptFrame::has_adapted_arguments() const {
@@ -196,7 +197,7 @@ inline bool JavaScriptFrame::has_adapted_arguments() const {
inline Object JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Object(Memory<Address>(fp() + offset));
+ return Object(base::Memory<Address>(fp() + offset));
}
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index af660a338e..126cb9530e 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -33,6 +33,23 @@ namespace internal {
ReturnAddressLocationResolver StackFrame::return_address_location_resolver_ =
nullptr;
+namespace {
+
+Address AddressOf(const StackHandler* handler) {
+ Address raw = handler->address();
+#ifdef V8_USE_ADDRESS_SANITIZER
+ // ASan puts C++-allocated StackHandler markers onto its fake stack.
+ // We work around that by storing the real stack address in the "padding"
+ // field. StackHandlers allocated from generated code have 0 as padding.
+ Address padding =
+ base::Memory<Address>(raw + StackHandlerConstants::kPaddingOffset);
+ if (padding != 0) return padding;
+#endif
+ return raw;
+}
+
+} // namespace
+
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator {
@@ -40,12 +57,18 @@ class StackHandlerIterator {
StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
: limit_(frame->fp()), handler_(handler) {
// Make sure the handler has already been unwound to this frame.
- DCHECK(frame->sp() <= handler->address());
+ DCHECK(frame->sp() <= AddressOf(handler));
+ // For CWasmEntry frames, the handler was registered by the last C++
+ // frame (Execution::CallWasm), so even though its address is already
+ // beyond the limit, we know we always want to unwind one handler.
+ if (frame->type() == StackFrame::C_WASM_ENTRY) {
+ handler_ = handler_->next();
+ }
}
StackHandler* handler() const { return handler_; }
- bool done() { return handler_ == nullptr || handler_->address() > limit_; }
+ bool done() { return handler_ == nullptr || AddressOf(handler_) > limit_; }
void Advance() {
DCHECK(!done());
handler_ = handler_->next();
@@ -146,7 +169,7 @@ StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
}
StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate,
- StackFrame::Id id)
+ StackFrameId id)
: StackTraceFrameIterator(isolate) {
while (!done() && frame()->id() != id) Advance();
}
@@ -255,6 +278,11 @@ SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
bool advance_frame = true;
Address fast_c_fp = isolate->isolate_data()->fast_c_call_caller_fp();
+ uint8_t stack_is_iterable = isolate->isolate_data()->stack_is_iterable();
+ if (!stack_is_iterable) {
+ frame_ = nullptr;
+ return;
+ }
// 'Fast C calls' are a special type of C call where we call directly from JS
// to C without an exit frame inbetween. The CEntryStub is responsible for
// setting Isolate::c_entry_fp, meaning that it won't be set for fast C calls.
@@ -637,6 +665,12 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
return ExitFrame::GetStateForFramePointer(fp, state);
}
+StackFrame::Type CWasmEntryFrame::GetCallerState(State* state) const {
+ const int offset = CWasmEntryFrameConstants::kCEntryFPOffset;
+ Address fp = Memory<Address>(this->fp() + offset);
+ return ExitFrame::GetStateForFramePointer(fp, state);
+}
+
Code ConstructEntryFrame::unchecked_code() const {
return isolate()->heap()->builtin(Builtins::kJSConstructEntry);
}
@@ -972,7 +1006,6 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
parameters_limit);
}
- DEFINE_ROOT_VALUE(isolate());
// Visit pointer spill slots and locals.
uint8_t* safepoint_bits = safepoint_entry.bits();
for (unsigned index = 0; index < stack_slots; index++) {
@@ -992,7 +1025,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
if (!HAS_SMI_TAG(compressed_value)) {
// We don't need to update smi values.
*spill_slot.location() =
- DecompressTaggedPointer(ROOT_VALUE, compressed_value);
+ DecompressTaggedPointer(isolate(), compressed_value);
}
#endif
v->VisitRootPointer(Root::kTop, nullptr, spill_slot);
@@ -1910,7 +1943,8 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous() && code->handler_table_size() > 0) {
- HandlerTable table(code->handler_table(), code->handler_table_size());
+ HandlerTable table(code->handler_table(), code->handler_table_size(),
+ HandlerTable::kReturnAddressBasedEncoding);
int pc_offset = static_cast<int>(pc() - code->instruction_start());
*stack_slots = static_cast<int>(code->stack_slots());
return table.LookupReturn(pc_offset);
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 982716db93..1f83984f97 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -6,6 +6,7 @@
#define V8_EXECUTION_FRAMES_H_
#include "src/codegen/safepoint-table.h"
+#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/objects/code.h"
#include "src/objects/objects.h"
@@ -98,12 +99,6 @@ class StackFrame {
};
#undef DECLARE_TYPE
- // Opaque data type for identifying stack frames. Used extensively
- // by the debugger.
- // ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
- // has correct value range (see Issue 830 for more details).
- enum Id { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 };
-
// Used to mark the outermost JS entry frame.
//
// The mark is an opaque value that should be pushed onto the stack directly,
@@ -112,7 +107,9 @@ class StackFrame {
INNER_JSENTRY_FRAME = (0 << kSmiTagSize) | kSmiTag,
OUTERMOST_JSENTRY_FRAME = (1 << kSmiTagSize) | kSmiTag
};
+ // NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((INNER_JSENTRY_FRAME & kHeapObjectTagMask) != kHeapObjectTag);
+ // NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((OUTERMOST_JSENTRY_FRAME & kHeapObjectTagMask) !=
kHeapObjectTag);
@@ -145,7 +142,13 @@ class StackFrame {
// the type of the value on the stack.
static Type MarkerToType(intptr_t marker) {
DCHECK(IsTypeMarker(marker));
- return static_cast<Type>(marker >> kSmiTagSize);
+ intptr_t type = marker >> kSmiTagSize;
+ // TODO(petermarshall): There is a bug in the arm simulators that causes
+ // invalid frame markers.
+#if !(defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM))
+ DCHECK_LT(static_cast<uintptr_t>(type), Type::NUMBER_OF_TYPES);
+#endif
+ return static_cast<Type>(type);
}
// Check if a marker is a stack frame type marker or a tagged pointer.
@@ -172,10 +175,7 @@ class StackFrame {
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_interpreted() const { return type() == INTERPRETED; }
bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
- bool is_wasm_exit() const { return type() == WASM_EXIT; }
bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
- bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
- bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
bool is_wasm_interpreter_entry() const {
return type() == WASM_INTERPRETER_ENTRY;
}
@@ -234,7 +234,7 @@ class StackFrame {
}
// Get the id of this stack frame.
- Id id() const { return static_cast<Id>(caller_sp()); }
+ StackFrameId id() const { return static_cast<StackFrameId>(caller_sp()); }
// Get the top handler from the current stack iterator.
inline StackHandler* top_handler() const;
@@ -1052,6 +1052,7 @@ class CWasmEntryFrame : public StubFrame {
private:
friend class StackFrameIteratorBase;
+ Type GetCallerState(State* state) const override;
};
class WasmCompileLazyFrame : public StandardFrame {
@@ -1259,7 +1260,7 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
public:
explicit StackTraceFrameIterator(Isolate* isolate);
// Skip frames until the frame with the given id is reached.
- StackTraceFrameIterator(Isolate* isolate, StackFrame::Id id);
+ StackTraceFrameIterator(Isolate* isolate, StackFrameId id);
bool done() const { return iterator_.done(); }
void Advance();
void AdvanceOneFrame() { iterator_.Advance(); }
diff --git a/deps/v8/src/execution/ia32/frame-constants-ia32.cc b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
index e5e3855c79..7faecdb858 100644
--- a/deps/v8/src/execution/ia32/frame-constants-ia32.cc
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
@@ -4,12 +4,12 @@
#if V8_TARGET_ARCH_IA32
+#include "src/execution/ia32/frame-constants-ia32.h"
+
#include "src/codegen/assembler.h"
#include "src/codegen/ia32/assembler-ia32-inl.h"
-#include "src/codegen/ia32/assembler-ia32.h"
#include "src/execution/frame-constants.h"
-
-#include "src/execution/ia32/frame-constants-ia32.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/interrupts-scope.cc b/deps/v8/src/execution/interrupts-scope.cc
new file mode 100644
index 0000000000..cf8611f8d6
--- /dev/null
+++ b/deps/v8/src/execution/interrupts-scope.cc
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/interrupts-scope.h"
+
+#include "src/execution/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+InterruptsScope::InterruptsScope(Isolate* isolate, int intercept_mask,
+ Mode mode)
+ : stack_guard_(isolate->stack_guard()),
+ intercept_mask_(intercept_mask),
+ intercepted_flags_(0),
+ mode_(mode) {
+ if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
+}
+
+bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
+ InterruptsScope* last_postpone_scope = nullptr;
+ for (InterruptsScope* current = this; current; current = current->prev_) {
+ // We only consider scopes related to passed flag.
+ if (!(current->intercept_mask_ & flag)) continue;
+ if (current->mode_ == kRunInterrupts) {
+ // If innermost scope is kRunInterrupts scope, prevent interrupt from
+ // being intercepted.
+ break;
+ } else {
+ DCHECK_EQ(current->mode_, kPostponeInterrupts);
+ last_postpone_scope = current;
+ }
+ }
+ // If there is no postpone scope for passed flag then we should not intercept.
+ if (!last_postpone_scope) return false;
+ last_postpone_scope->intercepted_flags_ |= flag;
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution/interrupts-scope.h b/deps/v8/src/execution/interrupts-scope.h
new file mode 100644
index 0000000000..3d74850a84
--- /dev/null
+++ b/deps/v8/src/execution/interrupts-scope.h
@@ -0,0 +1,72 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_INTERRUPTS_SCOPE_H_
+#define V8_EXECUTION_INTERRUPTS_SCOPE_H_
+
+#include "src/execution/stack-guard.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// Scope intercepts only interrupt which is part of its interrupt_mask and does
+// not affect other interrupts.
+class InterruptsScope {
+ public:
+ enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
+
+ V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, int intercept_mask,
+ Mode mode);
+
+ virtual ~InterruptsScope() {
+ if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
+ }
+
+ // Find the scope that intercepts this interrupt.
+ // It may be outermost PostponeInterruptsScope or innermost
+ // SafeForInterruptsScope if any.
+ // Return whether the interrupt has been intercepted.
+ bool Intercept(StackGuard::InterruptFlag flag);
+
+ private:
+ StackGuard* stack_guard_;
+ int intercept_mask_;
+ int intercepted_flags_;
+ Mode mode_;
+ InterruptsScope* prev_;
+
+ friend class StackGuard;
+};
+
+// Support for temporarily postponing interrupts. When the outermost
+// postpone scope is left the interrupts will be re-enabled and any
+// interrupts that occurred while in the scope will be taken into
+// account.
+class PostponeInterruptsScope : public InterruptsScope {
+ public:
+ PostponeInterruptsScope(Isolate* isolate,
+ int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ : InterruptsScope(isolate, intercept_mask,
+ InterruptsScope::kPostponeInterrupts) {}
+ ~PostponeInterruptsScope() override = default;
+};
+
+// Support for overriding PostponeInterruptsScope. Interrupt is not ignored if
+// innermost scope is SafeForInterruptsScope ignoring any outer
+// PostponeInterruptsScopes.
+class SafeForInterruptsScope : public InterruptsScope {
+ public:
+ SafeForInterruptsScope(Isolate* isolate,
+ int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ : InterruptsScope(isolate, intercept_mask,
+ InterruptsScope::kRunInterrupts) {}
+ ~SafeForInterruptsScope() override = default;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_INTERRUPTS_SCOPE_H_
diff --git a/deps/v8/src/execution/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index d83ae708ec..adeb7f54d3 100644
--- a/deps/v8/src/execution/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -81,8 +81,10 @@ class IsolateData final {
// The FP and PC that are saved right before TurboAssembler::CallCFunction.
Address* fast_c_call_caller_fp_address() { return &fast_c_call_caller_fp_; }
Address* fast_c_call_caller_pc_address() { return &fast_c_call_caller_pc_; }
+ uint8_t* stack_is_iterable_address() { return &stack_is_iterable_; }
Address fast_c_call_caller_fp() { return fast_c_call_caller_fp_; }
Address fast_c_call_caller_pc() { return fast_c_call_caller_pc_; }
+ uint8_t stack_is_iterable() { return stack_is_iterable_; }
// Returns true if this address points to data stored in this instance.
// If it's the case then the value can be accessed indirectly through the
@@ -121,6 +123,7 @@ class IsolateData final {
V(kVirtualCallTargetRegisterOffset, kSystemPointerSize) \
V(kFastCCallCallerFPOffset, kSystemPointerSize) \
V(kFastCCallCallerPCOffset, kSystemPointerSize) \
+ V(kStackIsIterableOffset, kUInt8Size) \
/* This padding aligns IsolateData size by 8 bytes. */ \
V(kPaddingOffset, \
8 + RoundUp<8>(static_cast<int>(kPaddingOffset)) - kPaddingOffset) \
@@ -172,6 +175,9 @@ class IsolateData final {
// instruction in compiled code.
Address fast_c_call_caller_fp_ = kNullAddress;
Address fast_c_call_caller_pc_ = kNullAddress;
+ // Whether the SafeStackFrameIterator can successfully iterate the current
+ // stack. Only valid values are 0 or 1.
+ uint8_t stack_is_iterable_ = 1;
// Ensure the size is 8-byte aligned in order to make alignment of the field
// following the IsolateData field predictable. This solves the issue with
@@ -219,6 +225,8 @@ void IsolateData::AssertPredictableLayout() {
kFastCCallCallerFPOffset);
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
kFastCCallCallerPCOffset);
+ STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
+ kStackIsIterableOffset);
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
}
diff --git a/deps/v8/src/execution/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index fcbbed139c..7e037fb410 100644
--- a/deps/v8/src/execution/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -145,9 +145,10 @@ bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
Smi::ToInt(species_cell.value()) == kProtectorValid;
}
-bool Isolate::IsRegExpSpeciesLookupChainIntact() {
- PropertyCell species_cell =
- PropertyCell::cast(root(RootIndex::kRegExpSpeciesProtector));
+bool Isolate::IsRegExpSpeciesLookupChainIntact(
+ Handle<NativeContext> native_context) {
+ DCHECK_EQ(*native_context, this->raw_native_context());
+ PropertyCell species_cell = native_context->regexp_species_protector();
return species_cell.value().IsSmi() &&
Smi::ToInt(species_cell.value()) == kProtectorValid;
}
diff --git a/deps/v8/src/execution/isolate-utils-inl.h b/deps/v8/src/execution/isolate-utils-inl.h
new file mode 100644
index 0000000000..6095970a31
--- /dev/null
+++ b/deps/v8/src/execution/isolate-utils-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_ISOLATE_UTILS_INL_H_
+#define V8_EXECUTION_ISOLATE_UTILS_INL_H_
+
+#include "src/execution/isolate-utils.h"
+
+#include "src/common/ptr-compr-inl.h"
+#include "src/execution/isolate.h"
+#include "src/heap/heap-write-barrier-inl.h"
+
+namespace v8 {
+namespace internal {
+
+inline Isolate* GetIsolateForPtrCompr(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ return Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+#else
+ return nullptr;
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ return GetIsolateFromWritableObject(object)->heap();
+#else
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ return chunk->GetHeap();
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
+#ifdef V8_COMPRESS_POINTERS
+ Isolate* isolate = Isolate::FromRoot(GetIsolateRoot(object.ptr()));
+ DCHECK_NOT_NULL(isolate);
+ return isolate;
+#else
+ return Isolate::FromHeap(GetHeapFromWritableObject(object));
+#endif // V8_COMPRESS_POINTERS
+}
+
+V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
+#ifdef V8_COMPRESS_POINTERS
+ *isolate = GetIsolateFromWritableObject(object);
+ return true;
+#else
+ heap_internals::MemoryChunk* chunk =
+ heap_internals::MemoryChunk::FromHeapObject(object);
+ if (chunk->InReadOnlySpace()) {
+ *isolate = nullptr;
+ return false;
+ }
+ *isolate = Isolate::FromHeap(chunk->GetHeap());
+ return true;
+#endif // V8_COMPRESS_POINTERS
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_ISOLATE_UTILS_INL_H_
diff --git a/deps/v8/src/execution/isolate-utils.h b/deps/v8/src/execution/isolate-utils.h
new file mode 100644
index 0000000000..31c154e7a4
--- /dev/null
+++ b/deps/v8/src/execution/isolate-utils.h
@@ -0,0 +1,31 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_ISOLATE_UTILS_H_
+#define V8_EXECUTION_ISOLATE_UTILS_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Computes isolate from any read only or writable heap object. The resulting
+// value is intended to be used only as a hoisted computation of isolate root
+// inside trivial accessors for optmizing value decompression.
+// When pointer compression is disabled this function always returns nullptr.
+V8_INLINE Isolate* GetIsolateForPtrCompr(HeapObject object);
+
+V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object);
+
+V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object);
+
+// Returns true if it succeeded to obtain isolate from given object.
+// If it fails then the object is definitely a read-only object but it may also
+// succeed for read only objects if pointer compression is enabled.
+V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_ISOLATE_UTILS_H_
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 8a8db12ca3..2b3551cdfb 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -736,15 +736,19 @@ class FrameArrayBuilder {
}
// Creates a StackTraceFrame object for each frame in the FrameArray.
- Handle<FixedArray> GetElementsAsStackTraceFrameArray() {
+ Handle<FixedArray> GetElementsAsStackTraceFrameArray(
+ bool enable_frame_caching) {
elements_->ShrinkToFit(isolate_);
const int frame_count = elements_->FrameCount();
Handle<FixedArray> stack_trace =
isolate_->factory()->NewFixedArray(frame_count);
for (int i = 0; i < frame_count; ++i) {
- // Caching stack frames only happens for non-Wasm frames.
- if (!elements_->IsAnyWasmFrame(i)) {
+ // Caching stack frames only happens for user JS frames.
+ const bool cache_frame =
+ enable_frame_caching && !elements_->IsAnyWasmFrame(i) &&
+ elements_->Function(i).shared().IsUserJavaScript();
+ if (cache_frame) {
MaybeHandle<StackTraceFrame> maybe_frame =
StackFrameCacheHelper::LookupCachedFrame(
isolate_, handle(elements_->Code(i), isolate_),
@@ -760,7 +764,7 @@ class FrameArrayBuilder {
isolate_->factory()->NewStackTraceFrame(elements_, i);
stack_trace->set(i, *frame);
- if (!elements_->IsAnyWasmFrame(i)) {
+ if (cache_frame) {
StackFrameCacheHelper::CacheFrameAndUpdateCache(
isolate_, handle(elements_->Code(i), isolate_),
Smi::ToInt(elements_->Offset(i)), frame);
@@ -938,6 +942,14 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
PromiseCapability::cast(context->get(index)), isolate);
if (!capability->promise().IsJSPromise()) return;
promise = handle(JSPromise::cast(capability->promise()), isolate);
+ } else if (IsBuiltinFunction(isolate, reaction->fulfill_handler(),
+ Builtins::kPromiseCapabilityDefaultResolve)) {
+ Handle<JSFunction> function(JSFunction::cast(reaction->fulfill_handler()),
+ isolate);
+ Handle<Context> context(function->context(), isolate);
+ promise =
+ handle(JSPromise::cast(context->get(PromiseBuiltins::kPromiseSlot)),
+ isolate);
} else {
// We have some generic promise chain here, so try to
// continue with the chained promise on the reaction
@@ -973,9 +985,7 @@ struct CaptureStackTraceOptions {
bool capture_builtin_exit_frames;
bool capture_only_frames_subject_to_debugging;
bool async_stack_trace;
-
- enum CaptureResult { RAW_FRAME_ARRAY, STACK_TRACE_FRAME_ARRAY };
- CaptureResult capture_result;
+ bool enable_frame_caching;
};
Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
@@ -1105,10 +1115,8 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
}
// TODO(yangguo): Queue this structured stack trace for preprocessing on GC.
- if (options.capture_result == CaptureStackTraceOptions::RAW_FRAME_ARRAY) {
- return builder.GetElements();
- }
- return builder.GetElementsAsStackTraceFrameArray();
+ return builder.GetElementsAsStackTraceFrameArray(
+ options.enable_frame_caching);
}
} // namespace
@@ -1126,7 +1134,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
options.async_stack_trace = FLAG_async_stack_traces;
options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = false;
- options.capture_result = CaptureStackTraceOptions::RAW_FRAME_ARRAY;
+ options.enable_frame_caching = false;
return CaptureStackTrace(this, caller, options);
}
@@ -1222,7 +1230,7 @@ Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
? FrameArrayBuilder::ALL
: FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
options.capture_only_frames_subject_to_debugging = true;
- options.capture_result = CaptureStackTraceOptions::STACK_TRACE_FRAME_ARRAY;
+ options.enable_frame_caching = true;
return Handle<FixedArray>::cast(
CaptureStackTrace(this, factory()->undefined_value(), options));
@@ -1377,7 +1385,8 @@ Object Isolate::StackOverflow() {
Handle<Object> exception;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
this, exception,
- ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller, true));
+ ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller,
+ ErrorUtils::StackTraceCollection::kSimple));
Throw(*exception, nullptr);
@@ -1621,7 +1630,12 @@ Object Isolate::UnwindAndFindHandler() {
thread_local_top()->pending_handler_fp_ = handler_fp;
thread_local_top()->pending_handler_sp_ = handler_sp;
- // Return and clear pending exception.
+ // Return and clear pending exception. The contract is that:
+ // (1) the pending exception is stored in one place (no duplication), and
+ // (2) within generated-code land, that one place is the return register.
+ // If/when we unwind back into C++ (returning to the JSEntry stub,
+ // or to Execution::CallWasm), the returned exception will be sent
+ // back to isolate->set_pending_exception(...).
clear_pending_exception();
return exception;
};
@@ -1656,6 +1670,19 @@ Object Isolate::UnwindAndFindHandler() {
0);
}
+ case StackFrame::C_WASM_ENTRY: {
+ StackHandler* handler = frame->top_handler();
+ thread_local_top()->handler_ = handler->next_address();
+ Code code = frame->LookupCode();
+ HandlerTable table(code);
+ Address instruction_start = code.InstructionStart();
+ int return_offset = static_cast<int>(frame->pc() - instruction_start);
+ int handler_offset = table.LookupReturn(return_offset);
+ DCHECK_NE(-1, handler_offset);
+ return FoundHandler(Context(), instruction_start, handler_offset,
+ code.constant_pool(), frame->sp(), frame->fp());
+ }
+
case StackFrame::WASM_COMPILED: {
if (trap_handler::IsThreadInWasm()) {
trap_handler::ClearThreadInWasm();
@@ -2014,33 +2041,23 @@ Object Isolate::PromoteScheduledException() {
}
void Isolate::PrintCurrentStackTrace(FILE* out) {
- IncrementalStringBuilder builder(this);
- for (StackTraceFrameIterator it(this); !it.done(); it.Advance()) {
- if (!it.is_javascript()) continue;
+ CaptureStackTraceOptions options;
+ options.limit = 0;
+ options.skip_mode = SKIP_NONE;
+ options.capture_builtin_exit_frames = true;
+ options.async_stack_trace = FLAG_async_stack_traces;
+ options.filter_mode = FrameArrayBuilder::CURRENT_SECURITY_CONTEXT;
+ options.capture_only_frames_subject_to_debugging = false;
+ options.enable_frame_caching = false;
- HandleScope scope(this);
- JavaScriptFrame* frame = it.javascript_frame();
-
- Handle<Object> receiver(frame->receiver(), this);
- Handle<JSFunction> function(frame->function(), this);
- Handle<AbstractCode> code;
- int offset;
- if (frame->is_interpreted()) {
- InterpretedFrame* interpreted_frame = InterpretedFrame::cast(frame);
- code = handle(AbstractCode::cast(interpreted_frame->GetBytecodeArray()),
- this);
- offset = interpreted_frame->GetBytecodeOffset();
- } else {
- code = handle(AbstractCode::cast(frame->LookupCode()), this);
- offset = static_cast<int>(frame->pc() - code->InstructionStart());
- }
+ Handle<FixedArray> frames = Handle<FixedArray>::cast(
+ CaptureStackTrace(this, this->factory()->undefined_value(), options));
- // To preserve backwards compatiblity, only append a newline when
- // the current stringified frame actually has characters.
- const int old_length = builder.Length();
- JSStackFrame site(this, receiver, function, code, offset);
- site.ToString(builder);
- if (old_length != builder.Length()) builder.AppendCharacter('\n');
+ IncrementalStringBuilder builder(this);
+ for (int i = 0; i < frames->length(); ++i) {
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(frames->get(i)), this);
+
+ SerializeStackTraceFrame(this, frame, builder);
}
Handle<String> stack_trace = builder.Finish().ToHandleChecked();
@@ -2113,7 +2130,8 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
JSReceiver::GetDataProperty(Handle<JSObject>::cast(exception), key);
if (!property->IsFixedArray()) return false;
- Handle<FrameArray> elements = Handle<FrameArray>::cast(property);
+ Handle<FrameArray> elements =
+ GetFrameArrayFromStackTrace(this, Handle<FixedArray>::cast(property));
const int frame_count = elements->FrameCount();
for (int i = 0; i < frame_count; i++) {
@@ -2248,7 +2266,7 @@ bool Isolate::IsExternalHandlerOnTop(Object exception) {
}
void Isolate::ReportPendingMessagesImpl(bool report_externally) {
- Object exception = pending_exception();
+ Object exception_obj = pending_exception();
// Clear the pending message object early to avoid endless recursion.
Object message_obj = thread_local_top()->pending_message_obj_;
@@ -2256,7 +2274,7 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
// For uncatchable exceptions we do nothing. If needed, the exception and the
// message have already been propagated to v8::TryCatch.
- if (!is_catchable_by_javascript(exception)) return;
+ if (!is_catchable_by_javascript(exception_obj)) return;
// Determine whether the message needs to be reported to all message handlers
// depending on whether and external v8::TryCatch or an internal JavaScript
@@ -2267,19 +2285,20 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
should_report_exception = try_catch_handler()->is_verbose_;
} else {
// Report the exception if it isn't caught by JavaScript code.
- should_report_exception = !IsJavaScriptHandlerOnTop(exception);
+ should_report_exception = !IsJavaScriptHandlerOnTop(exception_obj);
}
// Actually report the pending message to all message handlers.
if (!message_obj.IsTheHole(this) && should_report_exception) {
HandleScope scope(this);
Handle<JSMessageObject> message(JSMessageObject::cast(message_obj), this);
+ Handle<Object> exception(exception_obj, this);
Handle<Script> script(message->script(), this);
// Clear the exception and restore it afterwards, otherwise
// CollectSourcePositions will abort.
clear_pending_exception();
JSMessageObject::EnsureSourcePositionsAvailable(this, message);
- set_pending_exception(exception);
+ set_pending_exception(*exception);
int start_pos = message->GetStartPosition();
int end_pos = message->GetEndPosition();
MessageLocation location(script, start_pos, end_pos);
@@ -2853,6 +2872,13 @@ void Isolate::Delete(Isolate* isolate) {
SetIsolateThreadLocals(saved_isolate, saved_data);
}
+void Isolate::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
+ DCHECK_NOT_NULL(ro_heap);
+ DCHECK_IMPLIES(read_only_heap_ != nullptr, read_only_heap_ == ro_heap);
+ read_only_heap_ = ro_heap;
+ heap_.SetUpFromReadOnlyHeap(ro_heap);
+}
+
v8::PageAllocator* Isolate::page_allocator() {
return isolate_allocator_->page_allocator();
}
@@ -3282,6 +3308,21 @@ bool Isolate::InitWithSnapshot(ReadOnlyDeserializer* read_only_deserializer,
return Init(read_only_deserializer, startup_deserializer);
}
+static void AddCrashKeysForIsolateAndHeapPointers(Isolate* isolate) {
+ v8::Platform* platform = V8::GetCurrentPlatform();
+
+ const int id = isolate->id();
+ platform->AddCrashKey(id, "isolate", reinterpret_cast<uintptr_t>(isolate));
+
+ auto heap = isolate->heap();
+ platform->AddCrashKey(id, "ro_space",
+ reinterpret_cast<uintptr_t>(heap->read_only_space()->first_page()));
+ platform->AddCrashKey(id, "map_space",
+ reinterpret_cast<uintptr_t>(heap->map_space()->first_page()));
+ platform->AddCrashKey(id, "code_space",
+ reinterpret_cast<uintptr_t>(heap->code_space()->first_page()));
+}
+
bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
StartupDeserializer* startup_deserializer) {
TRACE_ISOLATE(init);
@@ -3432,7 +3473,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
if (create_heap_objects) {
heap_.read_only_space()->ClearStringPaddingIfNeeded();
- heap_.read_only_heap()->OnCreateHeapObjectsComplete(this);
+ read_only_heap_->OnCreateHeapObjectsComplete(this);
} else {
startup_deserializer->DeserializeInto(this);
}
@@ -3527,6 +3568,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
}
+ AddCrashKeysForIsolateAndHeapPointers(this);
return true;
}
@@ -3693,9 +3735,9 @@ void Isolate::MaybeInitializeVectorListFromHeap() {
std::vector<Handle<FeedbackVector>> vectors;
{
- HeapIterator heap_iterator(heap());
- for (HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
- current_obj = heap_iterator.next()) {
+ HeapObjectIterator heap_iterator(heap());
+ for (HeapObject current_obj = heap_iterator.Next(); !current_obj.is_null();
+ current_obj = heap_iterator.Next()) {
if (!current_obj.IsFeedbackVector()) continue;
FeedbackVector vector = FeedbackVector::cast(current_obj);
@@ -3907,13 +3949,31 @@ void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
if (!IsNoElementsProtectorIntact()) return;
if (!IsArrayOrObjectOrStringPrototype(*object)) return;
PropertyCell::SetValueWithInvalidation(
- this, factory()->no_elements_protector(),
+ this, "no_elements_protector", factory()->no_elements_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
}
+void Isolate::TraceProtectorInvalidation(const char* protector_name) {
+ static constexpr char kInvalidateProtectorTracingCategory[] =
+ "V8.InvalidateProtector";
+ static constexpr char kInvalidateProtectorTracingArg[] = "protector-name";
+
+ DCHECK(FLAG_trace_protector_invalidation);
+
+ // TODO(jgruber): Remove the PrintF once tracing can output to stdout.
+ i::PrintF("Invalidating protector cell %s in isolate %p\n", protector_name,
+ this);
+ TRACE_EVENT_INSTANT1("v8", kInvalidateProtectorTracingCategory,
+ TRACE_EVENT_SCOPE_THREAD, kInvalidateProtectorTracingArg,
+ protector_name);
+}
+
void Isolate::InvalidateIsConcatSpreadableProtector() {
DCHECK(factory()->is_concat_spreadable_protector()->value().IsSmi());
DCHECK(IsIsConcatSpreadableLookupChainIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("is_concat_spreadable_protector");
+ }
factory()->is_concat_spreadable_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsIsConcatSpreadableLookupChainIntact());
@@ -3922,6 +3982,9 @@ void Isolate::InvalidateIsConcatSpreadableProtector() {
void Isolate::InvalidateArrayConstructorProtector() {
DCHECK(factory()->array_constructor_protector()->value().IsSmi());
DCHECK(IsArrayConstructorIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("array_constructor_protector");
+ }
factory()->array_constructor_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsArrayConstructorIntact());
@@ -3931,7 +3994,7 @@ void Isolate::InvalidateArraySpeciesProtector() {
DCHECK(factory()->array_species_protector()->value().IsSmi());
DCHECK(IsArraySpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_species_protector(),
+ this, "array_species_protector", factory()->array_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArraySpeciesLookupChainIntact());
}
@@ -3940,25 +4003,30 @@ void Isolate::InvalidateTypedArraySpeciesProtector() {
DCHECK(factory()->typed_array_species_protector()->value().IsSmi());
DCHECK(IsTypedArraySpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->typed_array_species_protector(),
+ this, "typed_array_species_protector",
+ factory()->typed_array_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsTypedArraySpeciesLookupChainIntact());
}
-void Isolate::InvalidateRegExpSpeciesProtector() {
- DCHECK(factory()->regexp_species_protector()->value().IsSmi());
- DCHECK(IsRegExpSpeciesLookupChainIntact());
+void Isolate::InvalidateRegExpSpeciesProtector(
+ Handle<NativeContext> native_context) {
+ DCHECK_EQ(*native_context, this->raw_native_context());
+ DCHECK(native_context->regexp_species_protector().value().IsSmi());
+ DCHECK(IsRegExpSpeciesLookupChainIntact(native_context));
+ Handle<PropertyCell> species_cell(native_context->regexp_species_protector(),
+ this);
PropertyCell::SetValueWithInvalidation(
- this, factory()->regexp_species_protector(),
+ this, "regexp_species_protector", species_cell,
handle(Smi::FromInt(kProtectorInvalid), this));
- DCHECK(!IsRegExpSpeciesLookupChainIntact());
+ DCHECK(!IsRegExpSpeciesLookupChainIntact(native_context));
}
void Isolate::InvalidatePromiseSpeciesProtector() {
DCHECK(factory()->promise_species_protector()->value().IsSmi());
DCHECK(IsPromiseSpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_species_protector(),
+ this, "promise_species_protector", factory()->promise_species_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseSpeciesLookupChainIntact());
}
@@ -3966,6 +4034,9 @@ void Isolate::InvalidatePromiseSpeciesProtector() {
void Isolate::InvalidateStringLengthOverflowProtector() {
DCHECK(factory()->string_length_protector()->value().IsSmi());
DCHECK(IsStringLengthOverflowIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("string_length_protector");
+ }
factory()->string_length_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsStringLengthOverflowIntact());
@@ -3975,7 +4046,7 @@ void Isolate::InvalidateArrayIteratorProtector() {
DCHECK(factory()->array_iterator_protector()->value().IsSmi());
DCHECK(IsArrayIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_iterator_protector(),
+ this, "array_iterator_protector", factory()->array_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayIteratorLookupChainIntact());
}
@@ -3984,7 +4055,7 @@ void Isolate::InvalidateMapIteratorProtector() {
DCHECK(factory()->map_iterator_protector()->value().IsSmi());
DCHECK(IsMapIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->map_iterator_protector(),
+ this, "map_iterator_protector", factory()->map_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsMapIteratorLookupChainIntact());
}
@@ -3993,7 +4064,7 @@ void Isolate::InvalidateSetIteratorProtector() {
DCHECK(factory()->set_iterator_protector()->value().IsSmi());
DCHECK(IsSetIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->set_iterator_protector(),
+ this, "set_iterator_protector", factory()->set_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsSetIteratorLookupChainIntact());
}
@@ -4002,7 +4073,7 @@ void Isolate::InvalidateStringIteratorProtector() {
DCHECK(factory()->string_iterator_protector()->value().IsSmi());
DCHECK(IsStringIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->string_iterator_protector(),
+ this, "string_iterator_protector", factory()->string_iterator_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsStringIteratorLookupChainIntact());
}
@@ -4011,7 +4082,8 @@ void Isolate::InvalidateArrayBufferDetachingProtector() {
DCHECK(factory()->array_buffer_detaching_protector()->value().IsSmi());
DCHECK(IsArrayBufferDetachingIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->array_buffer_detaching_protector(),
+ this, "array_buffer_detaching_protector",
+ factory()->array_buffer_detaching_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsArrayBufferDetachingIntact());
}
@@ -4020,7 +4092,7 @@ void Isolate::InvalidatePromiseHookProtector() {
DCHECK(factory()->promise_hook_protector()->value().IsSmi());
DCHECK(IsPromiseHookProtectorIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_hook_protector(),
+ this, "promise_hook_protector", factory()->promise_hook_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseHookProtectorIntact());
}
@@ -4028,6 +4100,9 @@ void Isolate::InvalidatePromiseHookProtector() {
void Isolate::InvalidatePromiseResolveProtector() {
DCHECK(factory()->promise_resolve_protector()->value().IsSmi());
DCHECK(IsPromiseResolveLookupChainIntact());
+ if (FLAG_trace_protector_invalidation) {
+ TraceProtectorInvalidation("promise_resolve_protector");
+ }
factory()->promise_resolve_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
DCHECK(!IsPromiseResolveLookupChainIntact());
@@ -4037,7 +4112,7 @@ void Isolate::InvalidatePromiseThenProtector() {
DCHECK(factory()->promise_then_protector()->value().IsSmi());
DCHECK(IsPromiseThenLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
- this, factory()->promise_then_protector(),
+ this, "promise_then_protector", factory()->promise_then_protector(),
handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsPromiseThenLookupChainIntact());
}
@@ -4176,7 +4251,7 @@ void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
// set is still open (whether to clear it after every microtask or once
// during a microtask checkpoint). See also
// https://github.com/tc39/proposal-weakrefs/issues/39 .
- heap()->ClearKeepDuringJobSet();
+ heap()->ClearKeptObjects();
}
if (call_completed_callbacks_.empty()) return;
@@ -4261,7 +4336,7 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
}
Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
- Handle<Module> module) {
+ Handle<SourceTextModule> module) {
Handle<Object> host_meta(module->import_meta(), this);
if (host_meta->IsTheHole(this)) {
host_meta = factory()->NewJSObjectWithNullProto();
@@ -4269,7 +4344,7 @@ Handle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
v8::Local<v8::Context> api_context =
v8::Utils::ToLocal(Handle<Context>(native_context()));
host_initialize_import_meta_object_callback_(
- api_context, Utils::ToLocal(module),
+ api_context, Utils::ToLocal(Handle<Module>::cast(module)),
v8::Local<v8::Object>::Cast(v8::Utils::ToLocal(host_meta)));
}
module->set_import_meta(*host_meta);
@@ -4641,26 +4716,6 @@ AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
: isolate_(isolate), context_(isolate->context(), isolate) {}
#endif // DEBUG
-bool InterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
- InterruptsScope* last_postpone_scope = nullptr;
- for (InterruptsScope* current = this; current; current = current->prev_) {
- // We only consider scopes related to passed flag.
- if (!(current->intercept_mask_ & flag)) continue;
- if (current->mode_ == kRunInterrupts) {
- // If innermost scope is kRunInterrupts scope, prevent interrupt from
- // being intercepted.
- break;
- } else {
- DCHECK_EQ(current->mode_, kPostponeInterrupts);
- last_postpone_scope = current;
- }
- }
- // If there is no postpone scope for passed flag then we should not intercept.
- if (!last_postpone_scope) return false;
- last_postpone_scope->intercepted_flags_ |= flag;
- return true;
-}
-
#undef TRACE_ISOLATE
} // namespace internal
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index 4b4bf9cd7c..2ead7bf844 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -23,6 +23,7 @@
#include "src/execution/futex-emulation.h"
#include "src/execution/isolate-data.h"
#include "src/execution/messages.h"
+#include "src/execution/stack-guard.h"
#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
@@ -69,7 +70,6 @@ class CodeTracer;
class CompilationCache;
class CompilationStatistics;
class CompilerDispatcher;
-class ContextSlotCache;
class Counters;
class Debug;
class DeoptimizerData;
@@ -91,8 +91,8 @@ class RootVisitor;
class RuntimeProfiler;
class SetupIsolateDelegate;
class Simulator;
-class StartupDeserializer;
class StandardFrame;
+class StartupDeserializer;
class StubCache;
class ThreadManager;
class ThreadState;
@@ -397,6 +397,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(OOMErrorCallback, oom_behavior, nullptr) \
V(LogEventCallback, event_logger, nullptr) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+ V(ModifyCodeGenerationFromStringsCallback, modify_code_gen_callback, \
+ nullptr) \
V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
@@ -515,6 +517,8 @@ class Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
+ void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
+
// Returns allocation mode of this isolate.
V8_INLINE IsolateAllocationMode isolate_allocation_mode();
@@ -900,6 +904,7 @@ class Isolate final : private HiddenFactory {
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
+ ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
static Isolate* FromHeap(Heap* heap) {
return reinterpret_cast<Isolate*>(reinterpret_cast<Address>(heap) -
OFFSET_OF(Isolate, heap_));
@@ -915,6 +920,9 @@ class Isolate final : private HiddenFactory {
static size_t isolate_root_bias() {
return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
}
+ static Isolate* FromRoot(Address isolate_root) {
+ return reinterpret_cast<Isolate*>(isolate_root - isolate_root_bias());
+ }
RootsTable& roots_table() { return isolate_data()->roots(); }
@@ -1168,7 +1176,8 @@ class Isolate final : private HiddenFactory {
inline bool IsArraySpeciesLookupChainIntact();
inline bool IsTypedArraySpeciesLookupChainIntact();
- inline bool IsRegExpSpeciesLookupChainIntact();
+ inline bool IsRegExpSpeciesLookupChainIntact(
+ Handle<NativeContext> native_context);
// Check that the @@species protector is intact, which guards the lookup of
// "constructor" on JSPromise instances, whose [[Prototype]] is the initial
@@ -1250,10 +1259,14 @@ class Isolate final : private HiddenFactory {
void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
UpdateNoElementsProtectorOnSetElement(object);
}
+
+ // The `protector_name` C string must be statically allocated.
+ void TraceProtectorInvalidation(const char* protector_name);
+
void InvalidateArrayConstructorProtector();
void InvalidateArraySpeciesProtector();
void InvalidateTypedArraySpeciesProtector();
- void InvalidateRegExpSpeciesProtector();
+ void InvalidateRegExpSpeciesProtector(Handle<NativeContext> native_context);
void InvalidatePromiseSpeciesProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
@@ -1469,7 +1482,7 @@ class Isolate final : private HiddenFactory {
void SetHostInitializeImportMetaObjectCallback(
HostInitializeImportMetaObjectCallback callback);
V8_EXPORT_PRIVATE Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
- Handle<Module> module);
+ Handle<SourceTextModule> module);
void RegisterEmbeddedFileWriter(EmbeddedFileWriterInterface* writer) {
embedded_file_writer_ = writer;
@@ -1647,6 +1660,7 @@ class Isolate final : private HiddenFactory {
std::unique_ptr<IsolateAllocator> isolate_allocator_;
Heap heap_;
+ ReadOnlyHeap* read_only_heap_ = nullptr;
const int id_;
EntryStackItem* entry_stack_ = nullptr;
@@ -1982,65 +1996,6 @@ class StackLimitCheck {
} \
} while (false)
-// Scope intercepts only interrupt which is part of its interrupt_mask and does
-// not affect other interrupts.
-class InterruptsScope {
- public:
- enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
-
- virtual ~InterruptsScope() {
- if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
- }
-
- // Find the scope that intercepts this interrupt.
- // It may be outermost PostponeInterruptsScope or innermost
- // SafeForInterruptsScope if any.
- // Return whether the interrupt has been intercepted.
- bool Intercept(StackGuard::InterruptFlag flag);
-
- InterruptsScope(Isolate* isolate, int intercept_mask, Mode mode)
- : stack_guard_(isolate->stack_guard()),
- intercept_mask_(intercept_mask),
- intercepted_flags_(0),
- mode_(mode) {
- if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
- }
-
- private:
- StackGuard* stack_guard_;
- int intercept_mask_;
- int intercepted_flags_;
- Mode mode_;
- InterruptsScope* prev_;
-
- friend class StackGuard;
-};
-
-// Support for temporarily postponing interrupts. When the outermost
-// postpone scope is left the interrupts will be re-enabled and any
-// interrupts that occurred while in the scope will be taken into
-// account.
-class PostponeInterruptsScope : public InterruptsScope {
- public:
- PostponeInterruptsScope(Isolate* isolate,
- int intercept_mask = StackGuard::ALL_INTERRUPTS)
- : InterruptsScope(isolate, intercept_mask,
- InterruptsScope::kPostponeInterrupts) {}
- ~PostponeInterruptsScope() override = default;
-};
-
-// Support for overriding PostponeInterruptsScope. Interrupt is not ignored if
-// innermost scope is SafeForInterruptsScope ignoring any outer
-// PostponeInterruptsScopes.
-class SafeForInterruptsScope : public InterruptsScope {
- public:
- SafeForInterruptsScope(Isolate* isolate,
- int intercept_mask = StackGuard::ALL_INTERRUPTS)
- : InterruptsScope(isolate, intercept_mask,
- InterruptsScope::kRunInterrupts) {}
- ~SafeForInterruptsScope() override = default;
-};
-
class StackTraceFailureMessage {
public:
explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index c76f546d62..d216d3bc39 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -7,13 +7,16 @@
#include <memory>
#include "src/api/api-inl.h"
+#include "src/base/v8-fallthrough.h"
#include "src/execution/execution.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
#include "src/logging/counters.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/keys.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
#include "src/strings/string-builder-inl.h"
#include "src/wasm/wasm-code-manager.h"
@@ -303,7 +306,7 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
} // namespace
Handle<Object> StackFrameBase::GetEvalOrigin() {
- if (!HasScript()) return isolate_->factory()->undefined_value();
+ if (!HasScript() || !IsEval()) return isolate_->factory()->undefined_value();
return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
}
@@ -321,12 +324,6 @@ bool StackFrameBase::IsEval() {
GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
}
-MaybeHandle<String> StackFrameBase::ToString() {
- IncrementalStringBuilder builder(isolate_);
- ToString(builder);
- return builder.Finish();
-}
-
void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
int frame_ix) {
DCHECK(!array->IsWasmFrame(frame_ix));
@@ -365,7 +362,7 @@ Handle<Object> JSStackFrame::GetFileName() {
}
Handle<Object> JSStackFrame::GetFunctionName() {
- Handle<String> result = JSFunction::GetName(function_);
+ Handle<String> result = JSFunction::GetDebugName(function_);
if (result->length() != 0) return result;
if (HasScript() &&
@@ -514,177 +511,6 @@ bool JSStackFrame::IsToplevel() {
return receiver_->IsJSGlobalProxy() || receiver_->IsNullOrUndefined(isolate_);
}
-namespace {
-
-bool IsNonEmptyString(Handle<Object> object) {
- return (object->IsString() && String::cast(*object).length() > 0);
-}
-
-void AppendFileLocation(Isolate* isolate, StackFrameBase* call_site,
- IncrementalStringBuilder* builder) {
- if (call_site->IsNative()) {
- builder->AppendCString("native");
- return;
- }
-
- Handle<Object> file_name = call_site->GetScriptNameOrSourceUrl();
- if (!file_name->IsString() && call_site->IsEval()) {
- Handle<Object> eval_origin = call_site->GetEvalOrigin();
- DCHECK(eval_origin->IsString());
- builder->AppendString(Handle<String>::cast(eval_origin));
- builder->AppendCString(", "); // Expecting source position to follow.
- }
-
- if (IsNonEmptyString(file_name)) {
- builder->AppendString(Handle<String>::cast(file_name));
- } else {
- // Source code does not originate from a file and is not native, but we
- // can still get the source position inside the source string, e.g. in
- // an eval string.
- builder->AppendCString("<anonymous>");
- }
-
- int line_number = call_site->GetLineNumber();
- if (line_number != StackFrameBase::kNone) {
- builder->AppendCharacter(':');
- Handle<String> line_string = isolate->factory()->NumberToString(
- handle(Smi::FromInt(line_number), isolate), isolate);
- builder->AppendString(line_string);
-
- int column_number = call_site->GetColumnNumber();
- if (column_number != StackFrameBase::kNone) {
- builder->AppendCharacter(':');
- Handle<String> column_string = isolate->factory()->NumberToString(
- handle(Smi::FromInt(column_number), isolate), isolate);
- builder->AppendString(column_string);
- }
- }
-}
-
-int StringIndexOf(Isolate* isolate, Handle<String> subject,
- Handle<String> pattern) {
- if (pattern->length() > subject->length()) return -1;
- return String::IndexOf(isolate, subject, pattern, 0);
-}
-
-// Returns true iff
-// 1. the subject ends with '.' + pattern, or
-// 2. subject == pattern.
-bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
- Handle<String> pattern) {
- if (String::Equals(isolate, subject, pattern)) return true;
-
- FlatStringReader subject_reader(isolate, String::Flatten(isolate, subject));
- FlatStringReader pattern_reader(isolate, String::Flatten(isolate, pattern));
-
- int pattern_index = pattern_reader.length() - 1;
- int subject_index = subject_reader.length() - 1;
- for (int i = 0; i <= pattern_reader.length(); i++) { // Iterate over len + 1.
- if (subject_index < 0) {
- return false;
- }
-
- const uc32 subject_char = subject_reader.Get(subject_index);
- if (i == pattern_reader.length()) {
- if (subject_char != '.') return false;
- } else if (subject_char != pattern_reader.Get(pattern_index)) {
- return false;
- }
-
- pattern_index--;
- subject_index--;
- }
-
- return true;
-}
-
-void AppendMethodCall(Isolate* isolate, JSStackFrame* call_site,
- IncrementalStringBuilder* builder) {
- Handle<Object> type_name = call_site->GetTypeName();
- Handle<Object> method_name = call_site->GetMethodName();
- Handle<Object> function_name = call_site->GetFunctionName();
-
- if (IsNonEmptyString(function_name)) {
- Handle<String> function_string = Handle<String>::cast(function_name);
- if (IsNonEmptyString(type_name)) {
- Handle<String> type_string = Handle<String>::cast(type_name);
- bool starts_with_type_name =
- (StringIndexOf(isolate, function_string, type_string) == 0);
- if (!starts_with_type_name) {
- builder->AppendString(type_string);
- builder->AppendCharacter('.');
- }
- }
- builder->AppendString(function_string);
-
- if (IsNonEmptyString(method_name)) {
- Handle<String> method_string = Handle<String>::cast(method_name);
- if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
- builder->AppendCString(" [as ");
- builder->AppendString(method_string);
- builder->AppendCharacter(']');
- }
- }
- } else {
- if (IsNonEmptyString(type_name)) {
- builder->AppendString(Handle<String>::cast(type_name));
- builder->AppendCharacter('.');
- }
- if (IsNonEmptyString(method_name)) {
- builder->AppendString(Handle<String>::cast(method_name));
- } else {
- builder->AppendCString("<anonymous>");
- }
- }
-}
-
-} // namespace
-
-void JSStackFrame::ToString(IncrementalStringBuilder& builder) {
- Handle<Object> function_name = GetFunctionName();
-
- const bool is_toplevel = IsToplevel();
- const bool is_async = IsAsync();
- const bool is_promise_all = IsPromiseAll();
- const bool is_constructor = IsConstructor();
- const bool is_method_call = !(is_toplevel || is_constructor);
-
- if (is_async) {
- builder.AppendCString("async ");
- }
- if (is_promise_all) {
- // For `Promise.all(iterable)` frames we interpret the {offset_}
- // as the element index into `iterable` where the error occurred.
- builder.AppendCString("Promise.all (index ");
- Handle<String> index_string = isolate_->factory()->NumberToString(
- handle(Smi::FromInt(offset_), isolate_), isolate_);
- builder.AppendString(index_string);
- builder.AppendCString(")");
- return;
- }
- if (is_method_call) {
- AppendMethodCall(isolate_, this, &builder);
- } else if (is_constructor) {
- builder.AppendCString("new ");
- if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- } else {
- builder.AppendCString("<anonymous>");
- }
- } else if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- } else {
- AppendFileLocation(isolate_, this, &builder);
- return;
- }
-
- builder.AppendCString(" (");
- AppendFileLocation(isolate_, this, &builder);
- builder.AppendCString(")");
-
- return;
-}
-
int JSStackFrame::GetPosition() const {
Handle<SharedFunctionInfo> shared = handle(function_->shared(), isolate_);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
@@ -749,41 +575,6 @@ Handle<Object> WasmStackFrame::GetWasmModuleName() {
return module_name;
}
-void WasmStackFrame::ToString(IncrementalStringBuilder& builder) {
- Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
- isolate_);
- MaybeHandle<String> module_name =
- WasmModuleObject::GetModuleNameOrNull(isolate_, module_object);
- MaybeHandle<String> function_name = WasmModuleObject::GetFunctionNameOrNull(
- isolate_, module_object, wasm_func_index_);
- bool has_name = !module_name.is_null() || !function_name.is_null();
- if (has_name) {
- if (module_name.is_null()) {
- builder.AppendString(function_name.ToHandleChecked());
- } else {
- builder.AppendString(module_name.ToHandleChecked());
- if (!function_name.is_null()) {
- builder.AppendCString(".");
- builder.AppendString(function_name.ToHandleChecked());
- }
- }
- builder.AppendCString(" (");
- }
-
- builder.AppendCString("wasm-function[");
-
- char buffer[16];
- SNPrintF(ArrayVector(buffer), "%u]", wasm_func_index_);
- builder.AppendCString(buffer);
-
- SNPrintF(ArrayVector(buffer), ":%d", GetPosition());
- builder.AppendCString(buffer);
-
- if (has_name) builder.AppendCString(")");
-
- return;
-}
-
int WasmStackFrame::GetPosition() const {
return IsInterpreted()
? offset_
@@ -791,6 +582,14 @@ int WasmStackFrame::GetPosition() const {
code_, offset_);
}
+int WasmStackFrame::GetColumnNumber() { return GetModuleOffset(); }
+
+int WasmStackFrame::GetModuleOffset() const {
+ const int function_offset =
+ wasm_instance_->module_object().GetFunctionOffset(wasm_func_index_);
+ return function_offset + GetPosition();
+}
+
Handle<Object> WasmStackFrame::Null() const {
return isolate_->factory()->null_value();
}
@@ -858,24 +657,6 @@ int AsmJsWasmStackFrame::GetColumnNumber() {
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
-void AsmJsWasmStackFrame::ToString(IncrementalStringBuilder& builder) {
- // The string should look exactly as the respective javascript frame string.
- // Keep this method in line to
- // JSStackFrame::ToString(IncrementalStringBuilder&).
- Handle<Object> function_name = GetFunctionName();
-
- if (IsNonEmptyString(function_name)) {
- builder.AppendString(Handle<String>::cast(function_name));
- builder.AppendCString(" (");
- }
-
- AppendFileLocation(isolate_, this, &builder);
-
- if (IsNonEmptyString(function_name)) builder.AppendCString(")");
-
- return;
-}
-
FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
Handle<FrameArray> array, int frame_ix)
: isolate_(isolate), array_(array), frame_ix_(frame_ix) {}
@@ -914,8 +695,7 @@ StackFrameBase* FrameArrayIterator::Frame() {
namespace {
MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
- Handle<FrameArray> frame_array,
- int frame_index) {
+ Handle<StackTraceFrame> frame) {
Handle<JSFunction> target =
handle(isolate->native_context()->callsite_function(), isolate);
@@ -924,6 +704,14 @@ MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
isolate, obj,
JSObject::New(target, target, Handle<AllocationSite>::null()), Object);
+ // TODO(szuend): Introduce a new symbol "call_site_frame_symbol" and set
+ // it to the StackTraceFrame. The CallSite API builtins can then
+ // be implemented using StackFrameInfo objects.
+
+ Handle<FrameArray> frame_array(FrameArray::cast(frame->frame_array()),
+ isolate);
+ int frame_index = frame->frame_index();
+
Handle<Symbol> key = isolate->factory()->call_site_frame_array_symbol();
RETURN_ON_EXCEPTION(isolate,
JSObject::SetOwnPropertyIgnoreAttributes(
@@ -943,14 +731,16 @@ MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
// a JSArray of JSCallSite objects.
MaybeHandle<JSArray> GetStackFrames(Isolate* isolate,
- Handle<FrameArray> elems) {
- const int frame_count = elems->FrameCount();
+ Handle<FixedArray> elems) {
+ const int frame_count = elems->length();
Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
for (int i = 0; i < frame_count; i++) {
Handle<Object> site;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, site,
- ConstructCallSite(isolate, elems, i), JSArray);
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
+ isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, site, ConstructCallSite(isolate, frame),
+ JSArray);
frames->set(i, *site);
}
@@ -1013,13 +803,14 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<JSObject> error,
Handle<Object> raw_stack) {
DCHECK(raw_stack->IsFixedArray());
- Handle<FrameArray> elems = Handle<FrameArray>::cast(raw_stack);
+ Handle<FixedArray> elems = Handle<FixedArray>::cast(raw_stack);
const bool in_recursion = isolate->formatting_stack_trace();
if (!in_recursion) {
+ Handle<Context> error_context = error->GetCreationContext();
+ DCHECK(error_context->IsNativeContext());
+
if (isolate->HasPrepareStackTraceCallback()) {
- Handle<Context> error_context = error->GetCreationContext();
- DCHECK(!error_context.is_null() && error_context->IsNativeContext());
PrepareStackTraceScope scope(isolate);
Handle<JSArray> sites;
@@ -1033,7 +824,8 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Object);
return result;
} else {
- Handle<JSFunction> global_error = isolate->error_function();
+ Handle<JSFunction> global_error =
+ handle(error_context->error_function(), isolate);
// If there's a user-specified "prepareStackTrace" function, call it on
// the frames and use its result.
@@ -1080,11 +872,13 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
wasm::WasmCodeRefScope wasm_code_ref_scope;
- for (FrameArrayIterator it(isolate, elems); it.HasFrame(); it.Advance()) {
+ for (int i = 0; i < elems->length(); ++i) {
builder.AppendCString("\n at ");
- StackFrameBase* frame = it.Frame();
- frame->ToString(builder);
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(elems->get(i)),
+ isolate);
+ SerializeStackTraceFrame(isolate, frame, builder);
+
if (isolate->has_pending_exception()) {
// CallSite.toString threw. Parts of the current frame might have been
// stringified already regardless. Still, try to append a string
@@ -1140,7 +934,7 @@ const char* MessageFormatter::TemplateString(MessageTemplate index) {
return STRING;
MESSAGE_TEMPLATES(CASE)
#undef CASE
- case MessageTemplate::kLastMessage:
+ case MessageTemplate::kMessageCount:
default:
return nullptr;
}
@@ -1183,7 +977,7 @@ MaybeHandle<String> MessageFormatter::Format(Isolate* isolate,
MaybeHandle<Object> ErrorUtils::Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
- bool suppress_detailed_trace) {
+ StackTraceCollection stack_trace_collection) {
// 1. If NewTarget is undefined, let newTarget be the active function object,
// else let newTarget be NewTarget.
@@ -1217,17 +1011,19 @@ MaybeHandle<Object> ErrorUtils::Construct(
Object);
}
- // Optionally capture a more detailed stack trace for the message.
- if (!suppress_detailed_trace) {
- RETURN_ON_EXCEPTION(isolate, isolate->CaptureAndSetDetailedStackTrace(err),
- Object);
+ switch (stack_trace_collection) {
+ case StackTraceCollection::kDetailed:
+ RETURN_ON_EXCEPTION(
+ isolate, isolate->CaptureAndSetDetailedStackTrace(err), Object);
+ V8_FALLTHROUGH;
+ case StackTraceCollection::kSimple:
+ RETURN_ON_EXCEPTION(
+ isolate, isolate->CaptureAndSetSimpleStackTrace(err, mode, caller),
+ Object);
+ break;
+ case StackTraceCollection::kNone:
+ break;
}
-
- // Capture a simple stack trace for the stack property.
- RETURN_ON_EXCEPTION(isolate,
- isolate->CaptureAndSetSimpleStackTrace(err, mode, caller),
- Object);
-
return err;
}
@@ -1356,7 +1152,7 @@ MaybeHandle<Object> ErrorUtils::MakeGenericError(
Handle<Object> no_caller;
return ErrorUtils::Construct(isolate, constructor, constructor, msg, mode,
- no_caller, false);
+ no_caller, StackTraceCollection::kDetailed);
}
} // namespace internal
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index 0fc3692f64..23f32c2fe1 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -12,7 +12,7 @@
#include <memory>
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/handles/handles.h"
namespace v8 {
@@ -24,7 +24,6 @@ class WasmCode;
// Forward declarations.
class AbstractCode;
class FrameArray;
-class IncrementalStringBuilder;
class JSMessageObject;
class LookupIterator;
class SharedFunctionInfo;
@@ -94,9 +93,6 @@ class StackFrameBase {
virtual bool IsConstructor() = 0;
virtual bool IsStrict() const = 0;
- MaybeHandle<String> ToString();
- virtual void ToString(IncrementalStringBuilder& builder) = 0;
-
// Used to signal that the requested field is unknown.
static const int kNone = -1;
@@ -139,8 +135,6 @@ class JSStackFrame : public StackFrameBase {
bool IsConstructor() override { return is_constructor_; }
bool IsStrict() const override { return is_strict_; }
- void ToString(IncrementalStringBuilder& builder) override;
-
private:
JSStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
@@ -177,7 +171,7 @@ class WasmStackFrame : public StackFrameBase {
int GetPosition() const override;
int GetLineNumber() override { return wasm_func_index_; }
- int GetColumnNumber() override { return kNone; }
+ int GetColumnNumber() override;
int GetPromiseIndex() const override { return kNone; }
@@ -189,8 +183,6 @@ class WasmStackFrame : public StackFrameBase {
bool IsStrict() const override { return false; }
bool IsInterpreted() const { return code_ == nullptr; }
- void ToString(IncrementalStringBuilder& builder) override;
-
protected:
Handle<Object> Null() const;
@@ -203,6 +195,8 @@ class WasmStackFrame : public StackFrameBase {
int offset_;
private:
+ int GetModuleOffset() const;
+
WasmStackFrame() = default;
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
@@ -224,8 +218,6 @@ class AsmJsWasmStackFrame : public WasmStackFrame {
int GetLineNumber() override;
int GetColumnNumber() override;
- void ToString(IncrementalStringBuilder& builder) override;
-
private:
friend class FrameArrayIterator;
AsmJsWasmStackFrame() = default;
@@ -267,10 +259,13 @@ enum FrameSkipMode {
class ErrorUtils : public AllStatic {
public:
+ // |kNone| is useful when you don't need the stack information at all, for
+ // example when creating a deserialized error.
+ enum class StackTraceCollection { kDetailed, kSimple, kNone };
static MaybeHandle<Object> Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
- bool suppress_detailed_trace);
+ StackTraceCollection stack_trace_collection);
static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
diff --git a/deps/v8/src/execution/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index 8088935154..3cc95205fa 100644
--- a/deps/v8/src/execution/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -253,7 +253,7 @@ void MicrotaskQueue::OnCompleted(Isolate* isolate) {
// set is still open (whether to clear it after every microtask or once
// during a microtask checkpoint). See also
// https://github.com/tc39/proposal-weakrefs/issues/39 .
- isolate->heap()->ClearKeepDuringJobSet();
+ isolate->heap()->ClearKeptObjects();
FireMicrotasksCompletedCallback(isolate);
}
diff --git a/deps/v8/src/execution/mips/frame-constants-mips.cc b/deps/v8/src/execution/mips/frame-constants-mips.cc
index 95d6eb951c..4c930e71a9 100644
--- a/deps/v8/src/execution/mips/frame-constants-mips.cc
+++ b/deps/v8/src/execution/mips/frame-constants-mips.cc
@@ -4,12 +4,11 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/codegen/assembler.h"
+#include "src/execution/mips/frame-constants-mips.h"
+
#include "src/codegen/mips/assembler-mips-inl.h"
-#include "src/codegen/mips/assembler-mips.h"
#include "src/execution/frame-constants.h"
-
-#include "src/execution/mips/frame-constants-mips.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index e0448f232a..6a3a160ec3 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -1356,8 +1356,8 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
return ret;
}
-void Simulator::round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs) {
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1373,32 +1373,32 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_fcsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs) {
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1414,33 +1414,33 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_fcsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
template <typename T_fp, typename T_int>
-void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int) {
+void Simulator::round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1456,32 +1456,32 @@ void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_msacsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(toRound + 0.5);
- rounded_int = static_cast<T_int>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - toRound == 0.5) {
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<T_int>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1;
+ *rounded_int -= 1;
+ *rounded -= 1;
}
break;
case kRoundToZero:
- rounded = trunc(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs) {
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1497,32 +1497,32 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs) {
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1538,26 +1538,26 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
@@ -2512,18 +2512,18 @@ float FPAbs<float>(float a) {
}
template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
if (std::isnan(a) && std::isnan(b)) {
- result = a;
+ *result = a;
} else if (std::isnan(a)) {
- result = b;
+ *result = b;
} else if (std::isnan(b)) {
- result = a;
+ *result = a;
} else if (b == a) {
// Handle -0.0 == 0.0 case.
// std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
// negates the result.
- result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
} else {
return false;
}
@@ -2533,7 +2533,7 @@ static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
template <typename T>
static T FPUMin(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
return result;
} else {
return b < a ? b : a;
@@ -2543,7 +2543,7 @@ static T FPUMin(T a, T b) {
template <typename T>
static T FPUMax(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
return result;
} else {
return b > a ? b : a;
@@ -2553,7 +2553,7 @@ static T FPUMax(T a, T b) {
template <typename T>
static T FPUMinA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) < FPAbs(b)) {
result = a;
} else if (FPAbs(b) < FPAbs(a)) {
@@ -2568,7 +2568,7 @@ static T FPUMinA(T a, T b) {
template <typename T>
static T FPUMaxA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) > FPAbs(b)) {
result = a;
} else if (FPAbs(b) > FPAbs(a)) {
@@ -2822,7 +2822,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_W_D: { // Convert double to word.
double rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -2876,7 +2876,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
int64_t result;
double rounded;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -3489,7 +3489,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
int64_t result;
float rounded;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -3502,7 +3502,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_W_S: {
float rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -5271,128 +5271,128 @@ void Simulator::DecodeTypeMsa3R() {
}
template <typename T_int, typename T_fp, typename T_reg>
-void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
const T_int all_ones = static_cast<T_int>(-1);
const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
switch (opcode) {
case FCUN: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCEQ: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUEQ: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLT: {
if (s_element >= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULT: {
if (s_element < t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLE: {
if (s_element > t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULE: {
if (s_element <= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCOR: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUNE: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCNE: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FADD:
- wd = bit_cast<T_int>(s_element + t_element);
+ *wd = bit_cast<T_int>(s_element + t_element);
break;
case FSUB:
- wd = bit_cast<T_int>(s_element - t_element);
+ *wd = bit_cast<T_int>(s_element - t_element);
break;
case FMUL:
- wd = bit_cast<T_int>(s_element * t_element);
+ *wd = bit_cast<T_int>(s_element * t_element);
break;
case FDIV: {
if (t_element == 0) {
- wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- wd = bit_cast<T_int>(s_element / t_element);
+ *wd = bit_cast<T_int>(s_element / t_element);
}
} break;
case FMADD:
- wd = bit_cast<T_int>(
- std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FMSUB:
- wd = bit_cast<T_int>(
- std::fma(s_element, -t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, -t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FEXP2:
- wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
+ *wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
break;
case FMIN:
- wd = bit_cast<T_int>(std::min(s_element, t_element));
+ *wd = bit_cast<T_int>(std::min(s_element, t_element));
break;
case FMAX:
- wd = bit_cast<T_int>(std::max(s_element, t_element));
+ *wd = bit_cast<T_int>(std::max(s_element, t_element));
break;
case FMIN_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element);
} break;
case FMAX_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element);
} break;
case FSOR:
@@ -5414,7 +5414,7 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
}
template <typename T_int, typename T_int_dbl, typename T_reg>
-void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
// using T_uint = typename std::make_unsigned<T_int>::type;
using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
const T_int max_int = std::numeric_limits<T_int>::max();
@@ -5432,16 +5432,16 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
if (product == min_fix_dbl) {
product = max_fix_dbl;
}
- wd = static_cast<T_int>(product >> shift);
+ *wd = static_cast<T_int>(product >> shift);
} break;
case MADD_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUB_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MULR_Q: {
@@ -5449,23 +5449,23 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
if (product == min_fix_dbl) {
- wd = static_cast<T_int>(max_fix_dbl >> shift);
+ *wd = static_cast<T_int>(max_fix_dbl >> shift);
break;
}
- wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
+ *wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
} break;
case MADDR_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUBR_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
default:
@@ -5588,19 +5588,19 @@ void Simulator::DecodeTypeMsa3RF() {
#undef PACK_FLOAT16
#undef FEXDO_DF
case FTQ:
-#define FTQ_DF(source, dst, fp_type, int_type) \
- element = bit_cast<fp_type>(source) * \
- (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
- if (element > std::numeric_limits<int_type>::max()) { \
- dst = std::numeric_limits<int_type>::max(); \
- } else if (element < std::numeric_limits<int_type>::min()) { \
- dst = std::numeric_limits<int_type>::min(); \
- } else if (std::isnan(element)) { \
- dst = 0; \
- } else { \
- int_type fixed_point; \
- round_according_to_msacsr(element, element, fixed_point); \
- dst = fixed_point; \
+#define FTQ_DF(source, dst, fp_type, int_type) \
+ element = bit_cast<fp_type>(source) * \
+ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
+ if (element > std::numeric_limits<int_type>::max()) { \
+ dst = std::numeric_limits<int_type>::max(); \
+ } else if (element < std::numeric_limits<int_type>::min()) { \
+ dst = std::numeric_limits<int_type>::min(); \
+ } else if (std::isnan(element)) { \
+ dst = 0; \
+ } else { \
+ int_type fixed_point; \
+ round_according_to_msacsr(element, &element, &fixed_point); \
+ dst = fixed_point; \
}
switch (DecodeMsaDataFormat()) {
@@ -5623,13 +5623,13 @@ void Simulator::DecodeTypeMsa3RF() {
}
break;
#undef FTQ_DF
-#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, &(wd)); \
}
-#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, &(wd)); \
}
case MADD_Q:
case MSUB_Q:
@@ -5859,7 +5859,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
#undef QUIET_BIT_D
template <typename T_int, typename T_fp, typename T_src, typename T_dst>
-T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
Simulator* sim) {
using T_uint = typename std::make_unsigned<T_int>::type;
switch (opcode) {
@@ -5878,37 +5878,37 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_INFINITE:
if (std::signbit(element)) {
- dst = NEG_INFINITY_BIT;
+ *dst = NEG_INFINITY_BIT;
} else {
- dst = POS_INFINITY_BIT;
+ *dst = POS_INFINITY_BIT;
}
break;
case FP_NAN:
if (isSnan(element)) {
- dst = SNAN_BIT;
+ *dst = SNAN_BIT;
} else {
- dst = QNAN_BIT;
+ *dst = QNAN_BIT;
}
break;
case FP_NORMAL:
if (std::signbit(element)) {
- dst = NEG_NORMAL_BIT;
+ *dst = NEG_NORMAL_BIT;
} else {
- dst = POS_NORMAL_BIT;
+ *dst = POS_NORMAL_BIT;
}
break;
case FP_SUBNORMAL:
if (std::signbit(element)) {
- dst = NEG_SUBNORMAL_BIT;
+ *dst = NEG_SUBNORMAL_BIT;
} else {
- dst = POS_SUBNORMAL_BIT;
+ *dst = POS_SUBNORMAL_BIT;
}
break;
case FP_ZERO:
if (std::signbit(element)) {
- dst = NEG_ZERO_BIT;
+ *dst = NEG_ZERO_BIT;
} else {
- dst = POS_ZERO_BIT;
+ *dst = POS_ZERO_BIT;
}
break;
default:
@@ -5932,11 +5932,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= min_int) {
- dst = element >= max_int ? max_int : min_int;
+ *dst = element >= max_int ? max_int : min_int;
} else {
- dst = static_cast<T_int>(std::trunc(element));
+ *dst = static_cast<T_int>(std::trunc(element));
}
break;
}
@@ -5944,49 +5944,49 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= 0) {
- dst = element >= max_int ? max_int : 0;
+ *dst = element >= max_int ? max_int : 0;
} else {
- dst = static_cast<T_uint>(std::trunc(element));
+ *dst = static_cast<T_uint>(std::trunc(element));
}
break;
}
case FSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::sqrt(element));
+ *dst = bit_cast<T_int>(std::sqrt(element));
}
break;
}
case FRSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / std::sqrt(element));
+ *dst = bit_cast<T_int>(1 / std::sqrt(element));
}
break;
}
case FRCP: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / element);
+ *dst = bit_cast<T_int>(1 / element);
}
break;
}
case FRINT: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
T_int dummy;
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dummy);
- dst = bit_cast<T_int>(element);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, &dummy);
+ *dst = bit_cast<T_int>(element);
}
break;
}
@@ -5995,19 +5995,19 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_NORMAL:
case FP_SUBNORMAL:
- dst = bit_cast<T_int>(std::logb(element));
+ *dst = bit_cast<T_int>(std::logb(element));
break;
case FP_ZERO:
- dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
break;
case FP_NAN:
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
break;
case FP_INFINITE:
if (element < 0) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
}
break;
default:
@@ -6020,11 +6020,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < min_int || element > max_int) {
- dst = element > max_int ? max_int : min_int;
+ *dst = element > max_int ? max_int : min_int;
} else {
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dst);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, dst);
}
break;
}
@@ -6032,22 +6032,22 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_uint = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < 0 || element > max_uint) {
- dst = element > max_uint ? max_uint : 0;
+ *dst = element > max_uint ? max_uint : 0;
} else {
T_uint res;
- sim->round_according_to_msacsr<T_fp, T_uint>(element, element, res);
- dst = *reinterpret_cast<T_int*>(&res);
+ sim->round_according_to_msacsr<T_fp, T_uint>(element, &element, &res);
+ *dst = *reinterpret_cast<T_int*>(&res);
}
break;
}
case FFINT_S:
- dst = bit_cast<T_int>(static_cast<T_fp>(src));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(src));
break;
case FFINT_U:
using uT_src = typename std::make_unsigned<T_src>::type;
- dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
UNREACHABLE();
@@ -6157,12 +6157,12 @@ void Simulator::DecodeTypeMsa2RF() {
switch (DecodeMsaDataFormat()) {
case MSA_WORD:
for (int i = 0; i < kMSALanesWord; i++) {
- Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], wd.w[i], this);
+ Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], &wd.w[i], this);
}
break;
case MSA_DWORD:
for (int i = 0; i < kMSALanesDword; i++) {
- Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], wd.d[i], this);
+ Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], &wd.d[i], this);
}
break;
default:
diff --git a/deps/v8/src/execution/mips/simulator-mips.h b/deps/v8/src/execution/mips/simulator-mips.h
index b5712d1a82..28e38fd0a5 100644
--- a/deps/v8/src/execution/mips/simulator-mips.h
+++ b/deps/v8/src/execution/mips/simulator-mips.h
@@ -258,16 +258,16 @@ class Simulator : public SimulatorBase {
bool set_fcsr_round_error(float original, float rounded);
bool set_fcsr_round64_error(double original, double rounded);
bool set_fcsr_round64_error(float original, float rounded);
- void round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs);
- void round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs);
template <typename Tfp, typename Tint>
- void round_according_to_msacsr(Tfp toRound, Tfp& rounded, Tint& rounded_int);
- void round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs);
- void round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs);
+ void round_according_to_msacsr(Tfp toRound, Tfp* rounded, Tint* rounded_int);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
diff --git a/deps/v8/src/execution/mips64/frame-constants-mips64.cc b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
index 68398605ba..97ef183592 100644
--- a/deps/v8/src/execution/mips64/frame-constants-mips64.cc
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
@@ -4,10 +4,9 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/codegen/assembler.h"
#include "src/codegen/mips64/assembler-mips64-inl.h"
-#include "src/codegen/mips64/assembler-mips64.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
#include "src/execution/mips64/frame-constants-mips64.h"
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index 7c45e7f82d..3fbf1961a8 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -1285,8 +1285,8 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
}
// For cvt instructions only
-void Simulator::round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs) {
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1302,32 +1302,32 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs) {
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1343,33 +1343,33 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
// for cvt instructions only
-void Simulator::round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs) {
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1385,32 +1385,32 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int32_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int32_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int32_t>(*rounded);
break;
}
}
-void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs) {
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1426,33 +1426,33 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
// the next representable value down. Behave like floor_w_d.
switch (FCSR_ & 3) {
case kRoundToNearest:
- rounded = std::floor(fs + 0.5);
- rounded_int = static_cast<int64_t>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - fs == 0.5) {
+ *rounded = std::floor(fs + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.f;
+ *rounded_int -= 1;
+ *rounded -= 1.f;
}
break;
case kRoundToZero:
- rounded = trunc(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = trunc(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::ceil(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(fs);
- rounded_int = static_cast<int64_t>(rounded);
+ *rounded = std::floor(fs);
+ *rounded_int = static_cast<int64_t>(*rounded);
break;
}
}
template <typename T_fp, typename T_int>
-void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int) {
+void Simulator::round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int) {
// 0 RN (round to nearest): Round a result to the nearest
// representable value; if the result is exactly halfway between
// two representable values, round to zero. Behave like round_w_d.
@@ -1468,26 +1468,26 @@ void Simulator::round_according_to_msacsr(T_fp toRound, T_fp& rounded,
// the next representable value down. Behave like floor_w_d.
switch (get_msacsr_rounding_mode()) {
case kRoundToNearest:
- rounded = std::floor(toRound + 0.5);
- rounded_int = static_cast<T_int>(rounded);
- if ((rounded_int & 1) != 0 && rounded_int - toRound == 0.5) {
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<T_int>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
// If the number is halfway between two integers,
// round to the even one.
- rounded_int--;
- rounded -= 1.;
+ *rounded_int -= 1;
+ *rounded -= 1.;
}
break;
case kRoundToZero:
- rounded = trunc(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToPlusInf:
- rounded = std::ceil(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
case kRoundToMinusInf:
- rounded = std::floor(toRound);
- rounded_int = static_cast<T_int>(rounded);
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<T_int>(*rounded);
break;
}
}
@@ -2507,18 +2507,18 @@ float FPAbs<float>(float a) {
}
template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
if (std::isnan(a) && std::isnan(b)) {
- result = a;
+ *result = a;
} else if (std::isnan(a)) {
- result = b;
+ *result = b;
} else if (std::isnan(b)) {
- result = a;
+ *result = a;
} else if (b == a) {
// Handle -0.0 == 0.0 case.
// std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
// negates the result.
- result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
} else {
return false;
}
@@ -2528,7 +2528,7 @@ static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
template <typename T>
static T FPUMin(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
return result;
} else {
return b < a ? b : a;
@@ -2538,7 +2538,7 @@ static T FPUMin(T a, T b) {
template <typename T>
static T FPUMax(T a, T b) {
T result;
- if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
return result;
} else {
return b > a ? b : a;
@@ -2548,7 +2548,7 @@ static T FPUMax(T a, T b) {
template <typename T>
static T FPUMinA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) < FPAbs(b)) {
result = a;
} else if (FPAbs(b) < FPAbs(a)) {
@@ -2563,7 +2563,7 @@ static T FPUMinA(T a, T b) {
template <typename T>
static T FPUMaxA(T a, T b) {
T result;
- if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
if (FPAbs(a) > FPAbs(b)) {
result = a;
} else if (FPAbs(b) > FPAbs(a)) {
@@ -2829,7 +2829,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_L_S: {
float rounded;
int64_t result;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -2839,7 +2839,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
case CVT_W_S: {
float rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -3189,7 +3189,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_W_D: { // Convert double to word.
double rounded;
int32_t result;
- round_according_to_fcsr(fs, rounded, result, fs);
+ round_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUWordResult(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register_word_invalid_result(fs, rounded);
@@ -3243,7 +3243,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
case CVT_L_D: { // Mips64r2: Truncate double to 64-bit long-word.
double rounded;
int64_t result;
- round64_according_to_fcsr(fs, rounded, result, fs);
+ round64_according_to_fcsr(fs, &rounded, &result, fs);
SetFPUResult(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
set_fpu_register_invalid_result64(fs, rounded);
@@ -5544,128 +5544,128 @@ void Simulator::DecodeTypeMsa3R() {
}
template <typename T_int, typename T_fp, typename T_reg>
-void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
const T_int all_ones = static_cast<T_int>(-1);
const T_fp s_element = *reinterpret_cast<T_fp*>(&ws);
const T_fp t_element = *reinterpret_cast<T_fp*>(&wt);
switch (opcode) {
case FCUN: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCEQ: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUEQ: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLT: {
if (s_element >= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULT: {
if (s_element < t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCLE: {
if (s_element > t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCULE: {
if (s_element <= t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCOR: {
if (std::isnan(s_element) || std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FCUNE: {
if (s_element != t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = all_ones;
+ *wd = all_ones;
} else {
- wd = 0;
+ *wd = 0;
}
} break;
case FCNE: {
if (s_element == t_element || std::isnan(s_element) ||
std::isnan(t_element)) {
- wd = 0;
+ *wd = 0;
} else {
- wd = all_ones;
+ *wd = all_ones;
}
} break;
case FADD:
- wd = bit_cast<T_int>(s_element + t_element);
+ *wd = bit_cast<T_int>(s_element + t_element);
break;
case FSUB:
- wd = bit_cast<T_int>(s_element - t_element);
+ *wd = bit_cast<T_int>(s_element - t_element);
break;
case FMUL:
- wd = bit_cast<T_int>(s_element * t_element);
+ *wd = bit_cast<T_int>(s_element * t_element);
break;
case FDIV: {
if (t_element == 0) {
- wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *wd = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- wd = bit_cast<T_int>(s_element / t_element);
+ *wd = bit_cast<T_int>(s_element / t_element);
}
} break;
case FMADD:
- wd = bit_cast<T_int>(
- std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FMSUB:
- wd = bit_cast<T_int>(
- std::fma(-s_element, t_element, *reinterpret_cast<T_fp*>(&wd)));
+ *wd = bit_cast<T_int>(
+ std::fma(-s_element, t_element, *reinterpret_cast<T_fp*>(wd)));
break;
case FEXP2:
- wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
+ *wd = bit_cast<T_int>(std::ldexp(s_element, static_cast<int>(wt)));
break;
case FMIN:
- wd = bit_cast<T_int>(std::min(s_element, t_element));
+ *wd = bit_cast<T_int>(std::min(s_element, t_element));
break;
case FMAX:
- wd = bit_cast<T_int>(std::max(s_element, t_element));
+ *wd = bit_cast<T_int>(std::max(s_element, t_element));
break;
case FMIN_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element);
} break;
case FMAX_A: {
- wd = bit_cast<T_int>(
+ *wd = bit_cast<T_int>(
std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element);
} break;
case FSOR:
@@ -5687,7 +5687,7 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
}
template <typename T_int, typename T_int_dbl, typename T_reg>
-void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
+void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) {
// using T_uint = typename std::make_unsigned<T_int>::type;
using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
const T_int max_int = std::numeric_limits<T_int>::max();
@@ -5705,16 +5705,16 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
if (product == min_fix_dbl) {
product = max_fix_dbl;
}
- wd = static_cast<T_int>(product >> shift);
+ *wd = static_cast<T_int>(product >> shift);
} break;
case MADD_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUB_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift)) >> shift;
- wd = static_cast<T_int>(
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift)) >> shift;
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MULR_Q: {
@@ -5722,23 +5722,23 @@ void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
bit_cast<T_uint_dbl>(std::numeric_limits<T_int_dbl>::min()) >> 1U;
const T_int_dbl max_fix_dbl = std::numeric_limits<T_int_dbl>::max() >> 1U;
if (product == min_fix_dbl) {
- wd = static_cast<T_int>(max_fix_dbl >> shift);
+ *wd = static_cast<T_int>(max_fix_dbl >> shift);
break;
}
- wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
+ *wd = static_cast<T_int>((product + (1 << (shift - 1))) >> shift);
} break;
case MADDR_Q: {
- result = (product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
case MSUBR_Q: {
- result = (-product + (static_cast<T_int_dbl>(wd) << shift) +
+ result = (-product + (static_cast<T_int_dbl>(*wd) << shift) +
(1 << (shift - 1))) >>
shift;
- wd = static_cast<T_int>(
+ *wd = static_cast<T_int>(
result > max_int ? max_int : result < min_int ? min_int : result);
} break;
default:
@@ -5861,19 +5861,19 @@ void Simulator::DecodeTypeMsa3RF() {
#undef PACK_FLOAT16
#undef FEXDO_DF
case FTQ:
-#define FTQ_DF(source, dst, fp_type, int_type) \
- element = bit_cast<fp_type>(source) * \
- (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
- if (element > std::numeric_limits<int_type>::max()) { \
- dst = std::numeric_limits<int_type>::max(); \
- } else if (element < std::numeric_limits<int_type>::min()) { \
- dst = std::numeric_limits<int_type>::min(); \
- } else if (std::isnan(element)) { \
- dst = 0; \
- } else { \
- int_type fixed_point; \
- round_according_to_msacsr(element, element, fixed_point); \
- dst = fixed_point; \
+#define FTQ_DF(source, dst, fp_type, int_type) \
+ element = bit_cast<fp_type>(source) * \
+ (1U << (sizeof(int_type) * kBitsPerByte - 1)); \
+ if (element > std::numeric_limits<int_type>::max()) { \
+ dst = std::numeric_limits<int_type>::max(); \
+ } else if (element < std::numeric_limits<int_type>::min()) { \
+ dst = std::numeric_limits<int_type>::min(); \
+ } else if (std::isnan(element)) { \
+ dst = 0; \
+ } else { \
+ int_type fixed_point; \
+ round_according_to_msacsr(element, &element, &fixed_point); \
+ dst = fixed_point; \
}
switch (DecodeMsaDataFormat()) {
@@ -5896,13 +5896,13 @@ void Simulator::DecodeTypeMsa3RF() {
}
break;
#undef FTQ_DF
-#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper<T1, T2>(opcode, ws, wt, &(wd)); \
}
-#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
- for (int i = 0; i < Lanes; i++) { \
- Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, wd); \
+#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \
+ for (int i = 0; i < Lanes; i++) { \
+ Msa3RFInstrHelper2<T1, T2>(opcode, ws, wt, &(wd)); \
}
case MADD_Q:
case MSUB_Q:
@@ -6139,7 +6139,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
#undef QUIET_BIT_D
template <typename T_int, typename T_fp, typename T_src, typename T_dst>
-T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
+T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst,
Simulator* sim) {
using T_uint = typename std::make_unsigned<T_int>::type;
switch (opcode) {
@@ -6158,37 +6158,37 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_INFINITE:
if (std::signbit(element)) {
- dst = NEG_INFINITY_BIT;
+ *dst = NEG_INFINITY_BIT;
} else {
- dst = POS_INFINITY_BIT;
+ *dst = POS_INFINITY_BIT;
}
break;
case FP_NAN:
if (isSnan(element)) {
- dst = SNAN_BIT;
+ *dst = SNAN_BIT;
} else {
- dst = QNAN_BIT;
+ *dst = QNAN_BIT;
}
break;
case FP_NORMAL:
if (std::signbit(element)) {
- dst = NEG_NORMAL_BIT;
+ *dst = NEG_NORMAL_BIT;
} else {
- dst = POS_NORMAL_BIT;
+ *dst = POS_NORMAL_BIT;
}
break;
case FP_SUBNORMAL:
if (std::signbit(element)) {
- dst = NEG_SUBNORMAL_BIT;
+ *dst = NEG_SUBNORMAL_BIT;
} else {
- dst = POS_SUBNORMAL_BIT;
+ *dst = POS_SUBNORMAL_BIT;
}
break;
case FP_ZERO:
if (std::signbit(element)) {
- dst = NEG_ZERO_BIT;
+ *dst = NEG_ZERO_BIT;
} else {
- dst = POS_ZERO_BIT;
+ *dst = POS_ZERO_BIT;
}
break;
default:
@@ -6212,11 +6212,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= min_int) {
- dst = element >= max_int ? max_int : min_int;
+ *dst = element >= max_int ? max_int : min_int;
} else {
- dst = static_cast<T_int>(std::trunc(element));
+ *dst = static_cast<T_int>(std::trunc(element));
}
break;
}
@@ -6224,49 +6224,49 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_int = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element >= max_int || element <= 0) {
- dst = element >= max_int ? max_int : 0;
+ *dst = element >= max_int ? max_int : 0;
} else {
- dst = static_cast<T_uint>(std::trunc(element));
+ *dst = static_cast<T_uint>(std::trunc(element));
}
break;
}
case FSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::sqrt(element));
+ *dst = bit_cast<T_int>(std::sqrt(element));
}
break;
}
case FRSQRT: {
T_fp element = bit_cast<T_fp>(src);
if (element < 0 || std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / std::sqrt(element));
+ *dst = bit_cast<T_int>(1 / std::sqrt(element));
}
break;
}
case FRCP: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(1 / element);
+ *dst = bit_cast<T_int>(1 / element);
}
break;
}
case FRINT: {
T_fp element = bit_cast<T_fp>(src);
if (std::isnan(element)) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
T_int dummy;
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dummy);
- dst = bit_cast<T_int>(element);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, &dummy);
+ *dst = bit_cast<T_int>(element);
}
break;
}
@@ -6275,19 +6275,19 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
switch (std::fpclassify(element)) {
case FP_NORMAL:
case FP_SUBNORMAL:
- dst = bit_cast<T_int>(std::logb(element));
+ *dst = bit_cast<T_int>(std::logb(element));
break;
case FP_ZERO:
- dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(-std::numeric_limits<T_fp>::infinity());
break;
case FP_NAN:
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
break;
case FP_INFINITE:
if (element < 0) {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::quiet_NaN());
} else {
- dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
+ *dst = bit_cast<T_int>(std::numeric_limits<T_fp>::infinity());
}
break;
default:
@@ -6300,11 +6300,11 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < min_int || element > max_int) {
- dst = element > max_int ? max_int : min_int;
+ *dst = element > max_int ? max_int : min_int;
} else {
- sim->round_according_to_msacsr<T_fp, T_int>(element, element, dst);
+ sim->round_according_to_msacsr<T_fp, T_int>(element, &element, dst);
}
break;
}
@@ -6312,22 +6312,22 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
T_fp element = bit_cast<T_fp>(src);
const T_uint max_uint = std::numeric_limits<T_uint>::max();
if (std::isnan(element)) {
- dst = 0;
+ *dst = 0;
} else if (element < 0 || element > max_uint) {
- dst = element > max_uint ? max_uint : 0;
+ *dst = element > max_uint ? max_uint : 0;
} else {
T_uint res;
- sim->round_according_to_msacsr<T_fp, T_uint>(element, element, res);
- dst = *reinterpret_cast<T_int*>(&res);
+ sim->round_according_to_msacsr<T_fp, T_uint>(element, &element, &res);
+ *dst = *reinterpret_cast<T_int*>(&res);
}
break;
}
case FFINT_S:
- dst = bit_cast<T_int>(static_cast<T_fp>(src));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(src));
break;
case FFINT_U:
using uT_src = typename std::make_unsigned<T_src>::type;
- dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
+ *dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
UNREACHABLE();
@@ -6437,12 +6437,12 @@ void Simulator::DecodeTypeMsa2RF() {
switch (DecodeMsaDataFormat()) {
case MSA_WORD:
for (int i = 0; i < kMSALanesWord; i++) {
- Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], wd.w[i], this);
+ Msa2RFInstrHelper<int32_t, float>(opcode, ws.w[i], &wd.w[i], this);
}
break;
case MSA_DWORD:
for (int i = 0; i < kMSALanesDword; i++) {
- Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], wd.d[i], this);
+ Msa2RFInstrHelper<int64_t, double>(opcode, ws.d[i], &wd.d[i], this);
}
break;
default:
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.h b/deps/v8/src/execution/mips64/simulator-mips64.h
index d1251f5f0e..2bfcbe9d98 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.h
+++ b/deps/v8/src/execution/mips64/simulator-mips64.h
@@ -255,17 +255,17 @@ class Simulator : public SimulatorBase {
bool set_fcsr_round64_error(double original, double rounded);
bool set_fcsr_round_error(float original, float rounded);
bool set_fcsr_round64_error(float original, float rounded);
- void round_according_to_fcsr(double toRound, double& rounded,
- int32_t& rounded_int, double fs);
- void round64_according_to_fcsr(double toRound, double& rounded,
- int64_t& rounded_int, double fs);
- void round_according_to_fcsr(float toRound, float& rounded,
- int32_t& rounded_int, float fs);
- void round64_according_to_fcsr(float toRound, float& rounded,
- int64_t& rounded_int, float fs);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int, double fs);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int, double fs);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int, float fs);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int, float fs);
template <typename T_fp, typename T_int>
- void round_according_to_msacsr(T_fp toRound, T_fp& rounded,
- T_int& rounded_int);
+ void round_according_to_msacsr(T_fp toRound, T_fp* rounded,
+ T_int* rounded_int);
void set_fcsr_rounding_mode(FPURoundingMode mode);
void set_msacsr_rounding_mode(FPURoundingMode mode);
unsigned int get_fcsr_rounding_mode();
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 6cd4daa33c..96308f7f5b 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -342,7 +342,7 @@ void PPCDebugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 8093497168..8a82e32243 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -372,7 +372,7 @@ void S390Debugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
@@ -5149,27 +5149,6 @@ EVALUATE(STM) {
return length;
}
-EVALUATE(TM) {
- DCHECK_OPCODE(TM);
- // Test Under Mask (Mem - Imm) (8)
- DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
- int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
- intptr_t addr = b1_val + d1_val;
- uint8_t mem_val = ReadB(addr);
- uint8_t selected_bits = mem_val & imm_val;
- // CC0: Selected bits are zero
- // CC1: Selected bits mixed zeros and ones
- // CC3: Selected bits all ones
- if (0 == selected_bits) {
- condition_reg_ = CC_EQ; // CC0
- } else if (selected_bits == imm_val) {
- condition_reg_ = 0x1; // CC3
- } else {
- condition_reg_ = 0x4; // CC1
- }
- return length;
-}
-
EVALUATE(MVI) {
UNIMPLEMENTED();
USE(instr);
@@ -5595,7 +5574,8 @@ EVALUATE(LLILL) {
return 0;
}
-inline static int TestUnderMask(uint16_t val, uint16_t mask) {
+inline static int TestUnderMask(uint16_t val, uint16_t mask,
+ bool is_tm_or_tmy) {
// Test if all selected bits are zeros or mask is zero
if (0 == (mask & val)) {
return 0x8;
@@ -5607,6 +5587,13 @@ inline static int TestUnderMask(uint16_t val, uint16_t mask) {
}
// Now we know selected bits mixed zeros and ones
+ // Test if it is TM or TMY since they have
+ // different CC result from TMLL/TMLH/TMHH/TMHL
+ if (is_tm_or_tmy) {
+ return 0x4;
+ }
+
+ // Now we know the instruction is TMLL/TMLH/TMHH/TMHL
// Test if the leftmost bit is zero or one
#if defined(__GNUC__)
int leadingZeros = __builtin_clz(mask);
@@ -5639,7 +5626,8 @@ EVALUATE(TMLH) {
DECODE_RI_A_INSTRUCTION(instr, r1, i2);
uint32_t value = get_low_register<uint32_t>(r1) >> 16;
uint32_t mask = i2 & 0x0000FFFF;
- condition_reg_ = TestUnderMask(value, mask);
+ bool is_tm_or_tmy = 0;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
return length; // DONE
}
@@ -5648,20 +5636,29 @@ EVALUATE(TMLL) {
DECODE_RI_A_INSTRUCTION(instr, r1, i2);
uint32_t value = get_low_register<uint32_t>(r1) & 0x0000FFFF;
uint32_t mask = i2 & 0x0000FFFF;
- condition_reg_ = TestUnderMask(value, mask);
+ bool is_tm_or_tmy = 0;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
return length; // DONE
}
EVALUATE(TMHH) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(TMHH);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ uint32_t value = get_high_register<uint32_t>(r1) >> 16;
+ uint32_t mask = i2 & 0x0000FFFF;
+ bool is_tm_or_tmy = 0;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
+ return length;
}
EVALUATE(TMHL) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(TMHL);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ uint32_t value = get_high_register<uint32_t>(r1) & 0x0000FFFF;
+ uint32_t mask = i2 & 0x0000FFFF;
+ bool is_tm_or_tmy = 0;
+ condition_reg_ = TestUnderMask(value, mask, is_tm_or_tmy);
+ return length;
}
EVALUATE(BRAS) {
@@ -9972,26 +9969,31 @@ EVALUATE(ECAG) {
return 0;
}
+EVALUATE(TM) {
+ DCHECK_OPCODE(TM);
+ // Test Under Mask (Mem - Imm) (8)
+ DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t selected_bits = mem_val & imm_val;
+ // is TM
+ bool is_tm_or_tmy = 1;
+ condition_reg_ = TestUnderMask(selected_bits, imm_val, is_tm_or_tmy);
+ return length;
+}
+
EVALUATE(TMY) {
DCHECK_OPCODE(TMY);
// Test Under Mask (Mem - Imm) (8)
- DECODE_SIY_INSTRUCTION(b1, d1, i2);
+ DECODE_SIY_INSTRUCTION(b1, d1_val, imm_val);
int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
- intptr_t d1_val = d1;
intptr_t addr = b1_val + d1_val;
uint8_t mem_val = ReadB(addr);
- uint8_t imm_val = i2;
uint8_t selected_bits = mem_val & imm_val;
- // CC0: Selected bits are zero
- // CC1: Selected bits mixed zeros and ones
- // CC3: Selected bits all ones
- if (0 == selected_bits) {
- condition_reg_ = CC_EQ; // CC0
- } else if (selected_bits == imm_val) {
- condition_reg_ = 0x1; // CC3
- } else {
- condition_reg_ = 0x4; // CC1
- }
+ // is TMY
+ bool is_tm_or_tmy = 1;
+ condition_reg_ = TestUnderMask(selected_bits, imm_val, is_tm_or_tmy);
return length;
}
diff --git a/deps/v8/src/execution/stack-guard.cc b/deps/v8/src/execution/stack-guard.cc
new file mode 100644
index 0000000000..e5c24cef1e
--- /dev/null
+++ b/deps/v8/src/execution/stack-guard.cc
@@ -0,0 +1,345 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/stack-guard.h"
+
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/execution/interrupts-scope.h"
+#include "src/execution/isolate.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/simulator.h"
+#include "src/logging/counters.h"
+#include "src/roots/roots-inl.h"
+#include "src/utils/memcopy.h"
+#include "src/wasm/wasm-engine.h"
+
+namespace v8 {
+namespace internal {
+
+void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
+ DCHECK_NOT_NULL(isolate_);
+ thread_local_.set_jslimit(kInterruptLimit);
+ thread_local_.set_climit(kInterruptLimit);
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::reset_limits(const ExecutionAccess& lock) {
+ DCHECK_NOT_NULL(isolate_);
+ thread_local_.set_jslimit(thread_local_.real_jslimit_);
+ thread_local_.set_climit(thread_local_.real_climit_);
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::SetStackLimit(uintptr_t limit) {
+ ExecutionAccess access(isolate_);
+ // If the current limits are special (e.g. due to a pending interrupt) then
+ // leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
+ if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
+ thread_local_.set_jslimit(jslimit);
+ }
+ if (thread_local_.climit() == thread_local_.real_climit_) {
+ thread_local_.set_climit(limit);
+ }
+ thread_local_.real_climit_ = limit;
+ thread_local_.real_jslimit_ = jslimit;
+}
+
+void StackGuard::AdjustStackLimitForSimulator() {
+ ExecutionAccess access(isolate_);
+ uintptr_t climit = thread_local_.real_climit_;
+ // If the current limits are special (e.g. due to a pending interrupt) then
+ // leave them alone.
+ uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
+ if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
+ thread_local_.set_jslimit(jslimit);
+ isolate_->heap()->SetStackLimits();
+ }
+}
+
+void StackGuard::EnableInterrupts() {
+ ExecutionAccess access(isolate_);
+ if (has_pending_interrupts(access)) {
+ set_interrupt_limits(access);
+ }
+}
+
+void StackGuard::DisableInterrupts() {
+ ExecutionAccess access(isolate_);
+ reset_limits(access);
+}
+
+void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
+ ExecutionAccess access(isolate_);
+ DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
+ if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
+ // Intercept already requested interrupts.
+ int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
+ scope->intercepted_flags_ = intercepted;
+ thread_local_.interrupt_flags_ &= ~intercepted;
+ } else {
+ DCHECK_EQ(scope->mode_, InterruptsScope::kRunInterrupts);
+ // Restore postponed interrupts.
+ int restored_flags = 0;
+ for (InterruptsScope* current = thread_local_.interrupt_scopes_;
+ current != nullptr; current = current->prev_) {
+ restored_flags |= (current->intercepted_flags_ & scope->intercept_mask_);
+ current->intercepted_flags_ &= ~scope->intercept_mask_;
+ }
+ thread_local_.interrupt_flags_ |= restored_flags;
+ }
+ if (!has_pending_interrupts(access)) reset_limits(access);
+ // Add scope to the chain.
+ scope->prev_ = thread_local_.interrupt_scopes_;
+ thread_local_.interrupt_scopes_ = scope;
+}
+
+void StackGuard::PopInterruptsScope() {
+ ExecutionAccess access(isolate_);
+ InterruptsScope* top = thread_local_.interrupt_scopes_;
+ DCHECK_NE(top->mode_, InterruptsScope::kNoop);
+ if (top->mode_ == InterruptsScope::kPostponeInterrupts) {
+ // Make intercepted interrupts active.
+ DCHECK_EQ(thread_local_.interrupt_flags_ & top->intercept_mask_, 0);
+ thread_local_.interrupt_flags_ |= top->intercepted_flags_;
+ } else {
+ DCHECK_EQ(top->mode_, InterruptsScope::kRunInterrupts);
+    // Postpone existing interrupts if needed.
+ if (top->prev_) {
+ for (int interrupt = 1; interrupt < ALL_INTERRUPTS;
+ interrupt = interrupt << 1) {
+ InterruptFlag flag = static_cast<InterruptFlag>(interrupt);
+ if ((thread_local_.interrupt_flags_ & flag) &&
+ top->prev_->Intercept(flag)) {
+ thread_local_.interrupt_flags_ &= ~flag;
+ }
+ }
+ }
+ }
+ if (has_pending_interrupts(access)) set_interrupt_limits(access);
+ // Remove scope from chain.
+ thread_local_.interrupt_scopes_ = top->prev_;
+}
+
+bool StackGuard::CheckInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ return thread_local_.interrupt_flags_ & flag;
+}
+
+void StackGuard::RequestInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ // Check the chain of InterruptsScope for interception.
+ if (thread_local_.interrupt_scopes_ &&
+ thread_local_.interrupt_scopes_->Intercept(flag)) {
+ return;
+ }
+
+ // Not intercepted. Set as active interrupt flag.
+ thread_local_.interrupt_flags_ |= flag;
+ set_interrupt_limits(access);
+
+ // If this isolate is waiting in a futex, notify it to wake up.
+ isolate_->futex_wait_list_node()->NotifyWake();
+}
+
+void StackGuard::ClearInterrupt(InterruptFlag flag) {
+ ExecutionAccess access(isolate_);
+ // Clear the interrupt flag from the chain of InterruptsScope.
+ for (InterruptsScope* current = thread_local_.interrupt_scopes_;
+ current != nullptr; current = current->prev_) {
+ current->intercepted_flags_ &= ~flag;
+ }
+
+ // Clear the interrupt flag from the active interrupt flags.
+ thread_local_.interrupt_flags_ &= ~flag;
+ if (!has_pending_interrupts(access)) reset_limits(access);
+}
+
+int StackGuard::FetchAndClearInterrupts() {
+ ExecutionAccess access(isolate_);
+
+ int result = 0;
+ if (thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) {
+ // The TERMINATE_EXECUTION interrupt is special, since it terminates
+ // execution but should leave V8 in a resumable state. If it exists, we only
+ // fetch and clear that bit. On resume, V8 can continue processing other
+ // interrupts.
+ result = TERMINATE_EXECUTION;
+ thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
+ if (!has_pending_interrupts(access)) reset_limits(access);
+ } else {
+ result = thread_local_.interrupt_flags_;
+ thread_local_.interrupt_flags_ = 0;
+ reset_limits(access);
+ }
+
+ return result;
+}
+
+char* StackGuard::ArchiveStackGuard(char* to) {
+ ExecutionAccess access(isolate_);
+ MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ ThreadLocal blank;
+
+ // Set the stack limits using the old thread_local_.
+ // TODO(isolates): This was the old semantics of constructing a ThreadLocal
+ // (as the ctor called SetStackLimits, which looked at the
+ // current thread_local_ from StackGuard)-- but is this
+ // really what was intended?
+ isolate_->heap()->SetStackLimits();
+ thread_local_ = blank;
+
+ return to + sizeof(ThreadLocal);
+}
+
+char* StackGuard::RestoreStackGuard(char* from) {
+ ExecutionAccess access(isolate_);
+ MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ isolate_->heap()->SetStackLimits();
+ return from + sizeof(ThreadLocal);
+}
+
+void StackGuard::FreeThreadResources() {
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ per_thread->set_stack_limit(thread_local_.real_climit_);
+}
+
+void StackGuard::ThreadLocal::Clear() {
+ real_jslimit_ = kIllegalLimit;
+ set_jslimit(kIllegalLimit);
+ real_climit_ = kIllegalLimit;
+ set_climit(kIllegalLimit);
+ interrupt_scopes_ = nullptr;
+ interrupt_flags_ = 0;
+}
+
+bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
+ bool should_set_stack_limits = false;
+ if (real_climit_ == kIllegalLimit) {
+ const uintptr_t kLimitSize = FLAG_stack_size * KB;
+ DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
+ uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
+ real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
+ set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
+ real_climit_ = limit;
+ set_climit(limit);
+ should_set_stack_limits = true;
+ }
+ interrupt_scopes_ = nullptr;
+ interrupt_flags_ = 0;
+ return should_set_stack_limits;
+}
+
+void StackGuard::ClearThread(const ExecutionAccess& lock) {
+ thread_local_.Clear();
+ isolate_->heap()->SetStackLimits();
+}
+
+void StackGuard::InitThread(const ExecutionAccess& lock) {
+ if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ uintptr_t stored_limit = per_thread->stack_limit();
+ // You should hold the ExecutionAccess lock when you call this.
+ if (stored_limit != 0) {
+ SetStackLimit(stored_limit);
+ }
+}
+
+// --- C a l l s t o n a t i v e s ---
+
+namespace {
+
+bool TestAndClear(int* bitfield, int mask) {
+ bool result = (*bitfield & mask);
+ *bitfield &= ~mask;
+ return result;
+}
+
+class ShouldBeZeroOnReturnScope final {
+ public:
+#ifndef DEBUG
+ explicit ShouldBeZeroOnReturnScope(int*) {}
+#else // DEBUG
+ explicit ShouldBeZeroOnReturnScope(int* v) : v_(v) {}
+ ~ShouldBeZeroOnReturnScope() { DCHECK_EQ(*v_, 0); }
+
+ private:
+ int* v_;
+#endif // DEBUG
+};
+
+} // namespace
+
+Object StackGuard::HandleInterrupts() {
+ TRACE_EVENT0("v8.execute", "V8.HandleInterrupts");
+
+ if (FLAG_verify_predictable) {
+ // Advance synthetic time by making a time request.
+ isolate_->heap()->MonotonicallyIncreasingTimeInMs();
+ }
+
+ // Fetch and clear interrupt bits in one go. See comments inside the method
+ // for special handling of TERMINATE_EXECUTION.
+ int interrupt_flags = FetchAndClearInterrupts();
+
+ // All interrupts should be fully processed when returning from this method.
+ ShouldBeZeroOnReturnScope should_be_zero_on_return(&interrupt_flags);
+
+ if (TestAndClear(&interrupt_flags, TERMINATE_EXECUTION)) {
+ TRACE_EVENT0("v8.execute", "V8.TerminateExecution");
+ return isolate_->TerminateExecution();
+ }
+
+ if (TestAndClear(&interrupt_flags, GC_REQUEST)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GCHandleGCRequest");
+ isolate_->heap()->HandleGCRequest();
+ }
+
+ if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "V8.WasmGrowSharedMemory");
+ isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
+ isolate_);
+ }
+
+ if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.GCDeoptMarkedAllocationSites");
+ isolate_->heap()->DeoptMarkedAllocationSites();
+ }
+
+ if (TestAndClear(&interrupt_flags, INSTALL_CODE)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.InstallOptimizedFunctions");
+ DCHECK(isolate_->concurrent_recompilation_enabled());
+ isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ }
+
+ if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
+ TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
+ // Callbacks must be invoked outside of ExecutionAccess lock.
+ isolate_->InvokeApiInterruptCallbacks();
+ }
+
+ if (TestAndClear(&interrupt_flags, LOG_WASM_CODE)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
+ isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
+ }
+
+ if (TestAndClear(&interrupt_flags, WASM_CODE_GC)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
+ isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
+ }
+
+ isolate_->counters()->stack_interrupts()->Increment();
+ isolate_->counters()->runtime_profiler_ticks()->Increment();
+ isolate_->runtime_profiler()->MarkCandidatesForOptimization();
+
+ return ReadOnlyRoots(isolate_).undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/execution/stack-guard.h b/deps/v8/src/execution/stack-guard.h
new file mode 100644
index 0000000000..d7477f1623
--- /dev/null
+++ b/deps/v8/src/execution/stack-guard.h
@@ -0,0 +1,186 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_STACK_GUARD_H_
+#define V8_EXECUTION_STACK_GUARD_H_
+
+#include "include/v8-internal.h"
+#include "src/base/atomicops.h"
+
+namespace v8 {
+namespace internal {
+
+class ExecutionAccess;
+class InterruptsScope;
+class Isolate;
+class Object;
+
+// StackGuard contains the handling of the limits that are used to limit the
+// number of nested invocations of JavaScript and the stack size used in each
+// invocation.
+class V8_EXPORT_PRIVATE StackGuard final {
+ public:
+ explicit StackGuard(Isolate* isolate) : isolate_(isolate) {}
+
+ // Pass the address beyond which the stack should not grow. The stack
+ // is assumed to grow downwards.
+ void SetStackLimit(uintptr_t limit);
+
+ // The simulator uses a separate JS stack. Limits on the JS stack might have
+ // to be adjusted in order to reflect overflows of the C stack, because we
+ // cannot rely on the interleaving of frames on the simulator.
+ void AdjustStackLimitForSimulator();
+
+ // Threading support.
+ char* ArchiveStackGuard(char* to);
+ char* RestoreStackGuard(char* from);
+ static int ArchiveSpacePerThread() { return sizeof(ThreadLocal); }
+ void FreeThreadResources();
+ // Sets up the default stack guard for this thread if it has not
+ // already been set up.
+ void InitThread(const ExecutionAccess& lock);
+ // Clears the stack guard for this thread so it does not look as if
+ // it has been set up.
+ void ClearThread(const ExecutionAccess& lock);
+
+#define INTERRUPT_LIST(V) \
+ V(TERMINATE_EXECUTION, TerminateExecution, 0) \
+ V(GC_REQUEST, GC, 1) \
+ V(INSTALL_CODE, InstallCode, 2) \
+ V(API_INTERRUPT, ApiInterrupt, 3) \
+ V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
+ V(GROW_SHARED_MEMORY, GrowSharedMemory, 5) \
+ V(LOG_WASM_CODE, LogWasmCode, 6) \
+ V(WASM_CODE_GC, WasmCodeGC, 7)
+
+#define V(NAME, Name, id) \
+ inline bool Check##Name() { return CheckInterrupt(NAME); } \
+ inline void Request##Name() { RequestInterrupt(NAME); } \
+ inline void Clear##Name() { ClearInterrupt(NAME); }
+ INTERRUPT_LIST(V)
+#undef V
+
+ // Flag used to set the interrupt causes.
+ enum InterruptFlag {
+#define V(NAME, Name, id) NAME = (1 << id),
+ INTERRUPT_LIST(V)
+#undef V
+#define V(NAME, Name, id) NAME |
+ ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
+#undef V
+ };
+
+ uintptr_t climit() { return thread_local_.climit(); }
+ uintptr_t jslimit() { return thread_local_.jslimit(); }
+ // This provides an asynchronous read of the stack limits for the current
+ // thread. There are no locks protecting this, but it is assumed that you
+ // have the global V8 lock if you are using multiple V8 threads.
+ uintptr_t real_climit() { return thread_local_.real_climit_; }
+ uintptr_t real_jslimit() { return thread_local_.real_jslimit_; }
+ Address address_of_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.jslimit_);
+ }
+ Address address_of_real_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
+ }
+
+ // If the stack guard is triggered, but it is not an actual
+ // stack overflow, then handle the interruption accordingly.
+ Object HandleInterrupts();
+
+ private:
+ bool CheckInterrupt(InterruptFlag flag);
+ void RequestInterrupt(InterruptFlag flag);
+ void ClearInterrupt(InterruptFlag flag);
+ int FetchAndClearInterrupts();
+
+ // You should hold the ExecutionAccess lock when calling this method.
+ bool has_pending_interrupts(const ExecutionAccess& lock) {
+ return thread_local_.interrupt_flags_ != 0;
+ }
+
+ // You should hold the ExecutionAccess lock when calling this method.
+ inline void set_interrupt_limits(const ExecutionAccess& lock);
+
+ // Reset limits to actual values. For example after handling interrupt.
+ // You should hold the ExecutionAccess lock when calling this method.
+ inline void reset_limits(const ExecutionAccess& lock);
+
+ // Enable or disable interrupts.
+ void EnableInterrupts();
+ void DisableInterrupts();
+
+#if V8_TARGET_ARCH_64_BIT
+ static const uintptr_t kInterruptLimit = uintptr_t{0xfffffffffffffffe};
+ static const uintptr_t kIllegalLimit = uintptr_t{0xfffffffffffffff8};
+#else
+ static const uintptr_t kInterruptLimit = 0xfffffffe;
+ static const uintptr_t kIllegalLimit = 0xfffffff8;
+#endif
+
+ void PushInterruptsScope(InterruptsScope* scope);
+ void PopInterruptsScope();
+
+ class ThreadLocal final {
+ public:
+ ThreadLocal() { Clear(); }
+ // You should hold the ExecutionAccess lock when you call Initialize or
+ // Clear.
+ void Clear();
+
+ // Returns true if the heap's stack limits should be set, false if not.
+ bool Initialize(Isolate* isolate);
+
+ // The stack limit is split into a JavaScript and a C++ stack limit. These
+ // two are the same except when running on a simulator where the C++ and
+    // JavaScript stacks are separate. Each of the two stack limits has two
+    // values. The one with the real_ prefix is the actual stack limit
+ // set for the VM. The one without the real_ prefix has the same value as
+ // the actual stack limit except when there is an interruption (e.g. debug
+ // break or preemption) in which case it is lowered to make stack checks
+ // fail. Both the generated code and the runtime system check against the
+ // one without the real_ prefix.
+ uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
+ uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
+
+ // jslimit_ and climit_ can be read without any lock.
+ // Writing requires the ExecutionAccess lock.
+ base::AtomicWord jslimit_;
+ base::AtomicWord climit_;
+
+ uintptr_t jslimit() {
+ return bit_cast<uintptr_t>(base::Relaxed_Load(&jslimit_));
+ }
+ void set_jslimit(uintptr_t limit) {
+ return base::Relaxed_Store(&jslimit_,
+ static_cast<base::AtomicWord>(limit));
+ }
+ uintptr_t climit() {
+ return bit_cast<uintptr_t>(base::Relaxed_Load(&climit_));
+ }
+ void set_climit(uintptr_t limit) {
+ return base::Relaxed_Store(&climit_,
+ static_cast<base::AtomicWord>(limit));
+ }
+
+ InterruptsScope* interrupt_scopes_;
+ int interrupt_flags_;
+ };
+
+ // TODO(isolates): Technically this could be calculated directly from a
+ // pointer to StackGuard.
+ Isolate* isolate_;
+ ThreadLocal thread_local_;
+
+ friend class Isolate;
+ friend class StackLimitCheck;
+ friend class InterruptsScope;
+
+ DISALLOW_COPY_AND_ASSIGN(StackGuard);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_STACK_GUARD_H_
diff --git a/deps/v8/src/execution/x64/frame-constants-x64.cc b/deps/v8/src/execution/x64/frame-constants-x64.cc
index 2a55fea9c9..716a6d7082 100644
--- a/deps/v8/src/execution/x64/frame-constants-x64.cc
+++ b/deps/v8/src/execution/x64/frame-constants-x64.cc
@@ -8,6 +8,7 @@
#include "src/codegen/x64/assembler-x64-inl.h"
#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/OWNERS b/deps/v8/src/extensions/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/extensions/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/extensions/cputracemark-extension.cc b/deps/v8/src/extensions/cputracemark-extension.cc
new file mode 100644
index 0000000000..af85130ee8
--- /dev/null
+++ b/deps/v8/src/extensions/cputracemark-extension.cc
@@ -0,0 +1,56 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/extensions/cputracemark-extension.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Local<v8::FunctionTemplate>
+CpuTraceMarkExtension::GetNativeFunctionTemplate(v8::Isolate* isolate,
+ v8::Local<v8::String> str) {
+ return v8::FunctionTemplate::New(isolate, CpuTraceMarkExtension::Mark);
+}
+
+void CpuTraceMarkExtension::Mark(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() < 1 || !args[0]->IsUint32()) {
+ args.GetIsolate()->ThrowException(
+ v8::String::NewFromUtf8(
+ args.GetIsolate(),
+ "First parameter to cputracemark() must be a unsigned int32.",
+ NewStringType::kNormal)
+ .ToLocalChecked());
+ }
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+#if !V8_LIBC_MSVCRT
+ // for non msvc build
+ uint32_t param =
+ args[0]->Uint32Value(args.GetIsolate()->GetCurrentContext()).ToChecked();
+
+ int magic_dummy;
+
+#if defined(__i386__) && defined(__pic__)
+ __asm__ __volatile__("push %%ebx; cpuid; pop %%ebx"
+ : "=a"(magic_dummy)
+ : "a"(0x4711 | ((unsigned)(param) << 16))
+ : "ecx", "edx");
+#else
+ __asm__ __volatile__("cpuid"
+ : "=a"(magic_dummy)
+ : "a"(0x4711 | ((unsigned)(param) << 16))
+ : "ecx", "edx", "ebx");
+#endif // defined(__i386__) && defined(__pic__)
+
+#else
+ // no msvc build support yet.
+#endif //! V8_LIBC_MSVCRT
+
+#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/extensions/cputracemark-extension.h b/deps/v8/src/extensions/cputracemark-extension.h
new file mode 100644
index 0000000000..9110cfe01b
--- /dev/null
+++ b/deps/v8/src/extensions/cputracemark-extension.h
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
+#define V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
+
+#include "include/v8.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class CpuTraceMarkExtension : public v8::Extension {
+ public:
+ explicit CpuTraceMarkExtension(const char* fun_name)
+ : v8::Extension("v8/cpumark",
+ BuildSource(buffer_, sizeof(buffer_), fun_name)) {}
+
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
+
+ private:
+ static void Mark(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ static const char* BuildSource(char* buf, size_t size, const char* fun_name) {
+ SNPrintF(Vector<char>(buf, static_cast<int>(size)), "native function %s();",
+ fun_name);
+ return buf;
+ }
+
+ char buffer_[50];
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 458aec38f3..8f897ae97e 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -124,11 +124,12 @@ void StatisticsExtension::GetCounters(
"amount_of_external_allocated_memory");
args.GetReturnValue().Set(result);
- HeapIterator iterator(reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
+ HeapObjectIterator iterator(
+ reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
int reloc_info_total = 0;
int source_position_table_total = 0;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsCode()) {
Code code = Code::cast(obj);
reloc_info_total += code.relocation_info().Size();
diff --git a/deps/v8/src/flags/OWNERS b/deps/v8/src/flags/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/flags/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 0ef23def1e..40edde3443 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -209,7 +209,9 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
V(harmony_weak_refs, "harmony weak references")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
+ V(harmony_intl_dateformat_quarter, "Add quarter option to DateTimeFormat")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
@@ -218,11 +220,14 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
#define HARMONY_STAGED_BASE(V)
#ifdef V8_INTL_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
- V(harmony_intl_add_calendar_numbering_system, \
- "Add calendar and numberingSystem to DateTimeFormat") \
- V(harmony_intl_numberformat_unified, "Unified Intl.NumberFormat Features") \
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(harmony_intl_add_calendar_numbering_system, \
+ "Add calendar and numberingSystem to DateTimeFormat") \
+ V(harmony_intl_dateformat_day_period, \
+ "Add dayPeriod option to DateTimeFormat") \
+ V(harmony_intl_dateformat_fractional_second_digits, \
+ "Add fractionalSecondDigits option to DateTimeFormat") \
V(harmony_intl_segmenter, "Intl.Segmenter")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
@@ -235,18 +240,16 @@ DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_import_meta, "harmony import.meta property") \
V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_global, "harmony global") \
- V(harmony_object_from_entries, "harmony Object.fromEntries()") \
- V(harmony_hashbang, "harmony hashbang syntax") \
V(harmony_numeric_separator, "harmony numeric separator between digits") \
V(harmony_promise_all_settled, "harmony Promise.allSettled")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \
- V(harmony_intl_date_format_range, "DateTimeFormat formatRange") \
- V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \
+ V(harmony_intl_date_format_range, "DateTimeFormat formatRange") \
+ V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat") \
+ V(harmony_intl_numberformat_unified, "Unified Intl.NumberFormat Features")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -283,6 +286,12 @@ DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#define V8_ENABLE_RAW_HEAP_SNAPSHOTS_BOOL false
#endif // V8_ENABLE_RAW_HEAP_SNAPSHOTS
+#ifdef V8_ENABLE_DOUBLE_CONST_STORE_CHECK
+#define V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL true
+#else
+#define V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL false
+#endif
+
#ifdef V8_LITE_MODE
#define V8_LITE_BOOL true
#else
@@ -309,6 +318,9 @@ DEFINE_BOOL(future, FUTURE_BOOL,
DEFINE_IMPLICATION(future, write_protect_code_memory)
+DEFINE_BOOL(assert_types, false,
+ "generate runtime type assertions to test the typer")
+
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
@@ -331,6 +343,8 @@ DEFINE_IMPLICATION(track_field_types, track_fields)
DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
DEFINE_BOOL(trace_block_coverage, false,
"trace collected block coverage information")
+DEFINE_BOOL(trace_protector_invalidation, false,
+ "trace protector cell invalidations")
DEFINE_BOOL(feedback_normalization, false,
"feed back normalization to constructors")
// TODO(jkummerow): This currently adds too much load on the stub cache.
@@ -382,8 +396,7 @@ DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_INT(budget_for_feedback_vector_allocation, 1 * KB,
"The budget in amount of bytecode executed by a function before we "
"decide to allocate feedback vectors")
-DEFINE_BOOL(lazy_feedback_allocation, false, "Allocate feedback vectors lazily")
-DEFINE_IMPLICATION(future, lazy_feedback_allocation)
+DEFINE_BOOL(lazy_feedback_allocation, true, "Allocate feedback vectors lazily")
// Flags for Ignition.
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
@@ -399,6 +412,8 @@ DEFINE_BOOL(print_bytecode, false,
DEFINE_BOOL(enable_lazy_source_positions, false,
"skip generating source positions during initial compile but "
"regenerate when actually required")
+DEFINE_BOOL(stress_lazy_source_positions, false,
+ "collect lazy source positions immediately after lazy compile")
DEFINE_STRING(print_bytecode_filter, "*",
"filter for selecting which functions to print bytecode")
#ifdef V8_TRACE_IGNITION
@@ -476,7 +491,7 @@ DEFINE_BOOL(trace_turbo_trimming, false, "trace TurboFan's graph trimmer")
DEFINE_BOOL(trace_turbo_jt, false, "trace TurboFan's jump threading")
DEFINE_BOOL(trace_turbo_ceq, false, "trace TurboFan's control equivalence")
DEFINE_BOOL(trace_turbo_loop, false, "trace TurboFan's loop optimizations")
-DEFINE_BOOL(trace_alloc, false, "trace register allocator")
+DEFINE_BOOL(trace_turbo_alloc, false, "trace TurboFan's register allocator")
DEFINE_BOOL(trace_all_uses, false, "trace all use positions")
DEFINE_BOOL(trace_representation, false, "trace representation types")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
@@ -709,8 +724,7 @@ DEFINE_BOOL(wasm_lazy_validation, false,
DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_tier_up)
-DEFINE_BOOL(wasm_code_gc, false, "enable garbage collection of wasm code")
-DEFINE_IMPLICATION(future, wasm_code_gc)
+DEFINE_BOOL(wasm_code_gc, true, "enable garbage collection of wasm code")
DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code")
DEFINE_BOOL(stress_wasm_code_gc, false,
"stress test garbage collection of wasm code")
@@ -733,11 +747,16 @@ DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
"Grow the new space based on the percentage of survivors instead "
"of their absolute value.")
DEFINE_SIZE_T(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_SIZE_T(
+ max_heap_size, 0,
+ "max size of the heap (in Mbytes) "
+ "both max_semi_space_size and max_old_space_size take precedence. "
+ "All three flags cannot be specified at the same time.")
DEFINE_BOOL(huge_max_old_generation_size, false,
"Increase max size of the old space to 4 GB for x64 systems with"
"the physical memory bigger than 16 GB")
DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
-DEFINE_BOOL(global_gc_scheduling, false,
+DEFINE_BOOL(global_gc_scheduling, true,
"enable GC scheduling based on global memory")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
DEFINE_INT(random_gc_interval, 0,
@@ -760,6 +779,18 @@ DEFINE_BOOL(trace_idle_notification_verbose, false,
DEFINE_BOOL(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_IMPLICATION(trace_gc_verbose, trace_gc)
+DEFINE_BOOL(trace_gc_freelists, false,
+ "prints details of each freelist before and after "
+ "each major garbage collection")
+DEFINE_BOOL(trace_gc_freelists_verbose, false,
+ "prints details of freelists of each page before and after "
+ "each major garbage collection")
+DEFINE_IMPLICATION(trace_gc_freelists_verbose, trace_gc_freelists)
+DEFINE_BOOL(trace_evacuation_candidates, false,
+ "Show statistics about the pages evacuation by the compaction")
+DEFINE_INT(gc_freelist_strategy, 0,
+ "Freelist strategy to use: "
+ "1=FreeListFastAlloc. 2=FreeListMany. Anything else=FreeListLegacy")
DEFINE_INT(trace_allocation_stack_interval, -1,
"print stack trace after <n> free-list allocations")
@@ -910,6 +941,8 @@ DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
DEFINE_BOOL(enable_ssse3, true, "enable use of SSSE3 instructions if available")
DEFINE_BOOL(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available")
+DEFINE_BOOL(enable_sse4_2, true,
+ "enable use of SSE4.2 instructions if available")
DEFINE_BOOL(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_BOOL(enable_avx, true, "enable use of AVX instructions if available")
@@ -967,6 +1000,8 @@ DEFINE_BOOL(experimental_stack_trace_frames, false,
DEFINE_BOOL(disallow_code_generation_from_strings, false,
"disallow eval and friends")
DEFINE_BOOL(expose_async_hooks, false, "expose async_hooks object")
+DEFINE_STRING(expose_cputracemark_as, nullptr,
+ "expose cputracemark extension under the specified name")
// builtins.cc
DEFINE_BOOL(allow_unsafe_function_constructor, false,
@@ -1184,6 +1219,12 @@ DEFINE_FLOAT(testing_float_flag, 2.5, "float-flag")
DEFINE_STRING(testing_string_flag, "Hello, world!", "string-flag")
DEFINE_INT(testing_prng_seed, 42, "Seed used for threading test randomness")
+// Test flag for a check in %OptimizeFunctionOnNextCall
+DEFINE_BOOL(
+ testing_d8_test_runner, false,
+ "test runner turns on this flag to enable a check that the funciton was "
+ "prepared for optimization before marking it for optimization")
+
// mksnapshot.cc
DEFINE_STRING(embedded_src, nullptr,
"Path for the generated embedded data file. (mksnapshot only)")
diff --git a/deps/v8/src/handles/OWNERS b/deps/v8/src/handles/OWNERS
index 57fcdd4fac..57f0b54262 100644
--- a/deps/v8/src/handles/OWNERS
+++ b/deps/v8/src/handles/OWNERS
@@ -1,4 +1,7 @@
ishell@chromium.org
jkummerow@chromium.org
mlippautz@chromium.org
+ulan@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/handles/handles.cc b/deps/v8/src/handles/handles.cc
index e0a1f23b7b..7f320a271c 100644
--- a/deps/v8/src/handles/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -33,8 +33,8 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
Object object(*location_);
if (object.IsSmi()) return true;
HeapObject heap_object = HeapObject::cast(object);
- Isolate* isolate;
- if (!GetIsolateFromWritableObject(heap_object, &isolate)) return true;
+ if (IsReadOnlyHeapObject(heap_object)) return true;
+ Isolate* isolate = GetIsolateFromWritableObject(heap_object);
RootIndex root_index;
if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
RootsTable::IsImmortalImmovable(root_index)) {
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index 79eea3aaab..d826296e0c 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
hpayer@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 61b5ba1f8c..65d3f4a732 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -57,8 +57,6 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) {
heap->update_external_memory(-static_cast<intptr_t>(length));
}
-Space* LocalArrayBufferTracker::space() { return page_->owner(); }
-
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index e8ca57b543..b7950c2506 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -117,8 +117,6 @@ class LocalArrayBufferTracker {
// logic for updating external memory counters.
inline void AddInternal(JSArrayBuffer buffer, size_t length);
- inline Space* space();
-
Page* page_;
// The set contains raw heap pointers which are removed by the GC upon
// processing the tracker through its owning page.
diff --git a/deps/v8/src/heap/basic-memory-chunk.cc b/deps/v8/src/heap/basic-memory-chunk.cc
new file mode 100644
index 0000000000..307f0ec973
--- /dev/null
+++ b/deps/v8/src/heap/basic-memory-chunk.cc
@@ -0,0 +1,54 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/basic-memory-chunk.h"
+
+#include <cstdlib>
+
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/slots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Verify write barrier offsets match the real offsets.
+STATIC_ASSERT(BasicMemoryChunk::Flag::INCREMENTAL_MARKING ==
+ heap_internals::MemoryChunk::kMarkingBit);
+STATIC_ASSERT(BasicMemoryChunk::Flag::FROM_PAGE ==
+ heap_internals::MemoryChunk::kFromPageBit);
+STATIC_ASSERT(BasicMemoryChunk::Flag::TO_PAGE ==
+ heap_internals::MemoryChunk::kToPageBit);
+STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+ heap_internals::MemoryChunk::kFlagsOffset);
+STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+ heap_internals::MemoryChunk::kHeapOffset);
+
+BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
+ Address area_end) {
+ const Address base = reinterpret_cast<Address>(this);
+ size_ = size;
+ marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+ header_sentinel_ = HeapObject::FromAddress(base).ptr();
+ DCHECK(HasHeaderSentinel(area_start));
+ area_start_ = area_start;
+ area_end_ = area_end;
+}
+
+// static
+bool BasicMemoryChunk::HasHeaderSentinel(Address slot_addr) {
+ Address base = BaseAddress(slot_addr);
+ if (slot_addr < base + kHeaderSize) return false;
+ return HeapObject::FromAddress(base) ==
+ ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
+}
+
+void BasicMemoryChunk::ReleaseMarkingBitmap() {
+ DCHECK_NOT_NULL(marking_bitmap_);
+ free(marking_bitmap_);
+ marking_bitmap_ = nullptr;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
new file mode 100644
index 0000000000..65fc072bd2
--- /dev/null
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -0,0 +1,229 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASIC_MEMORY_CHUNK_H_
+#define V8_HEAP_BASIC_MEMORY_CHUNK_H_
+
+#include <type_traits>
+
+#include "src/base/atomic-utils.h"
+#include "src/common/globals.h"
+#include "src/heap/marking.h"
+
+namespace v8 {
+namespace internal {
+
+class MemoryChunk;
+
+class BasicMemoryChunk {
+ public:
+ enum Flag {
+ NO_FLAGS = 0u,
+ IS_EXECUTABLE = 1u << 0,
+ POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
+ POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
+ // A page in the from-space or a young large page that was not scavenged
+ // yet.
+ FROM_PAGE = 1u << 3,
+ // A page in the to-space or a young large page that was scavenged.
+ TO_PAGE = 1u << 4,
+ LARGE_PAGE = 1u << 5,
+ EVACUATION_CANDIDATE = 1u << 6,
+ NEVER_EVACUATE = 1u << 7,
+
+ // Large objects can have a progress bar in their page header. These object
+ // are scanned in increments and will be kept black while being scanned.
+ // Even if the mutator writes to them they will be kept black and a white
+ // to grey transition is performed in the value.
+ HAS_PROGRESS_BAR = 1u << 8,
+
+ // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
+ // from new to old space during evacuation.
+ PAGE_NEW_OLD_PROMOTION = 1u << 9,
+
+ // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+ // within the new space during evacuation.
+ PAGE_NEW_NEW_PROMOTION = 1u << 10,
+
+ // This flag is intended to be used for testing. Works only when both
+ // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+ // are set. It forces the page to become an evacuation candidate at next
+ // candidates selection cycle.
+ FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
+
+ // This flag is intended to be used for testing.
+ NEVER_ALLOCATE_ON_PAGE = 1u << 12,
+
+ // The memory chunk is already logically freed, however the actual freeing
+ // still has to be performed.
+ PRE_FREED = 1u << 13,
+
+ // |POOLED|: When actually freeing this chunk, only uncommit and do not
+ // give up the reservation as we still reuse the chunk at some point.
+ POOLED = 1u << 14,
+
+ // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+ // has been aborted and needs special handling by the sweeper.
+ COMPACTION_WAS_ABORTED = 1u << 15,
+
+ // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
+ // on pages is sometimes aborted. The flag is used to avoid repeatedly
+ // triggering on the same page.
+ COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
+
+ // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
+ // to iterate the page.
+ SWEEP_TO_ITERATE = 1u << 17,
+
+ // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
+ // enabled.
+ INCREMENTAL_MARKING = 1u << 18,
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+
+ // The memory chunk freeing bookkeeping has been performed but the chunk has
+ // not yet been freed.
+ UNREGISTERED = 1u << 20,
+
+ // The memory chunk belongs to the read-only heap and does not participate
+ // in garbage collection. This is used instead of owner for identity
+ // checking since read-only chunks have no owner once they are detached.
+ READ_ONLY_HEAP = 1u << 21,
+ };
+
+ static const intptr_t kAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static const intptr_t kAlignmentMask = kAlignment - 1;
+
+ BasicMemoryChunk(size_t size, Address area_start, Address area_end);
+
+ static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
+
+ Address address() const { return reinterpret_cast<Address>(this); }
+
+ size_t size() const { return size_; }
+ void set_size(size_t size) { size_ = size; }
+
+ Address area_start() const { return area_start_; }
+
+ Address area_end() const { return area_end_; }
+ void set_area_end(Address area_end) { area_end_ = area_end; }
+
+ size_t area_size() const {
+ return static_cast<size_t>(area_end() - area_start());
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ void SetFlag(Flag flag) {
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ flags_ |= flag;
+ } else {
+ base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool IsFlagSet(Flag flag) const {
+ return (GetFlags<access_mode>() & flag) != 0;
+ }
+
+ void ClearFlag(Flag flag) { flags_ &= ~flag; }
+
+ // Set or clear multiple flags at a time. The flags in the mask are set to
+ // the value in "flags", the rest retain the current value in |flags_|.
+ void SetFlags(uintptr_t flags, uintptr_t mask) {
+ flags_ = (flags_ & ~mask) | (flags & mask);
+ }
+
+ // Return all current flags.
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ uintptr_t GetFlags() const {
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ return flags_;
+ } else {
+ return base::AsAtomicWord::Relaxed_Load(&flags_);
+ }
+ }
+
+ bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
+
+ // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+
+ bool Contains(Address addr) const {
+ return addr >= area_start() && addr < area_end();
+ }
+
+ // Checks whether |addr| can be a limit of addresses in this page. It's a
+ // limit if it's in the page, or if it's just after the last byte of the page.
+ bool ContainsLimit(Address addr) const {
+ return addr >= area_start() && addr <= area_end();
+ }
+
+ V8_EXPORT_PRIVATE static bool HasHeaderSentinel(Address slot_addr);
+
+ void ReleaseMarkingBitmap();
+
+ static const intptr_t kSizeOffset = 0;
+ static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
+ static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
+ static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
+ static const intptr_t kHeaderSentinelOffset =
+ kHeapOffset + kSystemPointerSize;
+
+ static const size_t kHeaderSize =
+ kSizeOffset + kSizetSize // size_t size
+ + kUIntptrSize // uintptr_t flags_
+ + kSystemPointerSize // Bitmap* marking_bitmap_
+ + kSystemPointerSize // Heap* heap_
+ + kSystemPointerSize // Address header_sentinel_
+ + kSystemPointerSize // Address area_start_
+ + kSystemPointerSize; // Address area_end_
+
+ protected:
+ // Overall size of the chunk, including the header and guards.
+ size_t size_;
+
+ uintptr_t flags_ = NO_FLAGS;
+
+ Bitmap* marking_bitmap_ = nullptr;
+
+ // TODO(v8:7464): Find a way to remove this.
+ // This goes against the spirit for the BasicMemoryChunk, but until C++14/17
+ // is the default it needs to live here because MemoryChunk is not standard
+ // layout under C++11.
+ Heap* heap_;
+
+ // This is used to distinguish the memory chunk header from the interior of a
+ // large page. The memory chunk header stores here an impossible tagged
+// pointer: the tagged pointer of the page start. A field in a large object is
+ // guaranteed to not contain such a pointer.
+ Address header_sentinel_;
+
+ // Start and end of allocatable memory on this chunk.
+ Address area_start_;
+ Address area_end_;
+
+ friend class BasicMemoryChunkValidator;
+};
+
+STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
+
+class BasicMemoryChunkValidator {
+ // Computed offsets should match the compiler generated ones.
+ STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
+ offsetof(BasicMemoryChunk, size_));
+ STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+ offsetof(BasicMemoryChunk, flags_));
+ STATIC_ASSERT(BasicMemoryChunk::kMarkBitmapOffset ==
+ offsetof(BasicMemoryChunk, marking_bitmap_));
+ STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+ offsetof(BasicMemoryChunk, heap_));
+ STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset ==
+ offsetof(BasicMemoryChunk, header_sentinel_));
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_BASIC_MEMORY_CHUNK_H_
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index cb34d732a4..c6c111bc0e 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -6,7 +6,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
-#include "src/heap/spaces-inl.h" // For HeapObjectIterator.
+#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -61,7 +61,7 @@ void CodeStatistics::ResetCodeAndMetadataStatistics(Isolate* isolate) {
// - by code comment (only in debug mode)
void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
Isolate* isolate) {
- HeapObjectIterator obj_it(space);
+ PagedSpaceObjectIterator obj_it(space);
for (HeapObject obj = obj_it.Next(); !obj.is_null(); obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
@@ -73,7 +73,7 @@ void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
// - by code comment (only in debug mode)
void CodeStatistics::CollectCodeStatistics(LargeObjectSpace* space,
Isolate* isolate) {
- LargeObjectIterator obj_it(space);
+ LargeObjectSpaceObjectIterator obj_it(space);
for (HeapObject obj = obj_it.Next(); !obj.is_null(); obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
diff --git a/deps/v8/src/heap/combined-heap.cc b/deps/v8/src/heap/combined-heap.cc
index ed60b438cb..0416bb62a4 100644
--- a/deps/v8/src/heap/combined-heap.cc
+++ b/deps/v8/src/heap/combined-heap.cc
@@ -3,16 +3,22 @@
// found in the LICENSE file.
#include "src/heap/combined-heap.h"
+#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
-HeapObject CombinedHeapIterator::Next() {
+CombinedHeapObjectIterator::CombinedHeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
+ : heap_iterator_(heap, filtering),
+ ro_heap_iterator_(heap->isolate()->read_only_heap()) {}
+
+HeapObject CombinedHeapObjectIterator::Next() {
HeapObject object = ro_heap_iterator_.Next();
if (!object.is_null()) {
return object;
}
- return heap_iterator_.next();
+ return heap_iterator_.Next();
}
} // namespace internal
diff --git a/deps/v8/src/heap/combined-heap.h b/deps/v8/src/heap/combined-heap.h
index c331d95c3d..eaa012ec18 100644
--- a/deps/v8/src/heap/combined-heap.h
+++ b/deps/v8/src/heap/combined-heap.h
@@ -13,21 +13,19 @@ namespace v8 {
namespace internal {
// This class allows iteration over the entire heap (Heap and ReadOnlyHeap). It
-// uses the HeapIterator to iterate over non-read-only objects and accepts the
-// same filtering option. (Interrupting iteration while filtering unreachable
-// objects is still forbidden)
-class V8_EXPORT_PRIVATE CombinedHeapIterator final {
+// uses the HeapObjectIterator to iterate over non-read-only objects and accepts
+// the same filtering option. (Interrupting iteration while filtering
+// unreachable objects is still forbidden)
+class V8_EXPORT_PRIVATE CombinedHeapObjectIterator final {
public:
- CombinedHeapIterator(Heap* heap,
- HeapIterator::HeapObjectsFiltering filtering =
- HeapIterator::HeapObjectsFiltering::kNoFiltering)
- : heap_iterator_(heap, filtering),
- ro_heap_iterator_(heap->read_only_heap()) {}
+ CombinedHeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering =
+ HeapObjectIterator::HeapObjectsFiltering::kNoFiltering);
HeapObject Next();
private:
- HeapIterator heap_iterator_;
- ReadOnlyHeapIterator ro_heap_iterator_;
+ HeapObjectIterator heap_iterator_;
+ ReadOnlyHeapObjectIterator ro_heap_iterator_;
};
V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 8ce96428e1..12bb28f1c8 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -121,11 +121,7 @@ class ConcurrentMarkingVisitor final
void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object) {
#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race
- // in mark-bit initialization. See MemoryChunk::Initialize for the
- // corresponding release store.
- MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
if (marking_state_.IsBlackOrGrey(heap_object)) {
// Weak references with live values are directly processed here to
@@ -247,7 +243,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(weak_cell)) return 0;
int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
- VisitMapPointer(weak_cell, weak_cell.map_slot());
+ VisitMapPointer(weak_cell);
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
if (weak_cell.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell.target());
@@ -306,13 +302,13 @@ class ConcurrentMarkingVisitor final
int VisitSeqOneByteString(Map map, SeqOneByteString object) {
if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
return SeqOneByteString::SizeFor(object.synchronized_length());
}
int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
return SeqTwoByteString::SizeFor(object.synchronized_length());
}
@@ -367,7 +363,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(shared_info)) return 0;
int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
- VisitMapPointer(shared_info, shared_info.map_slot());
+ VisitMapPointer(shared_info);
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
this);
@@ -385,7 +381,7 @@ class ConcurrentMarkingVisitor final
int VisitBytecodeArray(Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
if (!is_forced_gc_) {
object.MakeOlder();
@@ -453,7 +449,7 @@ class ConcurrentMarkingVisitor final
int VisitDescriptorArray(Map map, DescriptorArray array) {
if (!ShouldVisit(array)) return 0;
- VisitMapPointer(array, array.map_slot());
+ VisitMapPointer(array);
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
VisitPointers(array, array.GetFirstPointerSlot(),
array.GetDescriptorSlot(0));
@@ -463,7 +459,7 @@ class ConcurrentMarkingVisitor final
int VisitTransitionArray(Map map, TransitionArray array) {
if (!ShouldVisit(array)) return 0;
- VisitMapPointer(array, array.map_slot());
+ VisitMapPointer(array);
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
weak_objects_->transition_arrays.Push(task_id_, array);
@@ -528,11 +524,7 @@ class ConcurrentMarkingVisitor final
void MarkObject(HeapObject object) {
#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race
- // in mark-bit initialization. See MemoryChunk::Initialize for the
- // corresponding release store.
- MemoryChunk* chunk = MemoryChunk::FromAddress(object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(object)->SynchronizedHeapLoad();
#endif
if (marking_state_.WhiteToGrey(object)) {
shared_.Push(object);
@@ -631,7 +623,7 @@ class ConcurrentMarkingVisitor final
// Left trimming marks the array black before over-writing the length.
DCHECK(length.IsSmi());
int size = T::SizeFor(Smi::ToInt(length));
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
T::BodyDescriptor::IterateBody(map, object, size, this);
return size;
}
@@ -656,7 +648,7 @@ class ConcurrentMarkingVisitor final
template <typename T, typename TBodyDescriptor>
const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
SlotSnapshottingVisitor visitor(&slot_snapshot_);
- visitor.VisitPointer(object, ObjectSlot(object.map_slot().address()));
+ visitor.VisitPointer(object, object.map_slot());
TBodyDescriptor::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
}
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index c032f384b3..ab91367bc6 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -34,7 +34,7 @@ void LocalEmbedderHeapTracer::TraceEpilogue() {
EmbedderHeapTracer::TraceSummary summary;
remote_tracer_->TraceEpilogue(&summary);
- remote_stats_.allocated_size = summary.allocated_size;
+ remote_stats_.used_size = summary.allocated_size;
// Force a check next time increased memory is reported. This allows for
// setting limits close to actual heap sizes.
remote_stats_.allocated_size_limit_for_check = 0;
@@ -118,6 +118,10 @@ void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
heap->StartIncrementalMarkingIfAllocationLimitIsReached(
heap->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
+ if (heap->AllocationLimitOvershotByLargeMargin()) {
+ heap->FinalizeIncrementalMarkingAtomically(
+ i::GarbageCollectionReason::kExternalFinalize);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 4309fb722a..eae29cbf5c 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -77,8 +77,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
}
void IncreaseAllocatedSize(size_t bytes) {
+ remote_stats_.used_size += bytes;
remote_stats_.allocated_size += bytes;
- remote_stats_.accumulated_allocated_size += bytes;
if (remote_stats_.allocated_size >
remote_stats_.allocated_size_limit_for_check) {
StartIncrementalMarkingIfNeeded();
@@ -87,12 +87,15 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
}
}
+ void DecreaseAllocatedSize(size_t bytes) {
+ DCHECK_GE(remote_stats_.used_size, bytes);
+ remote_stats_.used_size -= bytes;
+ }
+
void StartIncrementalMarkingIfNeeded();
+ size_t used_size() const { return remote_stats_.used_size; }
size_t allocated_size() const { return remote_stats_.allocated_size; }
- size_t accumulated_allocated_size() const {
- return remote_stats_.accumulated_allocated_size;
- }
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -109,16 +112,16 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool embedder_worklist_empty_ = false;
struct RemoteStatistics {
- // Allocated size of objects in bytes reported by the embedder. Updated via
+ // Used size of objects in bytes reported by the embedder. Updated via
// TraceSummary at the end of tracing and incrementally when the GC is not
// in progress.
+ size_t used_size = 0;
+ // Totally bytes allocated by the embedder. Monotonically
+ // increasing value. Used to approximate allocation rate.
size_t allocated_size = 0;
- // Limit for |allocated_size_| in bytes to avoid checking for starting a GC
+ // Limit for |allocated_size| in bytes to avoid checking for starting a GC
// on each increment.
size_t allocated_size_limit_for_check = 0;
- // Totally accumulated bytes allocated by the embedder. Monotonically
- // increasing value. Used to approximate allocation rate.
- size_t accumulated_allocated_size = 0;
} remote_stats_;
friend class EmbedderStackStateScope;
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 32237da877..9aa705047c 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -104,6 +104,15 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
allocation);
}
+Handle<JSObject> Factory::NewFastOrSlowJSObjectFromMap(
+ Handle<Map> map, int number_of_slow_properties, AllocationType allocation,
+ Handle<AllocationSite> allocation_site) {
+ return map->is_dictionary_map()
+ ? NewSlowJSObjectFromMap(map, number_of_slow_properties,
+ allocation, allocation_site)
+ : NewJSObjectFromMap(map, allocation, allocation_site);
+}
+
Handle<Object> Factory::NewURIError() {
return NewError(isolate()->uri_error_function(),
MessageTemplate::kURIMalformed);
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 03896f7827..19c3665622 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -580,7 +580,7 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
if (has_different_size_backing_store) {
DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
has_seen_proto);
- description->set_backing_store_size(isolate(), backing_store_size);
+ description->set_backing_store_size(backing_store_size);
}
description->set_flags(0);
@@ -1232,8 +1232,8 @@ Handle<String> Factory::NewConsString(Handle<String> left, Handle<String> right,
result->set_hash_field(String::kEmptyHashField);
result->set_length(length);
- result->set_first(isolate(), *left, mode);
- result->set_second(isolate(), *right, mode);
+ result->set_first(*left, mode);
+ result->set_second(*right, mode);
return result;
}
@@ -1314,7 +1314,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
slice->set_hash_field(String::kEmptyHashField);
slice->set_length(length);
- slice->set_parent(isolate(), *str);
+ slice->set_parent(*str);
slice->set_offset(offset);
return slice;
}
@@ -1483,7 +1483,7 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
return context_table;
}
-Handle<Context> Factory::NewModuleContext(Handle<Module> module,
+Handle<Context> Factory::NewModuleContext(Handle<SourceTextModule> module,
Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
@@ -1611,17 +1611,7 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
Handle<Struct> Factory::NewStruct(InstanceType type,
AllocationType allocation) {
- Map map;
- switch (type) {
-#define MAKE_CASE(TYPE, Name, name) \
- case TYPE: \
- map = *name##_map(); \
- break;
- STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
- default:
- UNREACHABLE();
- }
+ Map map = Map::GetStructMap(isolate(), type);
int size = map.instance_size();
HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
Handle<Struct> str(Struct::cast(result), isolate());
@@ -1640,10 +1630,17 @@ Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(
NewStruct(ACCESSOR_INFO_TYPE, AllocationType::kOld));
+ DisallowHeapAllocation no_gc;
info->set_name(*empty_string());
info->set_flags(0); // Must clear the flags, it was initialized as undefined.
info->set_is_sloppy(true);
info->set_initial_property_attributes(NONE);
+
+ // Clear some other fields that should not be undefined.
+ info->set_getter(Smi::kZero);
+ info->set_setter(Smi::kZero);
+ info->set_js_getter(Smi::kZero);
+
return info;
}
@@ -1970,15 +1967,15 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
// |layout_descriptor| are set.
map.set_visitor_id(Map::GetVisitorId(map));
map.set_bit_field(0);
- map.set_bit_field2(Map::IsExtensibleBit::kMask);
+ map.set_bit_field2(Map::NewTargetIsBaseBit::encode(true));
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptorsBit::encode(true) |
- Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking) |
+ Map::IsExtensibleBit::encode(true);
map.set_bit_field3(bit_field3);
DCHECK(!map.is_in_retained_map_list());
map.clear_padding();
map.set_elements_kind(elements_kind);
- map.set_new_target_is_base(true);
isolate()->counters()->maps_created()->Increment();
if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
return map;
@@ -2293,9 +2290,9 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
// as the result.
Handle<Object> no_caller;
- MaybeHandle<Object> maybe_error =
- ErrorUtils::Construct(isolate(), constructor, constructor, message,
- SKIP_NONE, no_caller, false);
+ MaybeHandle<Object> maybe_error = ErrorUtils::Construct(
+ isolate(), constructor, constructor, message, SKIP_NONE, no_caller,
+ ErrorUtils::StackTraceCollection::kDetailed);
if (maybe_error.is_null()) {
DCHECK(isolate()->has_pending_exception());
maybe_error = handle(isolate()->pending_exception(), isolate());
@@ -2341,7 +2338,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<JSFunction> function(JSFunction::cast(New(map, allocation)),
isolate());
- function->initialize_properties();
+ function->initialize_properties(isolate());
function->initialize_elements();
function->set_shared(*info);
function->set_code(info->GetCode());
@@ -2563,9 +2560,10 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
AllocationType::kOld);
}
-Handle<ModuleInfo> Factory::NewModuleInfo() {
- return NewFixedArrayWithMap<ModuleInfo>(
- RootIndex::kModuleInfoMap, ModuleInfo::kLength, AllocationType::kOld);
+Handle<SourceTextModuleInfo> Factory::NewSourceTextModuleInfo() {
+ return NewFixedArrayWithMap<SourceTextModuleInfo>(
+ RootIndex::kModuleInfoMap, SourceTextModuleInfo::kLength,
+ AllocationType::kOld);
}
Handle<PreparseData> Factory::NewPreparseData(int data_length,
@@ -2585,15 +2583,14 @@ Handle<PreparseData> Factory::NewPreparseData(int data_length,
Handle<UncompiledDataWithoutPreparseData>
Factory::NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
int32_t start_position,
- int32_t end_position,
- int32_t function_literal_id) {
+ int32_t end_position) {
Handle<UncompiledDataWithoutPreparseData> result(
UncompiledDataWithoutPreparseData::cast(New(
uncompiled_data_without_preparse_data_map(), AllocationType::kOld)),
isolate());
UncompiledData::Initialize(*result, *inferred_name, start_position,
- end_position, function_literal_id);
+ end_position);
return result;
}
@@ -2601,7 +2598,6 @@ Handle<UncompiledDataWithPreparseData>
Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
int32_t start_position,
int32_t end_position,
- int32_t function_literal_id,
Handle<PreparseData> preparse_data) {
Handle<UncompiledDataWithPreparseData> result(
UncompiledDataWithPreparseData::cast(
@@ -2609,8 +2605,7 @@ Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
isolate());
UncompiledDataWithPreparseData::Initialize(
- *result, *inferred_name, start_position, end_position,
- function_literal_id, *preparse_data);
+ *result, *inferred_name, start_position, end_position, *preparse_data);
return result;
}
@@ -2755,7 +2750,7 @@ Handle<JSObject> Factory::NewJSObjectWithNullProto(AllocationType allocation) {
Handle<Map> new_map = Map::Copy(
isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
Map::SetPrototype(isolate(), new_map, null_value());
- JSObject::MigrateToMap(result, new_map);
+ JSObject::MigrateToMap(isolate(), result, new_map);
return result;
}
@@ -2886,12 +2881,14 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
return js_obj;
}
-Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
- AllocationType allocation) {
+Handle<JSObject> Factory::NewSlowJSObjectFromMap(
+ Handle<Map> map, int capacity, AllocationType allocation,
+ Handle<AllocationSite> allocation_site) {
DCHECK(map->is_dictionary_map());
Handle<NameDictionary> object_properties =
NameDictionary::New(isolate(), capacity);
- Handle<JSObject> js_object = NewJSObjectFromMap(map, allocation);
+ Handle<JSObject> js_object =
+ NewJSObjectFromMap(map, allocation, allocation_site);
js_object->set_raw_properties_or_hash(*object_properties);
return js_object;
}
@@ -2910,43 +2907,54 @@ Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
DCHECK(elements->IsNumberDictionary());
object_map =
JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
- JSObject::MigrateToMap(object, object_map);
+ JSObject::MigrateToMap(isolate(), object, object_map);
object->set_elements(*elements);
}
return object;
}
-Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
- AllocationType allocation) {
- NativeContext native_context = isolate()->raw_native_context();
- Map map = native_context.GetInitialJSArrayMap(elements_kind);
- if (map.is_null()) {
- JSFunction array_function = native_context.array_function();
- map = array_function.initial_map();
- }
- return Handle<JSArray>::cast(
- NewJSObjectFromMap(handle(map, isolate()), allocation));
-}
-
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
int capacity,
ArrayStorageAllocationMode mode,
AllocationType allocation) {
- Handle<JSArray> array = NewJSArray(elements_kind, allocation);
- NewJSArrayStorage(array, length, capacity, mode);
- return array;
+ DCHECK(capacity >= length);
+ if (capacity == 0) {
+ return NewJSArrayWithElements(empty_fixed_array(), elements_kind, length,
+ allocation);
+ }
+
+ HandleScope inner_scope(isolate());
+ Handle<FixedArrayBase> elms =
+ NewJSArrayStorage(elements_kind, capacity, mode);
+ return inner_scope.CloseAndEscape(NewJSArrayWithUnverifiedElements(
+ elms, elements_kind, length, allocation));
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
int length,
AllocationType allocation) {
- DCHECK(length <= elements->length());
- Handle<JSArray> array = NewJSArray(elements_kind, allocation);
+ Handle<JSArray> array = NewJSArrayWithUnverifiedElements(
+ elements, elements_kind, length, allocation);
+ JSObject::ValidateElements(*array);
+ return array;
+}
+Handle<JSArray> Factory::NewJSArrayWithUnverifiedElements(
+ Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
+ AllocationType allocation) {
+ DCHECK(length <= elements->length());
+ NativeContext native_context = isolate()->raw_native_context();
+ Map map = native_context.GetInitialJSArrayMap(elements_kind);
+ if (map.is_null()) {
+ JSFunction array_function = native_context.array_function();
+ map = array_function.initial_map();
+ }
+ Handle<JSArray> array = Handle<JSArray>::cast(
+ NewJSObjectFromMap(handle(map, isolate()), allocation));
+ DisallowHeapAllocation no_gc;
array->set_elements(*elements);
array->set_length(Smi::FromInt(length));
- JSObject::ValidateElements(*array);
return array;
}
@@ -2961,8 +2969,17 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
}
HandleScope inner_scope(isolate());
+ Handle<FixedArrayBase> elms =
+ NewJSArrayStorage(array->GetElementsKind(), capacity, mode);
+
+ array->set_elements(*elms);
+ array->set_length(Smi::FromInt(length));
+}
+
+Handle<FixedArrayBase> Factory::NewJSArrayStorage(
+ ElementsKind elements_kind, int capacity, ArrayStorageAllocationMode mode) {
+ DCHECK_GT(capacity, 0);
Handle<FixedArrayBase> elms;
- ElementsKind elements_kind = array->GetElementsKind();
if (IsDoubleElementsKind(elements_kind)) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
elms = NewFixedDoubleArray(capacity);
@@ -2979,9 +2996,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
elms = NewFixedArrayWithHoles(capacity);
}
}
-
- array->set_elements(*elms);
- array->set_length(Smi::FromInt(length));
+ return elms;
}
Handle<JSWeakMap> Factory::NewJSWeakMap() {
@@ -3020,9 +3035,10 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
return Handle<JSGeneratorObject>::cast(NewJSObjectFromMap(map));
}
-Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
- Handle<ModuleInfo> module_info(code->scope_info().ModuleDescriptorInfo(),
- isolate());
+Handle<SourceTextModule> Factory::NewSourceTextModule(
+ Handle<SharedFunctionInfo> code) {
+ Handle<SourceTextModuleInfo> module_info(
+ code->scope_info().ModuleDescriptorInfo(), isolate());
Handle<ObjectHashTable> exports =
ObjectHashTable::New(isolate(), module_info->RegularExportCount());
Handle<FixedArray> regular_exports =
@@ -3035,8 +3051,10 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
: empty_fixed_array();
ReadOnlyRoots roots(isolate());
- Handle<Module> module =
- Handle<Module>::cast(NewStruct(MODULE_TYPE, AllocationType::kOld));
+ Handle<SourceTextModule> module(
+ SourceTextModule::cast(
+ New(source_text_module_map(), AllocationType::kOld)),
+ isolate());
module->set_code(*code);
module->set_exports(*exports);
module->set_regular_exports(*regular_exports);
@@ -3053,6 +3071,28 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
return module;
}
+Handle<SyntheticModule> Factory::NewSyntheticModule(
+ Handle<String> module_name, Handle<FixedArray> export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) {
+ ReadOnlyRoots roots(isolate());
+ Handle<SyntheticModule> module(
+ SyntheticModule::cast(New(synthetic_module_map(), AllocationType::kOld)),
+ isolate());
+ Handle<ObjectHashTable> exports =
+ ObjectHashTable::New(isolate(), static_cast<int>(export_names->length()));
+ Handle<Foreign> evaluation_steps_foreign =
+ NewForeign(reinterpret_cast<i::Address>(evaluation_steps));
+ module->set_exports(*exports);
+ module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
+ module->set_module_namespace(roots.undefined_value());
+ module->set_status(Module::kUninstantiated);
+ module->set_exception(roots.the_hole_value());
+ module->set_name(*module_name);
+ module->set_export_names(*export_names);
+ module->set_evaluation_steps(*evaluation_steps_foreign);
+ return module;
+}
+
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
AllocationType allocation) {
Handle<JSFunction> array_buffer_fun(
@@ -3274,7 +3314,7 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
DCHECK(map->prototype().IsNull(isolate()));
Handle<JSProxy> result(JSProxy::cast(New(map, AllocationType::kYoung)),
isolate());
- result->initialize_properties();
+ result->initialize_properties(isolate());
result->set_target(*target);
result->set_handler(*handler);
return result;
@@ -3335,10 +3375,12 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
false);
TRACE_EVENT_OBJECT_CREATED_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
- TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()));
+ TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope,
+ shared->TraceID(literal)));
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
- TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()),
+ TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope,
+ shared->TraceID(literal)),
shared->ToTracedValue(literal));
return shared;
}
@@ -3447,6 +3489,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
*empty_feedback_metadata(), SKIP_WRITE_BARRIER);
}
share->set_script_or_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_function_literal_id(kFunctionLiteralIdInvalid);
#if V8_SFI_HAS_UNIQUE_ID
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
@@ -3639,68 +3682,82 @@ Handle<StackTraceFrame> Factory::NewStackTraceFrame(
return frame;
}
-Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
- Handle<StackFrameInfo> stack_frame_info = Handle<StackFrameInfo>::cast(
- NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
- stack_frame_info->set_line_number(0);
- stack_frame_info->set_column_number(0);
- stack_frame_info->set_script_id(0);
- stack_frame_info->set_promise_all_index(-1);
- stack_frame_info->set_script_name(*null_value());
- stack_frame_info->set_script_name_or_source_url(*null_value());
- stack_frame_info->set_function_name(*null_value());
- stack_frame_info->set_flag(0);
- return stack_frame_info;
-}
-
Handle<StackFrameInfo> Factory::NewStackFrameInfo(
Handle<FrameArray> frame_array, int index) {
FrameArrayIterator it(isolate(), frame_array, index);
DCHECK(it.HasFrame());
- Handle<StackFrameInfo> info = NewStackFrameInfo();
- info->set_flag(0);
-
const bool is_wasm = frame_array->IsAnyWasmFrame(index);
- info->set_is_wasm(is_wasm);
+ StackFrameBase* frame = it.Frame();
- // Line numbers are 1-based, for Wasm we need to adjust.
- int line = it.Frame()->GetLineNumber();
- if (is_wasm && line >= 0) line++;
- info->set_line_number(line);
+ int line = frame->GetLineNumber();
+ int column = frame->GetColumnNumber();
- // Column numbers are 1-based. For Wasm we use the position
- // as the iterator does not currently provide a column number.
- const int column =
- is_wasm ? it.Frame()->GetPosition() + 1 : it.Frame()->GetColumnNumber();
- info->set_column_number(column);
+ const int script_id = frame->GetScriptId();
- info->set_script_id(it.Frame()->GetScriptId());
- info->set_script_name(*it.Frame()->GetFileName());
- info->set_script_name_or_source_url(*it.Frame()->GetScriptNameOrSourceUrl());
+ Handle<Object> script_name = frame->GetFileName();
+ Handle<Object> script_or_url = frame->GetScriptNameOrSourceUrl();
// TODO(szuend): Adjust this, once it is decided what name to use in both
// "simple" and "detailed" stack traces. This code is for
// backwards compatibility to fullfill test expectations.
- auto function_name = it.Frame()->GetFunctionName();
+ auto function_name = frame->GetFunctionName();
+ bool is_user_java_script = false;
if (!is_wasm) {
- Handle<Object> function = it.Frame()->GetFunction();
+ Handle<Object> function = frame->GetFunction();
if (function->IsJSFunction()) {
Handle<JSFunction> fun = Handle<JSFunction>::cast(function);
- function_name = JSFunction::GetDebugName(fun);
- const bool is_user_java_script = fun->shared().IsUserJavaScript();
- info->set_is_user_java_script(is_user_java_script);
+ is_user_java_script = fun->shared().IsUserJavaScript();
}
}
+
+ Handle<Object> method_name = undefined_value();
+ Handle<Object> type_name = undefined_value();
+ Handle<Object> eval_origin = frame->GetEvalOrigin();
+ Handle<Object> wasm_module_name = frame->GetWasmModuleName();
+
+ // MethodName and TypeName are expensive to look up, so they are only
+ // included when they are strictly needed by the stack trace
+ // serialization code.
+ // Note: The {is_method_call} predicate needs to be kept in sync with
+ // the corresponding predicate in the stack trace serialization code
+ // in stack-frame-info.cc.
+ const bool is_toplevel = frame->IsToplevel();
+ const bool is_constructor = frame->IsConstructor();
+ const bool is_method_call = !(is_toplevel || is_constructor);
+ if (is_method_call) {
+ method_name = frame->GetMethodName();
+ type_name = frame->GetTypeName();
+ }
+
+ Handle<StackFrameInfo> info = Handle<StackFrameInfo>::cast(
+ NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
+
+ DisallowHeapAllocation no_gc;
+
+ info->set_flag(0);
+ info->set_is_wasm(is_wasm);
+ info->set_is_asmjs_wasm(frame_array->IsAsmJsWasmFrame(index));
+ info->set_is_user_java_script(is_user_java_script);
+ info->set_line_number(line);
+ info->set_column_number(column);
+ info->set_script_id(script_id);
+
+ info->set_script_name(*script_name);
+ info->set_script_name_or_source_url(*script_or_url);
info->set_function_name(*function_name);
- info->set_wasm_module_name(*it.Frame()->GetWasmModuleName());
- info->set_is_eval(it.Frame()->IsEval());
- info->set_is_constructor(it.Frame()->IsConstructor());
- info->set_is_toplevel(it.Frame()->IsToplevel());
- info->set_is_async(it.Frame()->IsAsync());
- info->set_is_promise_all(it.Frame()->IsPromiseAll());
- info->set_promise_all_index(it.Frame()->GetPromiseIndex());
+ info->set_method_name(*method_name);
+ info->set_type_name(*type_name);
+ info->set_eval_origin(*eval_origin);
+ info->set_wasm_module_name(*wasm_module_name);
+
+ info->set_is_eval(frame->IsEval());
+ info->set_is_constructor(is_constructor);
+ info->set_is_toplevel(is_toplevel);
+ info->set_is_async(frame->IsAsync());
+ info->set_is_promise_all(frame->IsPromiseAll());
+ info->set_promise_all_index(frame->GetPromiseIndex());
return info;
}
@@ -3785,7 +3842,8 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
return map;
}
-Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
+Handle<LoadHandler> Factory::NewLoadHandler(int data_count,
+ AllocationType allocation) {
Handle<Map> map;
switch (data_count) {
case 1:
@@ -3800,7 +3858,7 @@ Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
default:
UNREACHABLE();
}
- return handle(LoadHandler::cast(New(map, AllocationType::kOld)), isolate());
+ return handle(LoadHandler::cast(New(map, allocation)), isolate());
}
Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 5af2529021..3ccbe6856f 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -53,16 +53,18 @@ class JSSetIterator;
class JSTypedArray;
class JSWeakMap;
class LoadHandler;
-class ModuleInfo;
class NativeContext;
class NewFunctionArgs;
class PreparseData;
class PromiseResolveThenableJobTask;
class RegExpMatchInfo;
class ScriptContextTable;
+class SourceTextModule;
+class SourceTextModuleInfo;
class StackFrameInfo;
class StackTraceFrame;
class StoreHandler;
+class SyntheticModule;
class TemplateObjectDescription;
class UncompiledDataWithoutPreparseData;
class UncompiledDataWithPreparseData;
@@ -406,7 +408,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<ScriptContextTable> NewScriptContextTable();
// Create a module context.
- Handle<Context> NewModuleContext(Handle<Module> module,
+ Handle<Context> NewModuleContext(Handle<SourceTextModule> module,
Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info);
@@ -461,7 +463,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
Handle<StackTraceFrame> NewStackTraceFrame(Handle<FrameArray> frame_array,
int index);
- Handle<StackFrameInfo> NewStackFrameInfo();
Handle<StackFrameInfo> NewStackFrameInfo(Handle<FrameArray> frame_array,
int index);
Handle<SourcePositionTableWithFrameCache>
@@ -626,10 +627,19 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSObject> NewJSObjectFromMap(
Handle<Map> map, AllocationType allocation = AllocationType::kYoung,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+ // Like NewJSObjectFromMap, but includes allocating a properties dictionary.
Handle<JSObject> NewSlowJSObjectFromMap(
Handle<Map> map,
int number_of_slow_properties = NameDictionary::kInitialCapacity,
- AllocationType allocation = AllocationType::kYoung);
+ AllocationType allocation = AllocationType::kYoung,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+ // Calls NewJSObjectFromMap or NewSlowJSObjectFromMap depending on whether the
+ // map is a dictionary map.
+ inline Handle<JSObject> NewFastOrSlowJSObjectFromMap(
+ Handle<Map> map,
+ int number_of_slow_properties = NameDictionary::kInitialCapacity,
+ AllocationType allocation = AllocationType::kYoung,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
// Allocates and initializes a new JavaScript object with the given
// {prototype} and {properties}. The newly created object will be
// in dictionary properties mode. The {elements} can either be the
@@ -680,7 +690,10 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSModuleNamespace> NewJSModuleNamespace();
- Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
+ Handle<SourceTextModule> NewSourceTextModule(Handle<SharedFunctionInfo> code);
+ Handle<SyntheticModule> NewSyntheticModule(
+ Handle<String> module_name, Handle<FixedArray> export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps);
Handle<JSArrayBuffer> NewJSArrayBuffer(
SharedFlag shared, AllocationType allocation = AllocationType::kYoung);
@@ -760,19 +773,18 @@ class V8_EXPORT_PRIVATE Factory {
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
- Handle<ModuleInfo> NewModuleInfo();
+ Handle<SourceTextModuleInfo> NewSourceTextModuleInfo();
Handle<PreparseData> NewPreparseData(int data_length, int children_length);
Handle<UncompiledDataWithoutPreparseData>
NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
int32_t start_position,
- int32_t end_position,
- int32_t function_literal_id);
+ int32_t end_position);
Handle<UncompiledDataWithPreparseData> NewUncompiledDataWithPreparseData(
Handle<String> inferred_name, int32_t start_position,
- int32_t end_position, int32_t function_literal_id, Handle<PreparseData>);
+ int32_t end_position, Handle<PreparseData>);
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
@@ -884,7 +896,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Map> ObjectLiteralMapFromCache(Handle<NativeContext> native_context,
int number_of_properties);
- Handle<LoadHandler> NewLoadHandler(int data_count);
+ Handle<LoadHandler> NewLoadHandler(
+ int data_count, AllocationType allocation = AllocationType::kOld);
Handle<StoreHandler> NewStoreHandler(int data_count);
Handle<RegExpMatchInfo> NewRegExpMatchInfo();
@@ -1074,11 +1087,20 @@ class V8_EXPORT_PRIVATE Factory {
Handle<String> NumberToStringCacheSet(Handle<Object> number, int hash,
const char* string, bool check_cache);
- // Create a JSArray with no elements and no length.
- Handle<JSArray> NewJSArray(
- ElementsKind elements_kind,
+ // Creates a new JSArray with the given backing storage. Performs no
+ // verification of the backing storage because it may not yet be filled.
+ Handle<JSArray> NewJSArrayWithUnverifiedElements(
+ Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
AllocationType allocation = AllocationType::kYoung);
+ // Creates the backing storage for a JSArray. This handle must be discarded
+ // before returning the JSArray reference to code outside Factory, which might
+ // decide to left-trim the backing store. To avoid unnecessary HandleScopes,
+ // this method requires capacity greater than zero.
+ Handle<FixedArrayBase> NewJSArrayStorage(
+ ElementsKind elements_kind, int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
int maybe_builtin_index, FunctionKind kind = kNormalFunction);
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index fab663d767..77e6b99997 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -18,9 +18,9 @@ namespace internal {
static size_t CountTotalHolesSize(Heap* heap) {
size_t holes_size = 0;
- PagedSpaces spaces(heap);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(heap);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
holes_size += space->Waste() + space->Available();
}
@@ -150,9 +150,11 @@ GCTracer::GCTracer(Heap* heap)
allocation_time_ms_(0.0),
new_space_allocation_counter_bytes_(0),
old_generation_allocation_counter_bytes_(0),
+ embedder_allocation_counter_bytes_(0),
allocation_duration_since_gc_(0.0),
new_space_allocation_in_bytes_since_gc_(0),
old_generation_allocation_in_bytes_since_gc_(0),
+ embedder_allocation_in_bytes_since_gc_(0),
combined_mark_compact_speed_cache_(0.0),
start_counter_(0),
average_mutator_duration_(0),
@@ -264,6 +266,12 @@ void GCTracer::Start(GarbageCollector collector,
counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
} else {
counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
+
+ if (FLAG_trace_gc_freelists) {
+ PrintIsolate(heap_->isolate(),
+ "FreeLists statistics before collection:\n");
+ heap_->PrintFreeListsStats();
+ }
}
}
@@ -377,6 +385,14 @@ void GCTracer::Stop(GarbageCollector collector) {
}
}
+void GCTracer::NotifySweepingCompleted() {
+ if (FLAG_trace_gc_freelists) {
+ PrintIsolate(heap_->isolate(),
+ "FreeLists statistics after sweeping completed:\n");
+ heap_->PrintFreeListsStats();
+ }
+}
+
void GCTracer::SampleAllocation(double current_ms,
size_t new_space_counter_bytes,
size_t old_generation_counter_bytes,
@@ -948,10 +964,9 @@ double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
}
double GCTracer::EmbedderSpeedInBytesPerMillisecond() const {
- if (recorded_embedder_speed_ != 0.0) {
- return recorded_embedder_speed_;
- }
- return kConservativeSpeedInBytesPerMillisecond;
+ // Note: Returning 0 is ok here as callers check for whether embedder speeds
+ // have been recorded at all.
+ return recorded_embedder_speed_;
}
double GCTracer::ScavengeSpeedInBytesPerMillisecond(
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 4ddd0ef1c2..ec54b6c1ab 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -216,6 +216,8 @@ class V8_EXPORT_PRIVATE GCTracer {
// Stop collecting data and print results.
void Stop(GarbageCollector collector);
+ void NotifySweepingCompleted();
+
void NotifyYoungGenerationHandling(
YoungGenerationHandling young_generation_handling);
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index 77e4870913..d59f8abe9f 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -33,20 +33,20 @@ double MemoryController<Trait>::MaxGrowingFactor(size_t max_heap_size) {
constexpr double kMaxSmallFactor = 2.0;
constexpr double kHighFactor = 4.0;
- size_t max_size_in_mb = max_heap_size / MB;
- max_size_in_mb = Max(max_size_in_mb, Trait::kMinSize);
+ size_t max_size = max_heap_size;
+ max_size = Max(max_size, Trait::kMinSize);
// If we are on a device with lots of memory, we allow a high heap
// growing factor.
- if (max_size_in_mb >= Trait::kMaxSize) {
+ if (max_size >= Trait::kMaxSize) {
return kHighFactor;
}
- DCHECK_GE(max_size_in_mb, Trait::kMinSize);
- DCHECK_LT(max_size_in_mb, Trait::kMaxSize);
+ DCHECK_GE(max_size, Trait::kMinSize);
+ DCHECK_LT(max_size, Trait::kMaxSize);
// On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
- double factor = (max_size_in_mb - Trait::kMinSize) *
+ double factor = (max_size - Trait::kMinSize) *
(kMaxSmallFactor - kMinSmallFactor) /
(Trait::kMaxSize - Trait::kMinSize) +
kMinSmallFactor;
@@ -126,8 +126,9 @@ size_t MemoryController<Trait>::MinimumAllocationLimitGrowingStep(
template <typename Trait>
size_t MemoryController<Trait>::CalculateAllocationLimit(
- Heap* heap, size_t current_size, size_t max_size, size_t new_space_capacity,
- double factor, Heap::HeapGrowingMode growing_mode) {
+ Heap* heap, size_t current_size, size_t min_size, size_t max_size,
+ size_t new_space_capacity, double factor,
+ Heap::HeapGrowingMode growing_mode) {
switch (growing_mode) {
case Heap::HeapGrowingMode::kConservative:
case Heap::HeapGrowingMode::kSlow:
@@ -155,9 +156,11 @@ size_t MemoryController<Trait>::CalculateAllocationLimit(
static_cast<uint64_t>(current_size) +
MinimumAllocationLimitGrowingStep(growing_mode)) +
new_space_capacity;
+ const uint64_t limit_above_min_size = Max<uint64_t>(limit, min_size);
const uint64_t halfway_to_the_max =
(static_cast<uint64_t>(current_size) + max_size) / 2;
- const size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
+ const size_t result =
+ static_cast<size_t>(Min(limit_above_min_size, halfway_to_the_max));
if (FLAG_trace_gc_verbose) {
Isolate::FromHeap(heap)->PrintWithTimestamp(
"[%s] Limit: old size: %zu KB, new limit: %zu KB (%.1f)\n",
diff --git a/deps/v8/src/heap/heap-controller.h b/deps/v8/src/heap/heap-controller.h
index bba1588669..d4a3534cd7 100644
--- a/deps/v8/src/heap/heap-controller.h
+++ b/deps/v8/src/heap/heap-controller.h
@@ -14,9 +14,8 @@ namespace v8 {
namespace internal {
struct BaseControllerTrait {
- // Sizes are in MB.
- static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
- static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
+ static constexpr size_t kMinSize = 128u * Heap::kPointerMultiplier * MB;
+ static constexpr size_t kMaxSize = 1024u * Heap::kPointerMultiplier * MB;
static constexpr double kMinGrowingFactor = 1.1;
static constexpr double kMaxGrowingFactor = 4.0;
@@ -43,7 +42,7 @@ class V8_EXPORT_PRIVATE MemoryController : public AllStatic {
double mutator_speed);
static size_t CalculateAllocationLimit(Heap* heap, size_t current_size,
- size_t max_size,
+ size_t min_size, size_t max_size,
size_t new_space_capacity,
double factor,
Heap::HeapGrowingMode growing_mode);
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 4ce35bd961..f2f7a7f692 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -263,15 +263,13 @@ void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
}
bool Heap::CanAllocateInReadOnlySpace() {
- return !deserialization_complete_ &&
- (isolate()->serializer_enabled() ||
- !isolate()->initialized_from_snapshot());
+ return read_only_space()->writable();
}
void Heap::UpdateAllocationsHash(HeapObject object) {
Address object_address = object.address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
- AllocationSpace allocation_space = memory_chunk->owner()->identity();
+ AllocationSpace allocation_space = memory_chunk->owner_identity();
STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
uint32_t value =
@@ -374,13 +372,12 @@ bool Heap::InToPage(HeapObject heap_object) {
bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
// static
-Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
+Heap* Heap::FromWritableHeapObject(HeapObject obj) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
- SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
- static_cast<ReadOnlySpace*>(chunk->owner())->writable());
+ SLOW_DCHECK(chunk->IsWritable());
Heap* heap = chunk->heap();
SLOW_DCHECK(heap != nullptr);
return heap;
@@ -408,7 +405,7 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
return AllocationMemento();
}
HeapObject candidate = HeapObject::FromAddress(memento_address);
- MapWordSlot candidate_map_slot = candidate.map_slot();
+ ObjectSlot candidate_map_slot = candidate.map_slot();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
@@ -614,8 +611,8 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
scope_active_(chunk_->heap()->write_protect_code_memory() &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
- DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
- (chunk_->owner()->identity() == CODE_LO_SPACE));
+ DCHECK(chunk_->owner_identity() == CODE_SPACE ||
+ (chunk_->owner_identity() == CODE_LO_SPACE));
chunk_->SetReadAndWritable();
}
}
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 6c5f20ac72..5687284b1e 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -11,9 +11,6 @@
#include "src/heap/heap-write-barrier.h"
#include "src/common/globals.h"
-// TODO(jkummerow): Get rid of this by moving GetIsolateFromWritableObject
-// elsewhere.
-#include "src/execution/isolate.h"
#include "src/objects/code.h"
#include "src/objects/compressed-slots-inl.h"
#include "src/objects/fixed-array.h"
@@ -42,27 +39,21 @@ V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
Heap* heap, HeapObject host, HeapObject descriptor_array,
int number_of_own_descriptors);
+V8_EXPORT_PRIVATE void Heap_GenerationalEphemeronKeyBarrierSlow(
+ Heap* heap, EphemeronHashTable table, Address slot);
+
// Do not use these internal details anywhere outside of this file. These
// internals are only intended to shortcut write barrier checks.
namespace heap_internals {
-struct Space {
- static constexpr uintptr_t kIdOffset = 9 * kSystemPointerSize;
- V8_INLINE AllocationSpace identity() {
- return *reinterpret_cast<AllocationSpace*>(reinterpret_cast<Address>(this) +
- kIdOffset);
- }
-};
-
struct MemoryChunk {
- static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
+ static constexpr uintptr_t kFlagsOffset = kSizetSize;
static constexpr uintptr_t kHeapOffset =
- kFlagsOffset + kUIntptrSize + 4 * kSystemPointerSize;
- static constexpr uintptr_t kOwnerOffset =
- kHeapOffset + 2 * kSystemPointerSize;
+ kSizetSize + kUIntptrSize + kSystemPointerSize;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
+ static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
@@ -84,13 +75,12 @@ struct MemoryChunk {
V8_INLINE Heap* GetHeap() {
Heap* heap = *reinterpret_cast<Heap**>(reinterpret_cast<Address>(this) +
kHeapOffset);
- SLOW_DCHECK(heap != nullptr);
+ DCHECK_NOT_NULL(heap);
return heap;
}
- V8_INLINE Space* GetOwner() {
- return *reinterpret_cast<Space**>(reinterpret_cast<Address>(this) +
- kOwnerOffset);
+ V8_INLINE bool InReadOnlySpace() const {
+ return GetFlags() & kReadOnlySpaceBit;
}
};
@@ -122,8 +112,7 @@ inline void GenerationalEphemeronKeyBarrierInternal(EphemeronHashTable table,
return;
}
- Heap* heap = GetHeapFromWritableObject(table);
- heap->RecordEphemeronKeyWrite(table, slot);
+ Heap_GenerationalEphemeronKeyBarrierSlow(table_chunk->GetHeap(), table, slot);
}
inline void MarkingBarrierInternal(HeapObject object, Address slot,
@@ -231,27 +220,16 @@ inline WriteBarrierMode GetWriteBarrierModeForObject(
return UPDATE_WRITE_BARRIER;
}
-inline bool ObjectInYoungGeneration(const Object object) {
+inline bool ObjectInYoungGeneration(Object object) {
if (object.IsSmi()) return false;
return heap_internals::MemoryChunk::FromHeapObject(HeapObject::cast(object))
->InYoungGeneration();
}
-inline Heap* GetHeapFromWritableObject(const HeapObject object) {
+inline bool IsReadOnlyHeapObject(HeapObject object) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
- return chunk->GetHeap();
-}
-
-inline bool GetIsolateFromWritableObject(HeapObject obj, Isolate** isolate) {
- heap_internals::MemoryChunk* chunk =
- heap_internals::MemoryChunk::FromHeapObject(obj);
- if (chunk->GetOwner()->identity() == RO_SPACE) {
- *isolate = nullptr;
- return false;
- }
- *isolate = Isolate::FromHeap(chunk->GetHeap());
- return true;
+ return chunk->InReadOnlySpace();
}
} // namespace internal
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index ead17f9396..1126fd6f4b 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -41,7 +41,7 @@ void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
HeapObject descriptor_array,
int number_of_own_descriptors);
-Heap* GetHeapFromWritableObject(const HeapObject object);
+inline bool IsReadOnlyHeapObject(HeapObject object);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 52387b5bc1..7feb1c11ba 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -5,6 +5,7 @@
#include "src/heap/heap.h"
#include <cinttypes>
+#include <iomanip>
#include <unordered_map>
#include <unordered_set>
@@ -63,7 +64,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
@@ -118,6 +119,12 @@ void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
number_of_own_descriptors);
}
+void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap,
+ EphemeronHashTable table,
+ Address slot) {
+ heap->RecordEphemeronKeyWrite(table, slot);
+}
+
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
@@ -164,29 +171,21 @@ struct Heap::StrongRootsList {
class IdleScavengeObserver : public AllocationObserver {
public:
- IdleScavengeObserver(Heap& heap, intptr_t step_size)
+ IdleScavengeObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
- heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
+ heap_->ScheduleIdleScavengeIfNeeded(bytes_allocated);
}
private:
- Heap& heap_;
+ Heap* heap_;
};
Heap::Heap()
: isolate_(isolate()),
- initial_max_old_generation_size_(max_old_generation_size_),
- initial_max_old_generation_size_threshold_(0),
- initial_old_generation_size_(
- Min(max_old_generation_size_, kMaxInitialOldGenerationSize)),
memory_pressure_level_(MemoryPressureLevel::kNone),
- old_generation_allocation_limit_(initial_old_generation_size_),
- global_allocation_limit_(initial_old_generation_size_),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
- current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
- is_current_gc_forced_(false),
external_string_table_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
@@ -207,23 +206,87 @@ size_t Heap::MaxReserved() {
max_old_generation_size_);
}
-size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
- const size_t old_space_physical_memory_factor = 4;
- size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
- old_space_physical_memory_factor *
- kPointerMultiplier);
- size_t max_size_in_mb = V8HeapTrait::kMaxSize;
+size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
+ // Compute the semi space size and cap it.
+ size_t ratio = old_generation <= kOldGenerationLowMemory
+ ? kOldGenerationToSemiSpaceRatioLowMemory
+ : kOldGenerationToSemiSpaceRatio;
+ size_t semi_space = old_generation / ratio;
+ semi_space = Min<size_t>(semi_space, kMaxSemiSpaceSize);
+ semi_space = Max<size_t>(semi_space, kMinSemiSpaceSize);
+ semi_space = RoundUp(semi_space, Page::kPageSize);
+ return YoungGenerationSizeFromSemiSpaceSize(semi_space);
+}
+
+size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) {
+ // Compute the old generation size and cap it.
+ uint64_t old_generation = physical_memory /
+ kPhysicalMemoryToOldGenerationRatio *
+ kPointerMultiplier;
+ old_generation =
+ Min<uint64_t>(old_generation, MaxOldGenerationSize(physical_memory));
+ old_generation = Max<uint64_t>(old_generation, V8HeapTrait::kMinSize);
+ old_generation = RoundUp(old_generation, Page::kPageSize);
+
+ size_t young_generation = YoungGenerationSizeFromOldGenerationSize(
+ static_cast<size_t>(old_generation));
+ return static_cast<size_t>(old_generation) + young_generation;
+}
+
+void Heap::GenerationSizesFromHeapSize(size_t heap_size,
+ size_t* young_generation_size,
+ size_t* old_generation_size) {
+ // Initialize values for the case when the given heap size is too small.
+ *young_generation_size = 0;
+ *old_generation_size = 0;
+ // Binary search for the largest old generation size that fits to the given
+ // heap limit considering the correspondingly sized young generation.
+ size_t lower = 0, upper = heap_size;
+ while (lower + 1 < upper) {
+ size_t old_generation = lower + (upper - lower) / 2;
+ size_t young_generation =
+ YoungGenerationSizeFromOldGenerationSize(old_generation);
+ if (old_generation + young_generation <= heap_size) {
+ // This size configuration fits into the given heap limit.
+ *young_generation_size = young_generation;
+ *old_generation_size = old_generation;
+ lower = old_generation;
+ } else {
+ upper = old_generation;
+ }
+ }
+}
+size_t Heap::MinYoungGenerationSize() {
+ return YoungGenerationSizeFromSemiSpaceSize(kMinSemiSpaceSize);
+}
+
+size_t Heap::MinOldGenerationSize() {
+ size_t paged_space_count =
+ LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
+ return paged_space_count * Page::kPageSize;
+}
+
+size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
+ size_t max_size = V8HeapTrait::kMaxSize;
// Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
// systems with physical memory bigger than 16GB.
constexpr bool x64_bit = Heap::kPointerMultiplier >= 2;
if (FLAG_huge_max_old_generation_size && x64_bit &&
physical_memory / GB > 16) {
- DCHECK_LE(max_size_in_mb, 4096);
- max_size_in_mb = 4096; // 4GB
+ DCHECK_EQ(max_size / GB, 2);
+ max_size *= 2;
}
+ return max_size;
+}
- return Max(Min(computed_size, max_size_in_mb), V8HeapTrait::kMinSize);
+size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) {
+ return semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
+}
+
+size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
+ size_t young_generation_size) {
+ return young_generation_size / (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
}
size_t Heap::Capacity() {
@@ -234,10 +297,10 @@ size_t Heap::Capacity() {
size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->Capacity();
}
return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
@@ -246,10 +309,10 @@ size_t Heap::OldGenerationCapacity() {
size_t Heap::CommittedOldGenerationMemory() {
if (!HasBeenSetUp()) return 0;
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->CommittedMemory();
}
return total + lo_space_->Size() + code_lo_space_->Size();
@@ -273,8 +336,8 @@ size_t Heap::CommittedPhysicalMemory() {
if (!HasBeenSetUp()) return 0;
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->CommittedPhysicalMemory();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->CommittedPhysicalMemory();
}
return total;
@@ -301,8 +364,8 @@ size_t Heap::Available() {
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->Available();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->Available();
}
total += memory_allocator()->Available();
@@ -311,7 +374,7 @@ size_t Heap::Available() {
bool Heap::CanExpandOldGeneration(size_t size) {
if (force_oom_) return false;
- if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
+ if (OldGenerationCapacity() + size > max_old_generation_size_) return false;
// The OldGenerationCapacity does not account compaction spaces used
// during evacuation. Ensure that expanding the old generation does push
// the total allocated memory size over the maximum heap size.
@@ -443,6 +506,81 @@ void Heap::PrintShortHeapStatistics() {
total_gc_time_ms_);
}
+void Heap::PrintFreeListsStats() {
+ DCHECK(FLAG_trace_gc_freelists);
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ PrintIsolate(isolate_,
+ "Freelists statistics per Page: "
+ "[category: length || total free bytes]\n");
+ }
+
+ std::vector<int> categories_lengths(
+ old_space()->free_list()->number_of_categories(), 0);
+ std::vector<size_t> categories_sums(
+ old_space()->free_list()->number_of_categories(), 0);
+ unsigned int pageCnt = 0;
+
+ // This loops computes freelists lengths and sum.
+ // If FLAG_trace_gc_freelists_verbose is enabled, it also prints
+ // the stats of each FreeListCategory of each Page.
+ for (Page* page : *old_space()) {
+ std::ostringstream out_str;
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ out_str << "Page " << std::setw(4) << pageCnt;
+ }
+
+ for (int cat = kFirstCategory;
+ cat <= old_space()->free_list()->last_category(); cat++) {
+ FreeListCategory* free_list =
+ page->free_list_category(static_cast<FreeListCategoryType>(cat));
+ int length = free_list->FreeListLength();
+ size_t sum = free_list->SumFreeList();
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ out_str << "[" << cat << ": " << std::setw(4) << length << " || "
+ << std::setw(6) << sum << " ]"
+ << (cat == old_space()->free_list()->last_category() ? "\n"
+ : ", ");
+ }
+ categories_lengths[cat] += length;
+ categories_sums[cat] += sum;
+ }
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ PrintIsolate(isolate_, "%s", out_str.str().c_str());
+ }
+
+ pageCnt++;
+ }
+
+ // Print statistics about old_space (pages, free/wasted/used memory...).
+ PrintIsolate(
+ isolate_,
+ "%d pages. Free space: %.1f MB (waste: %.2f). "
+ "Usage: %.1f/%.1f (MB) -> %.2f%%.\n",
+ pageCnt, static_cast<double>(old_space_->Available()) / MB,
+ static_cast<double>(old_space_->Waste()) / MB,
+ static_cast<double>(old_space_->Size()) / MB,
+ static_cast<double>(old_space_->Capacity()) / MB,
+ static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100);
+
+ // Print global statistics of each FreeListCategory (length & sum).
+ PrintIsolate(isolate_,
+ "FreeLists global statistics: "
+ "[category: length || total free KB]\n");
+ std::ostringstream out_str;
+ for (int cat = kFirstCategory;
+ cat <= old_space()->free_list()->last_category(); cat++) {
+ out_str << "[" << cat << ": " << categories_lengths[cat] << " || "
+ << std::fixed << std::setprecision(2)
+ << static_cast<double>(categories_sums[cat]) / KB << " KB]"
+ << (cat == old_space()->free_list()->last_category() ? "\n" : ", ");
+ }
+ PrintIsolate(isolate_, "%s", out_str.str().c_str());
+}
+
void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
HeapStatistics stats;
reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);
@@ -483,7 +621,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
MEMBER("malloced_memory") << stats.malloced_memory() << ","
MEMBER("external_memory") << stats.external_memory() << ","
MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
- MEMBER("pages") << LIST(
+ MEMBER("spaces") << LIST(
SpaceStatistics(RO_SPACE) << "," <<
SpaceStatistics(NEW_SPACE) << "," <<
SpaceStatistics(OLD_SPACE) << "," <<
@@ -693,8 +831,8 @@ void Heap::GarbageCollectionPrologue() {
size_t Heap::SizeOfObjects() {
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->SizeOfObjects();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->SizeOfObjects();
}
return total;
}
@@ -750,8 +888,8 @@ void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- for (SpaceIterator it(this); it.has_next();) {
- Space* space = it.next();
+ for (SpaceIterator it(this); it.HasNext();) {
+ Space* space = it.Next();
if (space == new_space()) {
space->AddAllocationObserver(new_space_observer);
} else {
@@ -764,8 +902,8 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- for (SpaceIterator it(this); it.has_next();) {
- Space* space = it.next();
+ for (SpaceIterator it(this); it.HasNext();) {
+ Space* space = it.Next();
if (space == new_space()) {
space->RemoveAllocationObserver(new_space_observer);
} else {
@@ -1194,27 +1332,27 @@ intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
return 0;
}
-void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
- if (objects.size() == 0) return;
+void ReportDuplicates(int size, std::vector<HeapObject>* objects) {
+ if (objects->size() == 0) return;
- sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
+ sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) {
intptr_t c = CompareWords(size, a, b);
if (c != 0) return c < 0;
return a < b;
});
std::vector<std::pair<int, HeapObject>> duplicates;
- HeapObject current = objects[0];
+ HeapObject current = (*objects)[0];
int count = 1;
- for (size_t i = 1; i < objects.size(); i++) {
- if (CompareWords(size, current, objects[i]) == 0) {
+ for (size_t i = 1; i < objects->size(); i++) {
+ if (CompareWords(size, current, (*objects)[i]) == 0) {
count++;
} else {
if (count > 1) {
duplicates.push_back(std::make_pair(count - 1, current));
}
count = 1;
- current = objects[i];
+ current = (*objects)[i];
}
}
if (count > 1) {
@@ -1274,29 +1412,30 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
- new_lo_space_->SetCapacity(new_space_->Capacity());
+ new_lo_space_->SetCapacity(new_space_->Capacity() *
+ kNewLargeObjectSpaceToSemiSpaceRatio);
UncommitFromSpace();
EagerlyFreeExternalMemory();
if (FLAG_trace_duplicate_threshold_kb) {
std::map<int, std::vector<HeapObject>> objects_by_size;
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
- HeapObjectIterator it(space);
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ PagedSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj.Size()].push_back(obj);
}
}
{
- LargeObjectIterator it(lo_space());
+ LargeObjectSpaceObjectIterator it(lo_space());
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj.Size()].push_back(obj);
}
}
for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
++it) {
- ReportDuplicates(it->first, it->second);
+ ReportDuplicates(it->first, &it->second);
}
}
}
@@ -1669,7 +1808,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
for (int space = FIRST_SPACE;
- space < SerializerDeserializer::kNumberOfSpaces; space++) {
+ space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
+ space++) {
Reservation* reservation = &reservations[space];
DCHECK_LE(1, reservation->size());
if (reservation->at(0).size == 0) {
@@ -1727,8 +1867,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
Address free_space_address = free_space.address();
CreateFillerObjectAt(free_space_address, size,
ClearRecordedSlots::kNo);
- DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
- space);
+ DCHECK(IsPreAllocatedSpace(static_cast<SnapshotSpace>(space)));
chunk.start = free_space_address;
chunk.end = free_space_address + size;
} else {
@@ -1993,14 +2132,16 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
old_generation_allocation_limit_ =
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
- this, old_gen_size, max_old_generation_size_, new_space_capacity,
- v8_growing_factor, mode);
+ this, old_gen_size, min_old_generation_size_,
+ max_old_generation_size_, new_space_capacity, v8_growing_factor,
+ mode);
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
global_allocation_limit_ =
MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
- this, GlobalSizeOfObjects(), max_global_memory_size_,
- new_space_capacity, global_growing_factor, mode);
+ this, GlobalSizeOfObjects(), min_global_memory_size_,
+ max_global_memory_size_, new_space_capacity,
+ global_growing_factor, mode);
}
CheckIneffectiveMarkCompact(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
@@ -2008,8 +2149,9 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
old_generation_size_configured_) {
size_t new_old_generation_limit =
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
- this, old_gen_size, max_old_generation_size_, new_space_capacity,
- v8_growing_factor, mode);
+ this, old_gen_size, min_old_generation_size_,
+ max_old_generation_size_, new_space_capacity, v8_growing_factor,
+ mode);
if (new_old_generation_limit < old_generation_allocation_limit_) {
old_generation_allocation_limit_ = new_old_generation_limit;
}
@@ -2017,8 +2159,9 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
DCHECK_GT(global_growing_factor, 0);
size_t new_global_limit =
MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
- this, GlobalSizeOfObjects(), max_global_memory_size_,
- new_space_capacity, global_growing_factor, mode);
+ this, GlobalSizeOfObjects(), min_global_memory_size_,
+ max_global_memory_size_, new_space_capacity,
+ global_growing_factor, mode);
if (new_global_limit < global_allocation_limit_) {
global_allocation_limit_ = new_global_limit;
}
@@ -2433,8 +2576,8 @@ void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
if (!young_strings_.empty()) {
v->VisitRootPointers(
Root::kExternalStringsTable, nullptr,
- FullObjectSlot(&young_strings_[0]),
- FullObjectSlot(&young_strings_[young_strings_.size()]));
+ FullObjectSlot(young_strings_.data()),
+ FullObjectSlot(young_strings_.data() + young_strings_.size()));
}
}
@@ -2596,6 +2739,7 @@ STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
#endif
#ifdef V8_HOST_ARCH_32_BIT
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
#endif
@@ -2981,7 +3125,7 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// We do not create a filler for objects in a large object space.
if (!IsLargeObject(object)) {
HeapObject filler =
- CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+ CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kNo);
DCHECK(!filler.is_null());
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
@@ -3229,7 +3373,8 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
- for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE;
+ i < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
@@ -3634,8 +3779,8 @@ void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack(stdout);
- for (SpaceIterator it(this); it.has_next();) {
- it.next()->Print();
+ for (SpaceIterator it(this); it.HasNext();) {
+ it.Next()->Print();
}
}
@@ -3704,6 +3849,9 @@ const char* Heap::GarbageCollectionReasonToString(
}
bool Heap::Contains(HeapObject value) {
+ if (ReadOnlyHeap::Contains(value)) {
+ return false;
+ }
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
@@ -3736,7 +3884,7 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) {
case NEW_LO_SPACE:
return new_lo_space_->Contains(value);
case RO_SPACE:
- return read_only_space_->Contains(value);
+ return ReadOnlyHeap::Contains(value);
}
UNREACHABLE();
}
@@ -3842,9 +3990,9 @@ void Heap::Verify() {
void Heap::VerifyReadOnlyHeap() {
CHECK(!read_only_space_->writable());
// TODO(v8:7464): Always verify read-only space once PagedSpace::Verify
- // supports verifying shared read-only space. Currently HeapObjectIterator is
- // explicitly disabled for read-only space when sharing is enabled, because it
- // relies on PagedSpace::heap_ being non-null.
+ // supports verifying shared read-only space. Currently
+ // PagedSpaceObjectIterator is explicitly disabled for read-only space when
+ // sharing is enabled, because it relies on PagedSpace::heap_ being non-null.
#ifndef V8_SHARED_RO_HEAP
VerifyReadOnlyPointersVisitor read_only_visitor(this);
read_only_space_->Verify(isolate(), &read_only_visitor);
@@ -3997,17 +4145,17 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->VerifyCountersAfterSweeping();
}
}
void Heap::VerifyCountersBeforeConcurrentSweeping() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->VerifyCountersBeforeConcurrentSweeping();
}
}
@@ -4259,89 +4407,139 @@ void Heap::IterateBuiltins(RootVisitor* v) {
#endif // V8_EMBEDDED_BUILTINS
}
-// TODO(1236194): Since the heap size is configurable on the command line
-// and through the API, we should gracefully handle the case that the heap
-// size is not big enough to fit all the initial objects.
-void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
- size_t max_old_generation_size_in_mb,
- size_t code_range_size_in_mb) {
- // Overwrite default configuration.
- if (max_semi_space_size_in_kb != 0) {
+namespace {
+size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
+ const size_t kGlobalMemoryToV8Ratio = 2;
+ return Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
+ static_cast<uint64_t>(v8_size) * kGlobalMemoryToV8Ratio);
+}
+} // anonymous namespace
+
+void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
+ // Initialize max_semi_space_size_.
+ {
+ max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
+ if (constraints.max_young_generation_size_in_bytes() > 0) {
+ max_semi_space_size_ = SemiSpaceSizeFromYoungGenerationSize(
+ constraints.max_young_generation_size_in_bytes());
+ }
+ if (FLAG_max_semi_space_size > 0) {
+ max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
+ } else if (FLAG_max_heap_size > 0) {
+ size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
+ size_t young_generation_size, old_generation_size;
+ if (FLAG_max_old_space_size > 0) {
+ old_generation_size = static_cast<size_t>(FLAG_max_old_space_size) * MB;
+ young_generation_size = max_heap_size > old_generation_size
+ ? max_heap_size - old_generation_size
+ : 0;
+ } else {
+ GenerationSizesFromHeapSize(max_heap_size, &young_generation_size,
+ &old_generation_size);
+ }
+ max_semi_space_size_ =
+ SemiSpaceSizeFromYoungGenerationSize(young_generation_size);
+ }
+ if (FLAG_stress_compaction) {
+ // This will cause more frequent GCs when stressing.
+ max_semi_space_size_ = MB;
+ }
+ // The new space size must be a power of two to support single-bit testing
+ // for containment.
+ // TODO(ulan): Rounding to a power of 2 is not longer needed. Remove it.
max_semi_space_size_ =
- RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
- }
- if (max_old_generation_size_in_mb != 0) {
- max_old_generation_size_ = max_old_generation_size_in_mb * MB;
+ static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
+ static_cast<uint64_t>(max_semi_space_size_)));
+ max_semi_space_size_ = Max(max_semi_space_size_, kMinSemiSpaceSize);
+ max_semi_space_size_ = RoundDown<Page::kPageSize>(max_semi_space_size_);
}
- // If max space size flags are specified overwrite the configuration.
- if (FLAG_max_semi_space_size > 0) {
- max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
- }
- if (FLAG_max_old_space_size > 0) {
+ // Initialize max_old_generation_size_ and max_global_memory_.
+ {
+ max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
+ if (constraints.max_old_generation_size_in_bytes() > 0) {
+ max_old_generation_size_ = constraints.max_old_generation_size_in_bytes();
+ }
+ if (FLAG_max_old_space_size > 0) {
+ max_old_generation_size_ =
+ static_cast<size_t>(FLAG_max_old_space_size) * MB;
+ } else if (FLAG_max_heap_size > 0) {
+ size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
+ size_t young_generation_size =
+ YoungGenerationSizeFromSemiSpaceSize(max_semi_space_size_);
+ max_old_generation_size_ = max_heap_size > young_generation_size
+ ? max_heap_size - young_generation_size
+ : 0;
+ }
max_old_generation_size_ =
- static_cast<size_t>(FLAG_max_old_space_size) * MB;
- }
-
- if (Page::kPageSize > MB) {
- max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
+ Max(max_old_generation_size_, MinOldGenerationSize());
max_old_generation_size_ =
- RoundUp<Page::kPageSize>(max_old_generation_size_);
- }
+ RoundDown<Page::kPageSize>(max_old_generation_size_);
- if (FLAG_stress_compaction) {
- // This will cause more frequent GCs when stressing.
- max_semi_space_size_ = MB;
+ max_global_memory_size_ =
+ GlobalMemorySizeFromV8Size(max_old_generation_size_);
}
- // The new space size must be a power of two to support single-bit testing
- // for containment.
- max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
- static_cast<uint64_t>(max_semi_space_size_)));
+ CHECK_IMPLIES(FLAG_max_heap_size > 0,
+ FLAG_max_semi_space_size == 0 || FLAG_max_old_space_size == 0);
- if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
- // Start with at least 1*MB semi-space on machines with a lot of memory.
- initial_semispace_size_ =
- Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
- }
-
- if (FLAG_min_semi_space_size > 0) {
- size_t initial_semispace_size =
- static_cast<size_t>(FLAG_min_semi_space_size) * MB;
- if (initial_semispace_size > max_semi_space_size_) {
- initial_semispace_size_ = max_semi_space_size_;
- if (FLAG_trace_gc) {
- PrintIsolate(isolate_,
- "Min semi-space size cannot be more than the maximum "
- "semi-space size of %zu MB\n",
- max_semi_space_size_ / MB);
- }
- } else {
+ // Initialize initial_semispace_size_.
+ {
+ initial_semispace_size_ = kMinSemiSpaceSize;
+ if (max_semi_space_size_ == kMaxSemiSpaceSize) {
+ // Start with at least 1*MB semi-space on machines with a lot of memory.
+ initial_semispace_size_ =
+ Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
+ }
+ if (constraints.initial_young_generation_size_in_bytes() > 0) {
+ initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(
+ constraints.initial_young_generation_size_in_bytes());
+ }
+ if (FLAG_min_semi_space_size > 0) {
initial_semispace_size_ =
- RoundUp<Page::kPageSize>(initial_semispace_size);
+ static_cast<size_t>(FLAG_min_semi_space_size) * MB;
+ }
+ initial_semispace_size_ =
+ Min(initial_semispace_size_, max_semi_space_size_);
+ initial_semispace_size_ =
+ RoundDown<Page::kPageSize>(initial_semispace_size_);
+ }
+
+ // Initialize initial_old_space_size_.
+ {
+ initial_old_generation_size_ = kMaxInitialOldGenerationSize;
+ if (constraints.initial_old_generation_size_in_bytes() > 0) {
+ initial_old_generation_size_ =
+ constraints.initial_old_generation_size_in_bytes();
+ old_generation_size_configured_ = true;
+ }
+ if (FLAG_initial_old_space_size > 0) {
+ initial_old_generation_size_ =
+ static_cast<size_t>(FLAG_initial_old_space_size) * MB;
+ old_generation_size_configured_ = true;
}
+ initial_old_generation_size_ =
+ Min(initial_old_generation_size_, max_old_generation_size_ / 2);
+ initial_old_generation_size_ =
+ RoundDown<Page::kPageSize>(initial_old_generation_size_);
}
- initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
+ if (old_generation_size_configured_) {
+ // If the embedder pre-configures the initial old generation size,
+ // then allow V8 to skip full GCs below that threshold.
+ min_old_generation_size_ = initial_old_generation_size_;
+ min_global_memory_size_ =
+ GlobalMemorySizeFromV8Size(min_old_generation_size_);
+ }
if (FLAG_semi_space_growth_factor < 2) {
FLAG_semi_space_growth_factor = 2;
}
- // The old generation is paged and needs at least one page for each space.
- int paged_space_count =
- LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
- initial_max_old_generation_size_ = max_old_generation_size_ =
- Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
- max_old_generation_size_);
-
- if (FLAG_initial_old_space_size > 0) {
- initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
- } else {
- initial_old_generation_size_ =
- Min(max_old_generation_size_, kMaxInitialOldGenerationSize);
- }
old_generation_allocation_limit_ = initial_old_generation_size_;
+ global_allocation_limit_ =
+ GlobalMemorySizeFromV8Size(old_generation_allocation_limit_);
+ initial_max_old_generation_size_ = max_old_generation_size_;
// We rely on being able to allocate new arrays in paged spaces.
DCHECK(kMaxRegularHeapObjectSize >=
@@ -4349,12 +4547,11 @@ void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
- code_range_size_ = code_range_size_in_mb * MB;
+ code_range_size_ = constraints.code_range_size_in_bytes();
configured_ = true;
}
-
void Heap::AddToRingBuffer(const char* string) {
size_t first_part =
Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
@@ -4378,7 +4575,10 @@ void Heap::GetFromRingBuffer(char* buffer) {
memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
}
-void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
+void Heap::ConfigureHeapDefault() {
+ v8::ResourceConstraints constraints;
+ ConfigureHeap(constraints);
+}
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
@@ -4403,9 +4603,9 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
*stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
if (take_snapshot) {
- HeapIterator iterator(this);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(this);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
InstanceType type = obj.map().instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
stats->objects_per_type[type]++;
@@ -4426,10 +4626,10 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
size_t Heap::OldGenerationSizeOfObjects() {
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->SizeOfObjects();
}
return total + lo_space_->SizeOfObjects();
@@ -4437,10 +4637,9 @@ size_t Heap::OldGenerationSizeOfObjects() {
size_t Heap::GlobalSizeOfObjects() {
const size_t on_heap_size = OldGenerationSizeOfObjects();
- const size_t embedder_size =
- local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->allocated_size()
- : 0;
+ const size_t embedder_size = local_embedder_heap_tracer()
+ ? local_embedder_heap_tracer()->used_size()
+ : 0;
return on_heap_size + embedder_size;
}
@@ -4455,6 +4654,40 @@ uint64_t Heap::PromotedExternalMemorySize() {
isolate_data->external_memory_at_last_mark_compact_);
}
+bool Heap::AllocationLimitOvershotByLargeMargin() {
+ // This guards against too eager finalization in small heaps.
+ // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
+ constexpr size_t kMarginForSmallHeaps = 32u * MB;
+
+ const size_t v8_overshoot =
+ old_generation_allocation_limit_ <
+ OldGenerationObjectsAndPromotedExternalMemorySize()
+ ? OldGenerationObjectsAndPromotedExternalMemorySize() -
+ old_generation_allocation_limit_
+ : 0;
+ const size_t global_overshoot =
+ global_allocation_limit_ < GlobalSizeOfObjects()
+ ? GlobalSizeOfObjects() - global_allocation_limit_
+ : 0;
+
+ // Bail out if the V8 and global sizes are still below their respective
+ // limits.
+ if (v8_overshoot == 0 && global_overshoot == 0) {
+ return false;
+ }
+
+ // Overshoot margin is 50% of allocation limit or half-way to the max heap
+ // with special handling of small heaps.
+ const size_t v8_margin =
+ Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
+ (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
+ const size_t global_margin =
+ Min(Max(global_allocation_limit_ / 2, kMarginForSmallHeaps),
+ (max_global_memory_size_ - global_allocation_limit_) / 2);
+
+ return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
+}
+
bool Heap::ShouldOptimizeForLoadTime() {
return isolate()->rail_mode() == PERFORMANCE_LOAD &&
!AllocationLimitOvershotByLargeMargin() &&
@@ -4508,7 +4741,7 @@ size_t Heap::GlobalMemoryAvailable() {
? GlobalSizeOfObjects() < global_allocation_limit_
? global_allocation_limit_ - GlobalSizeOfObjects()
: 0
- : 1;
+ : new_space_->Capacity() + 1;
}
// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
@@ -4526,8 +4759,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (FLAG_stress_incremental_marking) {
return IncrementalMarkingLimit::kHardLimit;
}
- if (OldGenerationSizeOfObjects() <=
- IncrementalMarking::kActivationThreshold) {
+ if (incremental_marking()->IsBelowActivationThresholds()) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
}
@@ -4574,7 +4806,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
const size_t global_memory_available = GlobalMemoryAvailable();
if (old_generation_space_available > new_space_->Capacity() &&
- (global_memory_available > 0)) {
+ (global_memory_available > new_space_->Capacity())) {
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -4609,10 +4841,10 @@ void Heap::DisableInlineAllocation() {
new_space()->UpdateInlineAllocationLimit(0);
// Update inline allocation limit for old spaces.
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
CodeSpaceMemoryModificationScope modification_scope(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->FreeLinearAllocationArea();
}
}
@@ -4769,7 +5001,6 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_space_ != nullptr,
read_only_space_ == ro_heap->read_only_space());
- read_only_heap_ = ro_heap;
space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
}
@@ -4822,7 +5053,7 @@ void Heap::SetUpSpaces() {
if (FLAG_idle_time_scavenge) {
scavenge_job_.reset(new ScavengeJob());
idle_scavenge_observer_.reset(new IdleScavengeObserver(
- *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
+ this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
}
@@ -4831,12 +5062,12 @@ void Heap::SetUpSpaces() {
if (FLAG_stress_marking > 0) {
stress_marking_percentage_ = NextStressMarkingLimit();
- stress_marking_observer_ = new StressMarkingObserver(*this);
+ stress_marking_observer_ = new StressMarkingObserver(this);
AddAllocationObserversToAllSpaces(stress_marking_observer_,
stress_marking_observer_);
}
if (FLAG_stress_scavenge > 0) {
- stress_scavenge_observer_ = new StressScavengeObserver(*this);
+ stress_scavenge_observer_ = new StressScavengeObserver(this);
new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
@@ -4908,8 +5139,8 @@ int Heap::NextStressMarkingLimit() {
}
void Heap::NotifyDeserializationComplete() {
- PagedSpaces spaces(this);
- for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* s = spaces.Next(); s != nullptr; s = spaces.Next()) {
if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
@@ -5055,7 +5286,7 @@ void Heap::TearDown() {
tracer_.reset();
- read_only_heap_->OnHeapTearDown();
+ isolate()->read_only_heap()->OnHeapTearDown();
space_[RO_SPACE] = read_only_space_ = nullptr;
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
@@ -5158,8 +5389,8 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
// Find known PrototypeUsers and compact them.
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
- HeapIterator iterator(this);
- for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
+ HeapObjectIterator iterator(this);
+ for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
if (o.IsPrototypeInfo()) {
PrototypeInfo prototype_info = PrototypeInfo::cast(o);
if (prototype_info.prototype_users().IsWeakArrayList()) {
@@ -5309,7 +5540,7 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
DCHECK(!IsLargeObject(object));
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->DeleteEntry(slot.address());
}
}
@@ -5319,7 +5550,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
DCHECK(!IsLargeObject(object));
if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
@@ -5332,17 +5563,16 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->DeleteEntry(start, end);
}
}
-PagedSpace* PagedSpaces::next() {
+PagedSpace* PagedSpaceIterator::Next() {
switch (counter_++) {
case RO_SPACE:
- // skip NEW_SPACE
- counter_++;
- return heap_->read_only_space();
+ case NEW_SPACE:
+ UNREACHABLE();
case OLD_SPACE:
return heap_->old_space();
case CODE_SPACE:
@@ -5359,17 +5589,16 @@ SpaceIterator::SpaceIterator(Heap* heap)
SpaceIterator::~SpaceIterator() = default;
-bool SpaceIterator::has_next() {
+bool SpaceIterator::HasNext() {
// Iterate until no more spaces.
return current_space_ != LAST_SPACE;
}
-Space* SpaceIterator::next() {
- DCHECK(has_next());
+Space* SpaceIterator::Next() {
+ DCHECK(HasNext());
return heap_->space(++current_space_);
}
-
class HeapObjectsFilter {
public:
virtual ~HeapObjectsFilter() = default;
@@ -5486,8 +5715,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
reachable_;
};
-HeapIterator::HeapIterator(Heap* heap,
- HeapIterator::HeapObjectsFiltering filtering)
+HeapObjectIterator::HeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
: heap_(heap),
filtering_(filtering),
filter_(nullptr),
@@ -5503,11 +5732,10 @@ HeapIterator::HeapIterator(Heap* heap,
default:
break;
}
- object_iterator_ = space_iterator_->next()->GetObjectIterator();
+ object_iterator_ = space_iterator_->Next()->GetObjectIterator();
}
-
-HeapIterator::~HeapIterator() {
+HeapObjectIterator::~HeapObjectIterator() {
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
@@ -5519,7 +5747,7 @@ HeapIterator::~HeapIterator() {
delete filter_;
}
-HeapObject HeapIterator::next() {
+HeapObject HeapObjectIterator::Next() {
if (filter_ == nullptr) return NextObject();
HeapObject obj = NextObject();
@@ -5527,7 +5755,7 @@ HeapObject HeapIterator::next() {
return obj;
}
-HeapObject HeapIterator::NextObject() {
+HeapObject HeapObjectIterator::NextObject() {
// No iterator means we are done.
if (object_iterator_.get() == nullptr) return HeapObject();
@@ -5537,8 +5765,8 @@ HeapObject HeapIterator::NextObject() {
return obj;
} else {
// Go though the spaces looking for one that has objects.
- while (space_iterator_->has_next()) {
- object_iterator_ = space_iterator_->next()->GetObjectIterator();
+ while (space_iterator_->HasNext()) {
+ object_iterator_ = space_iterator_->Next()->GetObjectIterator();
obj = object_iterator_.get()->Next();
if (!obj.is_null()) {
return obj;
@@ -5686,7 +5914,7 @@ void Heap::AddDirtyJSFinalizationGroup(
// for the root pointing to the first JSFinalizationGroup.
}
-void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
+void Heap::KeepDuringJob(Handle<JSReceiver> target) {
DCHECK(FLAG_harmony_weak_refs);
DCHECK(weak_refs_keep_during_job().IsUndefined() ||
weak_refs_keep_during_job().IsOrderedHashSet());
@@ -5701,7 +5929,7 @@ void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
set_weak_refs_keep_during_job(*table);
}
-void Heap::ClearKeepDuringJobSet() {
+void Heap::ClearKeptObjects() {
set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
}
@@ -5844,7 +6072,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
InstanceType type = map.instance_type();
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
- AllocationSpace src = chunk->owner()->identity();
+ AllocationSpace src = chunk->owner_identity();
switch (src) {
case NEW_SPACE:
return dst == NEW_SPACE || dst == OLD_SPACE;
@@ -5864,7 +6092,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
size_t Heap::EmbedderAllocationCounter() const {
return local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->accumulated_allocated_size()
+ ? local_embedder_heap_tracer()->allocated_size()
: 0;
}
@@ -6133,16 +6361,16 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
slim_chunk->IsMarking());
- Space* chunk_owner = chunk->owner();
- AllocationSpace identity = chunk_owner->identity();
+ AllocationSpace identity = chunk->owner_identity();
// Generation consistency.
CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
slim_chunk->InYoungGeneration());
+ // Read-only consistency.
+ CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
// Marking consistency.
- if (identity != RO_SPACE ||
- static_cast<ReadOnlySpace*>(chunk->owner())->writable()) {
+ if (chunk->IsWritable()) {
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
@@ -6155,25 +6383,6 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
return true;
}
-static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
- heap_internals::MemoryChunk::kMarkingBit,
- "Incremental marking flag inconsistent");
-static_assert(MemoryChunk::Flag::FROM_PAGE ==
- heap_internals::MemoryChunk::kFromPageBit,
- "From page flag inconsistent");
-static_assert(MemoryChunk::Flag::TO_PAGE ==
- heap_internals::MemoryChunk::kToPageBit,
- "To page flag inconsistent");
-static_assert(MemoryChunk::kFlagsOffset ==
- heap_internals::MemoryChunk::kFlagsOffset,
- "Flag offset inconsistent");
-static_assert(MemoryChunk::kHeapOffset ==
- heap_internals::MemoryChunk::kHeapOffset,
- "Heap offset inconsistent");
-static_assert(MemoryChunk::kOwnerOffset ==
- heap_internals::MemoryChunk::kOwnerOffset,
- "Owner offset inconsistent");
-
void Heap::SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state) {
local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index a242bd80d1..81f2b0dd8c 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -44,29 +44,20 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
-class ObjectBoilerplateDescription;
-class BytecodeArray;
-class CodeDataContainer;
-class DeoptimizationData;
-class HandlerTable;
class IncrementalMarking;
class JSArrayBuffer;
-class ExternalString;
using v8::MemoryPressureLevel;
class AllocationObserver;
class ArrayBufferCollector;
-class ArrayBufferTracker;
class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
-class HeapController;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
-class HistogramTimer;
class Isolate;
class JSFinalizationGroup;
class LocalEmbedderHeapTracer;
@@ -86,7 +77,6 @@ class Space;
class StoreBuffer;
class StressScavengeObserver;
class TimedHistogram;
-class TracePossibleWrapperReporter;
class WeakObjectRetainer;
enum ArrayStorageAllocationMode {
@@ -243,19 +233,24 @@ class Heap {
// should instead adapt it's heap size based on available physical memory.
static const int kPointerMultiplier = 1;
#else
- // TODO(ishell): kSystePointerMultiplier?
- static const int kPointerMultiplier = i::kSystemPointerSize / 4;
+ static const int kPointerMultiplier = i::kTaggedSize / 4;
#endif
static const size_t kMaxInitialOldGenerationSize =
256 * MB * kPointerMultiplier;
- // Semi-space size needs to be a multiple of page size.
- static const size_t kMinSemiSpaceSizeInKB = 512 * kPointerMultiplier;
- static const size_t kMaxSemiSpaceSizeInKB = 8192 * kPointerMultiplier;
+ // These constants control heap configuration based on the physical memory.
+ static constexpr size_t kPhysicalMemoryToOldGenerationRatio = 4;
+ static constexpr size_t kOldGenerationToSemiSpaceRatio = 128;
+ static constexpr size_t kOldGenerationToSemiSpaceRatioLowMemory = 256;
+ static constexpr size_t kOldGenerationLowMemory =
+ 128 * MB * kPointerMultiplier;
+ static constexpr size_t kNewLargeObjectSpaceToSemiSpaceRatio = 1;
+ static constexpr size_t kMinSemiSpaceSize = 512 * KB * kPointerMultiplier;
+ static constexpr size_t kMaxSemiSpaceSize = 8192 * KB * kPointerMultiplier;
- STATIC_ASSERT(kMinSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
- STATIC_ASSERT(kMaxSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
+ STATIC_ASSERT(kMinSemiSpaceSize % (1 << kPageSizeBits) == 0);
+ STATIC_ASSERT(kMaxSemiSpaceSize % (1 << kPageSizeBits) == 0);
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -365,8 +360,8 @@ class Heap {
V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
- V8_EXPORT_PRIVATE void RecordEphemeronKeyWrite(EphemeronHashTable table,
- Address key_slot);
+ V8_EXPORT_PRIVATE inline void RecordEphemeronKeyWrite(
+ EphemeronHashTable table, Address key_slot);
V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
Address raw_object, Address address, Isolate* isolate);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
@@ -477,6 +472,12 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
+ // Print statistics of freelists of old_space:
+ // with FLAG_trace_gc_freelists: summary of each FreeListCategory.
+ // with FLAG_trace_gc_freelists_verbose: also prints the statistics of each
+ // FreeListCategory of each page.
+ void PrintFreeListsStats();
+
// Dump heap statistics in JSON format.
void DumpJSONHeapStatistics(std::stringstream& stream);
@@ -571,7 +572,7 @@ class Heap {
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
- int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
+ int64_t external_memory_hard_limit() { return max_old_generation_size_ / 2; }
V8_INLINE int64_t external_memory();
V8_INLINE void update_external_memory(int64_t delta);
@@ -619,13 +620,7 @@ class Heap {
// Initialization. ===========================================================
// ===========================================================================
- // Configure heap sizes
- // max_semi_space_size_in_kb: maximum semi-space size in KB
- // max_old_generation_size_in_mb: maximum old generation size in MB
- // code_range_size_in_mb: code range size in MB
- void ConfigureHeap(size_t max_semi_space_size_in_kb,
- size_t max_old_generation_size_in_mb,
- size_t code_range_size_in_mb);
+ void ConfigureHeap(const v8::ResourceConstraints& constraints);
void ConfigureHeapDefault();
// Prepares the heap, setting up for deserialization.
@@ -681,8 +676,6 @@ class Heap {
// Getters to other components. ==============================================
// ===========================================================================
- ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
-
GCTracer* tracer() { return tracer_.get(); }
MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
@@ -748,8 +741,8 @@ class Heap {
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot);
- V8_EXPORT_PRIVATE void AddKeepDuringJobTarget(Handle<JSReceiver> target);
- void ClearKeepDuringJobSet();
+ V8_EXPORT_PRIVATE void KeepDuringJob(Handle<JSReceiver> target);
+ void ClearKeptObjects();
// ===========================================================================
// Inline allocation. ========================================================
@@ -986,8 +979,9 @@ class Heap {
// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);
- // Checks whether an address/object in the heap (including auxiliary
- // area and unused area).
+ // Checks whether an address/object is in the non-read-only heap (including
+ // auxiliary area and unused area). Use IsValidHeapObject if checking both
+ // heaps is required.
V8_EXPORT_PRIVATE bool Contains(HeapObject value);
// Checks whether an address/object in a space.
@@ -998,7 +992,7 @@ class Heap {
// with off-heap Addresses.
bool InSpaceSlow(Address addr, AllocationSpace space);
- static inline Heap* FromWritableHeapObject(const HeapObject obj);
+ static inline Heap* FromWritableHeapObject(HeapObject obj);
// ===========================================================================
// Object statistics tracking. ===============================================
@@ -1042,23 +1036,21 @@ class Heap {
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
- V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize(
+ V8_EXPORT_PRIVATE static size_t HeapSizeFromPhysicalMemory(
+ uint64_t physical_memory);
+ V8_EXPORT_PRIVATE static void GenerationSizesFromHeapSize(
+ size_t heap_size, size_t* young_generation_size,
+ size_t* old_generation_size);
+ V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromOldGenerationSize(
+ size_t old_generation_size);
+ V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromSemiSpaceSize(
+ size_t semi_space_size);
+ V8_EXPORT_PRIVATE static size_t SemiSpaceSizeFromYoungGenerationSize(
+ size_t young_generation_size);
+ V8_EXPORT_PRIVATE static size_t MinYoungGenerationSize();
+ V8_EXPORT_PRIVATE static size_t MinOldGenerationSize();
+ V8_EXPORT_PRIVATE static size_t MaxOldGenerationSize(
uint64_t physical_memory);
-
- static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
- const uint64_t min_physical_memory = 512 * MB;
- const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB);
-
- uint64_t capped_physical_memory =
- Max(Min(physical_memory, max_physical_memory), min_physical_memory);
- // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
- size_t semi_space_size_in_kb =
- static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
- (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
- (max_physical_memory - min_physical_memory) +
- kMinSemiSpaceSizeInKB);
- return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
- }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -1185,6 +1177,11 @@ class Heap {
V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
+ // We allow incremental marking to overshoot the V8 and global allocation
+ // limit for performace reasons. If the overshoot is too large then we are
+ // more eager to finalize incremental marking.
+ bool AllocationLimitOvershotByLargeMargin();
+
// ===========================================================================
// Prologue/epilogue callback methods.========================================
// ===========================================================================
@@ -1655,26 +1652,6 @@ class Heap {
OldGenerationObjectsAndPromotedExternalMemorySize());
}
- // We allow incremental marking to overshoot the allocation limit for
- // performace reasons. If the overshoot is too large then we are more
- // eager to finalize incremental marking.
- inline bool AllocationLimitOvershotByLargeMargin() {
- // This guards against too eager finalization in small heaps.
- // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
- size_t kMarginForSmallHeaps = 32u * MB;
- if (old_generation_allocation_limit_ >=
- OldGenerationObjectsAndPromotedExternalMemorySize())
- return false;
- uint64_t overshoot = OldGenerationObjectsAndPromotedExternalMemorySize() -
- old_generation_allocation_limit_;
- // Overshoot margin is 50% of allocation limit or half-way to the max heap
- // with special handling of small heaps.
- uint64_t margin =
- Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
- (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
- return overshoot >= margin;
- }
-
void UpdateTotalGCTime(double duration);
bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
@@ -1708,6 +1685,8 @@ class Heap {
return old_generation_allocation_limit_;
}
+ size_t global_allocation_limit() const { return global_allocation_limit_; }
+
bool always_allocate() { return always_allocate_scope_count_ != 0; }
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
@@ -1816,18 +1795,25 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_ = nullptr;
+ // These limits are initialized in Heap::ConfigureHeap based on the resource
+ // constraints and flags.
size_t code_range_size_ = 0;
- size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
- size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
- size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
- // TODO(mlippautz): Clarify whether this should be take some embedder
+ size_t max_semi_space_size_ = 0;
+ size_t initial_semispace_size_ = 0;
+ // Full garbage collections can be skipped if the old generation size
+ // is below this threshold.
+ size_t min_old_generation_size_ = 0;
+ // If the old generation size exceeds this limit, then V8 will
+ // crash with out-of-memory error.
+ size_t max_old_generation_size_ = 0;
+ // TODO(mlippautz): Clarify whether this should take some embedder
// configurable limit into account.
- size_t max_global_memory_size_ =
- Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
- static_cast<uint64_t>(max_old_generation_size_) * 2);
- size_t initial_max_old_generation_size_;
- size_t initial_max_old_generation_size_threshold_;
- size_t initial_old_generation_size_;
+ size_t min_global_memory_size_ = 0;
+ size_t max_global_memory_size_ = 0;
+
+ size_t initial_max_old_generation_size_ = 0;
+ size_t initial_max_old_generation_size_threshold_ = 0;
+ size_t initial_old_generation_size_ = 0;
bool old_generation_size_configured_ = false;
size_t maximum_committed_ = 0;
size_t old_generation_capacity_after_bootstrap_ = 0;
@@ -1861,8 +1847,6 @@ class Heap {
// and after context disposal.
int number_of_disposed_maps_ = 0;
- ReadOnlyHeap* read_only_heap_ = nullptr;
-
NewSpace* new_space_ = nullptr;
OldSpace* old_space_ = nullptr;
CodeSpace* code_space_ = nullptr;
@@ -1932,8 +1916,8 @@ class Heap {
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
- size_t old_generation_allocation_limit_;
- size_t global_allocation_limit_;
+ size_t old_generation_allocation_limit_ = 0;
+ size_t global_allocation_limit_ = 0;
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
@@ -2034,9 +2018,10 @@ class Heap {
// Currently set GC callback flags that are used to pass information between
// the embedder and V8's GC.
- GCCallbackFlags current_gc_callback_flags_;
+ GCCallbackFlags current_gc_callback_flags_ =
+ GCCallbackFlags::kNoGCCallbackFlags;
- bool is_current_gc_forced_;
+ bool is_current_gc_forced_ = false;
ExternalStringTable external_string_table_;
@@ -2082,7 +2067,7 @@ class Heap {
friend class ConcurrentMarking;
friend class GCCallbacksScope;
friend class GCTracer;
- friend class HeapIterator;
+ friend class HeapObjectIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
@@ -2115,9 +2100,6 @@ class Heap {
// Used in cctest.
friend class heap::HeapTester;
- FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
- FRIEND_TEST(HeapTest, ExternalLimitDefault);
- FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2245,56 +2227,56 @@ class VerifySmisVisitor : public RootVisitor {
};
// Space iterator for iterating over all the paged spaces of the heap: Map
-// space, old space, code space and optionally read only space. Returns each
-// space in turn, and null when it is done.
-class V8_EXPORT_PRIVATE PagedSpaces {
+// space, old space and code space. Returns each space in turn, and null when it
+// is done.
+class V8_EXPORT_PRIVATE PagedSpaceIterator {
public:
- explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
- PagedSpace* next();
+ explicit PagedSpaceIterator(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
+ PagedSpace* Next();
private:
Heap* heap_;
int counter_;
};
-
-class SpaceIterator : public Malloced {
+class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
public:
explicit SpaceIterator(Heap* heap);
virtual ~SpaceIterator();
- bool has_next();
- Space* next();
+ bool HasNext();
+ Space* Next();
private:
Heap* heap_;
int current_space_; // from enum AllocationSpace.
};
-// A HeapIterator provides iteration over the entire non-read-only heap. It
-// aggregates the specific iterators for the different spaces as these can only
-// iterate over one space only.
+// A HeapObjectIterator provides iteration over the entire non-read-only heap.
+// It aggregates the specific iterators for the different spaces as these can
+// only iterate over one space only.
//
-// HeapIterator ensures there is no allocation during its lifetime (using an
-// embedded DisallowHeapAllocation instance).
+// HeapObjectIterator ensures there is no allocation during its lifetime (using
+// an embedded DisallowHeapAllocation instance).
//
-// HeapIterator can skip free list nodes (that is, de-allocated heap objects
-// that still remain in the heap). As implementation of free nodes filtering
-// uses GC marks, it can't be used during MS/MC GC phases. Also, it is forbidden
-// to interrupt iteration in this mode, as this will leave heap objects marked
-// (and thus, unusable).
+// HeapObjectIterator can skip free list nodes (that is, de-allocated heap
+// objects that still remain in the heap). As implementation of free nodes
+// filtering uses GC marks, it can't be used during MS/MC GC phases. Also, it is
+// forbidden to interrupt iteration in this mode, as this will leave heap
+// objects marked (and thus, unusable).
//
-// See ReadOnlyHeapIterator if you need to iterate over read-only space objects,
-// or CombinedHeapIterator if you need to iterate over both heaps.
-class V8_EXPORT_PRIVATE HeapIterator {
+// See ReadOnlyHeapObjectIterator if you need to iterate over read-only space
+// objects, or CombinedHeapObjectIterator if you need to iterate over both
+// heaps.
+class V8_EXPORT_PRIVATE HeapObjectIterator {
public:
enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
- explicit HeapIterator(Heap* heap,
- HeapObjectsFiltering filtering = kNoFiltering);
- ~HeapIterator();
+ explicit HeapObjectIterator(Heap* heap,
+ HeapObjectsFiltering filtering = kNoFiltering);
+ ~HeapObjectIterator();
- HeapObject next();
+ HeapObject Next();
private:
HeapObject NextObject();
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 4a901dc17a..2980bdc8d4 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -37,14 +37,14 @@ using IncrementalMarkingMarkingVisitor =
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
- Heap* heap = incremental_marking_.heap();
+ Heap* heap = incremental_marking_->heap();
VMState<GC> state(heap->isolate());
RuntimeCallTimerScope runtime_timer(
heap->isolate(),
RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
- incremental_marking_.AdvanceOnAllocation();
+ incremental_marking_->AdvanceOnAllocation();
// AdvanceIncrementalMarkingOnAllocation can start incremental marking.
- incremental_marking_.EnsureBlackAllocated(addr, size);
+ incremental_marking_->EnsureBlackAllocated(addr, size);
}
IncrementalMarking::IncrementalMarking(
@@ -64,8 +64,8 @@ IncrementalMarking::IncrementalMarking(
black_allocation_(false),
finalize_marking_completed_(false),
request_type_(NONE),
- new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
- old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
+ new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
+ old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
DCHECK_NOT_NULL(marking_worklist_);
SetState(STOPPED);
}
@@ -246,6 +246,10 @@ bool IncrementalMarking::CanBeActivated() {
!heap_->isolate()->serializer_enabled();
}
+bool IncrementalMarking::IsBelowActivationThresholds() const {
+ return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
+ heap_->GlobalSizeOfObjects() <= kGlobalActivationThreshold;
+}
void IncrementalMarking::Deactivate() {
DeactivateIncrementalWriteBarrier();
@@ -253,16 +257,23 @@ void IncrementalMarking::Deactivate() {
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
- int old_generation_size_mb =
- static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
- int old_generation_limit_mb =
- static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+ const size_t old_generation_size_mb =
+ heap()->OldGenerationSizeOfObjects() / MB;
+ const size_t old_generation_limit_mb =
+ heap()->old_generation_allocation_limit() / MB;
+ const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
+ const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
heap()->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
- "slack %dMB\n",
+ "[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
+ "/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
Heap::GarbageCollectionReasonToString(gc_reason),
old_generation_size_mb, old_generation_limit_mb,
- Max(0, old_generation_limit_mb - old_generation_size_mb));
+ old_generation_size_mb > old_generation_limit_mb
+ ? 0
+ : old_generation_limit_mb - old_generation_size_mb,
+ global_size_mb, global_limit_mb,
+ global_size_mb > global_limit_mb ? 0
+ : global_limit_mb - global_size_mb);
}
DCHECK(FLAG_incremental_marking);
DCHECK(state_ == STOPPED);
@@ -827,8 +838,8 @@ void IncrementalMarking::Stop() {
}
SpaceIterator it(heap_);
- while (it.has_next()) {
- Space* space = it.next();
+ while (it.HasNext()) {
+ Space* space = it.Next();
if (space == heap_->new_space()) {
space->RemoveAllocationObserver(&new_generation_observer_);
} else {
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 7284034191..74bb7cfd5a 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -79,9 +79,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static constexpr double kMaxStepSizeInMs = 5;
#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
+ static constexpr size_t kV8ActivationThreshold = 8 * MB;
+ static constexpr size_t kGlobalActivationThreshold = 16 * MB;
#else
- static const intptr_t kActivationThreshold = 0;
+ static constexpr size_t kV8ActivationThreshold = 0;
+ static constexpr size_t kGlobalActivationThreshold = 0;
#endif
#ifdef V8_CONCURRENT_MARKING
@@ -248,17 +250,19 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// generation.
void EnsureBlackAllocated(Address allocated, size_t size);
+ bool IsBelowActivationThresholds() const;
+
private:
class Observer : public AllocationObserver {
public:
- Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+ Observer(IncrementalMarking* incremental_marking, intptr_t step_size)
: AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override;
private:
- IncrementalMarking& incremental_marking_;
+ IncrementalMarking* incremental_marking_;
};
void StartMarking();
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
index 1945e3275a..001f40193a 100644
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -26,8 +26,12 @@ void ItemParallelJob::Task::SetupInternal(base::Semaphore* on_finish,
}
}
+void ItemParallelJob::Task::WillRunOnForeground() {
+ runner_ = Runner::kForeground;
+}
+
void ItemParallelJob::Task::RunInternal() {
- RunInParallel();
+ RunInParallel(runner_);
on_finish_->Signal();
}
@@ -95,6 +99,7 @@ void ItemParallelJob::Run() {
// Contribute on main thread.
DCHECK(main_task);
+ main_task->WillRunOnForeground();
main_task->Run();
// Wait for background tasks.
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 54f09b87b5..0b739f8987 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -65,10 +65,11 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
+ enum class Runner { kForeground, kBackground };
explicit Task(Isolate* isolate);
~Task() override = default;
- virtual void RunInParallel() = 0;
+ virtual void RunInParallel(Runner runner) = 0;
protected:
// Retrieves a new item that needs to be processed. Returns |nullptr| if
@@ -99,13 +100,14 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
// processing, e.g. scavenging).
void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
size_t start_index);
-
+ void WillRunOnForeground();
// We don't allow overriding this method any further.
void RunInternal() final;
std::vector<Item*>* items_ = nullptr;
size_t cur_index_ = 0;
size_t items_considered_ = 0;
+ Runner runner_ = Runner::kBackground;
base::Semaphore* on_finish_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(Task);
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 03be1100b1..3cd6620083 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -156,7 +156,7 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
- LargeObjectIterator it(lo_space);
+ LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
obj.Iterate(this);
@@ -456,6 +456,14 @@ void MarkCompactCollector::TearDown() {
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
+
+ if (FLAG_trace_evacuation_candidates) {
+ PrintIsolate(
+ isolate(),
+ "Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
+ p->area_size() - p->allocated_bytes(), p->FreeListsLength());
+ }
+
p->MarkEvacuationCandidate();
evacuation_candidates_.push_back(p);
}
@@ -473,6 +481,9 @@ bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
DCHECK(evacuation_candidates_.empty());
+ if (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())
+ return false;
+
CollectEvacuationCandidates(heap()->old_space());
if (FLAG_compact_code_space) {
@@ -513,7 +524,7 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
- ReadOnlyHeapIterator iterator(space);
+ ReadOnlyHeapObjectIterator iterator(space);
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
@@ -536,7 +547,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
+ LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
@@ -567,6 +578,8 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
+ heap()->tracer()->NotifySweepingCompleted();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
FullEvacuationVerifier verifier(heap());
@@ -629,6 +642,27 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
+ const bool in_standard_path =
+ !(FLAG_manual_evacuation_candidates_selection ||
+ FLAG_stress_compaction_random || FLAG_stress_compaction ||
+ FLAG_always_compact);
+ // Those variables will only be initialized if |in_standard_path|, and are not
+ // used otherwise.
+ size_t max_evacuated_bytes;
+ int target_fragmentation_percent;
+ size_t free_bytes_threshold;
+ if (in_standard_path) {
+ // We use two conditions to decide whether a page qualifies as an evacuation
+ // candidate, or not:
+ // * Target fragmentation: How fragmented is a page, i.e., how is the ratio
+ // between live bytes and capacity of this page (= area).
+ // * Evacuation quota: A global quota determining how much bytes should be
+ // compacted.
+ ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
+ &max_evacuated_bytes);
+ free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
+ }
+
// Pairs of (live_bytes_in_page, page).
using LiveBytesPagePair = std::pair<size_t, Page*>;
std::vector<LiveBytesPagePair> pages;
@@ -652,7 +686,15 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
- pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ if (in_standard_path) {
+ // Only the pages with at more than |free_bytes_threshold| free bytes are
+ // considered for evacuation.
+ if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
+ } else {
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
}
int candidate_count = 0;
@@ -691,25 +733,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
} else {
// The following approach determines the pages that should be evacuated.
//
- // We use two conditions to decide whether a page qualifies as an evacuation
- // candidate, or not:
- // * Target fragmentation: How fragmented is a page, i.e., how is the ratio
- // between live bytes and capacity of this page (= area).
- // * Evacuation quota: A global quota determining how much bytes should be
- // compacted.
- //
- // The algorithm sorts all pages by live bytes and then iterates through
- // them starting with the page with the most free memory, adding them to the
- // set of evacuation candidates as long as both conditions (fragmentation
- // and quota) hold.
- size_t max_evacuated_bytes;
- int target_fragmentation_percent;
- ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
- &max_evacuated_bytes);
-
- const size_t free_bytes_threshold =
- target_fragmentation_percent * (area_size / 100);
-
// Sort pages from the most free to the least free, then select
// the first n pages for evacuation such that:
// - the total size of evacuated objects does not exceed the specified
@@ -722,10 +745,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
for (size_t i = 0; i < pages.size(); i++) {
size_t live_bytes = pages[i].first;
DCHECK_GE(area_size, live_bytes);
- size_t free_bytes = area_size - live_bytes;
if (FLAG_always_compact ||
- ((free_bytes >= free_bytes_threshold) &&
- ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
+ ((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
candidate_count++;
total_live_bytes += live_bytes;
}
@@ -735,9 +756,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
"fragmentation_limit_kb=%zu "
"fragmentation_limit_percent=%d sum_compaction_kb=%zu "
"compaction_limit_kb=%zu\n",
- space->name(), free_bytes / KB, free_bytes_threshold / KB,
- target_fragmentation_percent, total_live_bytes / KB,
- max_evacuated_bytes / KB);
+ space->name(), (area_size - live_bytes) / KB,
+ free_bytes_threshold / KB, target_fragmentation_percent,
+ total_live_bytes / KB, max_evacuated_bytes / KB);
}
}
// How many pages we will allocated for the evacuated objects
@@ -807,9 +828,9 @@ void MarkCompactCollector::Prepare() {
StartCompaction();
}
- PagedSpaces spaces(heap());
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(heap());
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->PrepareForMarkCompact();
}
heap()->account_external_memory_concurrently_freed();
@@ -1364,8 +1385,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- object.map_slot().Relaxed_Store(
- MapWord::FromForwardingAddress(actual).ToMap());
+ object.set_map_word(MapWord::FromForwardingAddress(actual));
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1463,7 +1483,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
inline bool Visit(HeapObject object, int size) override {
HeapObject target_object;
- if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
+ if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
DCHECK(object.map_word().IsForwardingAddress());
return true;
@@ -2084,7 +2104,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
UncompiledData::Initialize(
uncompiled_data, inferred_name, start_position, end_position,
- kFunctionLiteralIdInvalid,
[](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
@@ -2731,6 +2750,7 @@ class Evacuator : public Malloced {
inline void Finalize();
virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
+ virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
protected:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
@@ -2819,6 +2839,10 @@ class FullEvacuator : public Evacuator {
return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
}
+ GCTracer::Scope::ScopeId GetTracingScope() override {
+ return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
+ }
+
inline void Finalize() {
Evacuator::Finalize();
@@ -2909,16 +2933,24 @@ class PageEvacuationTask : public ItemParallelJob::Task {
evacuator_(evacuator),
tracer_(isolate->heap()->tracer()) {}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(tracer_, evacuator_->GetTracingScope());
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
+ ProcessItems();
+ }
+ }
+
+ private:
+ void ProcessItems() {
EvacuationItem* item = nullptr;
while ((item = GetItem<EvacuationItem>()) != nullptr) {
evacuator_->EvacuatePage(item->chunk());
item->MarkFinished();
}
}
-
- private:
Evacuator* evacuator_;
GCTracer* tracer_;
};
@@ -3183,7 +3215,7 @@ void MarkCompactCollector::Evacuate() {
sweeper()->AddPageForIterability(p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- DCHECK_EQ(OLD_SPACE, p->owner()->identity());
+ DCHECK_EQ(OLD_SPACE, p->owner_identity());
sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
}
}
@@ -3191,7 +3223,7 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
+ sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
@@ -3218,24 +3250,35 @@ class UpdatingItem : public ItemParallelJob::Item {
class PointersUpdatingTask : public ItemParallelJob::Task {
public:
- explicit PointersUpdatingTask(Isolate* isolate,
- GCTracer::BackgroundScope::ScopeId scope)
+ explicit PointersUpdatingTask(
+ Isolate* isolate, GCTracer::Scope::ScopeId scope,
+ GCTracer::BackgroundScope::ScopeId background_scope)
: ItemParallelJob::Task(isolate),
tracer_(isolate->heap()->tracer()),
- scope_(scope) {}
+ scope_(scope),
+ background_scope_(background_scope) {}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(tracer_, scope_);
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(tracer_, scope_);
+ UpdatePointers();
+ } else {
+ TRACE_BACKGROUND_GC(tracer_, background_scope_);
+ UpdatePointers();
+ }
+ }
+
+ private:
+ void UpdatePointers() {
UpdatingItem* item = nullptr;
while ((item = GetItem<UpdatingItem>()) != nullptr) {
item->Process();
item->MarkFinished();
}
}
-
- private:
GCTracer* tracer_;
- GCTracer::BackgroundScope::ScopeId scope_;
+ GCTracer::Scope::ScopeId scope_;
+ GCTracer::BackgroundScope::ScopeId background_scope_;
};
template <typename MarkingState>
@@ -3651,7 +3694,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_tasks + num_ephemeron_table_updating_tasks);
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(),
+ isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.AddItem(new EphemeronTableUpdatingItem(heap()));
@@ -3684,7 +3727,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
if (num_tasks > 0) {
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(),
+ isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
@@ -4194,8 +4237,9 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::BackgroundScope::
- MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
+ isolate(), GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
{
@@ -4498,9 +4542,30 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
Page::kPageSize);
}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(collector_->heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(
+ collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ ProcessItems();
+ }
+ }
+
+ void MarkObject(Object object) {
+ if (!Heap::InYoungGeneration(object)) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ if (marking_state_->WhiteToGrey(heap_object)) {
+ const int size = visitor_.Visit(heap_object);
+ IncrementLiveBytes(heap_object, size);
+ }
+ }
+
+ private:
+ void ProcessItems() {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
@@ -4519,17 +4584,6 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
static_cast<void*>(this), marking_time);
}
}
-
- void MarkObject(Object object) {
- if (!Heap::InYoungGeneration(object)) return;
- HeapObject heap_object = HeapObject::cast(object);
- if (marking_state_->WhiteToGrey(heap_object)) {
- const int size = visitor_.Visit(heap_object);
- IncrementLiveBytes(heap_object, size);
- }
- }
-
- private:
void EmptyLocalMarkingWorklist() {
HeapObject object;
while (marking_worklist_.Pop(&object)) {
@@ -4761,6 +4815,10 @@ class YoungGenerationEvacuator : public Evacuator {
return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
}
+ GCTracer::Scope::ScopeId GetTracingScope() override {
+ return GCTracer::Scope::MINOR_MC_EVACUATE_COPY_PARALLEL;
+ }
+
protected:
void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 033f4fc6e9..2a63896242 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -1079,7 +1079,7 @@ class ObjectStatsVisitor {
namespace {
void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
- CombinedHeapIterator iterator(heap);
+ CombinedHeapObjectIterator iterator(heap);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
visitor->Visit(obj, obj.Size());
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index d96cded09a..ba0bfa2415 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -12,6 +12,7 @@
#include "src/heap/mark-compact.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/module-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
@@ -71,9 +72,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
template <typename ResultType, typename ConcreteVisitor>
void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
- HeapObject host, MapWordSlot map_slot) {
+ HeapObject host) {
DCHECK(!host.map_word().IsForwardingAddress());
- static_cast<ConcreteVisitor*>(this)->VisitPointer(host, ObjectSlot(map_slot));
+ static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
}
#define VISIT(TypeName, Type) \
@@ -88,8 +89,9 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
"concurrent marker"); \
} \
int size = TypeName::BodyDescriptor::SizeOf(map, object); \
- if (visitor->ShouldVisitMapPointer()) \
- visitor->VisitMapPointer(object, object.map_slot()); \
+ if (visitor->ShouldVisitMapPointer()) { \
+ visitor->VisitMapPointer(object); \
+ } \
TypeName::BodyDescriptor::IterateBody(map, object, size, visitor); \
return static_cast<ResultType>(size); \
}
@@ -109,7 +111,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map.instance_size();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
return static_cast<ResultType>(size);
}
@@ -120,8 +122,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object.map_slot());
+ if (visitor->ShouldVisitMapPointer()) {
+ visitor->VisitMapPointer(object);
+ }
JSObject::FastBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -132,8 +135,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::BodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object.map_slot());
+ if (visitor->ShouldVisitMapPointer()) {
+ visitor->VisitMapPointer(object);
+ }
JSObject::BodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -145,7 +149,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map.instance_size();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
StructBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
@@ -157,7 +161,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
return static_cast<ResultType>(object.size());
}
@@ -169,7 +173,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = WeakArrayBodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
return size;
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 9ebd94427e..a5c291458f 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -54,12 +54,15 @@ namespace internal {
V(SmallOrderedHashMap, SmallOrderedHashMap) \
V(SmallOrderedHashSet, SmallOrderedHashSet) \
V(SmallOrderedNameDictionary, SmallOrderedNameDictionary) \
+ V(SourceTextModule, SourceTextModule) \
V(Symbol, Symbol) \
+ V(SyntheticModule, SyntheticModule) \
V(ThinString, ThinString) \
V(TransitionArray, TransitionArray) \
V(UncompiledDataWithoutPreparseData, UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \
V(WasmCapiFunctionData, WasmCapiFunctionData) \
+ V(WasmIndirectFunctionTable, WasmIndirectFunctionTable) \
V(WasmInstanceObject, WasmInstanceObject)
#define FORWARD_DECLARE(TypeName, Type) class Type;
@@ -91,7 +94,7 @@ class HeapVisitor : public ObjectVisitor {
// Guard predicate for visiting the objects map pointer separately.
V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
- V8_INLINE void VisitMapPointer(HeapObject host, MapWordSlot map_slot);
+ V8_INLINE void VisitMapPointer(HeapObject host);
// If this predicate returns false, then the heap visitor will fail
// in default Visit implemention for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
diff --git a/deps/v8/src/heap/read-only-heap-inl.h b/deps/v8/src/heap/read-only-heap-inl.h
new file mode 100644
index 0000000000..c725b4bca8
--- /dev/null
+++ b/deps/v8/src/heap/read-only-heap-inl.h
@@ -0,0 +1,31 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_READ_ONLY_HEAP_INL_H_
+#define V8_HEAP_READ_ONLY_HEAP_INL_H_
+
+#include "src/heap/read-only-heap.h"
+
+#include "src/execution/isolate-utils-inl.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
+#ifdef V8_SHARED_RO_HEAP
+ // This fails if we are creating heap objects and the roots haven't yet been
+ // copied into the read-only heap or it has been cleared for testing.
+ if (shared_ro_heap_ != nullptr && shared_ro_heap_->init_complete_) {
+ return ReadOnlyRoots(shared_ro_heap_->read_only_roots_);
+ }
+#endif
+ return ReadOnlyRoots(GetHeapFromWritableObject(object));
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_READ_ONLY_HEAP_INL_H_
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 1021bc147f..c325aea7e6 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -6,6 +6,7 @@
#include <cstring>
+#include "src/base/lsan.h"
#include "src/base/once.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -20,29 +21,53 @@ namespace internal {
#ifdef V8_SHARED_RO_HEAP
V8_DECLARE_ONCE(setup_ro_heap_once);
-ReadOnlyHeap* shared_ro_heap = nullptr;
+ReadOnlyHeap* ReadOnlyHeap::shared_ro_heap_ = nullptr;
#endif
// static
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
- // Make sure we are only sharing read-only space when deserializing. Otherwise
- // we would be trying to create heap objects inside an already initialized
- // read-only space. Use ClearSharedHeapForTest if you need a new read-only
- // space.
- DCHECK_IMPLIES(shared_ro_heap != nullptr, des != nullptr);
-
- base::CallOnce(&setup_ro_heap_once, [isolate, des]() {
- shared_ro_heap = CreateAndAttachToIsolate(isolate);
- if (des != nullptr) shared_ro_heap->DeseralizeIntoIsolate(isolate, des);
- });
-
- isolate->heap()->SetUpFromReadOnlyHeap(shared_ro_heap);
+ bool call_once_ran = false;
+ base::Optional<Checksum> des_checksum;
+#ifdef DEBUG
+ if (des != nullptr) des_checksum = des->GetChecksum();
+#endif // DEBUG
+
+ base::CallOnce(&setup_ro_heap_once,
+ [isolate, des, des_checksum, &call_once_ran]() {
+ USE(des_checksum);
+ shared_ro_heap_ = CreateAndAttachToIsolate(isolate);
+ if (des != nullptr) {
+#ifdef DEBUG
+ shared_ro_heap_->read_only_blob_checksum_ = des_checksum;
+#endif // DEBUG
+ shared_ro_heap_->DeseralizeIntoIsolate(isolate, des);
+ }
+ call_once_ran = true;
+ });
+
+ USE(call_once_ran);
+ USE(des_checksum);
+#ifdef DEBUG
+ const base::Optional<Checksum> last_checksum =
+ shared_ro_heap_->read_only_blob_checksum_;
+ if (last_checksum || des_checksum) {
+ // The read-only heap was set up from a snapshot. Make sure it's the always
+ // the same snapshot.
+ CHECK_EQ(last_checksum, des_checksum);
+ } else {
+ // The read-only heap objects were created. Make sure this happens only
+ // once, during this call.
+ CHECK(call_once_ran);
+ }
+#endif // DEBUG
+
+ isolate->SetUpFromReadOnlyHeap(shared_ro_heap_);
if (des != nullptr) {
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
- std::memcpy(isolate_ro_roots, shared_ro_heap->read_only_roots_,
+ std::memcpy(isolate_ro_roots, shared_ro_heap_->read_only_roots_,
kEntriesCount * sizeof(Address));
}
#else
@@ -66,7 +91,7 @@ void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
// static
ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(Isolate* isolate) {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
- isolate->heap()->SetUpFromReadOnlyHeap(ro_heap);
+ isolate->SetUpFromReadOnlyHeap(ro_heap);
return ro_heap;
}
@@ -77,6 +102,9 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
+ // N.B. Since pages are manually allocated with mmap, Lsan doesn't track
+ // their pointers. Seal explicitly ignores the necessary objects.
+ LSAN_IGNORE_OBJECT(this);
read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
#else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
@@ -94,30 +122,17 @@ void ReadOnlyHeap::OnHeapTearDown() {
// static
void ReadOnlyHeap::ClearSharedHeapForTest() {
#ifdef V8_SHARED_RO_HEAP
- DCHECK_NOT_NULL(shared_ro_heap);
+ DCHECK_NOT_NULL(shared_ro_heap_);
// TODO(v8:7464): Just leak read-only space for now. The paged-space heap
// is null so there isn't a nice way to do this.
- delete shared_ro_heap;
- shared_ro_heap = nullptr;
+ shared_ro_heap_ = nullptr;
setup_ro_heap_once = 0;
#endif
}
// static
bool ReadOnlyHeap::Contains(HeapObject object) {
- return Page::FromAddress(object.ptr())->owner()->identity() == RO_SPACE;
-}
-
-// static
-ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
-#ifdef V8_SHARED_RO_HEAP
- // This fails if we are creating heap objects and the roots haven't yet been
- // copied into the read-only heap or it has been cleared for testing.
- if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) {
- return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
- }
-#endif
- return ReadOnlyRoots(GetHeapFromWritableObject(object));
+ return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
}
Object* ReadOnlyHeap::ExtendReadOnlyObjectCache() {
@@ -134,15 +149,15 @@ bool ReadOnlyHeap::read_only_object_cache_is_initialized() const {
return read_only_object_cache_.size() > 0;
}
-ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap)
- : ReadOnlyHeapIterator(ro_heap->read_only_space()) {}
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
+ : ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {}
-ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlySpace* ro_space)
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
: ro_space_(ro_space),
current_page_(ro_space->first_page()),
current_addr_(current_page_->area_start()) {}
-HeapObject ReadOnlyHeapIterator::Next() {
+HeapObject ReadOnlyHeapObjectIterator::Next() {
if (current_page_ == nullptr) {
return HeapObject();
}
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index 697c9e26ef..4c1da62a15 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -5,7 +5,10 @@
#ifndef V8_HEAP_READ_ONLY_HEAP_H_
#define V8_HEAP_READ_ONLY_HEAP_H_
+#include <utility>
+
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
#include "src/roots/roots.h"
@@ -44,7 +47,8 @@ class ReadOnlyHeap final {
// Gets read-only roots from an appropriate root list: shared read-only root
// list if the shared read-only heap has been initialized or the isolate
// specific roots table.
- V8_EXPORT_PRIVATE static ReadOnlyRoots GetReadOnlyRoots(HeapObject object);
+ V8_EXPORT_PRIVATE inline static ReadOnlyRoots GetReadOnlyRoots(
+ HeapObject object);
// Clears any shared read-only heap artifacts for testing, forcing read-only
// heap to be re-created on next set up.
@@ -60,6 +64,8 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
+ using Checksum = std::pair<uint32_t, uint32_t>;
+
// Creates a new read-only heap and attaches it to the provided isolate.
static ReadOnlyHeap* CreateAndAttachToIsolate(Isolate* isolate);
// Runs the read-only deserailizer and calls InitFromIsolate to complete
@@ -76,18 +82,25 @@ class ReadOnlyHeap final {
std::vector<Object> read_only_object_cache_;
#ifdef V8_SHARED_RO_HEAP
+#ifdef DEBUG
+ // The checksum of the blob the read-only heap was deserialized from, if any.
+ base::Optional<Checksum> read_only_blob_checksum_;
+#endif // DEBUG
+
Address read_only_roots_[kEntriesCount];
-#endif
+
+ V8_EXPORT_PRIVATE static ReadOnlyHeap* shared_ro_heap_;
+#endif // V8_SHARED_RO_HEAP
explicit ReadOnlyHeap(ReadOnlySpace* ro_space) : read_only_space_(ro_space) {}
DISALLOW_COPY_AND_ASSIGN(ReadOnlyHeap);
};
// This class enables iterating over all read-only heap objects.
-class V8_EXPORT_PRIVATE ReadOnlyHeapIterator {
+class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
public:
- explicit ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap);
- explicit ReadOnlyHeapIterator(ReadOnlySpace* ro_space);
+ explicit ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap);
+ explicit ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space);
HeapObject Next();
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index cd2344b349..ea7fe0149b 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -5,8 +5,8 @@
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
+#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
-#include "src/common/v8memory.h"
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
@@ -309,7 +309,7 @@ class UpdateTypedSlotHelper {
SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
- Memory<Address>(entry_address) = code.entry();
+ base::Memory<Address>(entry_address) = code.entry();
}
return result;
}
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 50dc5f25c9..9c605f7089 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -97,8 +97,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
// with page initialization.
HeapObject heap_object;
if (object->GetHeapObject(&heap_object)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
}
#endif
}
@@ -110,9 +109,8 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
- Object old = source.map_slot().Release_CompareAndSwap(
- map, MapWord::FromForwardingAddress(target).ToMap());
- if (old != map) {
+ if (!source.synchronized_compare_and_swap_map_word(
+ MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
// Other task migrated the object.
return false;
}
@@ -215,9 +213,9 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
FLAG_young_generation_large_objects &&
MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
- MemoryChunk::FromHeapObject(object)->owner()->identity());
- if (object.map_slot().Release_CompareAndSwap(
- map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
+ MemoryChunk::FromHeapObject(object)->owner_identity());
+ if (object.synchronized_compare_and_swap_map_word(
+ MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
@@ -314,8 +312,7 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObjectReference::Update(slot, first);
if (!Heap::InYoungGeneration(first)) {
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(first).ToMap());
+ object.synchronized_set_map_word(MapWord::FromForwardingAddress(first));
return REMOVE_SLOT;
}
@@ -324,16 +321,15 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObject target = first_word.ToForwardingAddress();
HeapObjectReference::Update(slot, target);
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(target).ToMap());
+ object.synchronized_set_map_word(MapWord::FromForwardingAddress(target));
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
Map::ObjectFieldsFrom(map.visitor_id()));
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
+ object.synchronized_set_map_word(
+ MapWord::FromForwardingAddress(slot.ToHeapObject()));
return result;
}
DCHECK_EQ(ObjectFields::kMaybePointers,
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index c7666b7da7..70b514142f 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -41,10 +41,20 @@ class ScavengingTask final : public ItemParallelJob::Task {
scavenger_(scavenger),
barrier_(barrier) {}
- void RunInParallel() final {
- TRACE_BACKGROUND_GC(
- heap_->tracer(),
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ void RunInParallel(Runner runner) final {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ ProcessItems();
+ }
+ }
+
+ private:
+ void ProcessItems() {
double scavenging_time = 0.0;
{
barrier_->Start();
@@ -66,8 +76,6 @@ class ScavengingTask final : public ItemParallelJob::Task {
scavenger_->bytes_copied(), scavenger_->bytes_promoted());
}
}
-
- private:
Heap* const heap_;
Scavenger* const scavenger_;
OneshotBarrier* const barrier_;
@@ -413,7 +421,7 @@ void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
- AllocationSpace space = page->owner()->identity();
+ AllocationSpace space = page->owner_identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
heap()->mark_compact_collector()->sweeper()->AddPage(
space, reinterpret_cast<Page*>(page),
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 458fd819ae..a936521a7e 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -29,7 +29,6 @@
#include "src/objects/lookup-cache.h"
#include "src/objects/map.h"
#include "src/objects/microtask.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table.h"
@@ -37,11 +36,15 @@
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
+#include "src/objects/source-text-module.h"
#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
+#include "src/objects/synthetic-module.h"
#include "src/objects/template-objects-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/wasm/wasm-objects.h"
+#include "torque-generated/class-definitions-tq.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -485,7 +488,10 @@ bool Heap::CreateInitialMaps() {
uncompiled_data_with_preparse_data)
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
-
+ ALLOCATE_MAP(SOURCE_TEXT_MODULE_TYPE, SourceTextModule::kSize,
+ source_text_module)
+ ALLOCATE_MAP(SYNTHETIC_MODULE_TYPE, SyntheticModule::kSize,
+ synthetic_module)
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
@@ -870,10 +876,6 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_regexp_species_protector(*cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_iterator_protector(*cell);
Handle<Cell> string_length_overflow_cell = factory->NewCell(
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 308d4f51b1..3b4ed8d30a 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -42,9 +42,9 @@ PageRange::PageRange(Address start, Address limit)
}
// -----------------------------------------------------------------------------
-// SemiSpaceIterator
+// SemiSpaceObjectIterator
-HeapObject SemiSpaceIterator::Next() {
+HeapObject SemiSpaceObjectIterator::Next() {
while (current_ != limit_) {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
@@ -63,9 +63,9 @@ HeapObject SemiSpaceIterator::Next() {
}
// -----------------------------------------------------------------------------
-// HeapObjectIterator
+// PagedSpaceObjectIterator
-HeapObject HeapObjectIterator::Next() {
+HeapObject PagedSpaceObjectIterator::Next() {
do {
HeapObject next_obj = FromCurrentPage();
if (!next_obj.is_null()) return next_obj;
@@ -73,7 +73,7 @@ HeapObject HeapObjectIterator::Next() {
return HeapObject();
}
-HeapObject HeapObjectIterator::FromCurrentPage() {
+HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
cur_addr_ = space_->limit();
@@ -182,7 +182,7 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
- category->set_free_list(&free_list_);
+ category->set_free_list(free_list());
added += category->available();
category->Relink();
});
@@ -204,13 +204,6 @@ bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
return false;
}
-bool MemoryChunk::HasHeaderSentinel(Address slot_addr) {
- Address base = BaseAddress(slot_addr);
- if (slot_addr < base + kHeaderSize) return false;
- return HeapObject::FromAddress(base) ==
- ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
-}
-
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
while (!HasHeaderSentinel(addr)) {
addr = BaseAddress(addr) - 1;
@@ -234,14 +227,21 @@ void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
MemoryChunk* from,
MemoryChunk* to,
size_t amount) {
+ DCHECK_NOT_NULL(from->owner());
+ DCHECK_NOT_NULL(to->owner());
base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
amount);
}
+AllocationSpace MemoryChunk::owner_identity() const {
+ if (InReadOnlySpace()) return RO_SPACE;
+ return owner()->identity();
+}
+
void Page::MarkNeverAllocateForTesting() {
- DCHECK(this->owner()->identity() != NEW_SPACE);
+ DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
SetFlag(NEVER_EVACUATE);
@@ -315,10 +315,6 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
UNREACHABLE();
}
-Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
- return top(type) ? top(type)->page() : nullptr;
-}
-
FreeList* FreeListCategory::owner() { return free_list_; }
bool FreeListCategory::is_linked() {
@@ -376,7 +372,7 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
- DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+ DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
@@ -389,7 +385,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
- DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+ DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 2c8cbdfc32..438308a346 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -8,6 +8,7 @@
#include <utility>
#include "src/base/bits.h"
+#include "src/base/lsan.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
@@ -44,9 +45,9 @@ STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
// ----------------------------------------------------------------------------
-// HeapObjectIterator
+// PagedSpaceObjectIterator
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(PagedSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
@@ -57,28 +58,28 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
#endif
}
-HeapObjectIterator::HeapObjectIterator(Page* page)
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(Page* page)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(reinterpret_cast<PagedSpace*>(page->owner())),
page_range_(page),
current_page_(page_range_.begin()) {
-#ifdef DEBUG
- Space* owner = page->owner();
+#ifdef V8_SHARED_RO_HEAP
// TODO(v8:7464): Always enforce this once PagedSpace::Verify is no longer
// used to verify read-only space for non-shared builds.
-#ifdef V8_SHARED_RO_HEAP
- DCHECK_NE(owner->identity(), RO_SPACE);
-#endif
- // Do not access the heap of the read-only space.
- DCHECK(owner->identity() == RO_SPACE || owner->identity() == OLD_SPACE ||
- owner->identity() == MAP_SPACE || owner->identity() == CODE_SPACE);
+ DCHECK(!page->InReadOnlySpace());
+#endif // V8_SHARED_RO_HEAP
+
+#ifdef DEBUG
+ AllocationSpace owner = page->owner_identity();
+ DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
+ owner == CODE_SPACE);
#endif // DEBUG
}
// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page.
-bool HeapObjectIterator::AdvanceToNextPage() {
+bool PagedSpaceObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
@@ -105,14 +106,14 @@ PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
- for (SpaceIterator it(heap_); it.has_next();) {
- it.next()->PauseAllocationObservers();
+ for (SpaceIterator it(heap_); it.HasNext();) {
+ it.Next()->PauseAllocationObservers();
}
}
PauseAllocationObserversScope::~PauseAllocationObserversScope() {
- for (SpaceIterator it(heap_); it.has_next();) {
- it.next()->ResumeAllocationObservers();
+ for (SpaceIterator it(heap_); it.HasNext();) {
+ it.Next()->ResumeAllocationObservers();
}
}
@@ -539,10 +540,13 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
return AllocatableMemoryInDataPage();
}
-Heap* MemoryChunk::synchronized_heap() {
- return reinterpret_cast<Heap*>(
- base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+#ifdef THREAD_SANITIZER
+void MemoryChunk::SynchronizedHeapLoad() {
+ CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+ reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+ InReadOnlySpace());
}
+#endif
void MemoryChunk::InitializationMemoryFence() {
base::SeqCst_MemoryFence();
@@ -561,8 +565,7 @@ void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
DCHECK(permission == PageAllocator::kRead ||
permission == PageAllocator::kReadExecute);
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE ||
- owner()->identity() == CODE_LO_SPACE);
+ DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
@@ -596,8 +599,7 @@ void MemoryChunk::SetReadAndExecutable() {
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE ||
- owner()->identity() == CODE_LO_SPACE);
+ DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
@@ -688,16 +690,11 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Executability executable, Space* owner,
VirtualMemory reservation) {
MemoryChunk* chunk = FromAddress(base);
-
DCHECK_EQ(base, chunk->address());
+ new (chunk) BasicMemoryChunk(size, area_start, area_end);
+ DCHECK(HasHeaderSentinel(area_start));
chunk->heap_ = heap;
- chunk->size_ = size;
- chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
- DCHECK(HasHeaderSentinel(area_start));
- chunk->area_start_ = area_start;
- chunk->area_end_ = area_end;
- chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
@@ -716,7 +713,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
- chunk->marking_bitmap_ = nullptr;
chunk->local_tracker_ = nullptr;
chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
@@ -724,25 +720,18 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->external_backing_store_bytes_
[ExternalBackingStoreType::kExternalString] = 0;
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- chunk->categories_[i] = nullptr;
- }
+ chunk->categories_ = nullptr;
- chunk->AllocateMarkingBitmap();
+ heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+ 0);
if (owner->identity() == RO_SPACE) {
heap->incremental_marking()
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
- } else {
- heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
- 0);
+ chunk->SetFlag(READ_ONLY_HEAP);
}
- DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
- DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
- DCHECK_EQ(kOwnerOffset, OFFSET_OF(MemoryChunk, owner_));
-
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
if (heap->write_protect_code_memory()) {
@@ -768,11 +757,11 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
return chunk;
}
-Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
+Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
- DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- page->owner()->identity()),
- page->area_size());
+ DCHECK_EQ(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
+ page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocationStatistics();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
@@ -783,8 +772,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
return page;
}
-Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
- DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
bool in_to_space = (id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
Page* page = static_cast<Page*>(chunk);
@@ -829,24 +817,31 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
}
void Page::AllocateFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ DCHECK_NULL(categories_);
+ categories_ = new FreeListCategory*[free_list()->number_of_categories()]();
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ DCHECK_NULL(categories_[i]);
categories_[i] = new FreeListCategory(
reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
}
}
void Page::InitializeFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
}
}
void Page::ReleaseFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- if (categories_[i] != nullptr) {
- delete categories_[i];
- categories_[i] = nullptr;
+ if (categories_ != nullptr) {
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ if (categories_[i] != nullptr) {
+ delete categories_[i];
+ categories_[i] = nullptr;
+ }
}
+ delete[] categories_;
+ categories_ = nullptr;
}
}
@@ -856,23 +851,21 @@ Page* Page::ConvertNewToOld(Page* old_page) {
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
old_page->SetFlags(0, static_cast<uintptr_t>(~0));
- Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
+ Page* new_page = old_space->InitializePage(old_page);
old_space->AddPage(new_page);
return new_page;
}
size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
+ if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
return high_water_mark_;
}
-bool MemoryChunk::InOldSpace() const {
- return owner()->identity() == OLD_SPACE;
-}
+bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
bool MemoryChunk::InLargeObjectSpace() const {
- return owner()->identity() == LO_SPACE;
+ return owner_identity() == LO_SPACE;
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
@@ -1131,15 +1124,15 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
Address new_area_end) {
VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
- chunk->size_ -= bytes_to_free;
- chunk->area_end_ = new_area_end;
+ chunk->set_size(chunk->size() - bytes_to_free);
+ chunk->set_area_end(new_area_end);
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
// Add guard page at the end.
size_t page_size = GetCommitPageSize();
- DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
+ DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
- reservation->SetPermissions(chunk->area_end_, page_size,
+ reservation->SetPermissions(chunk->area_end(), page_size,
PageAllocator::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page and releasing
@@ -1181,7 +1174,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- chunk->ReleaseAllocatedMemory();
+ chunk->ReleaseAllAllocatedMemory();
VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
@@ -1191,7 +1184,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
reservation->Free();
} else {
// Only read-only pages can have non-initialized reservation object.
- DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
+ DCHECK_EQ(RO_SPACE, chunk->owner_identity());
FreeMemory(page_allocator(chunk->executable()), chunk->address(),
chunk->size());
}
@@ -1251,7 +1244,7 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
chunk = AllocateChunk(size, size, executable, owner);
}
if (chunk == nullptr) return nullptr;
- return owner->InitializePage(chunk, executable);
+ return owner->InitializePage(chunk);
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
@@ -1368,7 +1361,7 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
-void MemoryChunk::ReleaseAllocatedMemory() {
+void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
if (mutex_ != nullptr) {
delete mutex_;
mutex_ = nullptr;
@@ -1377,20 +1370,29 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete page_protection_change_mutex_;
page_protection_change_mutex_ = nullptr;
}
+ if (code_object_registry_ != nullptr) {
+ delete code_object_registry_;
+ code_object_registry_ = nullptr;
+ }
+
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseInvalidatedSlots();
+
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
- if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
- if (code_object_registry_ != nullptr) delete code_object_registry_;
+}
+void MemoryChunk::ReleaseAllAllocatedMemory() {
if (!IsLargePage()) {
Page* page = static_cast<Page*>(this);
page->ReleaseFreeListCategories();
}
+
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
}
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
@@ -1408,7 +1410,7 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
- SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
+ SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
&slot_set_[type], nullptr, slot_set);
if (old_slot_set != nullptr) {
@@ -1527,23 +1529,10 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
-void MemoryChunk::AllocateMarkingBitmap() {
- DCHECK_NULL(marking_bitmap_);
- marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
-}
-
-void MemoryChunk::ReleaseMarkingBitmap() {
- DCHECK_NOT_NULL(marking_bitmap_);
- free(marking_bitmap_);
- marking_bitmap_ = nullptr;
-}
-
// -----------------------------------------------------------------------------
// PagedSpace implementation
void Space::CheckOffsetsAreConsistent() const {
- static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
- "ID offset inconsistent");
DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
}
@@ -1592,8 +1581,8 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
- Executability executable)
- : SpaceWithLinearArea(heap, space), executable_(executable) {
+ Executability executable, FreeList* free_list)
+ : SpaceWithLinearArea(heap, space, free_list), executable_(executable) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
@@ -1614,6 +1603,7 @@ void PagedSpace::RefillFreeList() {
identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
+ DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
{
@@ -1713,21 +1703,7 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
base::MutexGuard guard(mutex());
- // Check for pages that still contain free list entries. Bail out for smaller
- // categories.
- const int minimum_category =
- static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
- Page* page = free_list()->GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kMedium);
- if (!page && static_cast<int>(kSmall) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kSmall);
- if (!page && static_cast<int>(kTiny) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kTiny);
- if (!page && static_cast<int>(kTiniest) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kTiniest);
+ Page* page = free_list()->GetPageForSize(size_in_bytes);
if (!page) return nullptr;
RemovePage(page);
return page;
@@ -1769,9 +1745,9 @@ size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
void PagedSpace::ResetFreeList() {
for (Page* page : *this) {
- free_list_.EvictFreeListItems(page);
+ free_list_->EvictFreeListItems(page);
}
- DCHECK(free_list_.IsEmpty());
+ DCHECK(free_list_->IsEmpty());
}
void PagedSpace::ShrinkImmortalImmovablePages() {
@@ -1934,8 +1910,8 @@ void PagedSpace::ReleasePage(Page* page) {
page));
DCHECK_EQ(page->owner(), this);
- free_list_.EvictFreeListItems(page);
- DCHECK(!free_list_.ContainsPageFreeListItems(page));
+ free_list_->EvictFreeListItems(page);
+ DCHECK(!free_list_->ContainsPageFreeListItems(page));
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
@@ -1972,7 +1948,7 @@ void PagedSpace::SetReadAndWritable() {
}
std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
+ return std::unique_ptr<ObjectIterator>(new PagedSpaceObjectIterator(this));
}
bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
@@ -1998,7 +1974,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
}
size_t new_node_size = 0;
- FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
+ FreeSpace new_node = free_list_->Allocate(size_in_bytes, &new_node_size);
if (new_node.is_null()) return false;
DCHECK_GE(new_node_size, size_in_bytes);
@@ -2055,7 +2031,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
allocation_pointer_found_in_space = true;
}
CHECK(page->SweepingDone());
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
@@ -2066,8 +2042,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// be in map space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(isolate->heap()->map_space()->Contains(map) ||
- ReadOnlyHeap::Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -2118,7 +2094,7 @@ void PagedSpace::VerifyLiveBytes() {
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->SweepingDone());
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
int black_size = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
@@ -2138,7 +2114,7 @@ void PagedSpace::VerifyCountersAfterSweeping() {
for (Page* page : *this) {
DCHECK(page->SweepingDone());
total_capacity += page->area_size();
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFiller()) {
@@ -2185,7 +2161,7 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity,
size_t max_semispace_capacity)
- : SpaceWithLinearArea(heap, NEW_SPACE),
+ : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace) {
DCHECK(initial_semispace_capacity <= max_semispace_capacity);
@@ -2528,11 +2504,11 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
}
std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
+ return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceIterator because verification doesn't assume
+// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify(Isolate* isolate) {
// The allocation pointer should be in the space or at the very end.
@@ -2560,8 +2536,7 @@ void NewSpace::Verify(Isolate* isolate) {
// be in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(heap()->map_space()->Contains(map) ||
- heap()->read_only_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap());
@@ -2633,6 +2608,9 @@ bool SemiSpace::Commit() {
DCHECK(!is_committed());
const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
+ // Pages in the new spaces can be moved to the old space by the full
+ // collector. Therefore, they must be initialized with the same FreeList as
+ // old pages.
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
@@ -2890,16 +2868,14 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
}
#endif
-
// -----------------------------------------------------------------------------
-// SemiSpaceIterator implementation.
+// SemiSpaceObjectIterator implementation.
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
Initialize(space->first_allocatable_address(), space->top());
}
-
-void SemiSpaceIterator::Initialize(Address start, Address end) {
+void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
@@ -2925,19 +2901,22 @@ void FreeListCategory::Reset() {
set_prev(nullptr);
set_next(nullptr);
available_ = 0;
+ length_ = 0;
}
FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace node = top();
- if (node.is_null() || static_cast<size_t>(node.Size()) < minimum_size) {
+ DCHECK(!node.is_null());
+ if (static_cast<size_t>(node.Size()) < minimum_size) {
*node_size = 0;
return FreeSpace();
}
set_top(node.next());
*node_size = node.Size();
available_ -= *node_size;
+ length_--;
return node;
}
@@ -2951,12 +2930,13 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
if (size >= minimum_size) {
DCHECK_GE(available_, size);
available_ -= size;
+ length_--;
if (cur_node == top()) {
set_top(cur_node.next());
}
if (!prev_non_evac_node.is_null()) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
- if (chunk->owner()->identity() == CODE_SPACE) {
+ if (chunk->owner_identity() == CODE_SPACE) {
chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
}
prev_non_evac_node.set_next(cur_node.next());
@@ -2976,6 +2956,7 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
free_space.set_next(top());
set_top(free_space);
available_ += size_in_bytes;
+ length_++;
if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
owner()->AddCategory(this);
}
@@ -2983,17 +2964,14 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
void FreeListCategory::RepairFreeList(Heap* heap) {
+ Map free_space_map = ReadOnlyRoots(heap).free_space_map();
FreeSpace n = top();
while (!n.is_null()) {
- MapWordSlot map_location = n.map_slot();
- // We can't use .is_null() here because *map_location returns an
- // Object (for which "is null" is not defined, as it would be
- // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
- if (map_location.contains_value(kNullAddress)) {
- map_location.store(ReadOnlyRoots(heap).free_space_map());
+ ObjectSlot map_slot = n.map_slot();
+ if (map_slot.contains_value(kNullAddress)) {
+ map_slot.store(free_space_map);
} else {
- DCHECK(map_location.contains_value(
- ReadOnlyRoots(heap).free_space_map().ptr()));
+ DCHECK(map_slot.contains_value(free_space_map.ptr()));
}
n = n.next();
}
@@ -3004,21 +2982,50 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
-FreeList::FreeList() : wasted_bytes_(0) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i] = nullptr;
+// ------------------------------------------------
+// Generic FreeList methods (alloc/free related)
+
+FreeList* FreeList::CreateFreeList() {
+ if (FLAG_gc_freelist_strategy == 1) {
+ return new FreeListFastAlloc();
+ } else if (FLAG_gc_freelist_strategy == 2) {
+ return new FreeListMany();
+ } else {
+ return new FreeListLegacy();
}
- Reset();
}
+FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
+ size_t minimum_size, size_t* node_size) {
+ FreeListCategory* category = categories_[type];
+ if (category == nullptr) return FreeSpace();
+ FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ }
+ if (category->is_empty()) {
+ RemoveCategory(category);
+ }
+ return node;
+}
-void FreeList::Reset() {
- ForAllFreeListCategories(
- [](FreeListCategory* category) { category->Reset(); });
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i] = nullptr;
+FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
+ size_t minimum_size,
+ size_t* node_size) {
+ FreeListCategoryIterator it(this, type);
+ FreeSpace node;
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ node = current->SearchForNodeInList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ if (current->is_empty()) {
+ RemoveCategory(current);
+ }
+ return node;
+ }
}
- wasted_bytes_ = 0;
+ return node;
}
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
@@ -3026,7 +3033,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
page->DecreaseAllocatedBytes(size_in_bytes);
// Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < kMinBlockSize) {
+ if (size_in_bytes < min_block_size_) {
page->add_wasted_memory(size_in_bytes);
wasted_bytes_ += size_in_bytes;
return size_in_bytes;
@@ -3041,52 +3048,22 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return 0;
}
-FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
- }
- RemoveCategory(current);
- }
- return node;
-}
+// ------------------------------------------------
+// FreeListLegacy implementation
-FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
- size_t minimum_size, size_t* node_size) {
- if (categories_[type] == nullptr) return FreeSpace();
- FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- }
- return node;
-}
+FreeListLegacy::FreeListLegacy() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kHuge + 1;
+ last_category_ = kHuge;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
-FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
- size_t* node_size,
- size_t minimum_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->SearchForNodeInList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
- }
- if (current->is_empty()) {
- RemoveCategory(current);
- }
- }
- return node;
+ Reset();
}
-FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeListLegacy::~FreeListLegacy() { delete[] categories_; }
+
+FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size) {
DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace node;
// First try the allocation fast path: try to allocate the minimum element
@@ -3094,21 +3071,31 @@ FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
FreeListCategoryType type =
SelectFastAllocationFreeListCategoryType(size_in_bytes);
for (int i = type; i < kHuge && node.is_null(); i++) {
- node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
}
if (node.is_null()) {
// Next search the huge list for free list nodes. This takes linear time in
// the number of huge elements.
- node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
+ node = SearchForNodeInList(kHuge, size_in_bytes, node_size);
}
if (node.is_null() && type != kHuge) {
- // We didn't find anything in the huge list. Now search the best fitting
- // free list for a node that has at least the requested size.
+ // We didn't find anything in the huge list.
type = SelectFreeListCategoryType(size_in_bytes);
- node = TryFindNodeIn(type, size_in_bytes, node_size);
+
+ if (type == kTiniest) {
+ // For this tiniest object, the tiny list hasn't been searched yet.
+ // Now searching the tiny list.
+ node = TryFindNodeIn(kTiny, size_in_bytes, node_size);
+ }
+
+ if (node.is_null()) {
+ // Now search the best fitting free list for a node that has at least the
+ // requested size.
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ }
}
if (!node.is_null()) {
@@ -3119,6 +3106,122 @@ FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
return node;
}
+// ------------------------------------------------
+// FreeListFastAlloc implementation
+
+FreeListFastAlloc::FreeListFastAlloc() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kHuge + 1;
+ last_category_ = kHuge;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
+
+FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+ // Try to allocate the biggest element possible (to make the most of later
+ // bump-pointer allocations).
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ for (int i = kHuge; i >= type && node.is_null(); i--) {
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
+ }
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListMany implementation
+
+// Cf. the declaration of |categories_max| in |spaces.h| to see how this is
+// computed.
+const size_t FreeListMany::categories_max[kNumberOfCategories] = {
+ 24, 32, 40, 48, 56, 64, 72,
+ 80, 88, 96, 104, 112, 120, 128,
+ 136, 144, 152, 160, 168, 176, 184,
+ 192, 200, 208, 216, 224, 232, 240,
+ 248, 256, 384, 512, 768, 1024, 1536,
+ 2048, 3072, 4080, 4088, 4096, 6144, 8192,
+ 12288, 16384, 24576, 32768, 49152, 65536, Page::kPageSize};
+
+FreeListMany::FreeListMany() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kNumberOfCategories;
+ last_category_ = number_of_categories_ - 1;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
+ if (maximum_freed < categories_max[0]) {
+ return 0;
+ }
+ for (int cat = kFirstCategory + 1; cat < last_category_; cat++) {
+ if (maximum_freed <= categories_max[cat]) {
+ return categories_max[cat - 1];
+ }
+ }
+ return maximum_freed;
+}
+
+Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(last_category_);
+ for (int cat = last_category_ - 1; !page && cat >= minimum_category; cat--) {
+ page = GetPageForCategoryType(cat);
+ }
+ return page;
+}
+
+FreeListMany::~FreeListMany() { delete[] categories_; }
+
+FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ for (int i = type; i < last_category_ && node.is_null(); i++) {
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
+ }
+
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ node = SearchForNodeInList(last_category_, size_in_bytes, node_size);
+ }
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// Generic FreeList methods (non alloc/free related)
+
+void FreeList::Reset() {
+ ForAllFreeListCategories(
+ [](FreeListCategory* category) { category->Reset(); });
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
+ categories_[i] = nullptr;
+ }
+ wasted_bytes_ = 0;
+}
+
size_t FreeList::EvictFreeListItems(Page* page) {
size_t sum = 0;
page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
@@ -3148,7 +3251,7 @@ void FreeList::RepairLists(Heap* heap) {
bool FreeList::AddCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
- DCHECK_LT(type, kNumberOfCategories);
+ DCHECK_LT(type, number_of_categories_);
FreeListCategory* top = categories_[type];
if (category->is_empty()) return false;
@@ -3165,7 +3268,7 @@ bool FreeList::AddCategory(FreeListCategory* category) {
void FreeList::RemoveCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
- DCHECK_LT(type, kNumberOfCategories);
+ DCHECK_LT(type, number_of_categories_);
FreeListCategory* top = categories_[type];
// Common double-linked list removal.
@@ -3193,8 +3296,16 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
PrintF("null\n");
}
+int MemoryChunk::FreeListsLength() {
+ int length = 0;
+ for (int cat = kFirstCategory; cat <= free_list()->last_category(); cat++) {
+ if (categories_[cat] != nullptr) {
+ length += categories_[cat]->FreeListLength();
+ }
+ }
+ return length;
+}
-#ifdef DEBUG
size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
FreeSpace cur = top();
@@ -3209,20 +3320,10 @@ size_t FreeListCategory::SumFreeList() {
return sum;
}
-int FreeListCategory::FreeListLength() {
- int length = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- length++;
- cur = cur.next();
- if (length == kVeryLongFreeList) return length;
- }
- return length;
-}
-
+#ifdef DEBUG
bool FreeList::IsVeryLong() {
int len = 0;
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
while (it.HasNext()) {
len += it.Next()->FreeListLength();
@@ -3254,7 +3355,7 @@ void PagedSpace::PrepareForMarkCompact() {
FreeLinearAllocationArea();
// Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
+ free_list_->Reset();
}
size_t PagedSpace::SizeOfObjects() {
@@ -3347,7 +3448,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
return RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes));
}
@@ -3366,18 +3467,21 @@ void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif
ReadOnlySpace::ReadOnlySpace(Heap* heap)
- : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
+ : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
}
void ReadOnlyPage::MakeHeaderRelocatable() {
- if (mutex_ != nullptr) {
- delete mutex_;
- heap_ = nullptr;
- mutex_ = nullptr;
- local_tracker_ = nullptr;
- reservation_.Reset();
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ // Detached read-only space needs to have a valid marking bitmap and free list
+ // categories. Instruct Lsan to ignore them if required.
+ LSAN_IGNORE_OBJECT(categories_);
+ for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
+ LSAN_IGNORE_OBJECT(categories_[i]);
}
+ LSAN_IGNORE_OBJECT(marking_bitmap_);
+ heap_ = nullptr;
+ owner_ = nullptr;
}
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
@@ -3396,7 +3500,7 @@ void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
- free_list_.RepairLists(heap());
+ free_list_->RepairLists(heap());
// Each page may have a small free space that is not tracked by a free list.
// Those free spaces still contain null as their map pointer.
// Overwrite them with new fillers.
@@ -3422,7 +3526,7 @@ void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
void ReadOnlySpace::ClearStringPaddingIfNeeded() {
if (is_string_padding_cleared_) return;
- ReadOnlyHeapIterator iterator(this);
+ ReadOnlyHeapObjectIterator iterator(this);
for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
if (o.IsSeqOneByteString()) {
SeqOneByteString::cast(o).clear_padding();
@@ -3480,13 +3584,14 @@ void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
}
// -----------------------------------------------------------------------------
-// LargeObjectIterator
+// LargeObjectSpaceObjectIterator
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
+LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
+ LargeObjectSpace* space) {
current_ = space->first_page();
}
-HeapObject LargeObjectIterator::Next() {
+HeapObject LargeObjectSpaceObjectIterator::Next() {
if (current_ == nullptr) return HeapObject();
HeapObject object = current_->GetObject();
@@ -3501,7 +3606,10 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {}
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
- : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}
+ : Space(heap, id, new NoFreeList()),
+ size_(0),
+ page_count_(0),
+ objects_size_(0) {}
void LargeObjectSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
@@ -3584,7 +3692,7 @@ LargePage* CodeLargeObjectSpace::FindPage(Address a) {
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
- LargeObjectIterator it(this);
+ LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (marking_state->IsBlackOrGrey(obj)) {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
@@ -3614,7 +3722,7 @@ void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
}
void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
- DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+ DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
@@ -3697,7 +3805,8 @@ bool LargeObjectSpace::ContainsSlow(Address addr) {
}
std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
+ return std::unique_ptr<ObjectIterator>(
+ new LargeObjectSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
@@ -3722,8 +3831,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(heap()->map_space()->Contains(map) ||
- heap()->read_only_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
if (!(object.IsAbstractCode() || object.IsSeqString() ||
@@ -3787,7 +3895,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
#ifdef DEBUG
void LargeObjectSpace::Print() {
StdoutStream os;
- LargeObjectIterator it(this);
+ LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
obj.Print(os);
}
@@ -3796,9 +3904,9 @@ void LargeObjectSpace::Print() {
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
- this->owner()->name());
- printf(" --------------------------------------\n");
- HeapObjectIterator objects(this);
+ Heap::GetSpaceName(this->owner_identity()));
+ PrintF(" --------------------------------------\n");
+ PagedSpaceObjectIterator objects(this);
unsigned mark_size = 0;
for (HeapObject object = objects.Next(); !object.is_null();
object = objects.Next()) {
@@ -3811,8 +3919,8 @@ void Page::Print() {
object.ShortPrint();
PrintF("\n");
}
- printf(" --------------------------------------\n");
- printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
+ PrintF(" --------------------------------------\n");
+ PrintF(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
heap()->incremental_marking()->marking_state()->live_bytes(this));
}
@@ -3856,7 +3964,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
- DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+ DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AllocationStep(object_size, result.address(), object_size);
return result;
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 7522cac9cb..384c731f37 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -20,6 +20,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
@@ -119,19 +120,10 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
-enum FreeListCategoryType {
- kTiniest,
- kTiny,
- kSmall,
- kMedium,
- kLarge,
- kHuge,
-
- kFirstCategory = kTiniest,
- kLastCategory = kHuge,
- kNumberOfCategories = kLastCategory + 1,
- kInvalidCategory
-};
+using FreeListCategoryType = int;
+
+static const FreeListCategoryType kFirstCategory = 0;
+static const FreeListCategoryType kInvalidCategory = -1;
enum FreeMode { kLinkCategory, kDoNotLinkCategory };
@@ -151,12 +143,14 @@ class FreeListCategory {
page_(page),
type_(kInvalidCategory),
available_(0),
+ length_(0),
prev_(nullptr),
next_(nullptr) {}
void Initialize(FreeListCategoryType type) {
type_ = type;
available_ = 0;
+ length_ = 0;
prev_ = nullptr;
next_ = nullptr;
}
@@ -188,10 +182,8 @@ class FreeListCategory {
void set_free_list(FreeList* free_list) { free_list_ = free_list; }
-#ifdef DEBUG
size_t SumFreeList();
- int FreeListLength();
-#endif
+ int FreeListLength() { return length_; }
private:
// For debug builds we accurately compute free lists lengths up until
@@ -218,6 +210,9 @@ class FreeListCategory {
// category.
size_t available_;
+ // |length_|: Total blocks in this free list category.
+ int length_;
+
// |top_|: Points to the top FreeSpace in the free list category.
FreeSpace top_;
@@ -230,6 +225,327 @@ class FreeListCategory {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
+// A free list maintains free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer. When the limit is hit we need to
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
+// categories would scatter allocation more.
+class FreeList {
+ public:
+ // Creates a Freelist of the default class (FreeListLegacy for now).
+ V8_EXPORT_PRIVATE static FreeList* CreateFreeList();
+
+ virtual ~FreeList() = default;
+
+ // Returns how much memory can be allocated after freeing maximum_freed
+ // memory.
+ virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
+
+ // Adds a node on the free list. The block of size {size_in_bytes} starting
+ // at {start} is placed on the free list. The return value is the number of
+ // bytes that were not added to the free list, because the freed memory block
+ // was too small. Bookkeeping information will be written to the block, i.e.,
+ // its contents will be destroyed. The start address should be word aligned,
+ // and the size should be a non-zero multiple of the word size.
+ virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
+
+ // Allocates a free space node frome the free list of at least size_in_bytes
+ // bytes. Returns the actual node size in node_size which can be bigger than
+ // size_in_bytes. This method returns null if the allocation request cannot be
+ // handled by the free list.
+ virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) = 0;
+
+ // Returns a page containing an entry for a given type, or nullptr otherwise.
+ V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
+
+ void Reset();
+
+ // Return the number of bytes available on the free list.
+ size_t Available() {
+ size_t available = 0;
+ ForAllFreeListCategories([&available](FreeListCategory* category) {
+ available += category->available();
+ });
+ return available;
+ }
+
+ bool IsEmpty() {
+ bool empty = true;
+ ForAllFreeListCategories([&empty](FreeListCategory* category) {
+ if (!category->is_empty()) empty = false;
+ });
+ return empty;
+ }
+
+ // Used after booting the VM.
+ void RepairLists(Heap* heap);
+
+ V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
+ bool ContainsPageFreeListItems(Page* page);
+
+ int number_of_categories() { return number_of_categories_; }
+ FreeListCategoryType last_category() { return last_category_; }
+
+ size_t wasted_bytes() { return wasted_bytes_; }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
+ FreeListCategory* current = categories_[type];
+ while (current != nullptr) {
+ FreeListCategory* next = current->next();
+ callback(current);
+ current = next;
+ }
+ }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(Callback callback) {
+ for (int i = kFirstCategory; i < number_of_categories(); i++) {
+ ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
+ }
+ }
+
+ bool AddCategory(FreeListCategory* category);
+ V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
+ void PrintCategories(FreeListCategoryType type);
+
+#ifdef DEBUG
+ size_t SumFreeLists();
+ bool IsVeryLong();
+#endif
+
+ protected:
+ class FreeListCategoryIterator final {
+ public:
+ FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
+ : current_(free_list->categories_[type]) {}
+
+ bool HasNext() const { return current_ != nullptr; }
+
+ FreeListCategory* Next() {
+ DCHECK(HasNext());
+ FreeListCategory* tmp = current_;
+ current_ = current_->next();
+ return tmp;
+ }
+
+ private:
+ FreeListCategory* current_;
+ };
+
+ // Tries to retrieve a node from the first category in a given |type|.
+ // Returns nullptr if the category is empty or the top entry is smaller
+ // than minimum_size.
+ FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Searches a given |type| for a node of at least |minimum_size|.
+ FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Returns the smallest category in which an object of |size_in_bytes| could
+ // fit.
+ virtual FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) = 0;
+
+ FreeListCategory* top(FreeListCategoryType type) const {
+ return categories_[type];
+ }
+
+ Page* GetPageForCategoryType(FreeListCategoryType type) {
+ return top(type) ? top(type)->page() : nullptr;
+ }
+
+ int number_of_categories_ = 0;
+ FreeListCategoryType last_category_ = 0;
+ size_t min_block_size_ = 0;
+
+ std::atomic<size_t> wasted_bytes_{0};
+ FreeListCategory** categories_ = nullptr;
+
+ friend class FreeListCategory;
+ friend class Page;
+ friend class MemoryChunk;
+ friend class ReadOnlyPage;
+};
+
+// FreeList used for spaces that don't have freelists
+// (only the LargeObject space for now).
+class NoFreeList final : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) final {
+ FATAL("NoFreeList can't be used as a standard FreeList. ");
+ }
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ Page* GetPageForSize(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+
+ private:
+ FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+};
+
+// ----------------------------------------------------------------------------
+// Space is the abstract superclass for all allocation spaces.
+class V8_EXPORT_PRIVATE Space : public Malloced {
+ public:
+ Space(Heap* heap, AllocationSpace id, FreeList* free_list)
+ : allocation_observers_paused_(false),
+ heap_(heap),
+ id_(id),
+ committed_(0),
+ max_committed_(0),
+ free_list_(std::unique_ptr<FreeList>(free_list)) {
+ external_backing_store_bytes_ =
+ new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
+ external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
+ external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
+ 0;
+ CheckOffsetsAreConsistent();
+ }
+
+ void CheckOffsetsAreConsistent() const;
+
+ static inline void MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
+
+ virtual ~Space() {
+ delete[] external_backing_store_bytes_;
+ external_backing_store_bytes_ = nullptr;
+ }
+
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+ bool IsDetached() const { return heap_ == nullptr; }
+
+ AllocationSpace identity() { return id_; }
+
+ const char* name() { return Heap::GetSpaceName(id_); }
+
+ virtual void AddAllocationObserver(AllocationObserver* observer);
+
+ virtual void RemoveAllocationObserver(AllocationObserver* observer);
+
+ virtual void PauseAllocationObservers();
+
+ virtual void ResumeAllocationObservers();
+
+ virtual void StartNextInlineAllocationStep() {}
+
+ void AllocationStep(int bytes_since_last, Address soon_object, int size);
+
+ // Return the total amount committed memory for this space, i.e., allocatable
+ // memory and page headers.
+ virtual size_t CommittedMemory() { return committed_; }
+
+ virtual size_t MaximumCommittedMemory() { return max_committed_; }
+
+ // Returns allocated size.
+ virtual size_t Size() = 0;
+
+ // Returns size of objects. Can differ from the allocated size
+ // (e.g. see LargeObjectSpace).
+ virtual size_t SizeOfObjects() { return Size(); }
+
+ // Approximate amount of physical memory committed for this space.
+ virtual size_t CommittedPhysicalMemory() = 0;
+
+ // Return the available bytes without growing.
+ virtual size_t Available() = 0;
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (id_ == CODE_SPACE) {
+ return RoundDown(size, kCodeAlignment);
+ } else {
+ return RoundDown(size, kTaggedSize);
+ }
+ }
+
+ virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
+
+ void AccountCommitted(size_t bytes) {
+ DCHECK_GE(committed_ + bytes, committed_);
+ committed_ += bytes;
+ if (committed_ > max_committed_) {
+ max_committed_ = committed_;
+ }
+ }
+
+ void AccountUncommitted(size_t bytes) {
+ DCHECK_GE(committed_, committed_ - bytes);
+ committed_ -= bytes;
+ }
+
+ inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ // Returns amount of off-heap memory in-use by objects in this Space.
+ virtual size_t ExternalBackingStoreBytes(
+ ExternalBackingStoreType type) const {
+ return external_backing_store_bytes_[type];
+ }
+
+ void* GetRandomMmapAddr();
+
+ MemoryChunk* first_page() { return memory_chunk_list_.front(); }
+ MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+
+ base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+
+ FreeList* free_list() { return free_list_.get(); }
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+
+ protected:
+ intptr_t GetNextInlineAllocationStepSize();
+ bool AllocationObserversActive() {
+ return !allocation_observers_paused_ && !allocation_observers_.empty();
+ }
+
+ void DetachFromHeap() { heap_ = nullptr; }
+
+ std::vector<AllocationObserver*> allocation_observers_;
+
+ // The List manages the pages that belong to the given space.
+ base::List<MemoryChunk> memory_chunk_list_;
+
+ // Tracks off-heap memory used by this space.
+ std::atomic<size_t>* external_backing_store_bytes_;
+
+ static const intptr_t kIdOffset = 9 * kSystemPointerSize;
+
+ bool allocation_observers_paused_;
+ Heap* heap_;
+ AllocationSpace id_;
+
+ // Keeps track of committed memory in a space.
+ size_t committed_;
+ size_t max_committed_;
+
+ std::unique_ptr<FreeList> free_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(Space);
+};
+
// The CodeObjectRegistry holds all start addresses of code objects of a given
// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
@@ -265,7 +581,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
-class MemoryChunk {
+class MemoryChunk : public BasicMemoryChunk {
public:
// Use with std data structures.
struct Hasher {
@@ -274,74 +590,6 @@ class MemoryChunk {
}
};
- enum Flag {
- NO_FLAGS = 0u,
- IS_EXECUTABLE = 1u << 0,
- POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
- POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
- // A page in the from-space or a young large page that was not scavenged
- // yet.
- FROM_PAGE = 1u << 3,
- // A page in the to-space or a young large page that was scavenged.
- TO_PAGE = 1u << 4,
- LARGE_PAGE = 1u << 5,
- EVACUATION_CANDIDATE = 1u << 6,
- NEVER_EVACUATE = 1u << 7,
-
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR = 1u << 8,
-
- // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
- // from new to old space during evacuation.
- PAGE_NEW_OLD_PROMOTION = 1u << 9,
-
- // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
- // within the new space during evacuation.
- PAGE_NEW_NEW_PROMOTION = 1u << 10,
-
- // This flag is intended to be used for testing. Works only when both
- // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
- // are set. It forces the page to become an evacuation candidate at next
- // candidates selection cycle.
- FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
-
- // This flag is intended to be used for testing.
- NEVER_ALLOCATE_ON_PAGE = 1u << 12,
-
- // The memory chunk is already logically freed, however the actual freeing
- // still has to be performed.
- PRE_FREED = 1u << 13,
-
- // |POOLED|: When actually freeing this chunk, only uncommit and do not
- // give up the reservation as we still reuse the chunk at some point.
- POOLED = 1u << 14,
-
- // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
- // has been aborted and needs special handling by the sweeper.
- COMPACTION_WAS_ABORTED = 1u << 15,
-
- // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
- // on pages is sometimes aborted. The flag is used to avoid repeatedly
- // triggering on the same page.
- COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
-
- // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
- // to iterate the page.
- SWEEP_TO_ITERATE = 1u << 17,
-
- // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
- // enabled.
- INCREMENTAL_MARKING = 1u << 18,
- NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
-
- // The memory chunk freeing bookkeeping has been performed but the chunk has
- // not yet been freed.
- UNREGISTERED = 1u << 20
- };
-
using Flags = uintptr_t;
static const Flags kPointersToHereAreInterestingMask =
@@ -370,36 +618,12 @@ class MemoryChunk {
kSweepingInProgress,
};
- static const intptr_t kAlignment =
- (static_cast<uintptr_t>(1) << kPageSizeBits);
-
- static const intptr_t kAlignmentMask = kAlignment - 1;
-
- static const intptr_t kSizeOffset = 0;
- static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
- static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
- static const intptr_t kReservationOffset =
- kMarkBitmapOffset + kSystemPointerSize;
- static const intptr_t kHeapOffset =
- kReservationOffset + 3 * kSystemPointerSize;
- static const intptr_t kHeaderSentinelOffset =
- kHeapOffset + kSystemPointerSize;
- static const intptr_t kOwnerOffset =
- kHeaderSentinelOffset + kSystemPointerSize;
-
static const size_t kHeaderSize =
- kSizeOffset // NOLINT
- + kSizetSize // size_t size
- + kUIntptrSize // uintptr_t flags_
- + kSystemPointerSize // Bitmap* marking_bitmap_
- + 3 * kSystemPointerSize // VirtualMemory reservation_
- + kSystemPointerSize // Heap* heap_
- + kSystemPointerSize // Address header_sentinel_
- + kSystemPointerSize // Address area_start_
- + kSystemPointerSize // Address area_end_
- + kSystemPointerSize // Address owner_
- + kSizetSize // size_t progress_bar_
- + kIntptrSize // intptr_t live_byte_count_
+ BasicMemoryChunk::kHeaderSize // Parent size.
+ + 3 * kSystemPointerSize // VirtualMemory reservation_
+ + kSystemPointerSize // Address owner_
+ + kSizetSize // size_t progress_bar_
+ + kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
@@ -415,9 +639,8 @@ class MemoryChunk {
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kSystemPointerSize * 2 // base::ListNode
- + kSystemPointerSize * kNumberOfCategories
- // FreeListCategory categories_[kNumberOfCategories]
- + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ + kSystemPointerSize // FreeListCategory** categories__
+ + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kSystemPointerSize // Bitmap* young_generation_bitmap_
+ kSystemPointerSize; // CodeObjectRegistry* code_object_registry_
@@ -428,14 +651,12 @@ class MemoryChunk {
// Maximum number of nested code memory modification scopes.
static const int kMaxWriteUnprotectCounter = 3;
- static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
-
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromHeapObject(const HeapObject o) {
+ static MemoryChunk* FromHeapObject(HeapObject o) {
return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
}
@@ -465,22 +686,8 @@ class MemoryChunk {
void DiscardUnusedMemory(Address addr, size_t size);
- Address address() const {
- return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
- }
-
base::Mutex* mutex() { return mutex_; }
- bool Contains(Address addr) {
- return addr >= area_start() && addr < area_end();
- }
-
- // Checks whether |addr| can be a limit of addresses in this page. It's a
- // limit if it's in the page, or if it's just after the last byte of the page.
- bool ContainsLimit(Address addr) {
- return addr >= area_start() && addr <= area_end();
- }
-
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
}
@@ -491,15 +698,17 @@ class MemoryChunk {
bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
- size_t size() const { return size_; }
- void set_size(size_t size) { size_ = size; }
-
inline Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
- Heap* synchronized_heap();
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race in
+ // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+ // release store.
+ void SynchronizedHeapLoad();
+#endif
template <RememberedSetType type>
bool ContainsSlots() {
@@ -547,12 +756,7 @@ class MemoryChunk {
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
- void AllocateMarkingBitmap();
- void ReleaseMarkingBitmap();
-
- Address area_start() { return area_start_; }
- Address area_end() { return area_end_; }
- size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
+ int FreeListsLength();
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
@@ -596,36 +800,6 @@ class MemoryChunk {
return this->address() + (index << kTaggedSizeLog2);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- void SetFlag(Flag flag) {
- if (access_mode == AccessMode::NON_ATOMIC) {
- flags_ |= flag;
- } else {
- base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
- }
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsFlagSet(Flag flag) {
- return (GetFlags<access_mode>() & flag) != 0;
- }
-
- void ClearFlag(Flag flag) { flags_ &= ~flag; }
- // Set or clear multiple flags at a time. The flags in the mask are set to
- // the value in "flags", the rest retain the current value in |flags_|.
- void SetFlags(uintptr_t flags, uintptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
- }
-
- // Return all current flags.
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- uintptr_t GetFlags() {
- if (access_mode == AccessMode::NON_ATOMIC) {
- return flags_;
- } else {
- return base::AsAtomicWord::Relaxed_Load(&flags_);
- }
- }
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
@@ -653,12 +827,11 @@ class MemoryChunk {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
- bool IsFromPage() const { return (flags_ & FROM_PAGE) != 0; }
- bool IsToPage() const { return (flags_ & TO_PAGE) != 0; }
- bool IsLargePage() const { return (flags_ & LARGE_PAGE) != 0; }
-
+ bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+ bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+ bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
bool InYoungGeneration() const {
- return (flags_ & kIsInYoungGenerationMask) != 0;
+ return (GetFlags() & kIsInYoungGenerationMask) != 0;
}
bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
bool InNewLargeObjectSpace() const {
@@ -667,11 +840,20 @@ class MemoryChunk {
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+ // Gets the chunk's owner or null if the space has been detached.
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
- static inline bool HasHeaderSentinel(Address slot_addr);
+ bool IsWritable() const {
+ // If this is a read-only space chunk but heap_ is non-null, it has not yet
+ // been sealed and can be written to.
+ return !InReadOnlySpace() || heap_ != nullptr;
+ }
+
+ // Gets the chunk's allocation space, potentially dealing with a null owner_
+ // (like read-only chunks have).
+ inline AllocationSpace owner_identity() const;
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
@@ -693,14 +875,20 @@ class MemoryChunk {
CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
+ FreeList* free_list() { return owner()->free_list(); }
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
VirtualMemory reservation);
- // Should be called when memory chunk is about to be freed.
- void ReleaseAllocatedMemory();
+ // Release all memory allocated by the chunk. Should be called when memory
+ // chunk is about to be freed.
+ void ReleaseAllAllocatedMemory();
+ // Release memory allocated by the chunk, except that which is needed by
+ // read-only space chunks.
+ void ReleaseAllocatedMemoryNeededForWritableChunk();
// Sets the requested page permissions only if the write unprotect counter
// has reached 0.
@@ -719,29 +907,12 @@ class MemoryChunk {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
- size_t size_;
- uintptr_t flags_;
-
- Bitmap* marking_bitmap_;
-
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
- Heap* heap_;
-
- // This is used to distinguish the memory chunk header from the interior of a
- // large page. The memory chunk header stores here an impossible tagged
- // pointer: the tagger pointer of the page start. A field in a large object is
- // guaranteed to not contain such a pointer.
- Address header_sentinel_;
-
// The space owning this memory chunk.
std::atomic<Space*> owner_;
- // Start and end of allocatable memory on this chunk.
- Address area_start_;
- Address area_end_;
-
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
std::atomic<size_t> progress_bar_;
@@ -792,7 +963,7 @@ class MemoryChunk {
base::ListNode<MemoryChunk> list_node_;
- FreeListCategory* categories_[kNumberOfCategories];
+ FreeListCategory** categories_;
LocalArrayBufferTracker* local_tracker_;
@@ -807,10 +978,8 @@ class MemoryChunk {
friend class ConcurrentMarkingState;
friend class IncrementalMarkingState;
friend class MajorAtomicMarkingState;
- friend class MajorMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
- friend class MemoryChunkValidator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
@@ -819,7 +988,7 @@ class MemoryChunk {
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
-// A page is a memory chunk of a size 512K. Large object pages may be larger.
+// A page is a memory chunk of a size 256K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@@ -840,7 +1009,7 @@ class Page : public MemoryChunk {
static Page* FromAddress(Address addr) {
return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
}
- static Page* FromHeapObject(const HeapObject o) {
+ static Page* FromHeapObject(HeapObject o) {
return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
}
@@ -873,7 +1042,7 @@ class Page : public MemoryChunk {
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
callback(categories_[i]);
}
}
@@ -884,8 +1053,8 @@ class Page : public MemoryChunk {
// Returns the address for a given offset to the this page.
Address OffsetToAddress(size_t offset) {
Address address_in_page = address() + offset;
- DCHECK_GE(address_in_page, area_start_);
- DCHECK_LT(address_in_page, area_end_);
+ DCHECK_GE(address_in_page, area_start());
+ DCHECK_LT(address_in_page, area_end());
return address_in_page;
}
@@ -963,7 +1132,7 @@ class LargePage : public MemoryChunk {
// x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
- static LargePage* FromHeapObject(const HeapObject o) {
+ static LargePage* FromHeapObject(HeapObject o) {
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
@@ -986,162 +1155,11 @@ class LargePage : public MemoryChunk {
friend class MemoryAllocator;
};
-
-// ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class V8_EXPORT_PRIVATE Space : public Malloced {
- public:
- Space(Heap* heap, AllocationSpace id)
- : allocation_observers_paused_(false),
- heap_(heap),
- id_(id),
- committed_(0),
- max_committed_(0) {
- external_backing_store_bytes_ =
- new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
- external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
- external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
- 0;
- CheckOffsetsAreConsistent();
- }
-
- void CheckOffsetsAreConsistent() const;
-
- static inline void MoveExternalBackingStoreBytes(
- ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
-
- virtual ~Space() {
- delete[] external_backing_store_bytes_;
- external_backing_store_bytes_ = nullptr;
- }
-
- Heap* heap() const {
- DCHECK_NOT_NULL(heap_);
- return heap_;
- }
-
- // Identity used in error reporting.
- AllocationSpace identity() { return id_; }
-
- const char* name() { return Heap::GetSpaceName(id_); }
-
- virtual void AddAllocationObserver(AllocationObserver* observer);
-
- virtual void RemoveAllocationObserver(AllocationObserver* observer);
-
- virtual void PauseAllocationObservers();
-
- virtual void ResumeAllocationObservers();
-
- virtual void StartNextInlineAllocationStep() {}
-
- void AllocationStep(int bytes_since_last, Address soon_object, int size);
-
- // Return the total amount committed memory for this space, i.e., allocatable
- // memory and page headers.
- virtual size_t CommittedMemory() { return committed_; }
-
- virtual size_t MaximumCommittedMemory() { return max_committed_; }
-
- // Returns allocated size.
- virtual size_t Size() = 0;
-
- // Returns size of objects. Can differ from the allocated size
- // (e.g. see LargeObjectSpace).
- virtual size_t SizeOfObjects() { return Size(); }
-
- // Approximate amount of physical memory committed for this space.
- virtual size_t CommittedPhysicalMemory() = 0;
-
- // Return the available bytes without growing.
- virtual size_t Available() = 0;
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (id_ == CODE_SPACE) {
- return RoundDown(size, kCodeAlignment);
- } else {
- return RoundDown(size, kTaggedSize);
- }
- }
-
- virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
-
- void AccountCommitted(size_t bytes) {
- DCHECK_GE(committed_ + bytes, committed_);
- committed_ += bytes;
- if (committed_ > max_committed_) {
- max_committed_ = committed_;
- }
- }
-
- void AccountUncommitted(size_t bytes) {
- DCHECK_GE(committed_, committed_ - bytes);
- committed_ -= bytes;
- }
-
- inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
-
- inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
-
- // Returns amount of off-heap memory in-use by objects in this Space.
- virtual size_t ExternalBackingStoreBytes(
- ExternalBackingStoreType type) const {
- return external_backing_store_bytes_[type];
- }
-
- void* GetRandomMmapAddr();
-
- MemoryChunk* first_page() { return memory_chunk_list_.front(); }
- MemoryChunk* last_page() { return memory_chunk_list_.back(); }
-
- base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- protected:
- intptr_t GetNextInlineAllocationStepSize();
- bool AllocationObserversActive() {
- return !allocation_observers_paused_ && !allocation_observers_.empty();
- }
-
- void DetachFromHeap() { heap_ = nullptr; }
-
- std::vector<AllocationObserver*> allocation_observers_;
-
- // The List manages the pages that belong to the given space.
- base::List<MemoryChunk> memory_chunk_list_;
-
- // Tracks off-heap memory used by this space.
- std::atomic<size_t>* external_backing_store_bytes_;
-
- private:
- static const intptr_t kIdOffset = 9 * kSystemPointerSize;
-
- bool allocation_observers_paused_;
- Heap* heap_;
- AllocationSpace id_;
-
- // Keeps track of committed memory in a space.
- size_t committed_;
- size_t max_committed_;
-
- DISALLOW_COPY_AND_ASSIGN(Space);
-};
-
-class MemoryChunkValidator {
- // Computed offsets should match the compiler generated ones.
- STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
-
- // Validate our estimates on the header size.
- STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
- STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
- STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-};
-
+// Validate our estimates on the header size.
+STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
@@ -1205,7 +1223,7 @@ class MemoryAllocator {
chunk = GetMemoryChunkSafe<kRegular>();
if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory.
- chunk->ReleaseAllocatedMemory();
+ chunk->ReleaseAllAllocatedMemory();
}
}
return chunk;
@@ -1597,17 +1615,17 @@ class PageRange {
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
-// A HeapObjectIterator iterates objects from the bottom of the given space
-// to its top or from the bottom of the given page to its top.
+// A PagedSpaceObjectIterator iterates objects from the bottom of the given
+// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
-class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
+class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
- explicit HeapObjectIterator(PagedSpace* space);
- explicit HeapObjectIterator(Page* page);
+ explicit PagedSpaceObjectIterator(PagedSpace* space);
+ explicit PagedSpaceObjectIterator(Page* page);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
@@ -1629,7 +1647,6 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
PageRange::iterator current_page_;
};
-
// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
@@ -1761,13 +1778,6 @@ class AllocationStats {
#endif
};
-// A free list maintaining free blocks of memory. The free list is organized in
-// a way to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which is
-// divided up into rough categories to cut down on waste. Having finer
-// categories would scatter allocation more.
// The free list is organized in categories as follows:
// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
@@ -1782,11 +1792,9 @@ class AllocationStats {
// words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
// larger. Empty pages are also added to this list.
-class FreeList {
+class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
public:
- // This method returns how much memory can be allocated after freeing
- // maximum_freed memory.
- static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
+ size_t GuaranteedAllocatable(size_t maximum_freed) override {
if (maximum_freed <= kTiniestListMax) {
// Since we are not iterating over all list entries, we cannot guarantee
// that we can find the maximum freed block in that free list.
@@ -1803,7 +1811,50 @@ class FreeList {
return maximum_freed;
}
- static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
+ Page* GetPageForSize(size_t size_in_bytes) override {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ if (!page && static_cast<int>(kSmall) >= minimum_category)
+ page = GetPageForCategoryType(kSmall);
+ if (!page && static_cast<int>(kTiny) >= minimum_category)
+ page = GetPageForCategoryType(kTiny);
+ if (!page && static_cast<int>(kTiniest) >= minimum_category)
+ page = GetPageForCategoryType(kTiniest);
+ return page;
+ }
+
+ FreeListLegacy();
+ ~FreeListLegacy();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
+
+ private:
+ enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
+
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = Page::kPageSize;
+
+ static const size_t kTiniestListMax = 0xa * kTaggedSize;
+ static const size_t kTinyListMax = 0x1f * kTaggedSize;
+ static const size_t kSmallListMax = 0xff * kTaggedSize;
+ static const size_t kMediumListMax = 0x7ff * kTaggedSize;
+ static const size_t kLargeListMax = 0x1fff * kTaggedSize;
+ static const size_t kTinyAllocationMax = kTiniestListMax;
+ static const size_t kSmallAllocationMax = kTinyListMax;
+ static const size_t kMediumAllocationMax = kSmallListMax;
+ static const size_t kLargeAllocationMax = kMediumListMax;
+
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
if (size_in_bytes <= kTiniestListMax) {
return kTiniest;
} else if (size_in_bytes <= kTinyListMax) {
@@ -1818,152 +1869,145 @@ class FreeList {
return kHuge;
}
- FreeList();
-
- // Adds a node on the free list. The block of size {size_in_bytes} starting
- // at {start} is placed on the free list. The return value is the number of
- // bytes that were not added to the free list, because they freed memory block
- // was too small. Bookkeeping information will be written to the block, i.e.,
- // its contents will be destroyed. The start address should be word aligned,
- // and the size should be a non-zero multiple of the word size.
- size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
-
- // Allocates a free space node frome the free list of at least size_in_bytes
- // bytes. Returns the actual node size in node_size which can be bigger than
- // size_in_bytes. This method returns null if the allocation request cannot be
- // handled by the free list.
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- size_t Available() {
- size_t available = 0;
- ForAllFreeListCategories([&available](FreeListCategory* category) {
- available += category->available();
- });
- return available;
- }
-
- bool IsEmpty() {
- bool empty = true;
- ForAllFreeListCategories([&empty](FreeListCategory* category) {
- if (!category->is_empty()) empty = false;
- });
- return empty;
+ // Returns the category to be used to allocate |size_in_bytes| in the fast
+ // path. The tiny categories are not used for fast allocation.
+ FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+ size_t size_in_bytes) {
+ if (size_in_bytes <= kSmallAllocationMax) {
+ return kSmall;
+ } else if (size_in_bytes <= kMediumAllocationMax) {
+ return kMedium;
+ } else if (size_in_bytes <= kLargeAllocationMax) {
+ return kLarge;
+ }
+ return kHuge;
}
- // Used after booting the VM.
- void RepairLists(Heap* heap);
-
- V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
- bool ContainsPageFreeListItems(Page* page);
-
- size_t wasted_bytes() { return wasted_bytes_; }
+ friend class FreeListCategory;
+ friend class heap::HeapTester;
+};
- template <typename Callback>
- void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
- FreeListCategory* current = categories_[type];
- while (current != nullptr) {
- FreeListCategory* next = current->next();
- callback(current);
- current = next;
+// Inspired by FreeListLegacy.
+// Only has 3 categories: Medium, Large and Huge.
+// Any block that would have belong to tiniest, tiny or small in FreeListLegacy
+// is considered wasted.
+// Allocation is done only in Huge, Medium and Large (in that order),
+// using a first-fit strategy (only the first block of each freelist is ever
+// considered though). Performances is supposed to be better than
+// FreeListLegacy, but memory usage should be higher (because fragmentation will
+// probably be higher).
+class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override {
+ if (maximum_freed <= kMediumListMax) {
+ // Since we are not iterating over all list entries, we cannot guarantee
+ // that we can find the maximum freed block in that free list.
+ return 0;
+ } else if (maximum_freed <= kLargeListMax) {
+ return kLargeAllocationMax;
}
+ return kHugeAllocationMax;
}
- template <typename Callback>
- void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
- }
+ Page* GetPageForSize(size_t size_in_bytes) override {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ return page;
}
- bool AddCategory(FreeListCategory* category);
- V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
- void PrintCategories(FreeListCategoryType type);
-
- // Returns a page containing an entry for a given type, or nullptr otherwise.
- inline Page* GetPageForCategoryType(FreeListCategoryType type);
+ FreeListFastAlloc();
+ ~FreeListFastAlloc();
-#ifdef DEBUG
- size_t SumFreeLists();
- bool IsVeryLong();
-#endif
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
private:
- class FreeListCategoryIterator {
- public:
- FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
- : current_(free_list->categories_[type]) {}
+ enum { kMedium, kLarge, kHuge };
- bool HasNext() { return current_ != nullptr; }
-
- FreeListCategory* Next() {
- DCHECK(HasNext());
- FreeListCategory* tmp = current_;
- current_ = current_->next();
- return tmp;
- }
-
- private:
- FreeListCategory* current_;
- };
-
- // The size range of blocks, in bytes.
- static const size_t kMinBlockSize = 3 * kTaggedSize;
+ static const size_t kMinBlockSize = 0xff * kTaggedSize;
// This is a conservative upper bound. The actual maximum block size takes
// padding and alignment of data and code pages into account.
static const size_t kMaxBlockSize = Page::kPageSize;
- static const size_t kTiniestListMax = 0xa * kTaggedSize;
- static const size_t kTinyListMax = 0x1f * kTaggedSize;
- static const size_t kSmallListMax = 0xff * kTaggedSize;
static const size_t kMediumListMax = 0x7ff * kTaggedSize;
static const size_t kLargeListMax = 0x1fff * kTaggedSize;
- static const size_t kTinyAllocationMax = kTiniestListMax;
- static const size_t kSmallAllocationMax = kTinyListMax;
- static const size_t kMediumAllocationMax = kSmallListMax;
+ static const size_t kMediumAllocationMax = kMinBlockSize;
static const size_t kLargeAllocationMax = kMediumListMax;
+ static const size_t kHugeAllocationMax = kLargeListMax;
- // Walks all available categories for a given |type| and tries to retrieve
- // a node. Returns nullptr if the category is empty.
- FreeSpace FindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Tries to retrieve a node from the first category in a given |type|.
- // Returns nullptr if the category is empty or the top entry is smaller
- // than minimum_size.
- FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Searches a given |type| for a node of at least |minimum_size|.
- FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
- size_t minimum_size);
-
- // The tiny categories are not used for fast allocation.
- FreeListCategoryType SelectFastAllocationFreeListCategoryType(
- size_t size_in_bytes) {
- if (size_in_bytes <= kSmallAllocationMax) {
- return kSmall;
- } else if (size_in_bytes <= kMediumAllocationMax) {
+ // Returns the category used to hold an object of size |size_in_bytes|.
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ if (size_in_bytes <= kMediumListMax) {
return kMedium;
- } else if (size_in_bytes <= kLargeAllocationMax) {
+ } else if (size_in_bytes <= kLargeListMax) {
return kLarge;
}
return kHuge;
}
- FreeListCategory* top(FreeListCategoryType type) const {
- return categories_[type];
+ Page* GetPageForCategoryType(FreeListCategoryType type) {
+ return top(type) ? top(type)->page() : nullptr;
}
+};
- std::atomic<size_t> wasted_bytes_;
- FreeListCategory* categories_[kNumberOfCategories];
+// Use 49 Freelists: on per size between 24 and 256, and then a few ones for
+// larger sizes. See the variable |categories_max| for the size of each
+// Freelist. Allocation is done using a best-fit strategy (considering only the
+// first element of each category though).
+// Performances are expected to be worst than FreeListLegacy, but memory
+// consumption should be lower (since fragmentation should be lower).
+class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override;
- friend class FreeListCategory;
+ Page* GetPageForSize(size_t size_in_bytes) override;
+
+ FreeListMany();
+ ~FreeListMany();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
+
+ private:
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = Page::kPageSize;
+
+ // Categories boundaries generated with:
+ // perl -E '
+ // @cat = map {$_*8} 3..32, 48, 64;
+ // while ($cat[-1] <= 32768) {
+ // push @cat, $cat[-1]+$cat[-3], $cat[-1]*2
+ // }
+ // push @cat, 4080, 4088;
+ // @cat = sort { $a <=> $b } @cat;
+ // push @cat, "Page::kPageSize";
+ // say join ", ", @cat;
+ // say "\n", scalar @cat'
+ // Note the special case for 4080 and 4088 bytes: experiments have shown that
+ // this category classes are more used than others of similar sizes
+ static const int kNumberOfCategories = 49;
+ static const size_t categories_max[kNumberOfCategories];
+
+ // Return the smallest category that could hold |size_in_bytes| bytes.
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ for (int cat = kFirstCategory; cat < last_category_; cat++) {
+ if (size_in_bytes <= categories_max[cat]) {
+ return cat;
+ }
+ }
+ return last_category_;
+ }
};
// LocalAllocationBuffer represents a linear allocation area that is created
@@ -2029,8 +2073,8 @@ class LocalAllocationBuffer {
class SpaceWithLinearArea : public Space {
public:
- SpaceWithLinearArea(Heap* heap, AllocationSpace id)
- : Space(heap, id), top_on_previous_step_(0) {
+ SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
+ : Space(heap, id, free_list), top_on_previous_step_(0) {
allocation_info_.Reset(kNullAddress, kNullAddress);
}
@@ -2091,7 +2135,8 @@ class V8_EXPORT_PRIVATE PagedSpace
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
- PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
+ PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
+ FreeList* free_list);
~PagedSpace() override { TearDown(); }
@@ -2119,14 +2164,14 @@ class V8_EXPORT_PRIVATE PagedSpace
// to the available and wasted totals. The free list is cleared as well.
void ClearAllocatorState() {
accounting_stats_.ClearSize();
- free_list_.Reset();
+ free_list_->Reset();
}
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- size_t Available() override { return free_list_.Available(); }
+ size_t Available() override { return free_list_->Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
@@ -2140,7 +2185,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
- virtual size_t Waste() { return free_list_.wasted_bytes(); }
+ virtual size_t Waste() { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
@@ -2173,7 +2218,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
size_t AccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
+ size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
DCHECK_GE(size_in_bytes, wasted);
@@ -2181,7 +2226,7 @@ class V8_EXPORT_PRIVATE PagedSpace
}
size_t UnaccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+ size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
@@ -2211,7 +2256,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void RefineAllocatedBytesAfterSweeping(Page* page);
- Page* InitializePage(MemoryChunk* chunk, Executability executable);
+ Page* InitializePage(MemoryChunk* chunk);
void ReleasePage(Page* page);
@@ -2275,8 +2320,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// sweeper.
virtual void RefillFreeList();
- FreeList* free_list() { return &free_list_; }
-
base::Mutex* mutex() { return &space_mutex_; }
inline void UnlinkFreeListCategories(Page* page);
@@ -2368,9 +2411,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// Accounting information for this space.
AllocationStats accounting_stats_;
- // The space's free list.
- FreeList free_list_;
-
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
@@ -2396,7 +2436,7 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE),
+ : Space(heap, NEW_SPACE, new NoFreeList()),
current_capacity_(0),
maximum_capacity_(0),
minimum_capacity_(0),
@@ -2465,7 +2505,7 @@ class SemiSpace : public Space {
void RemovePage(Page* page);
void PrependPage(Page* page);
- Page* InitializePage(MemoryChunk* chunk, Executability executable);
+ Page* InitializePage(MemoryChunk* chunk);
// Age mark accessors.
Address age_mark() { return age_mark_; }
@@ -2552,19 +2592,18 @@ class SemiSpace : public Space {
int pages_used_;
friend class NewSpace;
- friend class SemiSpaceIterator;
+ friend class SemiSpaceObjectIterator;
};
-
-// A SemiSpaceIterator is an ObjectIterator that iterates over the active
+// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
-class SemiSpaceIterator : public ObjectIterator {
+class SemiSpaceObjectIterator : public ObjectIterator {
public:
// Create an iterator over the allocated objects in the given to-space.
- explicit SemiSpaceIterator(NewSpace* space);
+ explicit SemiSpaceObjectIterator(NewSpace* space);
inline HeapObject Next() override;
@@ -2821,7 +2860,7 @@ class V8_EXPORT_PRIVATE NewSpace
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsInlineAllocation() override { return true; }
- friend class SemiSpaceIterator;
+ friend class SemiSpaceObjectIterator;
};
class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
@@ -2840,7 +2879,7 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
- : PagedSpace(heap, id, executable) {}
+ : PagedSpace(heap, id, executable, FreeList::CreateFreeList()) {}
bool is_local() override { return true; }
@@ -2886,7 +2925,9 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
- explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
+ explicit OldSpace(Heap* heap)
+ : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
@@ -2901,7 +2942,8 @@ class CodeSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
- explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
+ explicit CodeSpace(Heap* heap)
+ : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
@@ -2918,7 +2960,9 @@ class CodeSpace : public PagedSpace {
class MapSpace : public PagedSpace {
public:
// Creates a map space object.
- explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
+ explicit MapSpace(Heap* heap)
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -2946,6 +2990,9 @@ class ReadOnlySpace : public PagedSpace {
bool writable() const { return !is_marked_read_only_; }
+ bool Contains(Address a) = delete;
+ bool Contains(Object o) = delete;
+
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
@@ -3056,7 +3103,7 @@ class LargeObjectSpace : public Space {
size_t objects_size_; // size of objects
private:
- friend class LargeObjectIterator;
+ friend class LargeObjectSpaceObjectIterator;
};
class NewLargeObjectSpace : public LargeObjectSpace {
@@ -3112,9 +3159,9 @@ class CodeLargeObjectSpace : public LargeObjectSpace {
std::unordered_map<Address, LargePage*> chunk_map_;
};
-class LargeObjectIterator : public ObjectIterator {
+class LargeObjectSpaceObjectIterator : public ObjectIterator {
public:
- explicit LargeObjectIterator(LargeObjectSpace* space);
+ explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);
HeapObject Next() override;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index e59e72d3a6..7d0dcfc370 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -104,16 +104,7 @@ void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
Address start, Address end) {
- // In GC the store buffer has to be empty at any time.
- DCHECK(store_buffer->Empty());
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- Page* page = Page::FromAddress(start);
- if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- } else {
- RememberedSet<OLD_TO_NEW>::Remove(page, start);
- }
+ UNREACHABLE();
}
void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
diff --git a/deps/v8/src/heap/stress-marking-observer.cc b/deps/v8/src/heap/stress-marking-observer.cc
index 091f279a78..bb7720e118 100644
--- a/deps/v8/src/heap/stress-marking-observer.cc
+++ b/deps/v8/src/heap/stress-marking-observer.cc
@@ -9,14 +9,14 @@ namespace v8 {
namespace internal {
// TODO(majeski): meaningful step_size
-StressMarkingObserver::StressMarkingObserver(Heap& heap)
+StressMarkingObserver::StressMarkingObserver(Heap* heap)
: AllocationObserver(64), heap_(heap) {}
void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
- heap_.StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
- kNoGCCallbackFlags);
- heap_.incremental_marking()->EnsureBlackAllocated(soon_object, size);
+ heap_->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
+ kNoGCCallbackFlags);
+ heap_->incremental_marking()->EnsureBlackAllocated(soon_object, size);
}
} // namespace internal
diff --git a/deps/v8/src/heap/stress-marking-observer.h b/deps/v8/src/heap/stress-marking-observer.h
index 37ebb82197..5736ba9289 100644
--- a/deps/v8/src/heap/stress-marking-observer.h
+++ b/deps/v8/src/heap/stress-marking-observer.h
@@ -12,12 +12,12 @@ namespace internal {
class StressMarkingObserver : public AllocationObserver {
public:
- explicit StressMarkingObserver(Heap& heap);
+ explicit StressMarkingObserver(Heap* heap);
void Step(int bytes_allocated, Address soon_object, size_t size) override;
private:
- Heap& heap_;
+ Heap* heap_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/stress-scavenge-observer.cc b/deps/v8/src/heap/stress-scavenge-observer.cc
index b91825c38b..5aa3419ed7 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.cc
+++ b/deps/v8/src/heap/stress-scavenge-observer.cc
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
// TODO(majeski): meaningful step_size
-StressScavengeObserver::StressScavengeObserver(Heap& heap)
+StressScavengeObserver::StressScavengeObserver(Heap* heap)
: AllocationObserver(64),
heap_(heap),
has_requested_gc_(false),
@@ -21,22 +21,22 @@ StressScavengeObserver::StressScavengeObserver(Heap& heap)
limit_percentage_ = NextLimit();
if (FLAG_trace_stress_scavenge && !FLAG_fuzzer_gc_analysis) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[StressScavenge] %d%% is the new limit\n", limit_percentage_);
}
}
void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
- if (has_requested_gc_ || heap_.new_space()->Capacity() == 0) {
+ if (has_requested_gc_ || heap_->new_space()->Capacity() == 0) {
return;
}
double current_percent =
- heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+ heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[Scavenge] %.2lf%% of the new space capacity reached\n",
current_percent);
}
@@ -49,11 +49,11 @@ void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
if (static_cast<int>(current_percent) >= limit_percentage_) {
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
+ heap_->isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
}
has_requested_gc_ = true;
- heap_.isolate()->stack_guard()->RequestGC();
+ heap_->isolate()->stack_guard()->RequestGC();
}
}
@@ -63,15 +63,15 @@ bool StressScavengeObserver::HasRequestedGC() const {
void StressScavengeObserver::RequestedGCDone() {
double current_percent =
- heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+ heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
limit_percentage_ = NextLimit(static_cast<int>(current_percent));
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[Scavenge] %.2lf%% of the new space capacity reached\n",
current_percent);
- heap_.isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
- limit_percentage_);
+ heap_->isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
+ limit_percentage_);
}
has_requested_gc_ = false;
@@ -87,7 +87,7 @@ int StressScavengeObserver::NextLimit(int min) {
return max;
}
- return min + heap_.isolate()->fuzzer_rng()->NextInt(max - min + 1);
+ return min + heap_->isolate()->fuzzer_rng()->NextInt(max - min + 1);
}
} // namespace internal
diff --git a/deps/v8/src/heap/stress-scavenge-observer.h b/deps/v8/src/heap/stress-scavenge-observer.h
index b39b2eac59..4996323b75 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.h
+++ b/deps/v8/src/heap/stress-scavenge-observer.h
@@ -12,7 +12,7 @@ namespace internal {
class StressScavengeObserver : public AllocationObserver {
public:
- explicit StressScavengeObserver(Heap& heap);
+ explicit StressScavengeObserver(Heap* heap);
void Step(int bytes_allocated, Address soon_object, size_t size) override;
@@ -24,7 +24,7 @@ class StressScavengeObserver : public AllocationObserver {
double MaxNewSpaceSizeReached() const;
private:
- Heap& heap_;
+ Heap* heap_;
int limit_percentage_;
bool has_requested_gc_;
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 8f7b55bf2b..cbb7d717b0 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -184,7 +184,7 @@ void Sweeper::StartSweeperTasks() {
void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
if (!page->SweepingDone()) {
- ParallelSweepPage(page, page->owner()->identity());
+ ParallelSweepPage(page, page->owner_identity());
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
@@ -370,7 +370,9 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
p->set_concurrent_sweeping_state(Page::kSweepingDone);
if (code_object_registry) code_object_registry->Finalize();
if (free_list_mode == IGNORE_FREE_LIST) return 0;
- return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
+
+ return static_cast<int>(
+ p->free_list()->GuaranteedAllocatable(max_freed_bytes));
}
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
@@ -500,7 +502,7 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
}
void Sweeper::EnsurePageIsIterable(Page* page) {
- AllocationSpace space = page->owner()->identity();
+ AllocationSpace space = page->owner_identity();
if (IsValidSweepingSpace(space)) {
SweepOrWaitUntilSweepingCompleted(page);
} else {
@@ -573,7 +575,7 @@ void Sweeper::AddPageForIterability(Page* page) {
DCHECK(sweeping_in_progress_);
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
- DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ DCHECK(IsValidIterabilitySpace(page->owner_identity()));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
iterability_list_.push_back(page);
@@ -581,7 +583,7 @@ void Sweeper::AddPageForIterability(Page* page) {
}
void Sweeper::MakeIterable(Page* page) {
- DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ DCHECK(IsValidIterabilitySpace(page->owner_identity()));
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index fa1291f6f3..51788b41e4 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
ishell@chromium.org
jkummerow@chromium.org
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 98c86c2263..7aebf857a2 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -107,12 +107,12 @@ void AccessorAssembler::HandlePolymorphicCase(
// Load the {feedback} array length.
TNode<IntPtrT> length = LoadAndUntagWeakFixedArrayLength(feedback);
- CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(1), length));
+ CSA_ASSERT(this, IntPtrLessThanOrEqual(IntPtrConstant(kEntrySize), length));
- // This is a hand-crafted loop that only compares against the {length}
- // in the end, since we already know that we will have at least a single
- // entry in the {feedback} array anyways.
- TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
+ // This is a hand-crafted loop that iterates backwards and only compares
+ // against zero at the end, since we already know that we will have at least a
+ // single entry in the {feedback} array anyways.
+ TVARIABLE(IntPtrT, var_index, IntPtrSub(length, IntPtrConstant(kEntrySize)));
Label loop(this, &var_index), loop_next(this);
Goto(&loop);
BIND(&loop);
@@ -131,18 +131,19 @@ void AccessorAssembler::HandlePolymorphicCase(
BIND(&loop_next);
var_index =
- Signed(IntPtrAdd(var_index.value(), IntPtrConstant(kEntrySize)));
- Branch(IntPtrLessThan(var_index.value(), length), &loop, if_miss);
+ Signed(IntPtrSub(var_index.value(), IntPtrConstant(kEntrySize)));
+ Branch(IntPtrGreaterThanOrEqual(var_index.value(), IntPtrConstant(0)),
+ &loop, if_miss);
}
}
void AccessorAssembler::HandleLoadICHandlerCase(
- const LoadICParameters* p, TNode<Object> handler, Label* miss,
+ const LazyLoadICParameters* p, TNode<Object> handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
ElementSupport support_elements, LoadAccessMode access_mode) {
Comment("have_handler");
- VARIABLE(var_holder, MachineRepresentation::kTagged, p->holder);
+ VARIABLE(var_holder, MachineRepresentation::kTagged, p->holder());
VARIABLE(var_smi_handler, MachineRepresentation::kTagged, handler);
Variable* vars[] = {&var_holder, &var_smi_handler};
@@ -152,6 +153,14 @@ void AccessorAssembler::HandleLoadICHandlerCase(
Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+ BIND(&try_proto_handler);
+ {
+ GotoIf(IsCodeMap(LoadMap(CAST(handler))), &call_handler);
+ HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
+ &if_smi_handler, miss, exit_point, ic_mode,
+ access_mode);
+ }
+
// |handler| is a Smi, encoding what to do. See SmiHandler methods
// for the encoding format.
BIND(&if_smi_handler);
@@ -161,49 +170,34 @@ void AccessorAssembler::HandleLoadICHandlerCase(
support_elements, access_mode);
}
- BIND(&try_proto_handler);
- {
- GotoIf(IsCodeMap(LoadMap(CAST(handler))), &call_handler);
- HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss, exit_point, ic_mode,
- access_mode);
- }
-
BIND(&call_handler);
{
- exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, handler, p->context,
- p->receiver, p->name, p->slot, p->vector);
+ exit_point->ReturnCallStub(LoadWithVectorDescriptor{}, handler,
+ p->context(), p->receiver(), p->name(),
+ p->slot(), p->vector());
}
}
-void AccessorAssembler::HandleLoadCallbackProperty(const LoadICParameters* p,
- TNode<JSObject> holder,
- TNode<WordT> handler_word,
- ExitPoint* exit_point) {
+void AccessorAssembler::HandleLoadCallbackProperty(
+ const LazyLoadICParameters* p, TNode<JSObject> holder,
+ TNode<WordT> handler_word, ExitPoint* exit_point) {
Comment("native_data_property_load");
TNode<IntPtrT> descriptor =
Signed(DecodeWord<LoadHandler::DescriptorBits>(handler_word));
- Label runtime(this, Label::kDeferred);
Callable callable = CodeFactory::ApiGetter(isolate());
TNode<AccessorInfo> accessor_info =
CAST(LoadDescriptorValue(LoadMap(holder), descriptor));
- GotoIf(IsRuntimeCallStatsEnabled(), &runtime);
- exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
+ exit_point->ReturnCallStub(callable, p->context(), p->receiver(), holder,
accessor_info);
-
- BIND(&runtime);
- exit_point->ReturnCallRuntime(Runtime::kLoadCallbackProperty, p->context,
- p->receiver, holder, accessor_info, p->name);
}
void AccessorAssembler::HandleLoadAccessor(
- const LoadICParameters* p, TNode<CallHandlerInfo> call_handler_info,
+ const LazyLoadICParameters* p, TNode<CallHandlerInfo> call_handler_info,
TNode<WordT> handler_word, TNode<DataHandler> handler,
TNode<IntPtrT> handler_kind, ExitPoint* exit_point) {
Comment("api_getter");
- Label runtime(this, Label::kDeferred);
// Context is stored either in data2 or data3 field depending on whether
// the access check is enabled for this handler or not.
TNode<MaybeObject> maybe_context = Select<MaybeObject>(
@@ -215,39 +209,31 @@ void AccessorAssembler::HandleLoadAccessor(
CSA_CHECK(this, IsNotCleared(maybe_context));
TNode<Object> context = GetHeapObjectAssumeWeak(maybe_context);
- GotoIf(IsRuntimeCallStatsEnabled(), &runtime);
- {
- TNode<Foreign> foreign = CAST(
- LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset));
- TNode<WordT> callback = TNode<WordT>::UncheckedCast(LoadObjectField(
- foreign, Foreign::kForeignAddressOffset, MachineType::Pointer()));
- TNode<Object> data =
- LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
-
- VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver);
- Label load(this);
- GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
- &load);
+ TNode<Foreign> foreign = CAST(
+ LoadObjectField(call_handler_info, CallHandlerInfo::kJsCallbackOffset));
+ TNode<WordT> callback = TNode<WordT>::UncheckedCast(LoadObjectField(
+ foreign, Foreign::kForeignAddressOffset, MachineType::Pointer()));
+ TNode<Object> data =
+ LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
- CSA_ASSERT(
- this,
- WordEqual(handler_kind,
- IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
+ VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver());
+ Label load(this);
+ GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kApiGetter)),
+ &load);
- api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver)));
- Goto(&load);
+ CSA_ASSERT(
+ this,
+ WordEqual(handler_kind,
+ IntPtrConstant(LoadHandler::kApiGetterHolderIsPrototype)));
- BIND(&load);
- Callable callable = CodeFactory::CallApiCallback(isolate());
- TNode<IntPtrT> argc = IntPtrConstant(0);
- exit_point->Return(CallStub(callable, context, callback, argc, data,
- api_holder.value(), p->receiver));
- }
+ api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver())));
+ Goto(&load);
- BIND(&runtime);
- exit_point->ReturnCallRuntime(Runtime::kLoadAccessorProperty, context,
- p->receiver, SmiTag(handler_kind),
- call_handler_info);
+ BIND(&load);
+ Callable callable = CodeFactory::CallApiCallback(isolate());
+ TNode<IntPtrT> argc = IntPtrConstant(0);
+ exit_point->Return(CallStub(callable, context, callback, argc, data,
+ api_holder.value(), p->receiver()));
}
void AccessorAssembler::HandleLoadField(Node* holder, Node* handler_word,
@@ -305,7 +291,7 @@ TNode<MaybeObject> AccessorAssembler::LoadDescriptorValueOrFieldType(
}
void AccessorAssembler::HandleLoadICSmiHandlerCase(
- const LoadICParameters* p, Node* holder, SloppyTNode<Smi> smi_handler,
+ const LazyLoadICParameters* p, Node* holder, SloppyTNode<Smi> smi_handler,
SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
OnNonExistent on_nonexistent, ElementSupport support_elements,
LoadAccessMode access_mode) {
@@ -333,7 +319,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&if_element);
Comment("element_load");
- Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* intptr_index = TryToIntptr(p->name(), miss);
Node* is_jsarray_condition =
IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
Node* elements_kind =
@@ -399,7 +385,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Label if_oob(this, Label::kDeferred);
Comment("indexed string");
- Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* intptr_index = TryToIntptr(p->name(), miss);
Node* length = LoadStringLengthAsWord(holder);
GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
@@ -430,7 +416,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
}
void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
- const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
TNode<WordT> handler_word, Label* rebox_double, Variable* var_double_value,
SloppyTNode<Object> handler, Label* miss, ExitPoint* exit_point,
OnNonExistent on_nonexistent, ElementSupport support_elements) {
@@ -438,7 +424,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
interceptor(this, Label::kDeferred), nonexistent(this),
accessor(this, Label::kDeferred), global(this, Label::kDeferred),
module_export(this, Label::kDeferred), proxy(this, Label::kDeferred),
- native_data_property(this), api_getter(this);
+ native_data_property(this, Label::kDeferred),
+ api_getter(this, Label::kDeferred);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kField)), &field);
GotoIf(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kConstant)),
@@ -479,8 +466,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&nonexistent);
// This is a handler for a load of a non-existent value.
if (on_nonexistent == OnNonExistent::kThrowReferenceError) {
- exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
- p->name);
+ exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context(),
+ p->name());
} else {
DCHECK_EQ(OnNonExistent::kReturnUndefined, on_nonexistent);
exit_point->Return(UndefinedConstant());
@@ -502,7 +489,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder));
TVARIABLE(IntPtrT, var_name_index);
Label found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name), &found,
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found,
&var_name_index, miss);
BIND(&found);
{
@@ -511,7 +498,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
LoadPropertyFromNameDictionary(properties, var_name_index.value(),
&var_details, &var_value);
Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, p->receiver, miss);
+ p->context(), p->receiver(), miss);
exit_point->Return(value);
}
}
@@ -527,7 +514,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(getter)));
Callable callable = CodeFactory::Call(isolate());
- exit_point->Return(CallJS(callable, p->context, getter, p->receiver));
+ exit_point->Return(CallJS(callable, p->context(), getter, p->receiver()));
}
BIND(&native_data_property);
@@ -548,13 +535,13 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
if (support_elements == kSupportElements) {
DCHECK_NE(on_nonexistent, OnNonExistent::kThrowReferenceError);
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ TryToName(p->name(), &if_index, &var_index, &if_unique_name, &var_unique,
&to_name_failed);
BIND(&if_unique_name);
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, holder, var_unique.value(), p->receiver,
+ p->context(), holder, var_unique.value(), p->receiver(),
SmiConstant(on_nonexistent));
BIND(&if_index);
@@ -563,13 +550,15 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
Goto(&to_name_failed);
BIND(&to_name_failed);
+ // TODO(duongn): use GetPropertyWithReceiver builtin once
+ // |lookup_element_in_holder| supports elements.
exit_point->ReturnCallRuntime(Runtime::kGetPropertyWithReceiver,
- p->context, holder, p->name, p->receiver,
- SmiConstant(on_nonexistent));
+ p->context(), holder, p->name(),
+ p->receiver(), SmiConstant(on_nonexistent));
} else {
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, holder, p->name, p->receiver,
+ p->context(), holder, p->name(), p->receiver(),
SmiConstant(on_nonexistent));
}
}
@@ -583,16 +572,16 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
holder, PropertyCell::kPropertyDetailsRawOffset);
GotoIf(IsTheHole(value), miss);
- exit_point->Return(
- CallGetterIfAccessor(value, details, p->context, p->receiver, miss));
+ exit_point->Return(CallGetterIfAccessor(value, details, p->context(),
+ p->receiver(), miss));
}
BIND(&interceptor);
{
Comment("load_interceptor");
exit_point->ReturnCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- p->context, p->name, p->receiver, holder,
- p->slot, p->vector);
+ p->context(), p->name(), p->receiver(),
+ holder, p->slot(), p->vector());
}
BIND(&module_export);
@@ -600,7 +589,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
Comment("module export");
Node* index = DecodeWord<LoadHandler::ExportsIndexBits>(handler_word);
Node* module =
- LoadObjectField(p->receiver, JSModuleNamespace::kModuleOffset,
+ LoadObjectField(p->receiver(), JSModuleNamespace::kModuleOffset,
MachineType::TaggedPointer());
TNode<ObjectHashTable> exports = CAST(LoadObjectField(
module, Module::kExportsOffset, MachineType::TaggedPointer()));
@@ -615,8 +604,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
BIND(&is_the_hole);
{
Node* message = SmiConstant(MessageTemplate::kNotDefined);
- exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
- message, p->name);
+ exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context(),
+ message, p->name());
}
}
@@ -625,7 +614,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
}
void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
- const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
Label* miss, ExitPoint* exit_point) {
Label return_true(this), return_false(this), return_lookup(this),
normal(this), global(this);
@@ -676,8 +665,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
WordEqual(handler_kind,
IntPtrConstant(LoadHandler::kModuleExport)))));
exit_point->ReturnCallStub(
- Builtins::CallableFor(isolate(), Builtins::kHasProperty), p->context,
- p->receiver, p->name);
+ Builtins::CallableFor(isolate(), Builtins::kHasProperty), p->context(),
+ p->receiver(), p->name());
}
BIND(&normal);
@@ -686,7 +675,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerHasNamedCase(
TNode<NameDictionary> properties = CAST(LoadSlowProperties(holder));
TVARIABLE(IntPtrT, var_name_index);
Label found(this);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name), &found,
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()), &found,
&var_name_index, miss);
BIND(&found);
@@ -780,8 +769,8 @@ Node* AccessorAssembler::HandleProtoHandler(
CSA_ASSERT(this, IsWeakOrCleared(data2));
TNode<Object> expected_native_context =
GetHeapObjectAssumeWeak(data2, miss);
- EmitAccessCheck(expected_native_context, p->context, p->receiver, &done,
- miss);
+ EmitAccessCheck(expected_native_context, p->context(), p->receiver(),
+ &done, miss);
}
// Dictionary lookup on receiver is not necessary for Load/StoreGlobalIC
@@ -791,14 +780,14 @@ Node* AccessorAssembler::HandleProtoHandler(
{
DCHECK_EQ(ICMode::kNonGlobalIC, ic_mode);
CSA_ASSERT(this, Word32BinaryNot(HasInstanceType(
- p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+ p->receiver(), JS_GLOBAL_OBJECT_TYPE)));
TNode<NameDictionary> properties =
- CAST(LoadSlowProperties(p->receiver));
+ CAST(LoadSlowProperties(p->receiver()));
TVARIABLE(IntPtrT, var_name_index);
Label found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name), &found,
- &var_name_index, &done);
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
+ &found, &var_name_index, &done);
BIND(&found);
{
if (on_found_on_receiver) {
@@ -816,7 +805,7 @@ Node* AccessorAssembler::HandleProtoHandler(
}
void AccessorAssembler::HandleLoadICProtoHandler(
- const LoadICParameters* p, Node* handler, Variable* var_holder,
+ const LazyLoadICParameters* p, Node* handler, Variable* var_holder,
Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode, LoadAccessMode access_mode) {
DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
@@ -837,7 +826,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
&var_value);
Node* value =
CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, p->receiver, miss);
+ p->context(), p->receiver(), miss);
exit_point->Return(value);
}
},
@@ -910,8 +899,8 @@ void AccessorAssembler::HandleStoreICNativeDataProperty(
Node* accessor_info = LoadDescriptorValue(LoadMap(holder), descriptor);
CSA_CHECK(this, IsAccessorInfo(accessor_info));
- TailCallRuntime(Runtime::kStoreCallbackProperty, p->context, p->receiver,
- holder, accessor_info, p->name, p->value);
+ TailCallRuntime(Runtime::kStoreCallbackProperty, p->context(), p->receiver(),
+ holder, accessor_info, p->name(), p->value());
}
void AccessorAssembler::HandleStoreICHandlerCase(
@@ -927,7 +916,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
// for the encoding format.
BIND(&if_smi_handler);
{
- Node* holder = p->receiver;
+ Node* holder = p->receiver();
Node* handler_word = SmiUntag(CAST(handler));
Label if_fast_smi(this), if_proxy(this);
@@ -949,7 +938,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index);
NameDictionaryLookup<NameDictionary>(
- properties, CAST(p->name), &dictionary_found, &var_name_index, miss);
+ properties, CAST(p->name()), &dictionary_found, &var_name_index, miss);
BIND(&dictionary_found);
{
Node* details = LoadDetailsByKeyIndex<NameDictionary>(
@@ -961,8 +950,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
- p->value);
- Return(p->value);
+ p->value());
+ Return(p->value());
}
BIND(&if_fast_smi);
@@ -984,7 +973,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&data);
// Handle non-transitioning field stores.
- HandleStoreICSmiHandlerCase(handler_word, holder, p->value, miss);
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value(), miss);
}
BIND(&if_proxy);
@@ -1008,8 +997,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&call_handler);
{
TailCallStub(StoreWithVectorDescriptor{}, CAST(strong_handler),
- CAST(p->context), p->receiver, p->name, p->value, p->slot,
- p->vector);
+ p->context(), p->receiver(), p->name(), p->value(),
+ p->slot(), p->vector());
}
}
@@ -1027,7 +1016,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
{
TNode<PropertyCell> property_cell = CAST(map_or_property_cell);
ExitPoint direct_exit(this);
- StoreGlobalIC_PropertyCellCase(property_cell, p->value, &direct_exit,
+ StoreGlobalIC_PropertyCellCase(property_cell, p->value(), &direct_exit,
miss);
}
BIND(&store_transition);
@@ -1035,7 +1024,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
TNode<Map> map = CAST(map_or_property_cell);
HandleStoreICTransitionMapHandlerCase(p, map, miss,
kCheckPrototypeValidity);
- Return(p->value);
+ Return(p->value());
}
}
}
@@ -1064,10 +1053,10 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
IntPtrConstant(DescriptorArray::ToKeyIndex(-1)), IntPtrMul(nof, factor)));
if (flags & kValidateTransitionHandler) {
TNode<Name> key = LoadKeyByKeyIndex(descriptors, last_key_index);
- GotoIf(WordNotEqual(key, p->name), miss);
+ GotoIf(WordNotEqual(key, p->name()), miss);
} else {
CSA_ASSERT(this, WordEqual(LoadKeyByKeyIndex(descriptors, last_key_index),
- p->name));
+ p->name()));
}
Node* details = LoadDetailsByKeyIndex(descriptors, last_key_index);
if (flags & kValidateTransitionHandler) {
@@ -1088,14 +1077,14 @@ void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
// DontEnum attribute is allowed only for private symbols and vice versa.
Branch(Word32Equal(
IsSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
- IsPrivateSymbol(p->name)),
+ IsPrivateSymbol(p->name())),
&attributes_ok, miss);
BIND(&attributes_ok);
}
- OverwriteExistingFastDataProperty(p->receiver, transition_map, descriptors,
- last_key_index, details, p->value, miss,
+ OverwriteExistingFastDataProperty(p->receiver(), transition_map, descriptors,
+ last_key_index, details, p->value(), miss,
true);
}
@@ -1207,7 +1196,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
&double_rep, &tagged_rep);
BIND(&double_rep);
{
- TNode<Float64T> double_value = ChangeNumberToFloat64(value);
+ TNode<Float64T> double_value = ChangeNumberToFloat64(CAST(value));
if (FLAG_unbox_double_fields) {
if (do_transitioning_store) {
StoreMap(object, object_map);
@@ -1275,7 +1264,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
Int32Constant(Representation::kDouble)),
&cont);
{
- Node* double_value = ChangeNumberToFloat64(value);
+ Node* double_value = ChangeNumberToFloat64(CAST(value));
Node* mutable_heap_number =
AllocateMutableHeapNumberWithValue(double_value);
var_value.Bind(mutable_heap_number);
@@ -1301,7 +1290,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
{
Node* mutable_heap_number =
LoadPropertyArrayElement(properties, backing_store_index);
- TNode<Float64T> double_value = ChangeNumberToFloat64(value);
+ TNode<Float64T> double_value = ChangeNumberToFloat64(CAST(value));
Label if_mutable(this);
GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
@@ -1370,7 +1359,7 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(setter)));
Callable callable = CodeFactory::Call(isolate());
- Return(CallJS(callable, p->context, setter, p->receiver, p->value));
+ Return(CallJS(callable, p->context(), setter, p->receiver(), p->value()));
}
void AccessorAssembler::HandleStoreICProtoHandler(
@@ -1388,8 +1377,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
&if_transitioning_element_store);
BIND(&if_element_store);
{
- TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context,
- p->receiver, p->name, p->value, p->slot, p->vector);
+ TailCallStub(StoreWithVectorDescriptor{}, code_handler, p->context(),
+ p->receiver(), p->name(), p->value(), p->slot(),
+ p->vector());
}
BIND(&if_transitioning_element_store);
@@ -1401,9 +1391,9 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(IsDeprecatedMap(transition_map), miss);
- TailCallStub(StoreTransitionDescriptor{}, code_handler, p->context,
- p->receiver, p->name, transition_map, p->value, p->slot,
- p->vector);
+ TailCallStub(StoreTransitionDescriptor{}, code_handler, p->context(),
+ p->receiver(), p->name(), transition_map, p->value(),
+ p->slot(), p->vector());
}
};
}
@@ -1422,8 +1412,8 @@ void AccessorAssembler::HandleStoreICProtoHandler(
GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
StoreValueByKeyIndex<NameDictionary>(
- CAST(properties), UncheckedCast<IntPtrT>(name_index), p->value);
- Return(p->value);
+ CAST(properties), UncheckedCast<IntPtrT>(name_index), p->value());
+ Return(p->value());
},
miss, ic_mode);
@@ -1470,16 +1460,17 @@ void AccessorAssembler::HandleStoreICProtoHandler(
// case is covered above by LookupOnReceiver bit handling of the smi
// handler.
Label slow(this);
- TNode<Map> receiver_map = LoadMap(p->receiver);
+ TNode<Map> receiver_map = LoadMap(p->receiver());
InvalidateValidityCellIfPrototype(receiver_map);
- TNode<NameDictionary> properties = CAST(LoadSlowProperties(p->receiver));
- Add<NameDictionary>(properties, CAST(p->name), p->value, &slow);
- Return(p->value);
+ TNode<NameDictionary> properties =
+ CAST(LoadSlowProperties(p->receiver()));
+ Add<NameDictionary>(properties, CAST(p->name()), p->value(), &slow);
+ Return(p->value());
BIND(&slow);
- TailCallRuntime(Runtime::kAddDictionaryProperty, p->context, p->receiver,
- p->name, p->value);
+ TailCallRuntime(Runtime::kAddDictionaryProperty, p->context(),
+ p->receiver(), p->name(), p->value());
}
BIND(&if_accessor);
@@ -1513,7 +1504,7 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Node* data =
LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset);
- VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver);
+ VARIABLE(api_holder, MachineRepresentation::kTagged, p->receiver());
Label store(this);
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kApiSetter)),
&store);
@@ -1523,20 +1514,20 @@ void AccessorAssembler::HandleStoreICProtoHandler(
WordEqual(handler_kind,
IntPtrConstant(StoreHandler::kApiSetterHolderIsPrototype)));
- api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver)));
+ api_holder.Bind(LoadMapPrototype(LoadMap(p->receiver())));
Goto(&store);
BIND(&store);
Callable callable = CodeFactory::CallApiCallback(isolate());
TNode<IntPtrT> argc = IntPtrConstant(1);
Return(CallStub(callable, context, callback, argc, data,
- api_holder.value(), p->receiver, p->value));
+ api_holder.value(), p->receiver(), p->value()));
}
BIND(&if_store_global_proxy);
{
ExitPoint direct_exit(this);
- StoreGlobalIC_PropertyCellCase(holder, p->value, &direct_exit, miss);
+ StoreGlobalIC_PropertyCellCase(holder, p->value(), &direct_exit, miss);
}
}
}
@@ -1551,13 +1542,13 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
to_name_failed(this, Label::kDeferred);
if (support_elements == kSupportElements) {
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ TryToName(p->name(), &if_index, &var_index, &if_unique_name, &var_unique,
&to_name_failed);
BIND(&if_unique_name);
- CallBuiltin(Builtins::kProxySetProperty, p->context, proxy,
- var_unique.value(), p->value, p->receiver);
- Return(p->value);
+ CallBuiltin(Builtins::kProxySetProperty, p->context(), proxy,
+ var_unique.value(), p->value(), p->receiver());
+ Return(p->value());
// The index case is handled earlier by the runtime.
BIND(&if_index);
@@ -1566,12 +1557,12 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
Goto(&to_name_failed);
BIND(&to_name_failed);
- TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context, proxy,
- p->name, p->value, p->receiver);
+ TailCallRuntime(Runtime::kSetPropertyWithReceiver, p->context(), proxy,
+ p->name(), p->value(), p->receiver());
} else {
- Node* name = CallBuiltin(Builtins::kToName, p->context, p->name);
- TailCallBuiltin(Builtins::kProxySetProperty, p->context, proxy, name,
- p->value, p->receiver);
+ Node* name = CallBuiltin(Builtins::kToName, p->context(), p->name());
+ TailCallBuiltin(Builtins::kProxySetProperty, p->context(), proxy, name,
+ p->value(), p->receiver());
}
}
@@ -1979,7 +1970,7 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("typed elements");
// Check if buffer has been detached.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
GotoIf(IsDetachedBuffer(buffer), miss);
// Bounds check.
@@ -1988,7 +1979,8 @@ void AccessorAssembler::EmitElementLoad(
if (access_mode == LoadAccessMode::kHas) {
exit_point->Return(TrueConstant());
} else {
- Node* backing_store = LoadJSTypedArrayBackingStore(CAST(object));
+ TNode<RawPtrT> backing_store =
+ LoadJSTypedArrayBackingStore(CAST(object));
Label uint8_elements(this), int8_elements(this), uint16_elements(this),
int16_elements(this), uint32_elements(this), int32_elements(this),
@@ -2102,13 +2094,13 @@ void AccessorAssembler::NameDictionaryNegativeLookup(Node* object,
}
void AccessorAssembler::InvalidateValidityCellIfPrototype(Node* map,
- Node* bitfield2) {
+ Node* bitfield3) {
Label is_prototype(this), cont(this);
- if (bitfield2 == nullptr) {
- bitfield2 = LoadMapBitField2(map);
+ if (bitfield3 == nullptr) {
+ bitfield3 = LoadMapBitField3(map);
}
- Branch(IsSetWord32(bitfield2, Map::IsPrototypeMapBit::kMask), &is_prototype,
+ Branch(IsSetWord32(bitfield3, Map::IsPrototypeMapBit::kMask), &is_prototype,
&cont);
BIND(&is_prototype);
@@ -2217,7 +2209,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
TVARIABLE(IntPtrT, var_name_index);
Label* notfound = use_stub_cache == kUseStubCache ? &try_stub_cache
: &lookup_prototype_chain;
- DescriptorLookup(p->name, descriptors, bitfield3, &if_descriptor_found,
+ DescriptorLookup(p->name(), descriptors, bitfield3, &if_descriptor_found,
&var_name_index, notfound);
BIND(&if_descriptor_found);
@@ -2232,20 +2224,21 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
Label stub_cache(this);
BIND(&try_stub_cache);
// When there is no feedback vector don't use stub cache.
- GotoIfNot(IsUndefined(p->vector), &stub_cache);
+ GotoIfNot(IsUndefined(p->vector()), &stub_cache);
// Fall back to the slow path for private symbols.
- Branch(IsPrivateSymbol(p->name), slow, &lookup_prototype_chain);
+ Branch(IsPrivateSymbol(p->name()), slow, &lookup_prototype_chain);
BIND(&stub_cache);
Comment("stub cache probe for fast property load");
TVARIABLE(MaybeObject, var_handler);
Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->load_stub_cache(), receiver, p->name,
+ TryProbeStubCache(isolate()->load_stub_cache(), receiver, p->name(),
&found_handler, &var_handler, &stub_cache_miss);
BIND(&found_handler);
{
- HandleLoadICHandlerCase(p, CAST(var_handler.value()), &stub_cache_miss,
- &direct_exit);
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()),
+ &stub_cache_miss, &direct_exit);
}
BIND(&stub_cache_miss);
@@ -2253,8 +2246,8 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// TODO(jkummerow): Check if the property exists on the prototype
// chain. If it doesn't, then there's no point in missing.
Comment("KeyedLoadGeneric_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
- p->name, p->slot, p->vector);
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context(), p->receiver(),
+ p->name(), p->slot(), p->vector());
}
}
@@ -2267,7 +2260,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index);
TNode<NameDictionary> properties = CAST(LoadSlowProperties(receiver));
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name),
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
&dictionary_found, &var_name_index,
&lookup_prototype_chain);
BIND(&dictionary_found);
@@ -2281,7 +2274,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
BIND(&if_found_on_receiver);
{
Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, receiver, slow);
+ p->context(), receiver, slow);
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
Return(value);
}
@@ -2290,14 +2283,14 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
{
VARIABLE(var_holder_map, MachineRepresentation::kTagged);
VARIABLE(var_holder_instance_type, MachineRepresentation::kWord32);
- Label return_undefined(this);
+ Label return_undefined(this), is_private_symbol(this);
Variable* merged_variables[] = {&var_holder_map, &var_holder_instance_type};
Label loop(this, arraysize(merged_variables), merged_variables);
var_holder_map.Bind(receiver_map);
var_holder_instance_type.Bind(instance_type);
- // Private symbols must not be looked up on the prototype chain.
- GotoIf(IsPrivateSymbol(p->name), &return_undefined);
+ GotoIf(IsPrivateSymbol(p->name()), &is_private_symbol);
+
Goto(&loop);
BIND(&loop);
{
@@ -2312,9 +2305,9 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
var_holder_map.Bind(proto_map);
var_holder_instance_type.Bind(proto_instance_type);
Label next_proto(this), return_value(this, &var_value), goto_slow(this);
- TryGetOwnProperty(p->context, receiver, proto, proto_map,
- proto_instance_type, p->name, &return_value, &var_value,
- &next_proto, &goto_slow);
+ TryGetOwnProperty(p->context(), receiver, proto, proto_map,
+ proto_instance_type, p->name(), &return_value,
+ &var_value, &next_proto, &goto_slow);
// This trampoline and the next are required to appease Turbofan's
// variable merging.
@@ -2328,6 +2321,16 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
Return(var_value.value());
}
+ BIND(&is_private_symbol);
+ {
+ CSA_ASSERT(this, IsPrivateSymbol(p->name()));
+
+ // For private names that don't exist on the receiver, we bail
+ // to the runtime to throw. For private symbols, we just return
+ // undefined.
+ Branch(IsPrivateName(p->name()), slow, &return_undefined);
+ }
+
BIND(&return_undefined);
Return(UndefinedConstant());
}
@@ -2338,11 +2341,11 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
GotoIfNot(InstanceTypeEqual(instance_type, JS_PROXY_TYPE), slow);
// Private field/symbol lookup is not supported.
- GotoIf(IsPrivateSymbol(p->name), slow);
+ GotoIf(IsPrivateSymbol(p->name()), slow);
direct_exit.ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kProxyGetProperty),
- p->context, receiver /*holder is the same as receiver*/, p->name,
+ p->context(), receiver /*holder is the same as receiver*/, p->name(),
receiver, SmiConstant(OnNonExistent::kReturnUndefined));
}
}
@@ -2400,26 +2403,24 @@ void AccessorAssembler::TryProbeStubCacheTable(
const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
- // Check that the key in the entry matches the name.
Node* key_base = ExternalConstant(
ExternalReference::Create(stub_cache->key_reference(table)));
- Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
- GotoIf(WordNotEqual(name, entry_key), if_miss);
-
- // Get the map entry from the cache.
- DCHECK_EQ(kSystemPointerSize * 2,
- stub_cache->map_reference(table).address() -
- stub_cache->key_reference(table).address());
- Node* entry_map =
- Load(MachineType::Pointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kSystemPointerSize * 2)));
- GotoIf(WordNotEqual(map, entry_map), if_miss);
-
- DCHECK_EQ(kSystemPointerSize, stub_cache->value_reference(table).address() -
- stub_cache->key_reference(table).address());
+
+ // Check that the key in the entry matches the name.
+ DCHECK_EQ(0, offsetof(StubCache::Entry, key));
+ Node* cached_key = Load(MachineType::TaggedPointer(), key_base, entry_offset);
+ GotoIf(WordNotEqual(name, cached_key), if_miss);
+
+ // Check that the map in the entry matches.
+ Node* cached_map = Load(
+ MachineType::TaggedPointer(), key_base,
+ IntPtrAdd(entry_offset, IntPtrConstant(offsetof(StubCache::Entry, map))));
+ GotoIf(WordNotEqual(map, cached_map), if_miss);
+
TNode<MaybeObject> handler = ReinterpretCast<MaybeObject>(
Load(MachineType::AnyTagged(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kSystemPointerSize))));
+ IntPtrAdd(entry_offset,
+ IntPtrConstant(offsetof(StubCache::Entry, value)))));
// We found the handler.
*var_handler = handler;
@@ -2462,7 +2463,7 @@ void AccessorAssembler::TryProbeStubCache(StubCache* stub_cache, Node* receiver,
//////////////////// Entry points into private implementation (one per stub).
-void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
+void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
ExitPoint* exit_point) {
// Must be kept in sync with LoadIC.
@@ -2476,10 +2477,10 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
Label stub_call(this, Label::kDeferred), miss(this, Label::kDeferred),
no_feedback(this, Label::kDeferred);
- Node* recv_map = LoadReceiverMap(p->receiver);
+ Node* recv_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(recv_map), &miss);
- GotoIf(IsUndefined(p->vector), &no_feedback);
+ GotoIf(IsUndefined(p->vector()), &no_feedback);
// Inlined fast path.
{
@@ -2489,7 +2490,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
Label try_polymorphic(this), if_handler(this, &var_handler);
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, recv_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), recv_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -2513,8 +2514,9 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
Callable ic =
Builtins::CallableFor(isolate(), Builtins::kLoadIC_Noninlined);
Node* code_target = HeapConstant(ic.code());
- exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context,
- p->receiver, p->name, p->slot, p->vector);
+ exit_point->ReturnCallStub(ic.descriptor(), code_target, p->context(),
+ p->receiver(), p->name(), p->slot(),
+ p->vector());
}
BIND(&no_feedback);
@@ -2523,15 +2525,16 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LoadICParameters* p,
// Call into the stub that implements the non-inlined parts of LoadIC.
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
- p->context, p->receiver, p->name, p->slot, p->vector);
+ p->context(), p->receiver(), p->name(), p->slot(), p->vector());
}
BIND(&miss);
{
Comment("LoadIC_BytecodeHandler_miss");
- exit_point->ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context,
- p->receiver, p->name, p->slot, p->vector);
+ exit_point->ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context(),
+ p->receiver(), p->name(), p->slot(),
+ p->vector());
}
}
@@ -2544,15 +2547,19 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
Label if_handler(this, &var_handler), non_inlined(this, Label::kDeferred),
try_polymorphic(this), miss(this, Label::kDeferred);
- Node* receiver_map = LoadReceiverMap(p->receiver);
+ Node* receiver_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(receiver_map), &miss);
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
- HandleLoadICHandlerCase(p, CAST(var_handler.value()), &miss, &direct_exit);
+ {
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()), &miss,
+ &direct_exit);
+ }
BIND(&try_polymorphic);
TNode<HeapObject> strong_feedback = GetHeapObjectIfStrong(feedback, &miss);
@@ -2571,8 +2578,9 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
}
BIND(&miss);
- direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver,
- p->name, p->slot, p->vector);
+ direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, p->context(),
+ p->receiver(), p->name(), p->slot(),
+ p->vector());
}
void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
@@ -2595,7 +2603,7 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
GotoIfNot(WordEqual(feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
&try_uninitialized);
- TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
+ TryProbeStubCache(isolate()->load_stub_cache(), p->receiver(), p->name(),
if_handler, var_handler, miss);
}
@@ -2606,24 +2614,24 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
miss);
exit_point->ReturnCallStub(
Builtins::CallableFor(isolate(), Builtins::kLoadIC_Uninitialized),
- p->context, p->receiver, p->name, p->slot, p->vector);
+ p->context(), p->receiver(), p->name(), p->slot(), p->vector());
}
}
void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
Label miss(this, Label::kDeferred),
check_function_prototype(this);
- Node* receiver = p->receiver;
+ Node* receiver = p->receiver();
GotoIf(TaggedIsSmi(receiver), &miss);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
- GotoIf(IsUndefined(p->vector), &check_function_prototype);
+ GotoIf(IsUndefined(p->vector()), &check_function_prototype);
// Optimistically write the state transition to the vector.
- StoreFeedbackVectorSlot(p->vector, p->slot,
+ StoreFeedbackVectorSlot(p->vector(), p->slot(),
LoadRoot(RootIndex::kpremonomorphic_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
- StoreWeakReferenceInFeedbackVector(p->vector, p->slot, receiver_map,
+ StoreWeakReferenceInFeedbackVector(p->vector(), p->slot(), receiver_map,
kTaggedSize, SMI_PARAMETERS);
Goto(&check_function_prototype);
@@ -2634,7 +2642,7 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
Label not_function_prototype(this, Label::kDeferred);
GotoIfNot(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE),
&not_function_prototype);
- GotoIfNot(IsPrototypeString(p->name), &not_function_prototype);
+ GotoIfNot(IsPrototypeString(p->name()), &not_function_prototype);
GotoIfPrototypeRequiresRuntimeLookup(CAST(receiver), CAST(receiver_map),
&not_function_prototype);
@@ -2648,16 +2656,16 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
BIND(&miss);
{
Label call_runtime(this, Label::kDeferred);
- GotoIf(IsUndefined(p->vector), &call_runtime);
+ GotoIf(IsUndefined(p->vector()), &call_runtime);
// Undo the optimistic state transition.
- StoreFeedbackVectorSlot(p->vector, p->slot,
+ StoreFeedbackVectorSlot(p->vector(), p->slot(),
LoadRoot(RootIndex::kuninitialized_symbol),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
Goto(&call_runtime);
BIND(&call_runtime);
- TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
- p->slot, p->vector);
+ TailCallRuntime(Runtime::kLoadIC_Miss, p->context(), p->receiver(),
+ p->name(), p->slot(), p->vector());
}
}
@@ -2750,8 +2758,8 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
CAST(LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX));
Node* holder = LoadContextElement(native_context, Context::EXTENSION_INDEX);
- LoadICParameters p(context, receiver, lazy_name(),
- ParameterToTagged(slot, slot_mode), vector, holder);
+ LazyLoadICParameters p([=] { return context; }, receiver, lazy_name,
+ ParameterToTagged(slot, slot_mode), vector, holder);
HandleLoadICHandlerCase(&p, handler, miss, exit_point, ICMode::kGlobalIC,
on_nonexistent);
@@ -2767,20 +2775,22 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
try_polymorphic_name(this, Label::kDeferred),
miss(this, Label::kDeferred), generic(this, Label::kDeferred);
- Node* receiver_map = LoadReceiverMap(p->receiver);
+ Node* receiver_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(receiver_map), &miss);
- GotoIf(IsUndefined(p->vector), &generic);
+ GotoIf(IsUndefined(p->vector()), &generic);
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
- HandleLoadICHandlerCase(
- p, CAST(var_handler.value()), &miss, &direct_exit, ICMode::kNonGlobalIC,
- OnNonExistent::kReturnUndefined, kSupportElements, access_mode);
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()), &miss,
+ &direct_exit, ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kSupportElements,
+ access_mode);
}
BIND(&try_polymorphic);
@@ -2807,14 +2817,15 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
TailCallBuiltin(access_mode == LoadAccessMode::kLoad
? Builtins::kKeyedLoadIC_Megamorphic
: Builtins::kKeyedHasIC_Megamorphic,
- p->context, p->receiver, p->name, p->slot, p->vector);
+ p->context(), p->receiver(), p->name(), p->slot(),
+ p->vector());
}
BIND(&try_polymorphic_name);
{
// We might have a name in feedback, and a weak fixed array in the next
// slot.
- Node* name = p->name;
+ Node* name = p->name();
Comment("KeyedLoadIC_try_polymorphic_name");
VARIABLE(var_name, MachineRepresentation::kTagged, name);
VARIABLE(var_index, MachineType::PointerRepresentation());
@@ -2857,36 +2868,37 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
TailCallBuiltin(access_mode == LoadAccessMode::kLoad
? Builtins::kKeyedLoadIC_PolymorphicName
: Builtins::kKeyedHasIC_PolymorphicName,
- p->context, p->receiver, name, p->slot, p->vector);
+ p->context(), p->receiver(), name, p->slot(),
+ p->vector());
}
}
BIND(&miss);
{
Comment("KeyedLoadIC_miss");
- TailCallRuntime(access_mode == LoadAccessMode::kLoad
- ? Runtime::kKeyedLoadIC_Miss
- : Runtime::kKeyedHasIC_Miss,
- p->context, p->receiver, p->name, p->slot, p->vector);
+ TailCallRuntime(
+ access_mode == LoadAccessMode::kLoad ? Runtime::kKeyedLoadIC_Miss
+ : Runtime::kKeyedHasIC_Miss,
+ p->context(), p->receiver(), p->name(), p->slot(), p->vector());
}
}
void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
VARIABLE(var_index, MachineType::PointerRepresentation());
- VARIABLE(var_unique, MachineRepresentation::kTagged, p->name);
+ VARIABLE(var_unique, MachineRepresentation::kTagged, p->name());
Label if_index(this), if_unique_name(this), if_notunique(this),
if_other(this, Label::kDeferred), if_runtime(this, Label::kDeferred);
- Node* receiver = p->receiver;
+ Node* receiver = p->receiver();
GotoIf(TaggedIsSmi(receiver), &if_runtime);
GotoIf(IsNullOrUndefined(receiver), &if_runtime);
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+ TryToName(p->name(), &if_index, &var_index, &if_unique_name, &var_unique,
&if_other, &if_notunique);
BIND(&if_other);
{
- Node* name = CallBuiltin(Builtins::kToName, p->context, p->name);
+ Node* name = CallBuiltin(Builtins::kToName, p->context(), p->name());
var_unique.Bind(name);
TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique,
&if_runtime, &if_notunique);
@@ -2902,8 +2914,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
BIND(&if_unique_name);
{
- LoadICParameters pp = *p;
- pp.name = var_unique.value();
+ LoadICParameters pp(p, var_unique.value());
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
@@ -2929,8 +2940,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
// successfully internalizing the incoming string. Past experiments
// with this have shown that it causes too much traffic on the stub
// cache. We may want to re-evaluate that in the future.
- LoadICParameters pp = *p;
- pp.name = var_unique.value();
+ LoadICParameters pp(p, var_unique.value());
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
GenericPropertyLoad(receiver, receiver_map, instance_type, &pp,
@@ -2946,7 +2956,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
Comment("KeyedLoadGeneric_slow");
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
// TODO(jkummerow): Should we use the GetProperty TF stub instead?
- TailCallRuntime(Runtime::kGetProperty, p->context, p->receiver,
+ TailCallRuntime(Runtime::kGetProperty, p->context(), p->receiver(),
var_unique.value());
}
}
@@ -2956,12 +2966,12 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
TVARIABLE(MaybeObject, var_handler);
Label if_handler(this, &var_handler), miss(this, Label::kDeferred);
- Node* receiver = p->receiver;
+ Node* receiver = p->receiver();
Node* receiver_map = LoadReceiverMap(receiver);
- Node* name = p->name;
- Node* vector = p->vector;
- Node* slot = p->slot;
- Node* context = p->context;
+ Node* name = p->name();
+ Node* vector = p->vector();
+ Node* slot = p->slot();
+ TNode<Context> context = p->context();
// When we get here, we know that the {name} matches the recorded
// feedback name in the {vector} and can safely be used for the
@@ -2980,9 +2990,11 @@ void AccessorAssembler::KeyedLoadICPolymorphicName(const LoadICParameters* p,
BIND(&if_handler);
{
ExitPoint direct_exit(this);
- HandleLoadICHandlerCase(
- p, CAST(var_handler.value()), &miss, &direct_exit, ICMode::kNonGlobalIC,
- OnNonExistent::kReturnUndefined, kOnlyProperties, access_mode);
+ LazyLoadICParameters lazy_p(p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()), &miss,
+ &direct_exit, ICMode::kNonGlobalIC,
+ OnNonExistent::kReturnUndefined, kOnlyProperties,
+ access_mode);
}
BIND(&miss);
@@ -3006,14 +3018,14 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
try_uninitialized(this, Label::kDeferred), miss(this, Label::kDeferred),
no_feedback(this, Label::kDeferred);
- Node* receiver_map = LoadReceiverMap(p->receiver);
+ Node* receiver_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(receiver_map), &miss);
- GotoIf(IsUndefined(p->vector), &no_feedback);
+ GotoIf(IsUndefined(p->vector()), &no_feedback);
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -3039,7 +3051,7 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
&try_uninitialized);
- TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
+ TryProbeStubCache(isolate()->store_stub_cache(), p->receiver(), p->name(),
&if_handler, &var_handler, &miss);
}
BIND(&try_uninitialized);
@@ -3052,21 +3064,22 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
BIND(&no_feedback);
{
- TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context, p->receiver,
- p->name, p->value, p->slot, p->vector);
+ TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context(),
+ p->receiver(), p->name(), p->value(), p->slot(),
+ p->vector());
}
BIND(&miss);
{
- TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
- p->vector, p->receiver, p->name);
+ TailCallRuntime(Runtime::kStoreIC_Miss, p->context(), p->value(), p->slot(),
+ p->vector(), p->receiver(), p->name());
}
}
void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
Label if_lexical_var(this), if_heapobject(this);
TNode<MaybeObject> maybe_weak_ref =
- LoadFeedbackVectorSlot(pp->vector, pp->slot, 0, SMI_PARAMETERS);
+ LoadFeedbackVectorSlot(pp->vector(), pp->slot(), 0, SMI_PARAMETERS);
Branch(TaggedIsSmi(maybe_weak_ref), &if_lexical_var, &if_heapobject);
BIND(&if_heapobject);
@@ -3081,31 +3094,32 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
CAST(GetHeapObjectAssumeWeak(maybe_weak_ref, &try_handler));
ExitPoint direct_exit(this);
- StoreGlobalIC_PropertyCellCase(property_cell, pp->value, &direct_exit,
+ StoreGlobalIC_PropertyCellCase(property_cell, pp->value(), &direct_exit,
&miss);
BIND(&try_handler);
{
Comment("StoreGlobalIC_try_handler");
TNode<MaybeObject> handler = LoadFeedbackVectorSlot(
- pp->vector, pp->slot, kTaggedSize, SMI_PARAMETERS);
+ pp->vector(), pp->slot(), kTaggedSize, SMI_PARAMETERS);
GotoIf(WordEqual(handler, LoadRoot(RootIndex::kuninitialized_symbol)),
&miss);
- StoreICParameters p = *pp;
- DCHECK_NULL(p.receiver);
- Node* native_context = LoadNativeContext(p.context);
- p.receiver =
- LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX);
+ DCHECK_NULL(pp->receiver());
+ Node* native_context = LoadNativeContext(pp->context());
+ StoreICParameters p(
+ pp->context(),
+ LoadContextElement(native_context, Context::GLOBAL_PROXY_INDEX),
+ pp->name(), pp->value(), pp->slot(), pp->vector());
HandleStoreICHandlerCase(&p, handler, &miss, ICMode::kGlobalIC);
}
BIND(&miss);
{
- TailCallRuntime(Runtime::kStoreGlobalIC_Miss, pp->context, pp->value,
- pp->slot, pp->vector, pp->name);
+ TailCallRuntime(Runtime::kStoreGlobalIC_Miss, pp->context(), pp->value(),
+ pp->slot(), pp->vector(), pp->name());
}
}
@@ -3118,9 +3132,9 @@ void AccessorAssembler::StoreGlobalIC(const StoreICParameters* pp) {
TNode<IntPtrT> slot_index =
Signed(DecodeWord<FeedbackNexus::SlotIndexBits>(lexical_handler));
TNode<Context> script_context =
- LoadScriptContext(CAST(pp->context), context_index);
- StoreContextElement(script_context, slot_index, pp->value);
- Return(pp->value);
+ LoadScriptContext(pp->context(), context_index);
+ StoreContextElement(script_context, slot_index, pp->value());
+ Return(pp->value());
}
}
@@ -3199,14 +3213,14 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
no_feedback(this, Label::kDeferred),
try_polymorphic_name(this, Label::kDeferred);
- Node* receiver_map = LoadReceiverMap(p->receiver);
+ Node* receiver_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(receiver_map), &miss);
- GotoIf(IsUndefined(p->vector), &no_feedback);
+ GotoIf(IsUndefined(p->vector()), &no_feedback);
// Check monomorphic case.
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), receiver_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
{
@@ -3237,19 +3251,19 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
BIND(&no_feedback);
{
- TailCallBuiltin(Builtins::kKeyedStoreIC_Megamorphic, p->context,
- p->receiver, p->name, p->value, p->slot);
+ TailCallBuiltin(Builtins::kKeyedStoreIC_Megamorphic, p->context(),
+ p->receiver(), p->name(), p->value(), p->slot());
}
BIND(&try_polymorphic_name);
{
// We might have a name in feedback, and a fixed array in the next slot.
Comment("KeyedStoreIC_try_polymorphic_name");
- GotoIfNot(WordEqual(strong_feedback, p->name), &miss);
+ GotoIfNot(WordEqual(strong_feedback, p->name()), &miss);
// If the name comparison succeeded, we know we have a feedback vector
// with at least one map/handler pair.
TNode<MaybeObject> feedback_element = LoadFeedbackVectorSlot(
- p->vector, p->slot, kTaggedSize, SMI_PARAMETERS);
+ p->vector(), p->slot(), kTaggedSize, SMI_PARAMETERS);
TNode<WeakFixedArray> array = CAST(feedback_element);
HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
&miss);
@@ -3258,8 +3272,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
BIND(&miss);
{
Comment("KeyedStoreIC_miss");
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
- p->vector, p->receiver, p->name);
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context(), p->value(),
+ p->slot(), p->vector(), p->receiver(), p->name());
}
}
@@ -3272,13 +3286,13 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
try_polymorphic(this, Label::kDeferred),
try_megamorphic(this, Label::kDeferred);
- Node* array_map = LoadReceiverMap(p->receiver);
+ Node* array_map = LoadReceiverMap(p->receiver());
GotoIf(IsDeprecatedMap(array_map), &miss);
- GotoIf(IsUndefined(p->vector), &miss);
+ GotoIf(IsUndefined(p->vector()), &miss);
TNode<MaybeObject> feedback =
- TryMonomorphicCase(p->slot, p->vector, array_map, &if_handler,
+ TryMonomorphicCase(p->slot(), p->vector(), array_map, &if_handler,
&var_handler, &try_polymorphic);
BIND(&if_handler);
@@ -3289,8 +3303,9 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
TNode<HeapObject> handler = CAST(var_handler.value());
Label if_transitioning_element_store(this);
GotoIfNot(IsCode(handler), &if_transitioning_element_store);
- TailCallStub(StoreWithVectorDescriptor{}, CAST(handler), CAST(p->context),
- p->receiver, p->name, p->value, p->slot, p->vector);
+ TailCallStub(StoreWithVectorDescriptor{}, CAST(handler), p->context(),
+ p->receiver(), p->name(), p->value(), p->slot(),
+ p->vector());
BIND(&if_transitioning_element_store);
{
@@ -3301,8 +3316,9 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
GotoIf(IsDeprecatedMap(transition_map), &miss);
Node* code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
CSA_ASSERT(this, IsCode(code));
- TailCallStub(StoreTransitionDescriptor{}, code, p->context, p->receiver,
- p->name, transition_map, p->value, p->slot, p->vector);
+ TailCallStub(StoreTransitionDescriptor{}, code, p->context(),
+ p->receiver(), p->name(), transition_map, p->value(),
+ p->slot(), p->vector());
}
}
@@ -3327,16 +3343,17 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
GotoIfNot(
WordEqual(strong_feedback, LoadRoot(RootIndex::kmegamorphic_symbol)),
&miss);
- TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context,
- p->value, p->receiver, p->name);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context(),
+ p->value(), p->receiver(), p->name());
}
}
BIND(&miss);
{
Comment("StoreInArrayLiteralIC_miss");
- TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, p->context, p->value,
- p->slot, p->vector, p->receiver, p->name);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Miss, p->context(),
+ p->value(), p->slot(), p->vector(), p->receiver(),
+ p->name());
}
}
@@ -3349,7 +3366,7 @@ void AccessorAssembler::GenerateLoadIC() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
LoadIC(&p);
@@ -3362,7 +3379,7 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
@@ -3372,7 +3389,8 @@ void AccessorAssembler::GenerateLoadIC_Megamorphic() {
&var_handler, &miss);
BIND(&if_handler);
- LoadICParameters p(context, receiver, name, slot, vector);
+ LazyLoadICParameters p([=] { return context; }, receiver,
+ [=] { return CAST(name); }, slot, vector);
HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit);
BIND(&miss);
@@ -3387,7 +3405,7 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
TVARIABLE(MaybeObject, var_handler);
@@ -3403,7 +3421,11 @@ void AccessorAssembler::GenerateLoadIC_Noninlined() {
&miss, &direct_exit);
BIND(&if_handler);
- HandleLoadICHandlerCase(&p, CAST(var_handler.value()), &miss, &direct_exit);
+ {
+ LazyLoadICParameters lazy_p(&p);
+ HandleLoadICHandlerCase(&lazy_p, CAST(var_handler.value()), &miss,
+ &direct_exit);
+ }
BIND(&miss);
direct_exit.ReturnCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
@@ -3417,7 +3439,7 @@ void AccessorAssembler::GenerateLoadIC_Uninitialized() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
LoadIC_Uninitialized(&p);
@@ -3429,7 +3451,7 @@ void AccessorAssembler::GenerateLoadICTrampoline() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kLoadIC, context, receiver, name, slot, vector);
@@ -3441,7 +3463,7 @@ void AccessorAssembler::GenerateLoadICTrampoline_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kLoadIC_Megamorphic, context, receiver, name, slot,
@@ -3454,13 +3476,13 @@ void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
ExitPoint direct_exit(this);
LoadGlobalIC(
vector, slot,
// lazy_context
- [=] { return CAST(context); },
+ [=] { return context; },
// lazy_name
[=] { return CAST(name); }, typeof_mode, &direct_exit);
}
@@ -3470,7 +3492,7 @@ void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
Callable callable =
@@ -3485,7 +3507,7 @@ void AccessorAssembler::GenerateKeyedLoadIC() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadIC(&p, LoadAccessMode::kLoad);
@@ -3498,7 +3520,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICGeneric(&p);
@@ -3510,7 +3532,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedLoadIC, context, receiver, name, slot,
@@ -3523,7 +3545,7 @@ void AccessorAssembler::GenerateKeyedLoadICTrampoline_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, context, receiver, name,
@@ -3537,7 +3559,7 @@ void AccessorAssembler::GenerateKeyedLoadIC_PolymorphicName() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kLoad);
@@ -3550,7 +3572,7 @@ void AccessorAssembler::GenerateStoreGlobalIC() {
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
StoreICParameters p(context, nullptr, name, value, slot, vector);
StoreGlobalIC(&p);
@@ -3562,7 +3584,7 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
@@ -3576,7 +3598,7 @@ void AccessorAssembler::GenerateStoreIC() {
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
StoreICParameters p(context, receiver, name, value, slot, vector);
StoreIC(&p);
@@ -3589,7 +3611,7 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kStoreIC, context, receiver, name, value, slot,
@@ -3604,7 +3626,7 @@ void AccessorAssembler::GenerateKeyedStoreIC() {
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
StoreICParameters p(context, receiver, name, value, slot, vector);
KeyedStoreIC(&p);
@@ -3617,7 +3639,7 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Node* vector = LoadFeedbackVectorForStub();
TailCallBuiltin(Builtins::kKeyedStoreIC, context, receiver, name, value, slot,
@@ -3632,7 +3654,7 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
Node* value = Parameter(Descriptor::kValue);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
StoreICParameters p(context, array, index, value, slot, vector);
StoreInArrayLiteralIC(&p);
@@ -3640,7 +3662,7 @@ void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
void AccessorAssembler::GenerateCloneObjectIC_Slow() {
using Descriptor = CloneObjectWithVectorDescriptor;
- TNode<HeapObject> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Object> source = CAST(Parameter(Descriptor::kSource));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
@@ -3655,7 +3677,7 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
LoadObjectField(object_fn, JSFunction::kPrototypeOrInitialMapOffset));
CSA_ASSERT(this, IsMap(initial_map));
- TNode<JSObject> result = CAST(AllocateJSObjectFromMap(initial_map));
+ TNode<JSObject> result = AllocateJSObjectFromMap(initial_map);
{
Label did_set_proto_if_needed(this);
@@ -3672,28 +3694,16 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
}
ReturnIf(IsNullOrUndefined(source), result);
+ source = ToObject_Inline(context, source);
- CSA_ASSERT(this, IsJSReceiver(source));
-
- Label call_runtime(this, Label::kDeferred);
- Label done(this);
-
- TNode<Map> map = LoadMap(source);
- TNode<Int32T> type = LoadMapInstanceType(map);
- {
- Label cont(this);
- GotoIf(IsJSObjectInstanceType(type), &cont);
- GotoIf(InstanceTypeEqual(type, JS_PROXY_TYPE), &call_runtime);
- GotoIfNot(IsStringInstanceType(type), &done);
- Branch(SmiEqual(LoadStringLengthAsSmi(CAST(source)), SmiConstant(0)), &done,
- &call_runtime);
- BIND(&cont);
- }
+ Label call_runtime(this, Label::kDeferred), done(this);
+ TNode<Map> source_map = LoadMap(CAST(source));
+ GotoIfNot(IsJSObjectMap(source_map), &call_runtime);
GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(source))), &call_runtime);
ForEachEnumerableOwnProperty(
- context, map, CAST(source), kPropertyAdditionOrder,
+ context, source_map, CAST(source), kPropertyAdditionOrder,
[=](TNode<Name> key, TNode<Object> value) {
SetPropertyInLiteral(context, result, key, value);
},
@@ -3710,17 +3720,17 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
void AccessorAssembler::GenerateCloneObjectIC() {
using Descriptor = CloneObjectWithVectorDescriptor;
- TNode<HeapObject> source = CAST(Parameter(Descriptor::kSource));
- Node* flags = Parameter(Descriptor::kFlags);
- Node* slot = Parameter(Descriptor::kSlot);
- Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Object> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
+ TNode<Smi> slot = CAST(Parameter(Descriptor::kSlot));
+ TNode<HeapObject> vector = CAST(Parameter(Descriptor::kVector));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TVARIABLE(MaybeObject, var_handler);
- Label if_handler(this, &var_handler);
- Label miss(this, Label::kDeferred), try_polymorphic(this, Label::kDeferred),
+ Label if_handler(this, &var_handler), miss(this, Label::kDeferred),
+ try_polymorphic(this, Label::kDeferred),
try_megamorphic(this, Label::kDeferred), slow(this, Label::kDeferred);
- TNode<Map> source_map = LoadMap(UncheckedCast<HeapObject>(source));
+ TNode<Map> source_map = LoadReceiverMap(source);
GotoIf(IsDeprecatedMap(source_map), &miss);
GotoIf(IsUndefined(vector), &slow);
@@ -3735,11 +3745,12 @@ void AccessorAssembler::GenerateCloneObjectIC() {
// Handlers for the CloneObjectIC stub are weak references to the Map of
// a result object.
TNode<Map> result_map = CAST(var_handler.value());
- TVARIABLE(Object, var_properties, EmptyFixedArrayConstant());
- TVARIABLE(FixedArrayBase, var_elements, EmptyFixedArrayConstant());
+ TVARIABLE(HeapObject, var_properties, EmptyFixedArrayConstant());
+ TVARIABLE(FixedArray, var_elements, EmptyFixedArrayConstant());
Label allocate_object(this);
GotoIf(IsNullOrUndefined(source), &allocate_object);
+ CSA_SLOW_ASSERT(this, IsJSObjectMap(source_map));
CSA_SLOW_ASSERT(this, IsJSObjectMap(result_map));
// The IC fast case should only be taken if the result map a compatible
@@ -3753,7 +3764,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
// either an Smi, or a PropertyArray.
// FIXME: Make a CSA macro for this
TNode<Object> source_properties =
- LoadObjectField(source, JSObject::kPropertiesOrHashOffset);
+ LoadObjectField(CAST(source), JSObject::kPropertiesOrHashOffset);
{
GotoIf(TaggedIsSmi(source_properties), &allocate_object);
GotoIf(IsEmptyFixedArray(source_properties), &allocate_object);
@@ -3779,8 +3790,6 @@ void AccessorAssembler::GenerateCloneObjectIC() {
ReturnIf(IsNullOrUndefined(source), object);
// Lastly, clone any in-object properties.
- // Determine the inobject property capacity of both objects, and copy the
- // smaller number into the resulting object.
TNode<IntPtrT> source_start =
LoadMapInobjectPropertiesStartInWords(source_map);
TNode<IntPtrT> source_size = LoadMapInstanceSizeInWords(source_map);
@@ -3789,35 +3798,49 @@ void AccessorAssembler::GenerateCloneObjectIC() {
TNode<IntPtrT> field_offset_difference =
TimesTaggedSize(IntPtrSub(result_start, source_start));
- // If MutableHeapNumbers may be present in-object, allocations may occur
- // within this loop, thus the write barrier is required.
- //
- // TODO(caitp): skip the write barrier until the first MutableHeapNumber
- // field is found
- const bool may_use_mutable_heap_numbers = !FLAG_unbox_double_fields;
-
+ // Just copy the fields as raw data (pretending that there are no
+ // MutableHeapNumbers). This doesn't need write barriers.
BuildFastLoop(
source_start, source_size,
[=](Node* field_index) {
TNode<IntPtrT> field_offset =
TimesTaggedSize(UncheckedCast<IntPtrT>(field_index));
-
- if (may_use_mutable_heap_numbers) {
- TNode<Object> field = LoadObjectField(source, field_offset);
- field = CloneIfMutablePrimitive(field);
- TNode<IntPtrT> result_offset =
- IntPtrAdd(field_offset, field_offset_difference);
- StoreObjectField(object, result_offset, field);
- } else {
- // Copy fields as raw data.
- TNode<IntPtrT> field =
- LoadObjectField<IntPtrT>(source, field_offset);
- TNode<IntPtrT> result_offset =
- IntPtrAdd(field_offset, field_offset_difference);
- StoreObjectFieldNoWriteBarrier(object, result_offset, field);
- }
+ TNode<TaggedT> field =
+ LoadObjectField<TaggedT>(CAST(source), field_offset);
+ TNode<IntPtrT> result_offset =
+ IntPtrAdd(field_offset, field_offset_difference);
+ StoreObjectFieldNoWriteBarrier(object, result_offset, field);
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
+ // If MutableHeapNumbers can occur, we need to go through the {object}
+ // again here and properly clone them. We use a second loop here to
+ // ensure that the GC (and heap verifier) always sees properly initialized
+ // objects, i.e. never hits undefined values in double fields.
+ if (!FLAG_unbox_double_fields) {
+ BuildFastLoop(
+ source_start, source_size,
+ [=](Node* field_index) {
+ TNode<IntPtrT> result_offset =
+ IntPtrAdd(TimesTaggedSize(UncheckedCast<IntPtrT>(field_index)),
+ field_offset_difference);
+ TNode<Object> field = LoadObjectField(object, result_offset);
+ Label if_done(this), if_mutableheapnumber(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(field), &if_done);
+ Branch(IsMutableHeapNumber(CAST(field)), &if_mutableheapnumber,
+ &if_done);
+ BIND(&if_mutableheapnumber);
+ {
+ TNode<Object> value = AllocateMutableHeapNumberWithValue(
+ LoadHeapNumberValue(UncheckedCast<HeapNumber>(field)));
+ StoreObjectField(object, result_offset, value);
+ Goto(&if_done);
+ }
+ BIND(&if_done);
+ },
+ 1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ }
+
Return(object);
}
@@ -3869,7 +3892,7 @@ void AccessorAssembler::GenerateKeyedHasIC() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadIC(&p, LoadAccessMode::kHas);
@@ -3880,7 +3903,7 @@ void AccessorAssembler::GenerateKeyedHasIC_Megamorphic() {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
// TODO(magardn): implement HasProperty handling in KeyedLoadICGeneric
Return(HasProperty(context, receiver, name,
HasPropertyLookupMode::kHasProperty));
@@ -3893,7 +3916,7 @@ void AccessorAssembler::GenerateKeyedHasIC_PolymorphicName() {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
Node* vector = Parameter(Descriptor::kVector);
- Node* context = Parameter(Descriptor::kContext);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
LoadICParameters p(context, receiver, name, slot, vector);
KeyedLoadICPolymorphicName(&p, LoadAccessMode::kHas);
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index b0d6291094..6127b244e3 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -68,21 +68,75 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
}
struct LoadICParameters {
- LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
- Node* vector, Node* holder = nullptr)
- : context(context),
- receiver(receiver),
- name(name),
- slot(slot),
- vector(vector),
- holder(holder ? holder : receiver) {}
-
- Node* context;
- Node* receiver;
- Node* name;
- Node* slot;
- Node* vector;
- Node* holder;
+ LoadICParameters(TNode<Context> context, Node* receiver, Node* name,
+ Node* slot, Node* vector, Node* holder = nullptr)
+ : context_(context),
+ receiver_(receiver),
+ name_(name),
+ slot_(slot),
+ vector_(vector),
+ holder_(holder ? holder : receiver) {}
+
+ LoadICParameters(const LoadICParameters* p, Node* unique_name)
+ : context_(p->context_),
+ receiver_(p->receiver_),
+ name_(unique_name),
+ slot_(p->slot_),
+ vector_(p->vector_),
+ holder_(p->holder_) {}
+
+ TNode<Context> context() const { return context_; }
+ Node* receiver() const { return receiver_; }
+ Node* name() const { return name_; }
+ Node* slot() const { return slot_; }
+ Node* vector() const { return vector_; }
+ Node* holder() const { return holder_; }
+
+ private:
+ TNode<Context> context_;
+ Node* receiver_;
+ Node* name_;
+ Node* slot_;
+ Node* vector_;
+ Node* holder_;
+ };
+
+ struct LazyLoadICParameters {
+ LazyLoadICParameters(LazyNode<Context> context, Node* receiver,
+ LazyNode<Object> name, Node* slot, Node* vector,
+ Node* holder = nullptr)
+ : context_(context),
+ receiver_(receiver),
+ name_(name),
+ slot_(slot),
+ vector_(vector),
+ holder_(holder ? holder : receiver) {}
+
+ explicit LazyLoadICParameters(const LoadICParameters* p)
+ : receiver_(p->receiver()),
+ slot_(p->slot()),
+ vector_(p->vector()),
+ holder_(p->holder()) {
+ TNode<Context> p_context = p->context();
+ context_ = [=] { return p_context; };
+ TNode<Object> p_name = TNode<Object>::UncheckedCast(p->name());
+ name_ = [=] { return p_name; };
+ }
+
+ TNode<Context> context() const { return context_(); }
+ Node* receiver() const { return receiver_; }
+ Node* name() const { return name_(); }
+ Node* slot() const { return slot_; }
+ Node* vector() const { return vector_; }
+ Node* holder() const { return holder_; }
+
+ private:
+ LazyNode<Context> context_;
+ Node* receiver_;
+ LazyNode<Object> name_;
+ Node* slot_;
+ Node* vector_;
+ Node* holder_;
};
void LoadGlobalIC(Node* vector, Node* slot,
@@ -93,7 +147,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// Specialized LoadIC for inlined bytecode handler, hand-tuned to omit frame
// construction on common paths.
- void LoadIC_BytecodeHandler(const LoadICParameters* p, ExitPoint* exit_point);
+ void LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
+ ExitPoint* exit_point);
// Loads dataX field from the DataHandler object.
TNode<MaybeObject> LoadHandlerDataField(SloppyTNode<DataHandler> handler,
@@ -101,11 +156,15 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
protected:
struct StoreICParameters : public LoadICParameters {
- StoreICParameters(Node* context, Node* receiver, Node* name,
+ StoreICParameters(TNode<Context> context, Node* receiver, Node* name,
SloppyTNode<Object> value, Node* slot, Node* vector)
: LoadICParameters(context, receiver, name, slot, vector),
- value(value) {}
- SloppyTNode<Object> value;
+ value_(value) {}
+
+ SloppyTNode<Object> value() const { return value_; }
+
+ private:
+ SloppyTNode<Object> value_;
};
enum class LoadAccessMode { kLoad, kHas };
@@ -127,7 +186,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
- void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield2 = nullptr);
+ void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield3 = nullptr);
void OverwriteExistingFastDataProperty(Node* object, Node* object_map,
Node* descriptors,
@@ -182,13 +241,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// LoadIC implementation.
void HandleLoadICHandlerCase(
- const LoadICParameters* p, TNode<Object> handler, Label* miss,
+ const LazyLoadICParameters* p, TNode<Object> handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode = ICMode::kNonGlobalIC,
OnNonExistent on_nonexistent = OnNonExistent::kReturnUndefined,
ElementSupport support_elements = kOnlyProperties,
LoadAccessMode access_mode = LoadAccessMode::kLoad);
- void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
+ void HandleLoadICSmiHandlerCase(const LazyLoadICParameters* p, Node* holder,
SloppyTNode<Smi> smi_handler,
SloppyTNode<Object> handler, Label* miss,
ExitPoint* exit_point,
@@ -196,18 +255,18 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
ElementSupport support_elements,
LoadAccessMode access_mode);
- void HandleLoadICProtoHandler(const LoadICParameters* p, Node* handler,
+ void HandleLoadICProtoHandler(const LazyLoadICParameters* p, Node* handler,
Variable* var_holder, Variable* var_smi_handler,
Label* if_smi_handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode,
LoadAccessMode access_mode);
- void HandleLoadCallbackProperty(const LoadICParameters* p,
+ void HandleLoadCallbackProperty(const LazyLoadICParameters* p,
TNode<JSObject> holder,
TNode<WordT> handler_word,
ExitPoint* exit_point);
- void HandleLoadAccessor(const LoadICParameters* p,
+ void HandleLoadAccessor(const LazyLoadICParameters* p,
TNode<CallHandlerInfo> call_handler_info,
TNode<WordT> handler_word, TNode<DataHandler> handler,
TNode<IntPtrT> handler_kind, ExitPoint* exit_point);
@@ -220,13 +279,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
Node* receiver, Label* can_access, Label* miss);
void HandleLoadICSmiHandlerLoadNamedCase(
- const LoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
+ const LazyLoadICParameters* p, Node* holder, TNode<IntPtrT> handler_kind,
TNode<WordT> handler_word, Label* rebox_double,
Variable* var_double_value, SloppyTNode<Object> handler, Label* miss,
ExitPoint* exit_point, OnNonExistent on_nonexistent,
ElementSupport support_elements);
- void HandleLoadICSmiHandlerHasNamedCase(const LoadICParameters* p,
+ void HandleLoadICSmiHandlerHasNamedCase(const LazyLoadICParameters* p,
Node* holder,
TNode<IntPtrT> handler_kind,
Label* miss, ExitPoint* exit_point);
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index a7a5b988f6..50b7cd1ebb 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -176,16 +176,28 @@ Node* BinaryOpAssembler::Generate_AddWithFeedback(Node* context, Node* lhs,
Node* rhs_instance_type = LoadInstanceType(rhs);
Node* rhs_is_oddball = InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
- Branch(IsBigIntInstanceType(rhs_instance_type), &bigint,
- &call_with_any_feedback);
+ Goto(&call_with_any_feedback);
}
BIND(&bigint);
{
+ // Both {lhs} and {rhs} are of BigInt type.
+ Label bigint_too_big(this);
+ var_result.Bind(
+ CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs));
+ // Check for sentinel that signals BigIntTooBig exception.
+ GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
+
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
- var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
- SmiConstant(Operation::kAdd)));
Goto(&end);
+
+ BIND(&bigint_too_big);
+ {
+ // Update feedback to prevent deopt loop.
+ UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
+ feedback_vector, slot_id);
+ ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
+ }
}
BIND(&call_with_oddball_feedback);
@@ -363,8 +375,12 @@ Node* BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
BIND(&if_bigint);
{
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
- var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
- SmiConstant(op)));
+ if (op == Operation::kAdd) {
+ var_result.Bind(CallBuiltin(Builtins::kBigIntAdd, context, lhs, rhs));
+ } else {
+ var_result.Bind(CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
+ SmiConstant(op)));
+ }
Goto(&end);
}
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 439d342f1e..54795d4202 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -47,7 +47,7 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
*holder_lookup = kHolderIsReceiver;
return Handle<JSObject>::null();
}
- if (object_map->has_hidden_prototype()) {
+ if (object_map->IsJSGlobalProxyMap() && !object_map->prototype().IsNull()) {
JSObject raw_prototype = JSObject::cast(object_map->prototype());
Handle<JSObject> prototype(raw_prototype, raw_prototype.GetIsolate());
object_map = handle(prototype->map(), prototype->GetIsolate());
@@ -60,7 +60,6 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
return Handle<JSObject>::null();
}
-
bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
Handle<JSObject> holder) const {
DCHECK(is_simple_api_call());
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 38b15618ac..29373d85d8 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -16,23 +16,6 @@
namespace v8 {
namespace internal {
-Address IC::constant_pool() const {
- if (FLAG_enable_embedded_constant_pool) {
- return raw_constant_pool();
- } else {
- return kNullAddress;
- }
-}
-
-
-Address IC::raw_constant_pool() const {
- if (FLAG_enable_embedded_constant_pool) {
- return *constant_pool_address_;
- } else {
- return kNullAddress;
- }
-}
-
void IC::update_receiver_map(Handle<Object> receiver) {
if (receiver->IsSmi()) {
receiver_map_ = isolate_->factory()->heap_number_map();
@@ -50,13 +33,6 @@ bool IC::IsHandler(MaybeObject object) {
(heap_object.IsDataHandler() || heap_object.IsCode()));
}
-bool IC::HostIsDeoptimizedCode() const {
- Code host =
- isolate()->inner_pointer_to_code_cache()->GetCacheEntry(pc())->code;
- return (host.kind() == Code::OPTIMIZED_FUNCTION &&
- host.marked_for_deoptimization());
-}
-
bool IC::vector_needs_update() {
if (state() == NO_FEEDBACK) return false;
return (!vector_set_ &&
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 64a9f315bb..377e3df6ae 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -29,9 +29,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/struct-inl.h"
-#ifdef V8_TRACE_FEEDBACK_UPDATES
#include "src/utils/ostreams.h"
-#endif // V8_TRACE_FEEDBACK_UPDATES
#include "src/execution/runtime-profiler.h"
#include "src/objects/prototype.h"
#include "src/runtime/runtime-utils.h"
@@ -89,7 +87,6 @@ const char* GetModifier(KeyedAccessStoreMode mode) {
void IC::TraceIC(const char* type, Handle<Object> name) {
if (V8_LIKELY(!TracingFlags::is_ic_stats_enabled())) return;
- if (HostIsDeoptimizedCode()) return;
State new_state =
(state() == NO_FEEDBACK) ? NO_FEEDBACK : nexus()->ic_state();
TraceIC(type, name, state(), new_state);
@@ -126,20 +123,21 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
return;
}
+ JavaScriptFrameIterator it(isolate());
+ JavaScriptFrame* frame = it.frame();
+ JSFunction function = frame->function();
+
ICStats::instance()->Begin();
ICInfo& ic_info = ICStats::instance()->Current();
ic_info.type = keyed_prefix ? "Keyed" : "";
ic_info.type += type;
- Object maybe_function =
- Object(Memory<Address>(fp_ + JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(maybe_function.IsJSFunction());
- JSFunction function = JSFunction::cast(maybe_function);
int code_offset = 0;
if (function.IsInterpreted()) {
- code_offset = InterpretedFrame::GetBytecodeOffset(fp());
+ code_offset = InterpretedFrame::GetBytecodeOffset(frame->fp());
} else {
- code_offset = static_cast<int>(pc() - function.code().InstructionStart());
+ code_offset =
+ static_cast<int>(frame->pc() - function.code().InstructionStart());
}
JavaScriptFrame::CollectFunctionAndOffsetForICStats(
function, function.abstract_code(), code_offset);
@@ -170,56 +168,11 @@ IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
target_maps_set_(false),
slow_stub_reason_(nullptr),
nexus_(vector, slot) {
- // To improve the performance of the (much used) IC code, we unfold a few
- // levels of the stack frame iteration code. This yields a ~35% speedup when
- // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
- const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
- Address* constant_pool = nullptr;
- if (FLAG_enable_embedded_constant_pool) {
- constant_pool = reinterpret_cast<Address*>(
- entry + ExitFrameConstants::kConstantPoolOffset);
- }
- Address* pc_address =
- reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
- Address fp = Memory<Address>(entry + ExitFrameConstants::kCallerFPOffset);
-#ifdef DEBUG
- StackFrameIterator it(isolate);
- for (int i = 0; i < 1; i++) it.Advance();
- StackFrame* frame = it.frame();
- DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
-#endif
- // For interpreted functions, some bytecode handlers construct a
- // frame. We have to skip the constructed frame to find the interpreted
- // function's frame. Check if the there is an additional frame, and if there
- // is skip this frame. However, the pc should not be updated. The call to
- // ICs happen from bytecode handlers.
- intptr_t frame_marker =
- Memory<intptr_t>(fp + TypedFrameConstants::kFrameTypeOffset);
- if (frame_marker == StackFrame::TypeToMarker(StackFrame::STUB)) {
- fp = Memory<Address>(fp + TypedFrameConstants::kCallerFPOffset);
- }
- fp_ = fp;
- if (FLAG_enable_embedded_constant_pool) {
- constant_pool_address_ = constant_pool;
- }
- pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
DCHECK_IMPLIES(!vector.is_null(), kind_ == nexus_.kind());
state_ = (vector.is_null()) ? NO_FEEDBACK : nexus_.ic_state();
old_state_ = state_;
}
-JSFunction IC::GetHostFunction() const {
- // Compute the JavaScript frame for the frame pointer of this IC
- // structure. We need this to be able to find the function
- // corresponding to the frame.
- StackFrameIterator it(isolate());
- while (it.frame()->fp() != this->fp()) it.Advance();
- JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
- // Find the function on the stack and both the active code for the
- // function and the original code.
- return frame->function();
-}
-
static void LookupForRead(LookupIterator* it, bool is_has_property) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -262,14 +215,14 @@ bool IC::ShouldRecomputeHandler(Handle<String> name) {
// monomorphic.
if (IsGlobalIC()) return true;
- maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
+ MaybeObjectHandle maybe_handler = nexus()->FindHandlerForMap(receiver_map());
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
// to a more general elements kind.
// TODO(verwaest): Check if the current map is actually what the old map
// would transition to.
- if (maybe_handler_.is_null()) {
+ if (maybe_handler.is_null()) {
if (!receiver_map()->IsJSObjectMap()) return false;
Map first_map = FirstTargetMap();
if (first_map.is_null()) return false;
@@ -320,27 +273,23 @@ MaybeHandle<Object> IC::ReferenceError(Handle<Name> name) {
isolate(), NewReferenceError(MessageTemplate::kNotDefined, name), Object);
}
-// static
-void IC::OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
- JSFunction host_function, const char* reason) {
- FeedbackVector vector = nexus->vector();
- FeedbackSlot slot = nexus->slot();
- OnFeedbackChanged(isolate, vector, slot, host_function, reason);
+void IC::OnFeedbackChanged(const char* reason) {
+ vector_set_ = true;
+ FeedbackVector vector = nexus()->vector();
+ FeedbackSlot slot = nexus()->slot();
+ OnFeedbackChanged(isolate(), vector, slot, reason);
}
// static
void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
- FeedbackSlot slot, JSFunction host_function,
- const char* reason) {
+ FeedbackSlot slot, const char* reason) {
if (FLAG_trace_opt_verbose) {
- // TODO(leszeks): The host function is only needed for this print, we could
- // remove it as a parameter if we're of with removing this trace (or only
- // tracing the feedback vector, not the function name).
if (vector.profiler_ticks() != 0) {
- PrintF("[resetting ticks for ");
- host_function.ShortPrint();
- PrintF(" due from %d due to IC change: %s]\n", vector.profiler_ticks(),
- reason);
+ StdoutStream os;
+ os << "[resetting ticks for ";
+ vector.shared_function_info().ShortPrint(os);
+ os << " from " << vector.profiler_ticks()
+ << " due to IC change: " << reason << "]" << std::endl;
}
}
vector.set_profiler_ticks(0);
@@ -348,7 +297,6 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
#ifdef V8_TRACE_FEEDBACK_UPDATES
if (FLAG_trace_feedback_updates) {
int slot_count = vector.metadata().slot_count();
-
StdoutStream os;
if (slot.IsInvalid()) {
os << "[Feedback slots in ";
@@ -368,19 +316,20 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
#endif
isolate->runtime_profiler()->NotifyICChanged();
- // TODO(2029): When an optimized function is patched, it would
- // be nice to propagate the corresponding type information to its
- // unoptimized version for the benefit of later inlining.
}
-static bool MigrateDeprecated(Handle<Object> object) {
+namespace {
+
+bool MigrateDeprecated(Isolate* isolate, Handle<Object> object) {
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (!receiver->map().is_deprecated()) return false;
- JSObject::MigrateInstance(Handle<JSObject>::cast(object));
+ JSObject::MigrateInstance(isolate, receiver);
return true;
}
+} // namespace
+
bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
DCHECK_EQ(MEGAMORPHIC, new_state);
DCHECK_IMPLIES(!is_keyed(), key->IsName());
@@ -389,15 +338,13 @@ bool IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
// functions doesn't improve performance.
bool changed =
nexus()->ConfigureMegamorphic(key->IsName() ? PROPERTY : ELEMENT);
- vector_set_ = true;
- OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Megamorphic");
+ OnFeedbackChanged("Megamorphic");
return changed;
}
void IC::ConfigureVectorState(Handle<Map> map) {
nexus()->ConfigurePremonomorphic(map);
- vector_set_ = true;
- OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Premonomorphic");
+ OnFeedbackChanged("Premonomorphic");
}
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
@@ -415,9 +362,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
nexus()->ConfigureMonomorphic(name, map, handler);
}
- vector_set_ = true;
- OnFeedbackChanged(isolate(), nexus(), GetHostFunction(),
- IsLoadGlobalIC() ? "LoadGlobal" : "Monomorphic");
+ OnFeedbackChanged(IsLoadGlobalIC() ? "LoadGlobal" : "Monomorphic");
}
void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
@@ -427,8 +372,7 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
if (!is_keyed()) name = Handle<Name>::null();
nexus()->ConfigurePolymorphic(name, maps, handlers);
- vector_set_ = true;
- OnFeedbackChanged(isolate(), nexus(), GetHostFunction(), "Polymorphic");
+ OnFeedbackChanged("Polymorphic");
}
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
@@ -454,7 +398,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
object, name);
}
- if (MigrateDeprecated(object)) use_ic = false;
+ if (MigrateDeprecated(isolate(), object)) use_ic = false;
if (state() != UNINITIALIZED) {
JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
@@ -573,11 +517,10 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name,
MapHandles maps;
MaybeObjectHandles handlers;
- TargetMaps(&maps);
+ nexus()->ExtractMapsAndHandlers(&maps, &handlers);
int number_of_maps = static_cast<int>(maps.size());
int deprecated_maps = 0;
int handler_to_overwrite = -1;
- if (!nexus()->FindHandlers(&handlers, number_of_maps)) return false;
for (int i = 0; i < number_of_maps; i++) {
Handle<Map> current_map = maps.at(i);
@@ -642,9 +585,8 @@ void IC::UpdateMonomorphicIC(const MaybeObjectHandle& handler,
void IC::CopyICToMegamorphicCache(Handle<Name> name) {
MapHandles maps;
MaybeObjectHandles handlers;
- TargetMaps(&maps);
- if (!nexus()->FindHandlers(&handlers, static_cast<int>(maps.size()))) return;
- for (int i = 0; i < static_cast<int>(maps.size()); i++) {
+ nexus()->ExtractMapsAndHandlers(&maps, &handlers);
+ for (size_t i = 0; i < maps.size(); ++i) {
UpdateMegamorphicCache(maps.at(i), name, handlers.at(i));
}
}
@@ -706,8 +648,7 @@ void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
}
void LoadIC::UpdateCaches(LookupIterator* lookup) {
- if (!FLAG_lazy_feedback_allocation && state() == UNINITIALIZED &&
- !IsLoadGlobalIC()) {
+ if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
@@ -763,17 +704,6 @@ void IC::UpdateMegamorphicCache(Handle<Map> map, Handle<Name> name,
}
}
-void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
- DCHECK_EQ(LookupIterator::ACCESSOR, lookup->state());
- if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
- if (IsAnyLoad() || IsAnyHas()) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Accessor);
- } else {
- DCHECK(IsAnyStore());
- TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Accessor);
- }
-}
-
Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
Handle<Object> receiver = lookup->GetReceiver();
ReadOnlyRoots roots(isolate());
@@ -1035,7 +965,8 @@ bool KeyedLoadIC::CanChangeToAllowOutOfBounds(Handle<Map> receiver_map) {
void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
KeyedAccessLoadMode load_mode) {
Handle<Map> receiver_map(receiver->map(), isolate());
- DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE); // Checked by caller.
+ DCHECK(receiver_map->instance_type() !=
+ JS_PRIMITIVE_WRAPPER_TYPE); // Checked by caller.
MapHandles target_receiver_maps;
TargetMaps(&target_receiver_maps);
@@ -1046,8 +977,8 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
for (Handle<Map> map : target_receiver_maps) {
if (map.is_null()) continue;
- if (map->instance_type() == JS_VALUE_TYPE) {
- set_slow_stub_reason("JSValue");
+ if (map->instance_type() == JS_PRIMITIVE_WRAPPER_TYPE) {
+ set_slow_stub_reason("JSPrimitiveWrapper");
return;
}
if (map->instance_type() == JS_PROXY_TYPE) {
@@ -1091,7 +1022,8 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
- if (target_receiver_maps.size() > kMaxKeyedPolymorphism) {
+ if (static_cast<int>(target_receiver_maps.size()) >
+ FLAG_max_polymorphic_map_count) {
set_slow_stub_reason("max polymorph exceeded");
return;
}
@@ -1228,7 +1160,9 @@ namespace {
bool ConvertKeyToIndex(Handle<Object> receiver, Handle<Object> key,
uint32_t* index, InlineCacheState state) {
if (!FLAG_use_ic || state == NO_FEEDBACK) return false;
- if (receiver->IsAccessCheckNeeded() || receiver->IsJSValue()) return false;
+ if (receiver->IsAccessCheckNeeded() || receiver->IsJSPrimitiveWrapper()) {
+ return false;
+ }
// For regular JSReceiver or String receivers, the {key} must be a positive
// array index.
@@ -1299,7 +1233,7 @@ MaybeHandle<Object> KeyedLoadIC::RuntimeLoad(Handle<Object> object,
MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
Handle<Object> key) {
- if (MigrateDeprecated(object)) {
+ if (MigrateDeprecated(isolate(), object)) {
return RuntimeLoad(object, key);
}
@@ -1459,7 +1393,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
StoreOrigin store_origin) {
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
- if (MigrateDeprecated(object)) {
+ if (MigrateDeprecated(isolate(), object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result, Object::SetProperty(isolate(), object, name, value),
@@ -1779,9 +1713,9 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
for (Handle<Map> map : target_receiver_maps) {
- if (!map.is_null() && map->instance_type() == JS_VALUE_TYPE) {
+ if (!map.is_null() && map->instance_type() == JS_PRIMITIVE_WRAPPER_TYPE) {
DCHECK(!IsStoreInArrayLiteralICKind(kind()));
- set_slow_stub_reason("JSValue");
+ set_slow_stub_reason("JSPrimitiveWrapper");
return;
}
}
@@ -1836,7 +1770,10 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// If the maximum number of receiver maps has been exceeded, use the
// megamorphic version of the IC.
- if (target_receiver_maps.size() > kMaxKeyedPolymorphism) return;
+ if (static_cast<int>(target_receiver_maps.size()) >
+ FLAG_max_polymorphic_map_count) {
+ return;
+ }
// Make sure all polymorphic handlers have the same store mode, otherwise the
// megamorphic stub must be used.
@@ -2001,13 +1938,9 @@ bool MayHaveTypedArrayInPrototypeChain(Handle<JSObject> object) {
KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver, uint32_t index) {
bool oob_access = IsOutOfBoundsAccess(receiver, index);
// Don't consider this a growing store if the store would send the receiver to
- // dictionary mode. Also make sure we don't consider this a growing store if
- // there's any JSTypedArray in the {receiver}'s prototype chain, since that
- // prototype is going to swallow all stores that are out-of-bounds for said
- // prototype, and we just let the runtime deal with the complexity of this.
+ // dictionary mode.
bool allow_growth = receiver->IsJSArray() && oob_access &&
- !receiver->WouldConvertToSlowElements(index) &&
- !MayHaveTypedArrayInPrototypeChain(receiver);
+ !receiver->WouldConvertToSlowElements(index);
if (allow_growth) {
return STORE_AND_GROW_HANDLE_COW;
}
@@ -2024,7 +1957,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> value) {
// TODO(verwaest): Let SetProperty do the migration, since storing a property
// might deprecate the current map again, if value does not fit.
- if (MigrateDeprecated(object)) {
+ if (MigrateDeprecated(isolate(), object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
@@ -2109,6 +2042,16 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (!old_receiver_map.is_null()) {
if (is_arguments) {
set_slow_stub_reason("arguments receiver");
+ } else if (object->IsJSArray() && IsGrowStoreMode(store_mode) &&
+ JSArray::HasReadOnlyLength(Handle<JSArray>::cast(object))) {
+ set_slow_stub_reason("array has read only length");
+ } else if (object->IsJSArray() && MayHaveTypedArrayInPrototypeChain(
+ Handle<JSObject>::cast(object))) {
+ // Make sure we don't handle this in IC if there's any JSTypedArray in
+ // the {receiver}'s prototype chain, since that prototype is going to
+ // swallow all stores that are out-of-bounds for said prototype, and we
+ // just let the runtime deal with the complexity of this.
+ set_slow_stub_reason("typed array in the prototype chain of an Array");
} else if (key_is_valid_index) {
if (old_receiver_map->is_abandoned_prototype_map()) {
set_slow_stub_reason("receiver with prototype map");
@@ -2160,7 +2103,8 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
DCHECK(!array->map().IsMapInArrayPrototypeChain(isolate()));
DCHECK(index->IsNumber());
- if (!FLAG_use_ic || state() == NO_FEEDBACK || MigrateDeprecated(array)) {
+ if (!FLAG_use_ic || state() == NO_FEEDBACK ||
+ MigrateDeprecated(isolate(), array)) {
StoreOwnElement(isolate(), array, index, value);
TraceIC("StoreInArrayLiteralIC", index);
return;
@@ -2585,10 +2529,9 @@ static bool CanFastCloneObject(Handle<Map> map) {
return true;
}
-static Handle<Map> FastCloneObjectMap(Isolate* isolate,
- Handle<HeapObject> source, int flags) {
- Handle<Map> source_map(source->map(), isolate);
- SLOW_DCHECK(source->IsNullOrUndefined() || CanFastCloneObject(source_map));
+static Handle<Map> FastCloneObjectMap(Isolate* isolate, Handle<Map> source_map,
+ int flags) {
+ SLOW_DCHECK(CanFastCloneObject(source_map));
Handle<JSFunction> constructor(isolate->native_context()->object_function(),
isolate);
DCHECK(constructor->has_initial_map());
@@ -2613,9 +2556,10 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate,
Map::SetPrototype(isolate, map, isolate->factory()->null_value());
}
- if (source->IsNullOrUndefined() || !source_map->NumberOfOwnDescriptors()) {
+ if (source_map->NumberOfOwnDescriptors() == 0) {
return map;
}
+ DCHECK(!source_map->IsNullOrUndefinedMap());
if (map.is_identical_to(initial_map)) {
map = Map::Copy(isolate, map, "InitializeClonedDescriptors");
@@ -2640,7 +2584,7 @@ static Handle<Map> FastCloneObjectMap(Isolate* isolate,
}
static MaybeHandle<JSObject> CloneObjectSlowPath(Isolate* isolate,
- Handle<HeapObject> source,
+ Handle<Object> source,
int flags) {
Handle<JSObject> new_object;
if (flags & ObjectLiteral::kHasNullPrototype) {
@@ -2664,35 +2608,31 @@ static MaybeHandle<JSObject> CloneObjectSlowPath(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_CloneObjectIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- Handle<HeapObject> source = args.at<HeapObject>(0);
+ Handle<Object> source = args.at<Object>(0);
int flags = args.smi_at(1);
- MigrateDeprecated(source);
-
- FeedbackSlot slot = FeedbackVector::ToSlot(args.smi_at(2));
- Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
- if (maybe_vector->IsUndefined()) {
- RETURN_RESULT_OR_FAILURE(isolate,
- CloneObjectSlowPath(isolate, source, flags));
- }
-
- DCHECK(maybe_vector->IsFeedbackVector());
- Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(maybe_vector);
-
- FeedbackNexus nexus(vector, slot);
- Handle<Map> source_map(source->map(), isolate);
+ if (!MigrateDeprecated(isolate, source)) {
+ FeedbackSlot slot = FeedbackVector::ToSlot(args.smi_at(2));
+ Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
+ if (maybe_vector->IsFeedbackVector()) {
+ FeedbackNexus nexus(Handle<FeedbackVector>::cast(maybe_vector), slot);
+ if (!source->IsSmi() && !nexus.IsMegamorphic()) {
+ Handle<Map> source_map(Handle<HeapObject>::cast(source)->map(),
+ isolate);
+ if (CanFastCloneObject(source_map)) {
+ Handle<Map> target_map =
+ FastCloneObjectMap(isolate, source_map, flags);
+ nexus.ConfigureCloneObject(source_map, target_map);
+ return *target_map;
+ }
- if (!CanFastCloneObject(source_map) || nexus.IsMegamorphic()) {
- // Migrate to slow mode if needed.
- nexus.ConfigureMegamorphic();
- RETURN_RESULT_OR_FAILURE(isolate,
- CloneObjectSlowPath(isolate, source, flags));
+ nexus.ConfigureMegamorphic();
+ }
+ }
}
- Handle<Map> result_map = FastCloneObjectMap(isolate, source, flags);
- nexus.ConfigureCloneObject(source_map, result_map);
-
- return *result_map;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ CloneObjectSlowPath(isolate, source, flags));
}
RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
@@ -2718,46 +2658,6 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
return *value;
}
-RUNTIME_FUNCTION(Runtime_LoadCallbackProperty) {
- Handle<JSObject> receiver = args.at<JSObject>(0);
- Handle<JSObject> holder = args.at<JSObject>(1);
- Handle<AccessorInfo> info = args.at<AccessorInfo>(2);
- Handle<Name> name = args.at<Name>(3);
- HandleScope scope(isolate);
-
- DCHECK(info->IsCompatibleReceiver(*receiver));
-
- PropertyCallbackArguments custom_args(isolate, info->data(), *receiver,
- *holder, Just(kThrowOnError));
- Handle<Object> result = custom_args.CallAccessorGetter(info, name);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.is_null()) return ReadOnlyRoots(isolate).undefined_value();
- return *result;
-}
-
-RUNTIME_FUNCTION(Runtime_LoadAccessorProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 3);
- Handle<JSObject> receiver = args.at<JSObject>(0);
- int handler_kind = args.smi_at(1);
- Handle<CallHandlerInfo> call_handler_info = args.at<CallHandlerInfo>(2);
-
- Object holder = *receiver;
- if (handler_kind == LoadHandler::kApiGetterHolderIsPrototype) {
- holder = receiver->map().prototype();
- } else {
- DCHECK_EQ(handler_kind, LoadHandler::kApiGetter);
- }
-
- // Call the accessor without additional arguments.
- FunctionCallbackArguments custom(isolate, call_handler_info->data(),
- *receiver, holder, HeapObject(), nullptr, 0);
- Handle<Object> result_handle = custom.Call(*call_handler_info);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (result_handle.is_null()) return ReadOnlyRoots(isolate).undefined_value();
- return *result_handle;
-}
-
/**
* Loads a property with an interceptor performing post interceptor
* lookup if interceptor failed.
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 39462d55e5..29f3b4a60a 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -7,8 +7,8 @@
#include <vector>
+#include "src/common/message-template.h"
#include "src/execution/isolate.h"
-#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/ic/stub-cache.h"
#include "src/objects/feedback-vector.h"
@@ -29,8 +29,6 @@ class IC {
// Alias the inline cache state type to make the IC code more readable.
using State = InlineCacheState;
- static constexpr int kMaxKeyedPolymorphism = 4;
-
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
@@ -62,25 +60,15 @@ class IC {
// Nofity the IC system that a feedback has changed.
static void OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
- FeedbackSlot slot, JSFunction host_function,
- const char* reason);
+ FeedbackSlot slot, const char* reason);
- static void OnFeedbackChanged(Isolate* isolate, FeedbackNexus* nexus,
- JSFunction host_function, const char* reason);
+ void OnFeedbackChanged(const char* reason);
protected:
- Address fp() const { return fp_; }
- Address pc() const { return *pc_address_; }
-
void set_slow_stub_reason(const char* reason) { slow_stub_reason_ = reason; }
Isolate* isolate() const { return isolate_; }
- // Get the caller function object.
- JSFunction GetHostFunction() const;
-
- inline bool HostIsDeoptimizedCode() const;
-
bool is_vector_set() { return vector_set_; }
inline bool vector_needs_update();
@@ -106,8 +94,6 @@ class IC {
Handle<Object> key);
MaybeHandle<Object> ReferenceError(Handle<Name> name);
- void TraceHandlerCacheHitStats(LookupIterator* lookup);
-
void UpdateMonomorphicIC(const MaybeObjectHandle& handler, Handle<Name> name);
bool UpdatePolymorphicIC(Handle<Name> name, const MaybeObjectHandle& handler);
void UpdateMegamorphicCache(Handle<Map> map, Handle<Name> name,
@@ -158,28 +144,12 @@ class IC {
FeedbackNexus* nexus() { return &nexus_; }
private:
- inline Address constant_pool() const;
- inline Address raw_constant_pool() const;
-
void FindTargetMaps() {
if (target_maps_set_) return;
target_maps_set_ = true;
nexus()->ExtractMaps(&target_maps_);
}
- // Frame pointer for the frame that uses (calls) the IC.
- Address fp_;
-
- // All access to the program counter and constant pool of an IC structure is
- // indirect to make the code GC safe. This feature is crucial since
- // GetProperty and SetProperty are called and they in turn might
- // invoke the garbage collector.
- Address* pc_address_;
-
- // The constant pool of the code which originally called the IC (which might
- // be for the breakpointed copy of the original code).
- Address* constant_pool_address_;
-
Isolate* isolate_;
bool vector_set_;
@@ -187,7 +157,6 @@ class IC {
State state_;
FeedbackSlotKind kind_;
Handle<Map> receiver_map_;
- MaybeObjectHandle maybe_handler_;
MapHandles target_maps_;
bool target_maps_set_;
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index f7e79ee6d7..7e87b015d4 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -185,7 +185,7 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
TNode<Int32T> instance_type = LoadMapInstanceType(prototype_map);
GotoIf(IsCustomElementsReceiverInstanceType(instance_type),
non_fast_elements);
- Node* elements_kind = LoadMapElementsKind(prototype_map);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(prototype_map);
GotoIf(IsFastElementsKind(elements_kind), &loop_body);
GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
Goto(non_fast_elements);
@@ -500,7 +500,7 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
if_grow(this), if_nonfast(this), if_typed_array(this),
if_dictionary(this);
Node* elements = LoadElements(receiver);
- Node* elements_kind = LoadMapElementsKind(receiver_map);
+ TNode<Int32T> elements_kind = LoadMapElementsKind(receiver_map);
Branch(IsFastElementsKind(elements_kind), &if_fast, &if_nonfast);
BIND(&if_fast);
@@ -775,7 +775,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
Label descriptor_found(this), lookup_transition(this);
TVARIABLE(IntPtrT, var_name_index);
- DescriptorLookup(p->name, descriptors, bitfield3, &descriptor_found,
+ DescriptorLookup(p->name(), descriptors, bitfield3, &descriptor_found,
&var_name_index, &lookup_transition);
BIND(&descriptor_found);
@@ -801,18 +801,18 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&data_property);
{
- CheckForAssociatedProtector(p->name, slow);
+ CheckForAssociatedProtector(p->name(), slow);
OverwriteExistingFastDataProperty(receiver, receiver_map, descriptors,
- name_index, details, p->value, slow,
+ name_index, details, p->value(), slow,
false);
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
}
}
BIND(&lookup_transition);
{
Comment("lookup transition");
TNode<Map> transition_map = FindCandidateStoreICTransitionMapHandler(
- receiver_map, CAST(p->name), slow);
+ receiver_map, CAST(p->name()), slow);
// Validate the transition handler candidate and apply the transition.
StoreTransitionMapFlags flags = kValidateTransitionHandler;
@@ -820,7 +820,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
flags = StoreTransitionMapFlags(flags | kCheckPrototypeValidity);
}
HandleStoreICTransitionMapHandlerCase(p, transition_map, slow, flags);
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
}
}
@@ -833,7 +833,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
TVARIABLE(IntPtrT, var_name_index);
Label dictionary_found(this, &var_name_index), not_found(this);
TNode<NameDictionary> properties = CAST(LoadSlowProperties(CAST(receiver)));
- NameDictionaryLookup<NameDictionary>(properties, CAST(p->name),
+ NameDictionaryLookup<NameDictionary>(properties, CAST(p->name()),
&dictionary_found, &var_name_index,
&not_found);
BIND(&dictionary_found);
@@ -858,38 +858,47 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&overwrite);
{
- CheckForAssociatedProtector(p->name, slow);
+ CheckForAssociatedProtector(p->name(), slow);
StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
- p->value);
- exit_point->Return(p->value);
+ p->value());
+ exit_point->Return(p->value());
}
}
BIND(&not_found);
{
- CheckForAssociatedProtector(p->name, slow);
- Label extensible(this);
- Node* bitfield2 = LoadMapBitField2(receiver_map);
- GotoIf(IsPrivateSymbol(p->name), &extensible);
- Branch(IsSetWord32<Map::IsExtensibleBit>(bitfield2), &extensible, slow);
+ CheckForAssociatedProtector(p->name(), slow);
+ Label extensible(this), is_private_symbol(this);
+ Node* bitfield3 = LoadMapBitField3(receiver_map);
+ GotoIf(IsPrivateSymbol(p->name()), &is_private_symbol);
+ Branch(IsSetWord32<Map::IsExtensibleBit>(bitfield3), &extensible, slow);
+
+ BIND(&is_private_symbol);
+ {
+ CSA_ASSERT(this, IsPrivateSymbol(p->name()));
+ // For private names, we miss to the runtime which will throw.
+ // For private symbols, we extend and store an own property.
+ Branch(IsPrivateName(p->name()), slow, &extensible);
+ }
BIND(&extensible);
if (ShouldCheckPrototype()) {
DCHECK(ShouldCallSetter());
LookupPropertyOnPrototypeChain(
- receiver_map, p->name, &accessor, &var_accessor_pair,
+ receiver_map, p->name(), &accessor, &var_accessor_pair,
&var_accessor_holder,
ShouldReconfigureExisting() ? nullptr : &readonly, slow);
}
Label add_dictionary_property_slow(this);
- InvalidateValidityCellIfPrototype(receiver_map, bitfield2);
- Add<NameDictionary>(properties, CAST(p->name), p->value,
+ InvalidateValidityCellIfPrototype(receiver_map, bitfield3);
+ Add<NameDictionary>(properties, CAST(p->name()), p->value(),
&add_dictionary_property_slow);
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
BIND(&add_dictionary_property_slow);
- exit_point->ReturnCallRuntime(Runtime::kAddDictionaryProperty, p->context,
- p->receiver, p->name, p->value);
+ exit_point->ReturnCallRuntime(Runtime::kAddDictionaryProperty,
+ p->context(), p->receiver(), p->name(),
+ p->value());
}
}
@@ -908,8 +917,8 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
GotoIfNot(IsCallableMap(setter_map), &not_callable);
Callable callable = CodeFactory::Call(isolate());
- CallJS(callable, p->context, setter, receiver, p->value);
- exit_point->Return(p->value);
+ CallJS(callable, p->context(), setter, receiver, p->value());
+ exit_point->Return(p->value());
BIND(&not_callable);
{
@@ -917,17 +926,17 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
if (maybe_language_mode.To(&language_mode)) {
if (language_mode == LanguageMode::kStrict) {
exit_point->ReturnCallRuntime(
- Runtime::kThrowTypeError, p->context,
- SmiConstant(MessageTemplate::kNoSetterInCallback), p->name,
+ Runtime::kThrowTypeError, p->context(),
+ SmiConstant(MessageTemplate::kNoSetterInCallback), p->name(),
var_accessor_holder.value());
} else {
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
}
} else {
- CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context,
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context(),
SmiConstant(MessageTemplate::kNoSetterInCallback),
- p->name, var_accessor_holder.value());
- exit_point->Return(p->value);
+ p->name(), var_accessor_holder.value());
+ exit_point->Return(p->value());
}
}
}
@@ -939,17 +948,17 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
LanguageMode language_mode;
if (maybe_language_mode.To(&language_mode)) {
if (language_mode == LanguageMode::kStrict) {
- Node* type = Typeof(p->receiver);
- ThrowTypeError(p->context, MessageTemplate::kStrictReadOnlyProperty,
- p->name, type, p->receiver);
+ Node* type = Typeof(p->receiver());
+ ThrowTypeError(p->context(), MessageTemplate::kStrictReadOnlyProperty,
+ p->name(), type, p->receiver());
} else {
- exit_point->Return(p->value);
+ exit_point->Return(p->value());
}
} else {
- CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context,
+ CallRuntime(Runtime::kThrowTypeErrorIfStrict, p->context(),
SmiConstant(MessageTemplate::kStrictReadOnlyProperty),
- p->name, Typeof(p->receiver), p->receiver);
- exit_point->Return(p->value);
+ p->name(), Typeof(p->receiver()), p->receiver());
+ exit_point->Return(p->value());
}
}
}
@@ -1062,7 +1071,7 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
BIND(&store_property);
{
- StoreICParameters p(context, receiver, name, value, slot, vector);
+ StoreICParameters p(CAST(context), receiver, name, value, slot, vector);
EmitGenericPropertyStore(receiver, receiver_map, &p, &miss);
}
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index bdac1ce334..04381bf693 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -9,6 +9,7 @@
#include "src/heap/heap-inl.h" // For InYoungGeneration().
#include "src/ic/ic-inl.h"
#include "src/logging/counters.h"
+#include "src/objects/tagged-value-inl.h"
namespace v8 {
namespace internal {
@@ -85,25 +86,28 @@ void StubCache::Set(Name name, Map map, MaybeObject handler) {
// Compute the primary entry.
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- MaybeObject old_handler(primary->value);
-
+ MaybeObject old_handler(
+ TaggedValue::ToMaybeObject(isolate(), primary->value));
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
if (old_handler != MaybeObject::FromObject(
- isolate_->builtins()->builtin(Builtins::kIllegal)) &&
- primary->map != kNullAddress) {
- Map old_map = Map::cast(Object(primary->map));
- int seed = PrimaryOffset(Name::cast(Object(primary->key)), old_map);
- int secondary_offset =
- SecondaryOffset(Name::cast(Object(primary->key)), seed);
+ isolate()->builtins()->builtin(Builtins::kIllegal)) &&
+ !primary->map.IsSmi()) {
+ Map old_map =
+ Map::cast(StrongTaggedValue::ToObject(isolate(), primary->map));
+ int seed = PrimaryOffset(
+ Name::cast(StrongTaggedValue::ToObject(isolate(), primary->key)),
+ old_map);
+ int secondary_offset = SecondaryOffset(
+ Name::cast(StrongTaggedValue::ToObject(isolate(), primary->key)), seed);
Entry* secondary = entry(secondary_, secondary_offset);
*secondary = *primary;
}
// Update primary cache.
- primary->key = name.ptr();
- primary->value = handler.ptr();
- primary->map = map.ptr();
+ primary->key = StrongTaggedValue(name);
+ primary->value = TaggedValue(handler);
+ primary->map = StrongTaggedValue(map);
isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
}
@@ -111,13 +115,13 @@ MaybeObject StubCache::Get(Name name, Map map) {
DCHECK(CommonStubCacheChecks(this, name, map, MaybeObject()));
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- if (primary->key == name.ptr() && primary->map == map.ptr()) {
- return MaybeObject(primary->value);
+ if (primary->key == name && primary->map == map) {
+ return TaggedValue::ToMaybeObject(isolate(), primary->value);
}
int secondary_offset = SecondaryOffset(name, primary_offset);
Entry* secondary = entry(secondary_, secondary_offset);
- if (secondary->key == name.ptr() && secondary->map == map.ptr()) {
- return MaybeObject(secondary->value);
+ if (secondary->key == name && secondary->map == map) {
+ return TaggedValue::ToMaybeObject(isolate(), secondary->value);
}
return MaybeObject();
}
@@ -127,14 +131,14 @@ void StubCache::Clear() {
isolate_->builtins()->builtin(Builtins::kIllegal));
Name empty_string = ReadOnlyRoots(isolate()).empty_string();
for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = empty_string.ptr();
- primary_[i].map = kNullAddress;
- primary_[i].value = empty.ptr();
+ primary_[i].key = StrongTaggedValue(empty_string);
+ primary_[i].map = StrongTaggedValue(Smi::zero());
+ primary_[i].value = TaggedValue(empty);
}
for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = empty_string.ptr();
- secondary_[j].map = kNullAddress;
- secondary_[j].value = empty.ptr();
+ secondary_[j].key = StrongTaggedValue(empty_string);
+ secondary_[j].map = StrongTaggedValue(Smi::zero());
+ secondary_[j].value = TaggedValue(empty);
}
}
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 61318245e6..87acc0e007 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -6,6 +6,7 @@
#define V8_IC_STUB_CACHE_H_
#include "src/objects/name.h"
+#include "src/objects/tagged-value.h"
namespace v8 {
namespace internal {
@@ -31,15 +32,14 @@ class SCTableReference {
class V8_EXPORT_PRIVATE StubCache {
public:
struct Entry {
- // The values here have plain Address types because they are read
- // directly from generated code. As a nice side effect, this keeps
- // #includes lightweight.
- Address key;
+ // {key} is a tagged Name pointer, may be cleared by setting to empty
+ // string.
+ StrongTaggedValue key;
// {value} is a tagged heap object reference (weak or strong), equivalent
// to a MaybeObject's payload.
- Address value;
- // {map} is a tagged Map pointer, or nullptr.
- Address map;
+ TaggedValue value;
+ // {map} is a tagged Map pointer, may be cleared by setting to Smi::zero().
+ StrongTaggedValue map;
};
void Initialize();
diff --git a/deps/v8/src/init/OWNERS b/deps/v8/src/init/OWNERS
index c5a41de1fd..aa006edd35 100644
--- a/deps/v8/src/init/OWNERS
+++ b/deps/v8/src/init/OWNERS
@@ -1,5 +1,14 @@
ahaas@chromium.org
bmeurer@chromium.org
-jkummerow@chromium.org
+ftang@chromium.org
+gsathya@chromium.org
+ishell@chromium.org
jgruber@chromium.org
+jkummerow@chromium.org
+marja@chromium.org
+mathias@chromium.org
+ulan@chromium.org
+verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index a080f8cdf0..176749781c 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -12,6 +12,7 @@
#include "src/debug/debug.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/microtask-queue.h"
+#include "src/extensions/cputracemark-extension.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
#include "src/extensions/gc-extension.h"
@@ -123,6 +124,11 @@ static const char* GCFunctionName() {
return flag_given ? FLAG_expose_gc_as : "gc";
}
+static bool isValidCpuTraceMarkFunctionName() {
+ return FLAG_expose_cputracemark_as != nullptr &&
+ strlen(FLAG_expose_cputracemark_as) != 0;
+}
+
void Bootstrapper::InitializeOncePerProcess() {
v8::RegisterExtension(v8::base::make_unique<FreeBufferExtension>());
v8::RegisterExtension(v8::base::make_unique<GCExtension>(GCFunctionName()));
@@ -130,6 +136,10 @@ void Bootstrapper::InitializeOncePerProcess() {
v8::RegisterExtension(v8::base::make_unique<StatisticsExtension>());
v8::RegisterExtension(v8::base::make_unique<TriggerFailureExtension>());
v8::RegisterExtension(v8::base::make_unique<IgnitionStatisticsExtension>());
+ if (isValidCpuTraceMarkFunctionName()) {
+ v8::RegisterExtension(v8::base::make_unique<CpuTraceMarkExtension>(
+ FLAG_expose_cputracemark_as));
+ }
}
void Bootstrapper::TearDown() {
@@ -343,7 +353,7 @@ void Bootstrapper::LogAllMaps() {
void Bootstrapper::DetachGlobal(Handle<Context> env) {
isolate_->counters()->errors_thrown_per_context()->AddSample(
- env->GetErrorsThrown());
+ env->native_context().GetErrorsThrown());
ReadOnlyRoots roots(isolate_);
Handle<JSGlobalProxy> global_proxy(env->global_proxy(), isolate_);
@@ -1242,7 +1252,6 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
JS_GLOBAL_PROXY_TYPE);
}
global_proxy_function->initial_map().set_is_access_check_needed(true);
- global_proxy_function->initial_map().set_has_hidden_prototype(true);
global_proxy_function->initial_map().set_may_have_interesting_symbols(true);
native_context()->set_global_proxy_function(*global_proxy_function);
@@ -1424,7 +1433,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, object_function, "is", Builtins::kObjectIs,
2, true);
SimpleInstallFunction(isolate_, object_function, "preventExtensions",
- Builtins::kObjectPreventExtensions, 1, false);
+ Builtins::kObjectPreventExtensions, 1, true);
SimpleInstallFunction(isolate_, object_function, "seal",
Builtins::kObjectSeal, 1, false);
@@ -1432,41 +1441,34 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, object_function, "create", Builtins::kObjectCreate, 2, false);
native_context()->set_object_create(*object_create);
- Handle<JSFunction> object_define_properties =
- SimpleInstallFunction(isolate_, object_function, "defineProperties",
- Builtins::kObjectDefineProperties, 2, true);
- native_context()->set_object_define_properties(*object_define_properties);
+ SimpleInstallFunction(isolate_, object_function, "defineProperties",
+ Builtins::kObjectDefineProperties, 2, true);
- Handle<JSFunction> object_define_property =
- SimpleInstallFunction(isolate_, object_function, "defineProperty",
- Builtins::kObjectDefineProperty, 3, true);
- native_context()->set_object_define_property(*object_define_property);
+ SimpleInstallFunction(isolate_, object_function, "defineProperty",
+ Builtins::kObjectDefineProperty, 3, true);
SimpleInstallFunction(isolate_, object_function, "freeze",
Builtins::kObjectFreeze, 1, false);
- Handle<JSFunction> object_get_prototype_of =
- SimpleInstallFunction(isolate_, object_function, "getPrototypeOf",
- Builtins::kObjectGetPrototypeOf, 1, false);
- native_context()->set_object_get_prototype_of(*object_get_prototype_of);
+ SimpleInstallFunction(isolate_, object_function, "getPrototypeOf",
+ Builtins::kObjectGetPrototypeOf, 1, true);
SimpleInstallFunction(isolate_, object_function, "setPrototypeOf",
- Builtins::kObjectSetPrototypeOf, 2, false);
+ Builtins::kObjectSetPrototypeOf, 2, true);
SimpleInstallFunction(isolate_, object_function, "isExtensible",
- Builtins::kObjectIsExtensible, 1, false);
+ Builtins::kObjectIsExtensible, 1, true);
SimpleInstallFunction(isolate_, object_function, "isFrozen",
Builtins::kObjectIsFrozen, 1, false);
- Handle<JSFunction> object_is_sealed =
- SimpleInstallFunction(isolate_, object_function, "isSealed",
- Builtins::kObjectIsSealed, 1, false);
- native_context()->set_object_is_sealed(*object_is_sealed);
+ SimpleInstallFunction(isolate_, object_function, "isSealed",
+ Builtins::kObjectIsSealed, 1, false);
- Handle<JSFunction> object_keys = SimpleInstallFunction(
- isolate_, object_function, "keys", Builtins::kObjectKeys, 1, true);
- native_context()->set_object_keys(*object_keys);
+ SimpleInstallFunction(isolate_, object_function, "keys",
+ Builtins::kObjectKeys, 1, true);
SimpleInstallFunction(isolate_, object_function, "entries",
Builtins::kObjectEntries, 1, true);
+ SimpleInstallFunction(isolate_, object_function, "fromEntries",
+ Builtins::kObjectFromEntries, 1, false);
SimpleInstallFunction(isolate_, object_function, "values",
Builtins::kObjectValues, 1, true);
@@ -1637,7 +1639,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallSpeciesGetter(isolate_, array_function);
// Cache the array maps, needed by ArrayConstructorStub
- CacheInitialJSArrayMaps(native_context(), initial_map);
+ CacheInitialJSArrayMaps(isolate_, native_context(), initial_map);
// Set up %ArrayPrototype%.
// The %ArrayPrototype% has TERMINAL_FAST_ELEMENTS_KIND in order to ensure
@@ -1648,10 +1650,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSFunction::SetPrototype(array_function, proto);
native_context()->set_initial_array_prototype(*proto);
- Handle<JSFunction> is_arraylike = SimpleInstallFunction(
- isolate_, array_function, "isArray", Builtins::kArrayIsArray, 1, true);
- native_context()->set_is_arraylike(*is_arraylike);
-
+ SimpleInstallFunction(isolate_, array_function, "isArray",
+ Builtins::kArrayIsArray, 1, true);
SimpleInstallFunction(isolate_, array_function, "from",
Builtins::kArrayFrom, 1, false);
SimpleInstallFunction(isolate_, array_function, "of", Builtins::kArrayOf, 0,
@@ -1786,15 +1786,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- N u m b e r ---
Handle<JSFunction> number_fun = InstallFunction(
- isolate_, global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate_->initial_object_prototype(), Builtins::kNumberConstructor);
+ isolate_, global, "Number", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, isolate_->initial_object_prototype(),
+ Builtins::kNumberConstructor);
number_fun->shared().DontAdaptArguments();
number_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, number_fun,
Context::NUMBER_FUNCTION_INDEX);
// Create the %NumberPrototype%
- Handle<JSValue> prototype = Handle<JSValue>::cast(
+ Handle<JSPrimitiveWrapper> prototype = Handle<JSPrimitiveWrapper>::cast(
factory->NewJSObject(number_fun, AllocationType::kOld));
prototype->set_value(Smi::kZero);
JSFunction::SetPrototype(number_fun, prototype);
@@ -1869,15 +1870,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- B o o l e a n ---
Handle<JSFunction> boolean_fun = InstallFunction(
- isolate_, global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate_->initial_object_prototype(), Builtins::kBooleanConstructor);
+ isolate_, global, "Boolean", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, isolate_->initial_object_prototype(),
+ Builtins::kBooleanConstructor);
boolean_fun->shared().DontAdaptArguments();
boolean_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, boolean_fun,
Context::BOOLEAN_FUNCTION_INDEX);
// Create the %BooleanPrototype%
- Handle<JSValue> prototype = Handle<JSValue>::cast(
+ Handle<JSPrimitiveWrapper> prototype = Handle<JSPrimitiveWrapper>::cast(
factory->NewJSObject(boolean_fun, AllocationType::kOld));
prototype->set_value(ReadOnlyRoots(isolate_).false_value());
JSFunction::SetPrototype(boolean_fun, prototype);
@@ -1895,8 +1897,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- S t r i n g ---
Handle<JSFunction> string_fun = InstallFunction(
- isolate_, global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
- isolate_->initial_object_prototype(), Builtins::kStringConstructor);
+ isolate_, global, "String", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, isolate_->initial_object_prototype(),
+ Builtins::kStringConstructor);
string_fun->shared().DontAdaptArguments();
string_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, string_fun,
@@ -1929,7 +1932,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
false);
// Create the %StringPrototype%
- Handle<JSValue> prototype = Handle<JSValue>::cast(
+ Handle<JSPrimitiveWrapper> prototype = Handle<JSPrimitiveWrapper>::cast(
factory->NewJSObject(string_fun, AllocationType::kOld));
prototype->set_value(ReadOnlyRoots(isolate_).empty_string());
JSFunction::SetPrototype(string_fun, prototype);
@@ -2090,9 +2093,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // --- S y m b o l ---
- Handle<JSFunction> symbol_fun = InstallFunction(
- isolate_, global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
- factory->the_hole_value(), Builtins::kSymbolConstructor);
+ Handle<JSFunction> symbol_fun =
+ InstallFunction(isolate_, global, "Symbol", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, factory->the_hole_value(),
+ Builtins::kSymbolConstructor);
symbol_fun->shared().set_length(0);
symbol_fun->shared().DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
@@ -2592,6 +2596,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<RegExpMatchInfo> last_match_info = factory->NewRegExpMatchInfo();
native_context()->set_regexp_last_match_info(*last_match_info);
+ // Install the species protector cell.
+ {
+ Handle<PropertyCell> cell =
+ factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ native_context()->set_regexp_species_protector(*cell);
+ }
+
// Force the RegExp constructor to fast properties, so that we can use the
// fast paths for various things like
//
@@ -2691,6 +2703,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<EmbedderDataArray> embedder_data = factory->NewEmbedderDataArray(0);
native_context()->set_embedder_data(*embedder_data);
+ { // -- g l o b a l T h i s
+ Handle<JSGlobalProxy> global_proxy(native_context()->global_proxy(),
+ isolate_);
+ JSObject::AddProperty(isolate_, global, factory->globalThis_string(),
+ global_proxy, DONT_ENUM);
+ }
+
{ // -- J S O N
Handle<JSObject> json_object =
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
@@ -3393,9 +3412,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- B i g I n t
- Handle<JSFunction> bigint_fun = InstallFunction(
- isolate_, global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
- factory->the_hole_value(), Builtins::kBigIntConstructor);
+ Handle<JSFunction> bigint_fun =
+ InstallFunction(isolate_, global, "BigInt", JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, factory->the_hole_value(),
+ Builtins::kBigIntConstructor);
bigint_fun->shared().DontAdaptArguments();
bigint_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, bigint_fun,
@@ -3642,15 +3662,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
JSObject::AddProperty(isolate_, global, reflect_string, reflect, DONT_ENUM);
- Handle<JSFunction> define_property =
SimpleInstallFunction(isolate_, reflect, "defineProperty",
Builtins::kReflectDefineProperty, 3, true);
- native_context()->set_reflect_define_property(*define_property);
- Handle<JSFunction> delete_property =
SimpleInstallFunction(isolate_, reflect, "deleteProperty",
Builtins::kReflectDeleteProperty, 2, true);
- native_context()->set_reflect_delete_property(*delete_property);
Handle<JSFunction> apply = SimpleInstallFunction(
isolate_, reflect, "apply", Builtins::kReflectApply, 3, false);
@@ -4232,28 +4248,20 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_hashbang)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_add_calendar_numbering_system)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_bigint)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_day_period)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(
+ harmony_intl_dateformat_fractional_second_digits)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_quarter)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_datetime_style)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_numberformat_unified)
#endif // V8_INTL_SUPPORT
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
-void Genesis::InitializeGlobal_harmony_global() {
- if (!FLAG_harmony_global) return;
-
- Factory* factory = isolate()->factory();
- Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
- Handle<JSGlobalProxy> global_proxy(native_context()->global_proxy(),
- isolate());
- JSObject::AddProperty(isolate_, global, factory->globalThis_string(),
- global_proxy, DONT_ENUM);
-}
-
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@@ -4285,6 +4293,9 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
isolate(), finalization_group_name, JS_FINALIZATION_GROUP_TYPE,
JSFinalizationGroup::kSize, 0, finalization_group_prototype,
Builtins::kFinalizationGroupConstructor);
+ InstallWithIntrinsicDefaultProto(
+ isolate(), finalization_group_fun,
+ Context::JS_FINALIZATION_GROUP_FUNCTION_INDEX);
finalization_group_fun->shared().DontAdaptArguments();
finalization_group_fun->shared().set_length(1);
@@ -4301,7 +4312,7 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
finalization_group_fun, DONT_ENUM);
SimpleInstallFunction(isolate(), finalization_group_prototype, "register",
- Builtins::kFinalizationGroupRegister, 3, false);
+ Builtins::kFinalizationGroupRegister, 2, false);
SimpleInstallFunction(isolate(), finalization_group_prototype, "unregister",
Builtins::kFinalizationGroupUnregister, 1, false);
@@ -4315,7 +4326,6 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
Handle<Map> weak_ref_map =
factory->NewMap(JS_WEAK_REF_TYPE, JSWeakRef::kSize);
DCHECK(weak_ref_map->IsJSObjectMap());
- native_context()->set_js_weak_ref_map(*weak_ref_map);
Handle<JSObject> weak_ref_prototype = factory->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
@@ -4332,6 +4342,8 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
Handle<JSFunction> weak_ref_fun = CreateFunction(
isolate(), weak_ref_name, JS_WEAK_REF_TYPE, JSWeakRef::kSize, 0,
weak_ref_prototype, Builtins::kWeakRefConstructor);
+ InstallWithIntrinsicDefaultProto(isolate(), weak_ref_fun,
+ Context::JS_WEAK_REF_FUNCTION_INDEX);
weak_ref_fun->shared().DontAdaptArguments();
weak_ref_fun->shared().set_length(1);
@@ -4355,7 +4367,7 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
JSObject::ForceSetPrototype(cleanup_iterator_prototype, iterator_prototype);
InstallToStringTag(isolate(), cleanup_iterator_prototype,
- "JSFinalizationGroupCleanupIterator");
+ "FinalizationGroup Cleanup Iterator");
SimpleInstallFunction(isolate(), cleanup_iterator_prototype, "next",
Builtins::kFinalizationGroupCleanupIteratorNext, 0,
@@ -4498,12 +4510,6 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() {
#endif // V8_INTL_SUPPORT
-void Genesis::InitializeGlobal_harmony_object_from_entries() {
- if (!FLAG_harmony_object_from_entries) return;
- SimpleInstallFunction(isolate(), isolate()->object_function(), "fromEntries",
- Builtins::kObjectFromEntries, 1, false);
-}
-
Handle<JSFunction> Genesis::CreateArrayBuffer(
Handle<String> name, ArrayBufferKind array_buffer_kind) {
// Create the %ArrayBufferPrototype%
@@ -4598,7 +4604,7 @@ void Genesis::InstallInternalPackedArray(Handle<JSObject> target,
}
JSObject::NormalizeProperties(
- prototype, KEEP_INOBJECT_PROPERTIES, 6,
+ isolate(), prototype, KEEP_INOBJECT_PROPERTIES, 6,
"OptimizeInternalPackedArrayPrototypeForAdding");
InstallInternalPackedArrayFunction(prototype, "push");
InstallInternalPackedArrayFunction(prototype, "pop");
@@ -4681,14 +4687,14 @@ bool Genesis::InstallNatives() {
"Bootstrapping");
{
- // Builtin function for OpaqueReference -- a JSValue-based object,
- // that keeps its field isolated from JavaScript code. It may store
+ // Builtin function for OpaqueReference -- a JSPrimitiveWrapper-based
+ // object, that keeps its field isolated from JavaScript code. It may store
// objects, that JavaScript code may not access.
Handle<JSObject> prototype = factory()->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
- Handle<JSFunction> opaque_reference_fun =
- CreateFunction(isolate(), factory()->empty_string(), JS_VALUE_TYPE,
- JSValue::kSize, 0, prototype, Builtins::kIllegal);
+ Handle<JSFunction> opaque_reference_fun = CreateFunction(
+ isolate(), factory()->empty_string(), JS_PRIMITIVE_WRAPPER_TYPE,
+ JSPrimitiveWrapper::kSize, 0, prototype, Builtins::kIllegal);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@@ -5080,6 +5086,8 @@ bool Genesis::InstallExtensions(Isolate* isolate,
(!FLAG_trace_ignition_dispatches ||
InstallExtension(isolate, "v8/ignition-statistics",
&extension_states)) &&
+ (!isValidCpuTraceMarkFunctionName() ||
+ InstallExtension(isolate, "v8/cpumark", &extension_states)) &&
InstallRequestedExtensions(isolate, extensions, &extension_states);
}
@@ -5516,7 +5524,6 @@ Genesis::Genesis(Isolate* isolate,
Handle<Map> global_proxy_map = isolate->factory()->NewMap(
JS_GLOBAL_PROXY_TYPE, proxy_size, TERMINAL_FAST_ELEMENTS_KIND);
global_proxy_map->set_is_access_check_needed(true);
- global_proxy_map->set_has_hidden_prototype(true);
global_proxy_map->set_may_have_interesting_symbols(true);
// A remote global proxy has no native context.
@@ -5525,9 +5532,6 @@ Genesis::Genesis(Isolate* isolate,
// Configure the hidden prototype chain of the global proxy.
JSObject::ForceSetPrototype(global_proxy, global_object);
global_proxy->map().SetConstructor(*global_constructor);
- // TODO(dcheng): This is a hack. Why does this need to be manually called
- // here? Line 4812 should have taken care of it?
- global_proxy->map().set_has_hidden_prototype(true);
global_proxy_ = global_proxy;
}
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index 2293dc67d7..ce5a4f1a8b 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -29,13 +29,15 @@
V(_, endRange_string, "endRange") \
V(_, engineering_string, "engineering") \
V(_, era_string, "era") \
- V(_, first_string, "first") \
- V(_, format_string, "format") \
- V(_, except_zero_string, "except-zero") \
+ V(_, exceptZero_string, "exceptZero") \
V(_, exponentInteger_string, "exponentInteger") \
V(_, exponentMinusSign_string, "exponentMinusSign") \
V(_, exponentSeparator_string, "exponentSeparator") \
+ V(_, first_string, "first") \
+ V(_, format_string, "format") \
V(_, fraction_string, "fraction") \
+ V(_, fractionalSecond_string, "fractionalSecond") \
+ V(_, fractionalSecondDigits_string, "fractionalSecondDigits") \
V(_, full_string, "full") \
V(_, granularity_string, "granularity") \
V(_, grapheme_string, "grapheme") \
@@ -68,7 +70,7 @@
V(_, minute_string, "minute") \
V(_, month_string, "month") \
V(_, nan_string, "nan") \
- V(_, narrow_symbol_string, "narrow-symbol") \
+ V(_, narrowSymbol_string, "narrowSymbol") \
V(_, never_string, "never") \
V(_, none_string, "none") \
V(_, notation_string, "notation") \
@@ -414,10 +416,12 @@
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
F(MC_EVACUATE_COPY) \
+ F(MC_EVACUATE_COPY_PARALLEL) \
F(MC_EVACUATE_EPILOGUE) \
F(MC_EVACUATE_PROLOGUE) \
F(MC_EVACUATE_REBALANCE) \
F(MC_EVACUATE_UPDATE_POINTERS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN) \
F(MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
@@ -445,15 +449,18 @@
F(MINOR_MC_EVACUATE) \
F(MINOR_MC_EVACUATE_CLEAN_UP) \
F(MINOR_MC_EVACUATE_COPY) \
+ F(MINOR_MC_EVACUATE_COPY_PARALLEL) \
F(MINOR_MC_EVACUATE_EPILOGUE) \
F(MINOR_MC_EVACUATE_PROLOGUE) \
F(MINOR_MC_EVACUATE_REBALANCE) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
F(MINOR_MC_MARK) \
F(MINOR_MC_MARK_GLOBAL_HANDLES) \
+ F(MINOR_MC_MARK_PARALLEL) \
F(MINOR_MC_MARK_SEED) \
F(MINOR_MC_MARK_ROOTS) \
F(MINOR_MC_MARK_WEAK) \
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index 98d5715411..85ef1f4d83 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -156,7 +156,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
if (Heap::ShouldZapGarbage()) {
for (Address address = committed_region_address;
address < committed_region_size; address += kSystemPointerSize) {
- Memory<Address>(address) = static_cast<Address>(kZapValue);
+ base::Memory<Address>(address) = static_cast<Address>(kZapValue);
}
}
}
diff --git a/deps/v8/src/init/setup-isolate-deserialize.cc b/deps/v8/src/init/setup-isolate-deserialize.cc
index 8a73ff0c8a..ff0268d3c8 100644
--- a/deps/v8/src/init/setup-isolate-deserialize.cc
+++ b/deps/v8/src/init/setup-isolate-deserialize.cc
@@ -7,7 +7,6 @@
#include "src/base/logging.h"
#include "src/execution/isolate.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"
namespace v8 {
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 863940ef4b..d39e12c733 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -32,7 +32,7 @@ action("protocol_compatibility") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "$_inspector_protocol/check_protocol_compatibility.py"
inputs = [
- "js_protocol.pdl",
+ v8_inspector_js_protocol,
]
_stamp = "$target_gen_dir/js_protocol.stamp"
outputs = [
@@ -41,7 +41,7 @@ action("protocol_compatibility") {
args = [
"--stamp",
rebase_path(_stamp, root_build_dir),
- rebase_path("js_protocol.pdl", root_build_dir),
+ rebase_path(v8_inspector_js_protocol, root_build_dir),
]
}
@@ -53,10 +53,10 @@ inspector_protocol_generate("protocol_generated_sources") {
inspector_protocol_dir = _inspector_protocol
out_dir = target_gen_dir
- config_file = "inspector_protocol_config.json"
+ config_file = v8_path_prefix + "/src/inspector/inspector_protocol_config.json"
inputs = [
- "js_protocol.pdl",
- "inspector_protocol_config.json",
+ v8_inspector_js_protocol,
+ config_file,
]
outputs = _protocol_generated
}
@@ -65,7 +65,9 @@ config("inspector_config") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
configs = [ "../../:internal_config" ]
- include_dirs = [ "../../include" ]
+ include_dirs = [
+ "../../include",
+ ]
}
v8_header_set("inspector_test_headers") {
@@ -96,6 +98,7 @@ v8_source_set("inspector") {
":inspector_string_conversions",
"../..:v8_version",
"../../third_party/inspector_protocol:encoding",
+ "../../third_party/inspector_protocol:bindings",
]
public_deps = [
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 5122d5d997..e5fa06fd54 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -5,11 +5,12 @@ include_rules = [
"+src/base/compiler-specific.h",
"+src/base/logging.h",
"+src/base/macros.h",
+ "+src/base/memory.h",
"+src/base/platform/platform.h",
"+src/base/platform/mutex.h",
"+src/base/safe_conversions.h",
+ "+src/base/template-utils.h",
"+src/base/v8-fallthrough.h",
- "+src/common/v8memory.h",
"+src/numbers/conversions.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index 55f8ac7875..a979205084 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
alph@chromium.org
caseq@chromium.org
dgozman@chromium.org
@@ -7,12 +5,6 @@ kozyatinskiy@chromium.org
pfeldman@chromium.org
yangguo@chromium.org
-# Changes to remote debugging protocol require devtools review to
-# ensure backwards compatibility and commitment to maintain.
-per-file js_protocol.pdl=set noparent
-per-file js_protocol.pdl=dgozman@chromium.org
-per-file js_protocol.pdl=pfeldman@chromium.org
-
per-file PRESUBMIT.py=file://INFRA_OWNERS
# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index e660a61aeb..1edd559e4e 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -284,7 +284,7 @@ Response InjectedScript::getProperties(
int sessionId = m_sessionId;
v8::TryCatch tryCatch(isolate);
- *properties = Array<PropertyDescriptor>::create();
+ *properties = v8::base::make_unique<Array<PropertyDescriptor>>();
std::vector<PropertyMirror> mirrors;
PropertyAccumulator accumulator(&mirrors);
if (!ValueMirror::getProperties(context, object, ownProperties,
@@ -351,7 +351,7 @@ Response InjectedScript::getProperties(
descriptor->setValue(std::move(remoteObject));
descriptor->setWasThrown(true);
}
- (*properties)->addItem(std::move(descriptor));
+ (*properties)->emplace_back(std::move(descriptor));
}
return Response::OK();
}
@@ -362,8 +362,10 @@ Response InjectedScript::getInternalAndPrivateProperties(
internalProperties,
std::unique_ptr<protocol::Array<PrivatePropertyDescriptor>>*
privateProperties) {
- *internalProperties = protocol::Array<InternalPropertyDescriptor>::create();
- *privateProperties = protocol::Array<PrivatePropertyDescriptor>::create();
+ *internalProperties =
+ v8::base::make_unique<Array<InternalPropertyDescriptor>>();
+ *privateProperties =
+ v8::base::make_unique<Array<PrivatePropertyDescriptor>>();
if (!value->IsObject()) return Response::OK();
@@ -384,10 +386,10 @@ Response InjectedScript::getInternalAndPrivateProperties(
groupName, remoteObject.get());
if (!response.isSuccess()) return response;
(*internalProperties)
- ->addItem(InternalPropertyDescriptor::create()
- .setName(internalProperty.name)
- .setValue(std::move(remoteObject))
- .build());
+ ->emplace_back(InternalPropertyDescriptor::create()
+ .setName(internalProperty.name)
+ .setValue(std::move(remoteObject))
+ .build());
}
std::vector<PrivatePropertyMirror> privatePropertyWrappers =
ValueMirror::getPrivateProperties(m_context->context(), value_obj);
@@ -401,10 +403,10 @@ Response InjectedScript::getInternalAndPrivateProperties(
groupName, remoteObject.get());
if (!response.isSuccess()) return response;
(*privateProperties)
- ->addItem(PrivatePropertyDescriptor::create()
- .setName(privateProperty.name)
- .setValue(std::move(remoteObject))
- .build());
+ ->emplace_back(PrivatePropertyDescriptor::create()
+ .setName(privateProperty.name)
+ .setValue(std::move(remoteObject))
+ .build());
}
return Response::OK();
}
@@ -487,7 +489,6 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
&limit, &limit, &preview);
if (!preview) return nullptr;
- Array<PropertyPreview>* columns = preview->getProperties();
std::unordered_set<String16> selectedColumns;
v8::Local<v8::Array> v8Columns;
if (maybeColumns.ToLocal(&v8Columns)) {
@@ -500,18 +501,17 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
}
}
if (!selectedColumns.empty()) {
- for (size_t i = 0; i < columns->length(); ++i) {
- ObjectPreview* columnPreview = columns->get(i)->getValuePreview(nullptr);
+ for (const std::unique_ptr<PropertyPreview>& column :
+ *preview->getProperties()) {
+ ObjectPreview* columnPreview = column->getValuePreview(nullptr);
if (!columnPreview) continue;
- std::unique_ptr<Array<PropertyPreview>> filtered =
- Array<PropertyPreview>::create();
- Array<PropertyPreview>* columns = columnPreview->getProperties();
- for (size_t j = 0; j < columns->length(); ++j) {
- PropertyPreview* property = columns->get(j);
+ auto filtered = v8::base::make_unique<Array<PropertyPreview>>();
+ for (const std::unique_ptr<PropertyPreview>& property :
+ *columnPreview->getProperties()) {
if (selectedColumns.find(property->getName()) !=
selectedColumns.end()) {
- filtered->addItem(property->clone());
+ filtered->emplace_back(property->clone());
}
}
columnPreview->setProperties(std::move(filtered));
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index c4aa29ce99..684940c885 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -1,6 +1,6 @@
{
"protocol": {
- "path": "js_protocol.pdl",
+ "path": "../../include/js_protocol.pdl",
"package": "src/inspector/protocol",
"output": "protocol",
"namespace": ["v8_inspector", "protocol"],
@@ -44,5 +44,13 @@
"package": "src/inspector/protocol",
"output": "protocol",
"string_header": "src/inspector/string-util.h"
+ },
+
+ "encoding_lib": {
+ "namespace": "v8_inspector_protocol_encoding"
+ },
+
+ "bindings_lib": {
+ "namespace": "v8_inspector_protocol_bindings"
}
}
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index acf0159f27..3a91169ac8 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -200,6 +200,23 @@ String16 String16::fromUTF8(const char* stringStart, size_t length) {
return String16(UTF8ToUTF16(stringStart, length));
}
+String16 String16::fromUTF16LE(const UChar* stringStart, size_t length) {
+#ifdef V8_TARGET_BIG_ENDIAN
+ // Need to flip the byte order on big endian machines.
+ String16Builder builder;
+ builder.reserveCapacity(length);
+ for (size_t i = 0; i < length; i++) {
+ const UChar utf16be_char =
+ stringStart[i] << 8 | (stringStart[i] >> 8 & 0x00FF);
+ builder.append(utf16be_char);
+ }
+ return builder.toString();
+#else
+ // No need to do anything on little endian machines.
+ return String16(stringStart, length);
+#endif // V8_TARGET_BIG_ENDIAN
+}
+
std::string String16::utf8() const {
return UTF16ToUTF8(m_impl.data(), m_impl.size());
}
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 1b475a10a6..c1dd5cb929 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -26,7 +26,7 @@ class String16 {
String16(const String16&) V8_NOEXCEPT = default;
String16(String16&&) V8_NOEXCEPT = default;
String16(const UChar* characters, size_t size);
- String16(const UChar* characters); // NOLINT(runtime/explicit)
+ V8_EXPORT String16(const UChar* characters); // NOLINT(runtime/explicit)
String16(const char* characters); // NOLINT(runtime/explicit)
String16(const char* characters, size_t size);
explicit String16(const std::basic_string<UChar>& impl);
@@ -66,8 +66,13 @@ class String16 {
}
// Convenience methods.
- std::string utf8() const;
- static String16 fromUTF8(const char* stringStart, size_t length);
+ V8_EXPORT std::string utf8() const;
+ V8_EXPORT static String16 fromUTF8(const char* stringStart, size_t length);
+
+ // Instantiates a String16 in native endianness from UTF16 LE.
+ // On Big endian architectures, byte order needs to be flipped.
+ V8_EXPORT static String16 fromUTF16LE(const UChar* stringStart,
+ size_t length);
std::size_t hash() const {
if (!hash_code) {
@@ -91,6 +96,10 @@ class String16 {
inline String16 operator+(const String16& other) const {
return String16(m_impl + other.m_impl);
}
+ inline String16& operator+=(const String16& other) {
+ m_impl += other.m_impl;
+ return *this;
+ }
// Defined later, since it uses the String16Builder.
template <typename... T>
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index e81c04d66f..20c8951e2a 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -126,12 +126,6 @@ std::unique_ptr<protocol::Value> StringUtil::parseJSON(const String16& string) {
}
// static
-std::unique_ptr<protocol::Value> StringUtil::parseProtocolMessage(
- const ProtocolMessage& message) {
- return parseJSON(message.json);
-}
-
-// static
ProtocolMessage StringUtil::jsonToMessage(String message) {
ProtocolMessage result;
result.json = std::move(message);
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index a9ce4ff424..513f436136 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -45,26 +45,33 @@ class StringUtil {
return s.find(needle);
}
static const size_t kNotFound = String::kNotFound;
- static void builderAppend(StringBuilder& builder, const String& s) {
+ static void builderAppend(
+ StringBuilder& builder, // NOLINT(runtime/references)
+ const String& s) {
builder.append(s);
}
- static void builderAppend(StringBuilder& builder, UChar c) {
+ static void builderAppend(
+ StringBuilder& builder, // NOLINT(runtime/references)
+ UChar c) {
builder.append(c);
}
- static void builderAppend(StringBuilder& builder, const char* s, size_t len) {
+ static void builderAppend(
+ StringBuilder& builder, // NOLINT(runtime/references)
+ const char* s, size_t len) {
builder.append(s, len);
}
static void builderAppendQuotedString(StringBuilder&, const String&);
- static void builderReserve(StringBuilder& builder, size_t capacity) {
+ static void builderReserve(
+ StringBuilder& builder, // NOLINT(runtime/references)
+ size_t capacity) {
builder.reserveCapacity(capacity);
}
- static String builderToString(StringBuilder& builder) {
+ static String builderToString(
+ StringBuilder& builder) { // NOLINT(runtime/references)
return builder.toString();
}
static std::unique_ptr<protocol::Value> parseJSON(const String16& json);
static std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
- static std::unique_ptr<protocol::Value> parseProtocolMessage(
- const ProtocolMessage&);
static ProtocolMessage jsonToMessage(String message);
static ProtocolMessage binaryToMessage(std::vector<uint8_t> message);
@@ -76,6 +83,10 @@ class StringUtil {
return String16(data, length);
}
+ static String fromUTF16LE(const uint16_t* data, size_t length) {
+ return String16::fromUTF16LE(data, length);
+ }
+
static const uint8_t* CharactersLatin1(const String& s) { return nullptr; }
static const uint8_t* CharactersUTF8(const String& s) { return nullptr; }
static const uint16_t* CharactersUTF16(const String& s) {
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 4b7f181e35..458e4d4027 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -257,8 +257,8 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
v8::HandleScope handles(isolate);
v8::Local<v8::Context> context = inspectedContext->context();
- std::unique_ptr<protocol::Array<protocol::Runtime::RemoteObject>> args =
- protocol::Array<protocol::Runtime::RemoteObject>::create();
+ auto args =
+ v8::base::make_unique<protocol::Array<protocol::Runtime::RemoteObject>>();
v8::Local<v8::Value> value = m_arguments[0]->Get(isolate);
if (value->IsObject() && m_type == ConsoleAPIType::kTable &&
@@ -282,7 +282,7 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
inspectedContext = inspector->getContext(contextGroupId, contextId);
if (!inspectedContext) return nullptr;
if (wrapped) {
- args->addItem(std::move(wrapped));
+ args->emplace_back(std::move(wrapped));
} else {
args = nullptr;
}
@@ -297,7 +297,7 @@ V8ConsoleMessage::wrapArguments(V8InspectorSessionImpl* session,
args = nullptr;
break;
}
- args->addItem(std::move(wrapped));
+ args->emplace_back(std::move(wrapped));
}
}
return args;
@@ -341,14 +341,15 @@ void V8ConsoleMessage::reportToFrontend(protocol::Runtime::Frontend* frontend,
arguments = wrapArguments(session, generatePreview);
if (!inspector->hasConsoleMessageStorage(contextGroupId)) return;
if (!arguments) {
- arguments = protocol::Array<protocol::Runtime::RemoteObject>::create();
+ arguments = v8::base::make_unique<
+ protocol::Array<protocol::Runtime::RemoteObject>>();
if (!m_message.isEmpty()) {
std::unique_ptr<protocol::Runtime::RemoteObject> messageArg =
protocol::Runtime::RemoteObject::create()
.setType(protocol::Runtime::RemoteObject::TypeEnum::String)
.build();
messageArg->setValue(protocol::StringValue::create(m_message));
- arguments->addItem(std::move(messageArg));
+ arguments->emplace_back(std::move(messageArg));
}
}
Maybe<String16> consoleContext;
@@ -426,9 +427,11 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
message->m_v8Size +=
v8::debug::EstimatedValueSize(isolate, arguments.at(i));
}
- if (arguments.size())
- message->m_message =
- V8ValueStringBuilder::toString(arguments[0], v8Context);
+ for (size_t i = 0, num_args = arguments.size(); i < num_args; ++i) {
+ if (i) message->m_message += String16(" ");
+ message->m_message +=
+ V8ValueStringBuilder::toString(arguments[i], v8Context);
+ }
v8::Isolate::MessageErrorLevel clientLevel = v8::Isolate::kMessageInfo;
if (type == ConsoleAPIType::kDebug || type == ConsoleAPIType::kCount ||
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 37b1d5c7a9..0f476f2316 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -496,11 +496,11 @@ void V8Console::valuesCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
info.GetReturnValue().Set(values);
}
-static void setFunctionBreakpoint(ConsoleHelper& helper, int sessionId,
- v8::Local<v8::Function> function,
- V8DebuggerAgentImpl::BreakpointSource source,
- v8::Local<v8::String> condition,
- bool enable) {
+static void setFunctionBreakpoint(
+ ConsoleHelper& helper, // NOLINT(runtime/references)
+ int sessionId, v8::Local<v8::Function> function,
+ V8DebuggerAgentImpl::BreakpointSource source,
+ v8::Local<v8::String> condition, bool enable) {
V8InspectorSessionImpl* session = helper.session(sessionId);
if (session == nullptr) return;
if (!session->debuggerAgent()->enabled()) return;
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 3301838587..e5458823ea 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -262,7 +262,7 @@ String16 scopeType(v8::debug::ScopeIterator::ScopeType type) {
Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
InjectedScript* injectedScript,
std::unique_ptr<Array<Scope>>* scopes) {
- *scopes = Array<Scope>::create();
+ *scopes = v8::base::make_unique<Array<Scope>>();
if (!injectedScript) return Response::OK();
if (iterator->Done()) return Response::OK();
@@ -299,7 +299,7 @@ Response buildScopes(v8::Isolate* isolate, v8::debug::ScopeIterator* iterator,
.setColumnNumber(end.GetColumnNumber())
.build());
}
- (*scopes)->addItem(std::move(scope));
+ (*scopes)->emplace_back(std::move(scope));
}
return Response::OK();
}
@@ -472,7 +472,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
Maybe<int> optionalColumnNumber, Maybe<String16> optionalCondition,
String16* outBreakpointId,
std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
- *locations = Array<protocol::Debugger::Location>::create();
+ *locations = v8::base::make_unique<Array<protocol::Debugger::Location>>();
int specified = (optionalURL.isJust() ? 1 : 0) +
(optionalURLRegex.isJust() ? 1 : 0) +
@@ -539,7 +539,7 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
if (location && type != BreakpointType::kByUrlRegex) {
hint = breakpointHint(*script.second, lineNumber, columnNumber);
}
- if (location) (*locations)->addItem(std::move(location));
+ if (location) (*locations)->emplace_back(std::move(location));
}
breakpoints->setString(breakpointId, condition);
if (!hint.isEmpty()) {
@@ -708,7 +708,8 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
v8Start, v8End, restrictToFunction.fromMaybe(false), &v8Locations);
}
- *locations = protocol::Array<protocol::Debugger::BreakLocation>::create();
+ *locations = v8::base::make_unique<
+ protocol::Array<protocol::Debugger::BreakLocation>>();
for (size_t i = 0; i < v8Locations.size(); ++i) {
std::unique_ptr<protocol::Debugger::BreakLocation> breakLocation =
protocol::Debugger::BreakLocation::create()
@@ -719,7 +720,7 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
if (v8Locations[i].type() != v8::debug::kCommonBreakLocation) {
breakLocation->setType(breakLocationType(v8Locations[i].type()));
}
- (*locations)->addItem(std::move(breakLocation));
+ (*locations)->emplace_back(std::move(breakLocation));
}
return Response::OK();
}
@@ -871,13 +872,11 @@ Response V8DebuggerAgentImpl::searchInContent(
if (it == m_scripts.end())
return Response::Error("No script for id: " + scriptId);
- std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
- searchInTextByLinesImpl(m_session, it->second->source(0), query,
- optionalCaseSensitive.fromMaybe(false),
- optionalIsRegex.fromMaybe(false));
- *results = protocol::Array<protocol::Debugger::SearchMatch>::create();
- for (size_t i = 0; i < matches.size(); ++i)
- (*results)->addItem(std::move(matches[i]));
+ *results =
+ v8::base::make_unique<protocol::Array<protocol::Debugger::SearchMatch>>(
+ searchInTextByLinesImpl(m_session, it->second->source(0), query,
+ optionalCaseSensitive.fromMaybe(false),
+ optionalIsRegex.fromMaybe(false)));
return Response::OK();
}
@@ -1190,7 +1189,7 @@ Response V8DebuggerAgentImpl::setAsyncCallStackDepth(int depth) {
Response V8DebuggerAgentImpl::setBlackboxPatterns(
std::unique_ptr<protocol::Array<String16>> patterns) {
- if (!patterns->length()) {
+ if (patterns->empty()) {
m_blackboxPattern = nullptr;
resetBlackboxedStateCache();
m_state->remove(DebuggerAgentState::blackboxPattern);
@@ -1199,11 +1198,11 @@ Response V8DebuggerAgentImpl::setBlackboxPatterns(
String16Builder patternBuilder;
patternBuilder.append('(');
- for (size_t i = 0; i < patterns->length() - 1; ++i) {
- patternBuilder.append(patterns->get(i));
+ for (size_t i = 0; i < patterns->size() - 1; ++i) {
+ patternBuilder.append((*patterns)[i]);
patternBuilder.append("|");
}
- patternBuilder.append(patterns->get(patterns->length() - 1));
+ patternBuilder.append(patterns->back());
patternBuilder.append(')');
String16 pattern = patternBuilder.toString();
Response response = setBlackboxPattern(pattern);
@@ -1236,16 +1235,16 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
if (it == m_scripts.end())
return Response::Error("No script with passed id.");
- if (!inPositions->length()) {
+ if (inPositions->empty()) {
m_blackboxedPositions.erase(scriptId);
it->second->resetBlackboxedStateCache();
return Response::OK();
}
std::vector<std::pair<int, int>> positions;
- positions.reserve(inPositions->length());
- for (size_t i = 0; i < inPositions->length(); ++i) {
- protocol::Debugger::ScriptPosition* position = inPositions->get(i);
+ positions.reserve(inPositions->size());
+ for (const std::unique_ptr<protocol::Debugger::ScriptPosition>& position :
+ *inPositions) {
if (position->getLineNumber() < 0)
return Response::Error("Position missing 'line' or 'line' < 0.");
if (position->getColumnNumber() < 0)
@@ -1271,11 +1270,11 @@ Response V8DebuggerAgentImpl::setBlackboxedRanges(
Response V8DebuggerAgentImpl::currentCallFrames(
std::unique_ptr<Array<CallFrame>>* result) {
if (!isPaused()) {
- *result = Array<CallFrame>::create();
+ *result = v8::base::make_unique<Array<CallFrame>>();
return Response::OK();
}
v8::HandleScope handles(m_isolate);
- *result = Array<CallFrame>::create();
+ *result = v8::base::make_unique<Array<CallFrame>>();
auto iterator = v8::debug::StackTraceIterator::Create(m_isolate);
int frameOrdinal = 0;
for (; !iterator->Done(); iterator->Advance(), frameOrdinal++) {
@@ -1354,7 +1353,7 @@ Response V8DebuggerAgentImpl::currentCallFrames(
if (!res.isSuccess()) return res;
frame->setReturnValue(std::move(value));
}
- (*result)->addItem(std::move(frame));
+ (*result)->emplace_back(std::move(frame));
}
return Response::OK();
}
@@ -1603,7 +1602,7 @@ void V8DebuggerAgentImpl::didPause(
}
}
- std::unique_ptr<Array<String16>> hitBreakpointIds = Array<String16>::create();
+ auto hitBreakpointIds = v8::base::make_unique<Array<String16>>();
for (const auto& id : hitBreakpoints) {
auto it = m_breakpointsOnScriptRun.find(id);
@@ -1619,7 +1618,7 @@ void V8DebuggerAgentImpl::didPause(
continue;
}
const String16& breakpointId = breakpointIterator->second;
- hitBreakpointIds->addItem(breakpointId);
+ hitBreakpointIds->emplace_back(breakpointId);
BreakpointType type;
parseBreakpointId(breakpointId, &type);
if (type != BreakpointType::kDebugCommand) continue;
@@ -1655,7 +1654,8 @@ void V8DebuggerAgentImpl::didPause(
std::unique_ptr<Array<CallFrame>> protocolCallFrames;
Response response = currentCallFrames(&protocolCallFrames);
- if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
+ if (!response.isSuccess())
+ protocolCallFrames = v8::base::make_unique<Array<CallFrame>>();
m_frontend.paused(std::move(protocolCallFrames), breakReason,
std::move(breakAuxData), std::move(hitBreakpointIds),
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index fe7d570942..b83eafc96a 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -4,7 +4,7 @@
#include "src/inspector/v8-debugger-script.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger-agent-impl.h"
@@ -44,7 +44,7 @@ String16 calculateHash(v8::Isolate* isolate, v8::Local<v8::String> source) {
size_t sizeInBytes = sizeof(UChar) * written;
data = reinterpret_cast<const uint32_t*>(buffer.get());
for (size_t i = 0; i < sizeInBytes / 4; ++i) {
- uint32_t d = v8::internal::ReadUnalignedUInt32(
+ uint32_t d = v8::base::ReadUnalignedValue<uint32_t>(
reinterpret_cast<v8::internal::Address>(data + i));
#if V8_TARGET_LITTLE_ENDIAN
uint32_t v = d;
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index bc0c9d8cf6..5ddc375a80 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -24,7 +24,7 @@ static const int kMaxAsyncTaskStacks = 128 * 1024;
static const int kNoBreakpointId = 0;
template <typename Map>
-void cleanupExpiredWeakPointers(Map& map) {
+void cleanupExpiredWeakPointers(Map& map) { // NOLINT(runtime/references)
for (auto it = map.begin(); it != map.end();) {
if (it->second.expired()) {
it = map.erase(it);
@@ -42,6 +42,7 @@ class MatchPrototypePredicate : public v8::debug::QueryObjectPredicate {
: m_inspector(inspector), m_context(context), m_prototype(prototype) {}
bool Filter(v8::Local<v8::Object> object) override {
+ if (object->IsModuleNamespaceObject()) return false;
v8::Local<v8::Context> objectContext =
v8::debug::GetCreationContext(object);
if (objectContext != m_context) return false;
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index b1d60877fe..fcee8a6ef3 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -4,6 +4,7 @@
#include "src/inspector/v8-heap-profiler-agent-impl.h"
+#include "src/base/template-utils.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
@@ -127,12 +128,11 @@ class HeapStatsStream final : public v8::OutputStream {
WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* updateData,
int count) override {
DCHECK_GT(count, 0);
- std::unique_ptr<protocol::Array<int>> statsDiff =
- protocol::Array<int>::create();
+ auto statsDiff = v8::base::make_unique<protocol::Array<int>>();
for (int i = 0; i < count; ++i) {
- statsDiff->addItem(updateData[i].index);
- statsDiff->addItem(updateData[i].count);
- statsDiff->addItem(updateData[i].size);
+ statsDiff->emplace_back(updateData[i].index);
+ statsDiff->emplace_back(updateData[i].count);
+ statsDiff->emplace_back(updateData[i].size);
}
m_frontend->heapStatsUpdate(std::move(statsDiff));
return kContinue;
@@ -337,10 +337,10 @@ namespace {
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfileNode>
buildSampingHeapProfileNode(v8::Isolate* isolate,
const v8::AllocationProfile::Node* node) {
- auto children = protocol::Array<
- protocol::HeapProfiler::SamplingHeapProfileNode>::create();
+ auto children = v8::base::make_unique<
+ protocol::Array<protocol::HeapProfiler::SamplingHeapProfileNode>>();
for (const auto* child : node->children)
- children->addItem(buildSampingHeapProfileNode(isolate, child));
+ children->emplace_back(buildSampingHeapProfileNode(isolate, child));
size_t selfSize = 0;
for (const auto& allocation : node->allocations)
selfSize += allocation.size * allocation.count;
@@ -384,14 +384,15 @@ Response V8HeapProfilerAgentImpl::getSamplingProfile(
if (!v8Profile)
return Response::Error("V8 sampling heap profiler was not started.");
v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
- auto samples = protocol::Array<
- protocol::HeapProfiler::SamplingHeapProfileSample>::create();
+ auto samples = v8::base::make_unique<
+ protocol::Array<protocol::HeapProfiler::SamplingHeapProfileSample>>();
for (const auto& sample : v8Profile->GetSamples()) {
- samples->addItem(protocol::HeapProfiler::SamplingHeapProfileSample::create()
- .setSize(sample.size * sample.count)
- .setNodeId(sample.node_id)
- .setOrdinal(static_cast<double>(sample.sample_id))
- .build());
+ samples->emplace_back(
+ protocol::HeapProfiler::SamplingHeapProfileSample::create()
+ .setSize(sample.size * sample.count)
+ .setNodeId(sample.node_id)
+ .setOrdinal(static_cast<double>(sample.sample_id))
+ .build());
}
*profile = protocol::HeapProfiler::SamplingHeapProfile::create()
.setHead(buildSampingHeapProfileNode(m_isolate, root))
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index 4242abb64a..fdfb41924c 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -161,53 +161,28 @@ protocol::DictionaryValue* V8InspectorSessionImpl::agentState(
return state;
}
-namespace {
-
-class MessageBuffer : public StringBuffer {
- public:
- static std::unique_ptr<MessageBuffer> create(
- std::unique_ptr<protocol::Serializable> message, bool binary) {
- return std::unique_ptr<MessageBuffer>(
- new MessageBuffer(std::move(message), binary));
- }
-
- const StringView& string() override {
- if (!m_serialized) {
- if (m_binary) {
- // Encode binary response as an 8bit string buffer.
- m_serialized.reset(
- new BinaryStringBuffer(m_message->serializeToBinary()));
- } else {
- m_serialized =
- StringBuffer::create(toStringView(m_message->serializeToJSON()));
- }
- m_message.reset(nullptr);
- }
- return m_serialized->string();
- }
-
- private:
- explicit MessageBuffer(std::unique_ptr<protocol::Serializable> message,
- bool binary)
- : m_message(std::move(message)), m_binary(binary) {}
-
- std::unique_ptr<protocol::Serializable> m_message;
- std::unique_ptr<StringBuffer> m_serialized;
- bool m_binary;
-};
-
-} // namespace
+std::unique_ptr<StringBuffer> V8InspectorSessionImpl::serializeForFrontend(
+ std::unique_ptr<protocol::Serializable> message) {
+ std::vector<uint8_t> cbor = message->serializeToBinary();
+ if (use_binary_protocol_)
+ return std::unique_ptr<StringBuffer>(
+ new BinaryStringBuffer(std::move(cbor)));
+ std::vector<uint8_t> json;
+ IPEStatus status = ConvertCBORToJSON(SpanFrom(cbor), &json);
+ DCHECK(status.ok());
+ USE(status);
+ String16 string16(reinterpret_cast<const char*>(json.data()), json.size());
+ return StringBufferImpl::adopt(string16);
+}
void V8InspectorSessionImpl::sendProtocolResponse(
int callId, std::unique_ptr<protocol::Serializable> message) {
- m_channel->sendResponse(
- callId, MessageBuffer::create(std::move(message), use_binary_protocol_));
+ m_channel->sendResponse(callId, serializeForFrontend(std::move(message)));
}
void V8InspectorSessionImpl::sendProtocolNotification(
std::unique_ptr<protocol::Serializable> message) {
- m_channel->sendNotification(
- MessageBuffer::create(std::move(message), use_binary_protocol_));
+ m_channel->sendNotification(serializeForFrontend(std::move(message)));
}
void V8InspectorSessionImpl::fallThrough(
@@ -357,20 +332,30 @@ void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
void V8InspectorSessionImpl::dispatchProtocolMessage(
const StringView& message) {
- bool binary_protocol = IsCBORMessage(message);
- if (binary_protocol) {
+ using ::v8_inspector_protocol_encoding::span;
+ using ::v8_inspector_protocol_encoding::SpanFrom;
+ span<uint8_t> cbor;
+ std::vector<uint8_t> converted_cbor;
+ if (IsCBORMessage(message)) {
use_binary_protocol_ = true;
m_state->setBoolean("use_binary_protocol", true);
- }
-
- int callId;
- std::unique_ptr<protocol::Value> parsed_message;
- if (binary_protocol) {
- parsed_message = protocol::Value::parseBinary(
- message.characters8(), static_cast<unsigned>(message.length()));
+ cbor = span<uint8_t>(message.characters8(), message.length());
} else {
- parsed_message = protocol::StringUtil::parseJSON(message);
+ if (message.is8Bit()) {
+ // We're ignoring the return value of these conversion functions
+ // intentionally. It means the |parsed_message| below will be nullptr.
+ ConvertJSONToCBOR(span<uint8_t>(message.characters8(), message.length()),
+ &converted_cbor);
+ } else {
+ ConvertJSONToCBOR(
+ span<uint16_t>(message.characters16(), message.length()),
+ &converted_cbor);
+ }
+ cbor = SpanFrom(converted_cbor);
}
+ int callId;
+ std::unique_ptr<protocol::Value> parsed_message =
+ protocol::Value::parseBinary(cbor.data(), cbor.size());
String16 method;
if (m_dispatcher.parseCommand(parsed_message.get(), &callId, &method)) {
// Pass empty string instead of the actual message to save on a conversion.
@@ -380,14 +365,6 @@ void V8InspectorSessionImpl::dispatchProtocolMessage(
}
}
-std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
- std::vector<uint8_t> json;
- IPEStatus status = ConvertCBORToJSON(SpanFrom(state()), &json);
- DCHECK(status.ok());
- USE(status);
- return v8::base::make_unique<BinaryStringBuffer>(std::move(json));
-}
-
std::vector<uint8_t> V8InspectorSessionImpl::state() {
std::vector<uint8_t> out;
m_state->writeBinary(&out);
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index ea1d29773c..7a976bcd40 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -64,7 +64,6 @@ class V8InspectorSessionImpl : public V8InspectorSession,
// V8InspectorSession implementation.
void dispatchProtocolMessage(const StringView& message) override;
- std::unique_ptr<StringBuffer> stateJSON() override;
std::vector<uint8_t> state() override;
std::vector<std::unique_ptr<protocol::Schema::API::Domain>> supportedDomains()
override;
@@ -106,6 +105,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
const protocol::ProtocolMessage& message) override;
void flushProtocolNotifications() override;
+ std::unique_ptr<StringBuffer> serializeForFrontend(
+ std::unique_ptr<protocol::Serializable> message);
int m_contextGroupId;
int m_sessionId;
V8InspectorImpl* m_inspector;
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 15f93e39d7..3b02f7faa1 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -44,7 +44,8 @@ std::unique_ptr<protocol::Array<protocol::Profiler::PositionTickInfo>>
buildInspectorObjectForPositionTicks(const v8::CpuProfileNode* node) {
unsigned lineCount = node->GetHitLineCount();
if (!lineCount) return nullptr;
- auto array = protocol::Array<protocol::Profiler::PositionTickInfo>::create();
+ auto array = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::PositionTickInfo>>();
std::vector<v8::CpuProfileNode::LineTick> entries(lineCount);
if (node->GetLineTicks(&entries[0], lineCount)) {
for (unsigned i = 0; i < lineCount; i++) {
@@ -53,7 +54,7 @@ buildInspectorObjectForPositionTicks(const v8::CpuProfileNode* node) {
.setLine(entries[i].line)
.setTicks(entries[i].hit_count)
.build();
- array->addItem(std::move(line));
+ array->emplace_back(std::move(line));
}
}
return array;
@@ -79,9 +80,9 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
const int childrenCount = node->GetChildrenCount();
if (childrenCount) {
- auto children = protocol::Array<int>::create();
+ auto children = v8::base::make_unique<protocol::Array<int>>();
for (int i = 0; i < childrenCount; i++)
- children->addItem(node->GetChild(i)->GetNodeId());
+ children->emplace_back(node->GetChild(i)->GetNodeId());
result->setChildren(std::move(children));
}
@@ -97,21 +98,21 @@ std::unique_ptr<protocol::Profiler::ProfileNode> buildInspectorObjectFor(
std::unique_ptr<protocol::Array<int>> buildInspectorObjectForSamples(
v8::CpuProfile* v8profile) {
- auto array = protocol::Array<int>::create();
+ auto array = v8::base::make_unique<protocol::Array<int>>();
int count = v8profile->GetSamplesCount();
for (int i = 0; i < count; i++)
- array->addItem(v8profile->GetSample(i)->GetNodeId());
+ array->emplace_back(v8profile->GetSample(i)->GetNodeId());
return array;
}
std::unique_ptr<protocol::Array<int>> buildInspectorObjectForTimestamps(
v8::CpuProfile* v8profile) {
- auto array = protocol::Array<int>::create();
+ auto array = v8::base::make_unique<protocol::Array<int>>();
int count = v8profile->GetSamplesCount();
uint64_t lastTime = v8profile->GetStartTime();
for (int i = 0; i < count; i++) {
uint64_t ts = v8profile->GetSampleTimestamp(i);
- array->addItem(static_cast<int>(ts - lastTime));
+ array->emplace_back(static_cast<int>(ts - lastTime));
lastTime = ts;
}
return array;
@@ -120,7 +121,7 @@ std::unique_ptr<protocol::Array<int>> buildInspectorObjectForTimestamps(
void flattenNodesTree(V8InspectorImpl* inspector,
const v8::CpuProfileNode* node,
protocol::Array<protocol::Profiler::ProfileNode>* list) {
- list->addItem(buildInspectorObjectFor(inspector, node));
+ list->emplace_back(buildInspectorObjectFor(inspector, node));
const int childrenCount = node->GetChildrenCount();
for (int i = 0; i < childrenCount; i++)
flattenNodesTree(inspector, node->GetChild(i), list);
@@ -128,7 +129,8 @@ void flattenNodesTree(V8InspectorImpl* inspector,
std::unique_ptr<protocol::Profiler::Profile> createCPUProfile(
V8InspectorImpl* inspector, v8::CpuProfile* v8profile) {
- auto nodes = protocol::Array<protocol::Profiler::ProfileNode>::create();
+ auto nodes =
+ v8::base::make_unique<protocol::Array<protocol::Profiler::ProfileNode>>();
flattenNodesTree(inspector, v8profile->GetTopDownRoot(), nodes.get());
return protocol::Profiler::Profile::create()
.setNodes(std::move(nodes))
@@ -336,36 +338,35 @@ Response coverageToProtocol(
V8InspectorImpl* inspector, const v8::debug::Coverage& coverage,
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
out_result) {
- std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>> result =
- protocol::Array<protocol::Profiler::ScriptCoverage>::create();
+ auto result = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::ScriptCoverage>>();
v8::Isolate* isolate = inspector->isolate();
for (size_t i = 0; i < coverage.ScriptCount(); i++) {
v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
v8::Local<v8::debug::Script> script = script_data.GetScript();
- std::unique_ptr<protocol::Array<protocol::Profiler::FunctionCoverage>>
- functions =
- protocol::Array<protocol::Profiler::FunctionCoverage>::create();
+ auto functions = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::FunctionCoverage>>();
for (size_t j = 0; j < script_data.FunctionCount(); j++) {
v8::debug::Coverage::FunctionData function_data =
script_data.GetFunctionData(j);
- std::unique_ptr<protocol::Array<protocol::Profiler::CoverageRange>>
- ranges = protocol::Array<protocol::Profiler::CoverageRange>::create();
+ auto ranges = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::CoverageRange>>();
// Add function range.
- ranges->addItem(createCoverageRange(function_data.StartOffset(),
- function_data.EndOffset(),
- function_data.Count()));
+ ranges->emplace_back(createCoverageRange(function_data.StartOffset(),
+ function_data.EndOffset(),
+ function_data.Count()));
// Process inner blocks.
for (size_t k = 0; k < function_data.BlockCount(); k++) {
v8::debug::Coverage::BlockData block_data =
function_data.GetBlockData(k);
- ranges->addItem(createCoverageRange(block_data.StartOffset(),
- block_data.EndOffset(),
- block_data.Count()));
+ ranges->emplace_back(createCoverageRange(block_data.StartOffset(),
+ block_data.EndOffset(),
+ block_data.Count()));
}
- functions->addItem(
+ functions->emplace_back(
protocol::Profiler::FunctionCoverage::create()
.setFunctionName(toProtocolString(
isolate,
@@ -381,11 +382,11 @@ Response coverageToProtocol(
} else if (script->Name().ToLocal(&name) && name->Length()) {
url = resourceNameToUrl(inspector, name);
}
- result->addItem(protocol::Profiler::ScriptCoverage::create()
- .setScriptId(String16::fromInteger(script->Id()))
- .setUrl(url)
- .setFunctions(std::move(functions))
- .build());
+ result->emplace_back(protocol::Profiler::ScriptCoverage::create()
+ .setScriptId(String16::fromInteger(script->Id()))
+ .setUrl(url)
+ .setFunctions(std::move(functions))
+ .build());
}
*out_result = std::move(result);
return Response::OK();
@@ -417,31 +418,30 @@ namespace {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>
typeProfileToProtocol(V8InspectorImpl* inspector,
const v8::debug::TypeProfile& type_profile) {
- std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>
- result = protocol::Array<protocol::Profiler::ScriptTypeProfile>::create();
+ auto result = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::ScriptTypeProfile>>();
v8::Isolate* isolate = inspector->isolate();
for (size_t i = 0; i < type_profile.ScriptCount(); i++) {
v8::debug::TypeProfile::ScriptData script_data =
type_profile.GetScriptData(i);
v8::Local<v8::debug::Script> script = script_data.GetScript();
- std::unique_ptr<protocol::Array<protocol::Profiler::TypeProfileEntry>>
- entries =
- protocol::Array<protocol::Profiler::TypeProfileEntry>::create();
+ auto entries = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::TypeProfileEntry>>();
for (const auto& entry : script_data.Entries()) {
- std::unique_ptr<protocol::Array<protocol::Profiler::TypeObject>> types =
- protocol::Array<protocol::Profiler::TypeObject>::create();
+ auto types = v8::base::make_unique<
+ protocol::Array<protocol::Profiler::TypeObject>>();
for (const auto& type : entry.Types()) {
- types->addItem(
+ types->emplace_back(
protocol::Profiler::TypeObject::create()
.setName(toProtocolString(
isolate, type.FromMaybe(v8::Local<v8::String>())))
.build());
}
- entries->addItem(protocol::Profiler::TypeProfileEntry::create()
- .setOffset(entry.SourcePosition())
- .setTypes(std::move(types))
- .build());
+ entries->emplace_back(protocol::Profiler::TypeProfileEntry::create()
+ .setOffset(entry.SourcePosition())
+ .setTypes(std::move(types))
+ .build());
}
String16 url;
v8::Local<v8::String> name;
@@ -450,11 +450,11 @@ typeProfileToProtocol(V8InspectorImpl* inspector,
} else if (script->Name().ToLocal(&name) && name->Length()) {
url = resourceNameToUrl(inspector, name);
}
- result->addItem(protocol::Profiler::ScriptTypeProfile::create()
- .setScriptId(String16::fromInteger(script->Id()))
- .setUrl(url)
- .setEntries(std::move(entries))
- .build());
+ result->emplace_back(protocol::Profiler::ScriptTypeProfile::create()
+ .setScriptId(String16::fromInteger(script->Id()))
+ .setUrl(url)
+ .setEntries(std::move(entries))
+ .build());
}
return result;
}
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 601f263c7a..fd2d35abd7 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -107,7 +107,8 @@ bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
}
void innerCallFunctionOn(
- V8InspectorSessionImpl* session, InjectedScript::Scope& scope,
+ V8InspectorSessionImpl* session,
+ InjectedScript::Scope& scope, // NOLINT(runtime/references)
v8::Local<v8::Value> recv, const String16& expression,
Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
bool silent, WrapMode wrapMode, bool userGesture, bool awaitPromise,
@@ -120,12 +121,12 @@ void innerCallFunctionOn(
if (optionalArguments.isJust()) {
protocol::Array<protocol::Runtime::CallArgument>* arguments =
optionalArguments.fromJust();
- argc = static_cast<int>(arguments->length());
+ argc = static_cast<int>(arguments->size());
argv.reset(new v8::Local<v8::Value>[argc]);
for (int i = 0; i < argc; ++i) {
v8::Local<v8::Value> argumentValue;
Response response = scope.injectedScript()->resolveCallArgument(
- arguments->get(i), &argumentValue);
+ (*arguments)[i].get(), &argumentValue);
if (!response.isSuccess()) {
callback->sendFailure(response);
return;
@@ -419,9 +420,9 @@ Response V8RuntimeAgentImpl::getProperties(
object, scope.objectGroupName(), &internalPropertiesProtocolArray,
&privatePropertiesProtocolArray);
if (!response.isSuccess()) return response;
- if (internalPropertiesProtocolArray->length())
+ if (!internalPropertiesProtocolArray->empty())
*internalProperties = std::move(internalPropertiesProtocolArray);
- if (privatePropertiesProtocolArray->length())
+ if (!privatePropertiesProtocolArray->empty())
*privateProperties = std::move(privatePropertiesProtocolArray);
return Response::OK();
}
@@ -612,9 +613,9 @@ Response V8RuntimeAgentImpl::globalLexicalScopeNames(
v8::PersistentValueVector<v8::String> names(m_inspector->isolate());
v8::debug::GlobalLexicalScopeNames(scope.context(), &names);
- *outNames = protocol::Array<String16>::create();
+ *outNames = v8::base::make_unique<protocol::Array<String16>>();
for (size_t i = 0; i < names.Size(); ++i) {
- (*outNames)->addItem(
+ (*outNames)->emplace_back(
toProtocolString(m_inspector->isolate(), names.Get(i)));
}
return Response::OK();
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.cc b/deps/v8/src/inspector/v8-schema-agent-impl.cc
index 07bbd35d97..808f59b0bf 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.cc
@@ -4,6 +4,7 @@
#include "src/inspector/v8-schema-agent-impl.h"
+#include "src/base/template-utils.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/v8-inspector-session-impl.h"
@@ -18,11 +19,9 @@ V8SchemaAgentImpl::~V8SchemaAgentImpl() = default;
Response V8SchemaAgentImpl::getDomains(
std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) {
- std::vector<std::unique_ptr<protocol::Schema::Domain>> domains =
- m_session->supportedDomainsImpl();
- *result = protocol::Array<protocol::Schema::Domain>::create();
- for (size_t i = 0; i < domains.size(); ++i)
- (*result)->addItem(std::move(domains[i]));
+ *result = v8::base::make_unique<
+ std::vector<std::unique_ptr<protocol::Schema::Domain>>>(
+ m_session->supportedDomainsImpl());
return Response::OK();
}
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 8bf16b4baf..e2be811069 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "src/base/template-utils.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/wasm-translation.h"
@@ -72,13 +73,13 @@ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
return asyncParent->buildInspectorObject(debugger, maxAsyncDepth);
}
- std::unique_ptr<protocol::Array<protocol::Runtime::CallFrame>>
- inspectorFrames = protocol::Array<protocol::Runtime::CallFrame>::create();
- for (size_t i = 0; i < frames.size(); i++) {
+ auto inspectorFrames =
+ v8::base::make_unique<protocol::Array<protocol::Runtime::CallFrame>>();
+ for (const std::shared_ptr<StackFrame>& frame : frames) {
V8InspectorClient* client = nullptr;
if (debugger && debugger->inspector())
client = debugger->inspector()->client();
- inspectorFrames->addItem(frames[i]->buildInspectorObject(client));
+ inspectorFrames->emplace_back(frame->buildInspectorObject(client));
}
std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
protocol::Runtime::StackTrace::create()
@@ -284,6 +285,12 @@ V8StackTraceImpl::buildInspectorObject() const {
return buildInspectorObjectImpl(nullptr);
}
+std::unique_ptr<protocol::Runtime::API::StackTrace>
+V8StackTraceImpl::buildInspectorObject(int maxAsyncDepth) const {
+ return buildInspectorObjectImpl(nullptr,
+ std::min(maxAsyncDepth, m_maxAsyncDepth));
+}
+
std::unique_ptr<StringBuffer> V8StackTraceImpl::toString() const {
String16Builder stackTrace;
for (size_t i = 0; i < m_frames.size(); ++i) {
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index 1142cfaa82..681b3c2aba 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -78,6 +78,8 @@ class V8StackTraceImpl : public V8StackTrace {
StringView topFunctionName() const override;
std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject()
const override;
+ std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject(
+ int maxAsyncDepth) const override;
std::unique_ptr<StringBuffer> toString() const override;
bool isEqualIgnoringTopFrame(V8StackTraceImpl* stackTrace) const;
diff --git a/deps/v8/src/inspector/v8-string-conversions.cc b/deps/v8/src/inspector/v8-string-conversions.cc
index 0c75e66b97..4ccf6351fb 100644
--- a/deps/v8/src/inspector/v8-string-conversions.cc
+++ b/deps/v8/src/inspector/v8-string-conversions.cc
@@ -228,7 +228,9 @@ static const UChar32 offsetsFromUTF8[6] = {0x00000000UL,
static_cast<UChar32>(0xFA082080UL),
static_cast<UChar32>(0x82082080UL)};
-static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
+static inline UChar32 readUTF8Sequence(
+ const char*& sequence, // NOLINT(runtime/references)
+ size_t length) {
UChar32 character = 0;
// The cases all fall through.
@@ -334,7 +336,8 @@ ConversionResult convertUTF8ToUTF16(const char** sourceStart,
// Helper to write a three-byte UTF-8 code point to the buffer, caller must
// check room is available.
-static inline void putUTF8Triple(char*& buffer, UChar ch) {
+static inline void putUTF8Triple(char*& buffer, // NOLINT(runtime/references)
+ UChar ch) {
*buffer++ = static_cast<char>(((ch >> 12) & 0x0F) | 0xE0);
*buffer++ = static_cast<char>(((ch >> 6) & 0x3F) | 0x80);
*buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 72eef3cd3f..3ab9085c44 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -351,7 +351,8 @@ class PrimitiveValueMirror final : public ValueMirror {
.setType(m_type)
.setDescription(descriptionForPrimitiveType(context, m_value))
.setOverflow(false)
- .setProperties(protocol::Array<PropertyPreview>::create())
+ .setProperties(
+ v8::base::make_unique<protocol::Array<PropertyPreview>>())
.build();
if (m_value->IsNull())
(*preview)->setSubtype(RemoteObject::SubtypeEnum::Null);
@@ -411,12 +412,14 @@ class NumberMirror final : public ValueMirror {
v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
std::unique_ptr<ObjectPreview>* preview) const override {
bool unserializable = false;
- *preview = ObjectPreview::create()
- .setType(RemoteObject::TypeEnum::Number)
- .setDescription(description(&unserializable))
- .setOverflow(false)
- .setProperties(protocol::Array<PropertyPreview>::create())
- .build();
+ *preview =
+ ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Number)
+ .setDescription(description(&unserializable))
+ .setOverflow(false)
+ .setProperties(
+ v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .build();
}
private:
@@ -467,12 +470,14 @@ class BigIntMirror final : public ValueMirror {
int* indexLimit,
std::unique_ptr<protocol::Runtime::ObjectPreview>*
preview) const override {
- *preview = ObjectPreview::create()
- .setType(RemoteObject::TypeEnum::Bigint)
- .setDescription(descriptionForBigInt(context, m_value))
- .setOverflow(false)
- .setProperties(protocol::Array<PropertyPreview>::create())
- .build();
+ *preview =
+ ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Bigint)
+ .setDescription(descriptionForBigInt(context, m_value))
+ .setOverflow(false)
+ .setProperties(
+ v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .build();
}
v8::Local<v8::Value> v8Value() const override { return m_value; }
@@ -625,12 +630,14 @@ class FunctionMirror final : public ValueMirror {
void buildEntryPreview(
v8::Local<v8::Context> context, int* nameLimit, int* indexLimit,
std::unique_ptr<ObjectPreview>* preview) const override {
- *preview = ObjectPreview::create()
- .setType(RemoteObject::TypeEnum::Function)
- .setDescription(descriptionForFunction(context, m_value))
- .setOverflow(false)
- .setProperties(protocol::Array<PropertyPreview>::create())
- .build();
+ *preview =
+ ObjectPreview::create()
+ .setType(RemoteObject::TypeEnum::Function)
+ .setDescription(descriptionForFunction(context, m_value))
+ .setOverflow(false)
+ .setProperties(
+ v8::base::make_unique<protocol::Array<PropertyPreview>>())
+ .build();
}
private:
@@ -824,7 +831,7 @@ void getPrivatePropertiesForPreview(
return;
}
--*nameLimit;
- privateProperties->addItem(std::move(propertyPreview));
+ privateProperties->emplace_back(std::move(propertyPreview));
}
}
@@ -911,8 +918,7 @@ class ObjectMirror final : public ValueMirror {
v8::Local<v8::Context> context, bool forEntry,
bool generatePreviewForTable, int* nameLimit, int* indexLimit,
std::unique_ptr<ObjectPreview>* result) const {
- std::unique_ptr<protocol::Array<PropertyPreview>> properties =
- protocol::Array<PropertyPreview>::create();
+ auto properties = v8::base::make_unique<protocol::Array<PropertyPreview>>();
std::unique_ptr<protocol::Array<EntryPreview>> entriesPreview;
bool overflow = false;
@@ -929,7 +935,7 @@ class ObjectMirror final : public ValueMirror {
internalProperties[i].value->buildPropertyPreview(
context, internalProperties[i].name, &propertyPreview);
if (propertyPreview) {
- properties->addItem(std::move(propertyPreview));
+ properties->emplace_back(std::move(propertyPreview));
}
}
@@ -959,7 +965,7 @@ class ObjectMirror final : public ValueMirror {
if (valuePreview) {
preview->setValuePreview(std::move(valuePreview));
}
- properties->addItem(std::move(preview));
+ properties->emplace_back(std::move(preview));
}
}
@@ -969,7 +975,8 @@ class ObjectMirror final : public ValueMirror {
if (forEntry) {
overflow = true;
} else {
- entriesPreview = protocol::Array<EntryPreview>::create();
+ entriesPreview =
+ v8::base::make_unique<protocol::Array<EntryPreview>>();
for (const auto& entry : entries) {
std::unique_ptr<ObjectPreview> valuePreview;
entry.value->buildEntryPreview(context, nameLimit, indexLimit,
@@ -986,7 +993,7 @@ class ObjectMirror final : public ValueMirror {
.setValue(std::move(valuePreview))
.build();
if (keyPreview) entryPreview->setKey(std::move(keyPreview));
- entriesPreview->addItem(std::move(entryPreview));
+ entriesPreview->emplace_back(std::move(entryPreview));
}
}
}
@@ -1145,19 +1152,28 @@ void addTypedArrayViews(v8::Local<v8::Context> context,
v8::Local<ArrayBuffer> buffer,
ValueMirror::PropertyAccumulator* accumulator) {
// TODO(alph): these should be internal properties.
- size_t length = buffer->ByteLength();
+ // TODO(v8:9308): Reconsider how large arrays are previewed.
+ const size_t byte_length = buffer->ByteLength();
+
+ size_t length = byte_length;
+ if (length > v8::TypedArray::kMaxLength) return;
+
addTypedArrayView<v8::Int8Array>(context, buffer, length, "[[Int8Array]]",
accumulator);
addTypedArrayView<v8::Uint8Array>(context, buffer, length, "[[Uint8Array]]",
accumulator);
- if (buffer->ByteLength() % 2 == 0) {
- addTypedArrayView<v8::Int16Array>(context, buffer, length / 2,
- "[[Int16Array]]", accumulator);
- }
- if (buffer->ByteLength() % 4 == 0) {
- addTypedArrayView<v8::Int32Array>(context, buffer, length / 4,
- "[[Int32Array]]", accumulator);
- }
+
+ length = byte_length / 2;
+ if (length > v8::TypedArray::kMaxLength || (byte_length % 2) != 0) return;
+
+ addTypedArrayView<v8::Int16Array>(context, buffer, length, "[[Int16Array]]",
+ accumulator);
+
+ length = byte_length / 4;
+ if (length > v8::TypedArray::kMaxLength || (byte_length % 4) != 0) return;
+
+ addTypedArrayView<v8::Int32Array>(context, buffer, length, "[[Int32Array]]",
+ accumulator);
}
} // anonymous namespace
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index e985bda102..254e6e60d1 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
leszeks@chromium.org
mstarzinger@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index d7232fcd4c..d460c1a45f 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -14,15 +14,61 @@ namespace v8 {
namespace internal {
namespace interpreter {
+namespace {
+
+class OnHeapBytecodeArray final : public AbstractBytecodeArray {
+ public:
+ explicit OnHeapBytecodeArray(Handle<BytecodeArray> bytecode_array)
+ : array_(bytecode_array) {}
+
+ int length() const override { return array_->length(); }
+
+ int parameter_count() const override { return array_->parameter_count(); }
+
+ uint8_t get(int index) const override { return array_->get(index); }
+
+ void set(int index, uint8_t value) override {
+ return array_->set(index, value);
+ }
+
+ Address GetFirstBytecodeAddress() const override {
+ return array_->GetFirstBytecodeAddress();
+ }
+
+ Handle<Object> GetConstantAtIndex(int index,
+ Isolate* isolate) const override {
+ return handle(array_->constant_pool().get(index), isolate);
+ }
+
+ bool IsConstantAtIndexSmi(int index) const override {
+ return array_->constant_pool().get(index).IsSmi();
+ }
+
+ Smi GetConstantAtIndexAsSmi(int index) const override {
+ return Smi::cast(array_->constant_pool().get(index));
+ }
+
+ private:
+ Handle<BytecodeArray> array_;
+};
+
+} // namespace
+
BytecodeArrayAccessor::BytecodeArrayAccessor(
- Handle<BytecodeArray> bytecode_array, int initial_offset)
- : bytecode_array_(bytecode_array),
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array, int initial_offset)
+ : bytecode_array_(std::move(bytecode_array)),
bytecode_offset_(initial_offset),
operand_scale_(OperandScale::kSingle),
prefix_offset_(0) {
UpdateOperandScale();
}
+BytecodeArrayAccessor::BytecodeArrayAccessor(
+ Handle<BytecodeArray> bytecode_array, int initial_offset)
+ : BytecodeArrayAccessor(
+ base::make_unique<OnHeapBytecodeArray>(bytecode_array),
+ initial_offset) {}
+
void BytecodeArrayAccessor::SetOffset(int offset) {
bytecode_offset_ = offset;
UpdateOperandScale();
@@ -33,12 +79,12 @@ void BytecodeArrayAccessor::ApplyDebugBreak() {
// scaling prefix, which we can patch with the matching debug-break
// variant.
interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array_->get(bytecode_offset_));
+ interpreter::Bytecodes::FromByte(bytecode_array()->get(bytecode_offset_));
if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return;
interpreter::Bytecode debugbreak =
interpreter::Bytecodes::GetDebugBreak(bytecode);
- bytecode_array_->set(bytecode_offset_,
- interpreter::Bytecodes::ToByte(debugbreak));
+ bytecode_array()->set(bytecode_offset_,
+ interpreter::Bytecodes::ToByte(debugbreak));
}
void BytecodeArrayAccessor::UpdateOperandScale() {
@@ -197,13 +243,22 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
-Object BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
- return bytecode_array()->constant_pool().get(index);
+Handle<Object> BytecodeArrayAccessor::GetConstantAtIndex(
+ int index, Isolate* isolate) const {
+ return bytecode_array()->GetConstantAtIndex(index, isolate);
}
-Object BytecodeArrayAccessor::GetConstantForIndexOperand(
- int operand_index) const {
- return GetConstantAtIndex(GetIndexOperand(operand_index));
+bool BytecodeArrayAccessor::IsConstantAtIndexSmi(int index) const {
+ return bytecode_array()->IsConstantAtIndexSmi(index);
+}
+
+Smi BytecodeArrayAccessor::GetConstantAtIndexAsSmi(int index) const {
+ return bytecode_array()->GetConstantAtIndexAsSmi(index);
+}
+
+Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
+ int operand_index, Isolate* isolate) const {
+ return GetConstantAtIndex(GetIndexOperand(operand_index), isolate);
}
int BytecodeArrayAccessor::GetJumpTargetOffset() const {
@@ -215,7 +270,7 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
}
return GetAbsoluteOffset(relative_offset);
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- Smi smi = Smi::cast(GetConstantForIndexOperand(0));
+ Smi smi = GetConstantAtIndexAsSmi(GetIndexOperand(0));
return GetAbsoluteOffset(smi.value());
} else {
UNREACHABLE();
@@ -315,19 +370,16 @@ bool JumpTableTargetOffsets::iterator::operator!=(
}
void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
- if (table_offset_ >= table_end_) return;
-
- Object current = accessor_->GetConstantAtIndex(table_offset_);
- while (!current.IsSmi()) {
- DCHECK(current.IsTheHole());
+ while (table_offset_ < table_end_ &&
+ !accessor_->IsConstantAtIndexSmi(table_offset_)) {
++table_offset_;
++index_;
- if (table_offset_ >= table_end_) break;
- current = accessor_->GetConstantAtIndex(table_offset_);
}
+
// Make sure we haven't reached the end of the table with a hole in current.
- if (current.IsSmi()) {
- current_ = Smi::cast(current);
+ if (table_offset_ < table_end_) {
+ DCHECK(accessor_->IsConstantAtIndexSmi(table_offset_));
+ current_ = accessor_->GetConstantAtIndexAsSmi(table_offset_);
}
}
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index 91b6886204..97278af7bd 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -5,6 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/interpreter/bytecode-register.h"
@@ -64,8 +65,27 @@ class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
int case_value_base_;
};
+class V8_EXPORT_PRIVATE AbstractBytecodeArray {
+ public:
+ virtual int length() const = 0;
+ virtual int parameter_count() const = 0;
+ virtual uint8_t get(int index) const = 0;
+ virtual void set(int index, uint8_t value) = 0;
+ virtual Address GetFirstBytecodeAddress() const = 0;
+
+ virtual Handle<Object> GetConstantAtIndex(int index,
+ Isolate* isolate) const = 0;
+ virtual bool IsConstantAtIndexSmi(int index) const = 0;
+ virtual Smi GetConstantAtIndexAsSmi(int index) const = 0;
+
+ virtual ~AbstractBytecodeArray() = default;
+};
+
class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
public:
+ BytecodeArrayAccessor(std::unique_ptr<AbstractBytecodeArray> bytecode_array,
+ int initial_offset);
+
BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
int initial_offset);
@@ -78,8 +98,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
int current_offset() const { return bytecode_offset_; }
OperandScale current_operand_scale() const { return operand_scale_; }
int current_prefix_offset() const { return prefix_offset_; }
- const Handle<BytecodeArray>& bytecode_array() const {
- return bytecode_array_;
+ AbstractBytecodeArray* bytecode_array() const {
+ return bytecode_array_.get();
}
uint32_t GetFlagOperand(int operand_index) const;
@@ -93,8 +113,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
uint32_t GetNativeContextIndexOperand(int operand_index) const;
- Object GetConstantAtIndex(int offset) const;
- Object GetConstantForIndexOperand(int operand_index) const;
+ Handle<Object> GetConstantAtIndex(int offset, Isolate* isolate) const;
+ bool IsConstantAtIndexSmi(int offset) const;
+ Smi GetConstantAtIndexAsSmi(int offset) const;
+ Handle<Object> GetConstantForIndexOperand(int operand_index,
+ Isolate* isolate) const;
// Returns the absolute offset of the branch target at the current bytecode.
// It is an error to call this method if the bytecode is not for a jump or
@@ -122,7 +145,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
void UpdateOperandScale();
- Handle<BytecodeArray> bytecode_array_;
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array_;
int bytecode_offset_;
OperandScale operand_scale_;
int prefix_offset_;
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index b582311007..0fc57f85b8 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -11,6 +11,10 @@ namespace internal {
namespace interpreter {
BytecodeArrayIterator::BytecodeArrayIterator(
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array)
+ : BytecodeArrayAccessor(std::move(bytecode_array), 0) {}
+
+BytecodeArrayIterator::BytecodeArrayIterator(
Handle<BytecodeArray> bytecode_array)
: BytecodeArrayAccessor(bytecode_array, 0) {}
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 7ec9d1288c..e6b58deadc 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -14,7 +14,9 @@ namespace interpreter {
class V8_EXPORT_PRIVATE BytecodeArrayIterator final
: public BytecodeArrayAccessor {
public:
- explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
+ explicit BytecodeArrayIterator(std::unique_ptr<AbstractBytecodeArray> array);
+
+ explicit BytecodeArrayIterator(Handle<BytecodeArray> array);
void Advance();
bool done() const;
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
index 4ed5ce5e7d..9362232899 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -11,11 +11,21 @@ namespace internal {
namespace interpreter {
BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array, Zone* zone)
+ : BytecodeArrayAccessor(std::move(bytecode_array), 0), offsets_(zone) {
+ Initialize();
+}
+
+BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
Handle<BytecodeArray> bytecode_array, Zone* zone)
: BytecodeArrayAccessor(bytecode_array, 0), offsets_(zone) {
+ Initialize();
+}
+
+void BytecodeArrayRandomIterator::Initialize() {
// Run forwards through the bytecode array to determine the offset of each
// bytecode.
- while (current_offset() < bytecode_array->length()) {
+ while (current_offset() < bytecode_array()->length()) {
offsets_.push_back(current_offset());
SetOffset(current_offset() + current_bytecode_size());
}
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
index 7d559ea176..a3b69b7015 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -16,8 +16,10 @@ namespace interpreter {
class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
: public BytecodeArrayAccessor {
public:
- explicit BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array,
- Zone* zone);
+ BytecodeArrayRandomIterator(
+ std::unique_ptr<AbstractBytecodeArray> bytecode_array, Zone* zone);
+
+ BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array, Zone* zone);
BytecodeArrayRandomIterator& operator++() {
++current_index_;
@@ -66,6 +68,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
ZoneVector<int> offsets_;
int current_index_;
+ void Initialize();
void UpdateOffsetFromIndex();
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayRandomIterator);
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 3769eefda1..3ecc5e1a89 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -334,8 +334,8 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
// The jump fits within the range of an Imm16 operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
- WriteUnalignedUInt16(reinterpret_cast<Address>(operand_bytes),
- static_cast<uint16_t>(delta));
+ base::WriteUnalignedValue<uint16_t>(
+ reinterpret_cast<Address>(operand_bytes), static_cast<uint16_t>(delta));
} else {
// The jump does not fit within the range of an Imm16 operand, so
// commit reservation putting the offset into the constant pool,
@@ -344,8 +344,8 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
OperandSize::kShort, Smi::FromInt(delta));
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
- WriteUnalignedUInt16(reinterpret_cast<Address>(operand_bytes),
- static_cast<uint16_t>(entry));
+ base::WriteUnalignedValue<uint16_t>(
+ reinterpret_cast<Address>(operand_bytes), static_cast<uint16_t>(entry));
}
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
@@ -359,8 +359,8 @@ void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location,
Bytecodes::FromByte(bytecodes()->at(jump_location))));
constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
uint8_t operand_bytes[4];
- WriteUnalignedUInt32(reinterpret_cast<Address>(operand_bytes),
- static_cast<uint32_t>(delta));
+ base::WriteUnalignedValue<uint32_t>(reinterpret_cast<Address>(operand_bytes),
+ static_cast<uint32_t>(delta));
size_t operand_location = jump_location + 1;
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index 6f2f9dda0d..3a297b1ddf 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -42,9 +42,11 @@ int32_t BytecodeDecoder::DecodeSignedOperand(Address operand_start,
case OperandSize::kByte:
return *reinterpret_cast<const int8_t*>(operand_start);
case OperandSize::kShort:
- return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
+ return static_cast<int16_t>(
+ base::ReadUnalignedValue<uint16_t>(operand_start));
case OperandSize::kQuad:
- return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
+ return static_cast<int32_t>(
+ base::ReadUnalignedValue<uint32_t>(operand_start));
case OperandSize::kNone:
UNREACHABLE();
}
@@ -60,9 +62,9 @@ uint32_t BytecodeDecoder::DecodeUnsignedOperand(Address operand_start,
case OperandSize::kByte:
return *reinterpret_cast<const uint8_t*>(operand_start);
case OperandSize::kShort:
- return ReadUnalignedUInt16(operand_start);
+ return base::ReadUnalignedValue<uint16_t>(operand_start);
case OperandSize::kQuad:
- return ReadUnalignedUInt32(operand_start);
+ return base::ReadUnalignedValue<uint32_t>(operand_start);
case OperandSize::kNone:
UNREACHABLE();
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 706580ac14..d3b27b4375 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -915,6 +915,45 @@ class BytecodeGenerator::IteratorRecord final {
Register next_;
};
+namespace {
+
+// A map from property names to getter/setter pairs allocated in the zone that
+// also provides a way of accessing the pairs in the order they were first
+// added so that the generated bytecode is always the same.
+class AccessorTable
+ : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ bool (*)(void*, void*),
+ ZoneAllocationPolicy> {
+ public:
+ explicit AccessorTable(Zone* zone)
+ : base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ bool (*)(void*, void*), ZoneAllocationPolicy>(
+ Literal::Match, ZoneAllocationPolicy(zone)),
+ zone_(zone) {}
+
+ Iterator lookup(Literal* literal) {
+ Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
+ if (it->second == nullptr) {
+ it->second = new (zone_) ObjectLiteral::Accessors();
+ ordered_accessors_.push_back({literal, it->second});
+ }
+ return it;
+ }
+
+ const std::vector<std::pair<Literal*, ObjectLiteral::Accessors*>>&
+ ordered_accessors() {
+ return ordered_accessors_;
+ }
+
+ private:
+ std::vector<std::pair<Literal*, ObjectLiteral::Accessors*>>
+ ordered_accessors_;
+
+ Zone* zone_;
+};
+
+} // namespace
+
#ifdef DEBUG
static bool IsInEagerLiterals(
@@ -1354,7 +1393,8 @@ void BytecodeGenerator::VisitModuleNamespaceImports() {
RegisterAllocationScope register_scope(this);
Register module_request = register_allocator()->NewRegister();
- ModuleDescriptor* descriptor = closure_scope()->AsModuleScope()->module();
+ SourceTextModuleDescriptor* descriptor =
+ closure_scope()->AsModuleScope()->module();
for (auto entry : descriptor->namespace_imports()) {
builder()
->LoadLiteral(Smi::FromInt(entry->module_request))
@@ -2201,6 +2241,19 @@ void BytecodeGenerator::VisitInitializeClassMembersStatement(
}
}
+void BytecodeGenerator::BuildThrowPrivateMethodWriteError(
+ const AstRawString* name) {
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(Smi::FromEnum(MessageTemplate::kInvalidPrivateMethodWrite))
+ .StoreAccumulatorInRegister(args[0])
+ .LoadLiteral(name)
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewTypeError, args)
+ .Throw();
+}
+
void BytecodeGenerator::BuildPrivateBrandInitialization(Register receiver) {
RegisterList brand_args = register_allocator()->NewRegisterList(2);
Variable* brand = info()->scope()->outer_scope()->AsClassScope()->brand();
@@ -2366,13 +2419,6 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
RegisterAllocationScope register_scope(this);
Expression* property = expr->properties()->first()->value();
Register from_value = VisitForRegisterValue(property);
-
- BytecodeLabels clone_object(zone());
- builder()->JumpIfUndefined(clone_object.New());
- builder()->JumpIfNull(clone_object.New());
- builder()->ToObject(from_value);
-
- clone_object.Bind(builder());
int clone_index = feedback_index(feedback_spec()->AddCloneObjectSlot());
builder()->CloneObject(from_value, flags, clone_index);
builder()->StoreAccumulatorInRegister(literal);
@@ -2473,14 +2519,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Define accessors, using only a single call to the runtime for each pair of
// corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end(); ++it) {
+ for (auto accessors : accessor_table.ordered_accessors()) {
RegisterAllocationScope inner_register_scope(this);
RegisterList args = register_allocator()->NewRegisterList(5);
builder()->MoveRegister(literal, args[0]);
- VisitForRegisterValue(it->first, args[1]);
- VisitObjectLiteralAccessor(literal, it->second->getter, args[2]);
- VisitObjectLiteralAccessor(literal, it->second->setter, args[3]);
+ VisitForRegisterValue(accessors.first, args[1]);
+ VisitObjectLiteralAccessor(literal, accessors.second->getter, args[2]);
+ VisitObjectLiteralAccessor(literal, accessors.second->setter, args[3]);
builder()
->LoadLiteral(Smi::FromInt(NONE))
.StoreAccumulatorInRegister(args[4])
@@ -3156,6 +3201,13 @@ BytecodeGenerator::AssignmentLhsData::NamedSuperProperty(
}
// static
BytecodeGenerator::AssignmentLhsData
+BytecodeGenerator::AssignmentLhsData::PrivateMethod(Register object,
+ const AstRawString* name) {
+ return AssignmentLhsData(PRIVATE_METHOD, nullptr, RegisterList(), object,
+ Register(), nullptr, name);
+}
+// static
+BytecodeGenerator::AssignmentLhsData
BytecodeGenerator::AssignmentLhsData::KeyedSuperProperty(
RegisterList super_property_args) {
return AssignmentLhsData(KEYED_SUPER_PROPERTY, nullptr, super_property_args,
@@ -3185,6 +3237,13 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
Register key = VisitForRegisterValue(property->key());
return AssignmentLhsData::KeyedProperty(object, key);
}
+ case PRIVATE_METHOD: {
+ DCHECK(!property->IsSuperAccess());
+ AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
+ Register object = VisitForRegisterValue(property->obj());
+ const AstRawString* name = property->key()->AsVariableProxy()->raw_name();
+ return AssignmentLhsData::PrivateMethod(object, name);
+ }
case NAMED_SUPER_PROPERTY: {
AccumulatorPreservingScope scope(this, accumulator_preserving_mode);
RegisterList super_property_args =
@@ -3219,15 +3278,16 @@ BytecodeGenerator::AssignmentLhsData BytecodeGenerator::PrepareAssignmentLhs(
// Build the iteration finalizer called in the finally block of an iteration
// protocol execution. This closes the iterator if needed, and suppresses any
-// exception it throws if necessary.
+// exception it throws if necessary, including the exception when the return
+// method is not callable.
//
// In pseudo-code, this builds:
//
// if (!done) {
// let method = iterator.return
// if (method !== null && method !== undefined) {
-// if (typeof(method) !== "function") throw TypeError
// try {
+// if (typeof(method) !== "function") throw TypeError
// let return_val = method.call(iterator)
// if (!%IsObject(return_val)) throw TypeError
// } catch (e) {
@@ -3259,33 +3319,35 @@ void BytecodeGenerator::BuildFinalizeIteration(
.JumpIfUndefined(iterator_is_done.New())
.JumpIfNull(iterator_is_done.New());
- // if (typeof(method) !== "function") throw TypeError
- BytecodeLabel if_callable;
- builder()
- ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable);
- {
- // throw %NewTypeError(kReturnMethodNotCallable)
- RegisterAllocationScope register_scope(this);
- RegisterList new_type_error_args = register_allocator()->NewRegisterList(2);
- builder()
- ->LoadLiteral(Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable))
- .StoreAccumulatorInRegister(new_type_error_args[0])
- .LoadLiteral(ast_string_constants()->empty_string())
- .StoreAccumulatorInRegister(new_type_error_args[1])
- .CallRuntime(Runtime::kNewTypeError, new_type_error_args)
- .Throw();
- }
- builder()->Bind(&if_callable);
-
{
RegisterAllocationScope register_scope(this);
BuildTryCatch(
// try {
+ // if (typeof(method) !== "function") throw TypeError
// let return_val = method.call(iterator)
// if (!%IsObject(return_val)) throw TypeError
// }
[&]() {
+ BytecodeLabel if_callable;
+ builder()
+ ->CompareTypeOf(TestTypeOfFlags::LiteralFlag::kFunction)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &if_callable);
+ {
+ // throw %NewTypeError(kReturnMethodNotCallable)
+ RegisterAllocationScope register_scope(this);
+ RegisterList new_type_error_args =
+ register_allocator()->NewRegisterList(2);
+ builder()
+ ->LoadLiteral(
+ Smi::FromEnum(MessageTemplate::kReturnMethodNotCallable))
+ .StoreAccumulatorInRegister(new_type_error_args[0])
+ .LoadLiteral(ast_string_constants()->empty_string())
+ .StoreAccumulatorInRegister(new_type_error_args[1])
+ .CallRuntime(Runtime::kNewTypeError, new_type_error_args)
+ .Throw();
+ }
+ builder()->Bind(&if_callable);
+
RegisterList args(iterator.object());
builder()->CallProperty(
method, args, feedback_index(feedback_spec()->AddCallICSlot()));
@@ -3736,6 +3798,10 @@ void BytecodeGenerator::BuildAssignment(
lhs_data.super_property_args());
break;
}
+ case PRIVATE_METHOD: {
+ BuildThrowPrivateMethodWriteError(lhs_data.name());
+ break;
+ }
}
}
@@ -3781,6 +3847,10 @@ void BytecodeGenerator::VisitCompoundAssignment(CompoundAssignment* expr) {
lhs_data.super_property_args().Truncate(3));
break;
}
+ case PRIVATE_METHOD: {
+ BuildThrowPrivateMethodWriteError(lhs_data.name());
+ break;
+ }
}
BinaryOperation* binop = expr->AsCompoundAssignment()->binary_operation();
FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
@@ -4238,6 +4308,23 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* property) {
case KEYED_SUPER_PROPERTY:
VisitKeyedSuperPropertyLoad(property, Register::invalid_value());
break;
+ case PRIVATE_METHOD: {
+ Variable* private_name = property->key()->AsVariableProxy()->var();
+
+ // Perform the brand check.
+ DCHECK(private_name->requires_brand_check());
+ ClassScope* scope = private_name->scope()->AsClassScope();
+ Variable* brand = scope->brand();
+ BuildVariableLoadForAccumulatorValue(brand, HoleCheckMode::kElided);
+ builder()->SetExpressionPosition(property);
+ builder()->LoadKeyedProperty(
+ obj, feedback_index(feedback_spec()->AddKeyedLoadICSlot()));
+
+ // In the case of private methods, property->key() is the function to be
+ // loaded (stored in a context slot), so load this directly.
+ VisitForAccumulatorValue(property->key());
+ break;
+ }
}
}
@@ -4342,7 +4429,8 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// the semantics of the underlying call type.
switch (call_type) {
case Call::NAMED_PROPERTY_CALL:
- case Call::KEYED_PROPERTY_CALL: {
+ case Call::KEYED_PROPERTY_CALL:
+ case Call::PRIVATE_CALL: {
Property* property = callee_expr->AsProperty();
VisitAndPushIntoRegisterList(property->obj(), &args);
VisitPropertyLoadForRegister(args.last_register(), property, callee);
@@ -4678,6 +4766,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* unary) {
// Delete of an object property is allowed both in sloppy
// and strict modes.
Property* property = expr->AsProperty();
+ DCHECK(!property->IsPrivateReference());
Register object = VisitForRegisterValue(property->obj());
VisitForAccumulatorValue(property->key());
builder()->Delete(object, language_mode());
@@ -4785,6 +4874,11 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, load_super_args);
break;
}
+ case PRIVATE_METHOD: {
+ BuildThrowPrivateMethodWriteError(
+ property->key()->AsVariableProxy()->raw_name());
+ break;
+ }
}
// Save result for postfix expressions.
@@ -4851,6 +4945,11 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
.CallRuntime(Runtime::kStoreKeyedToSuper, super_property_args);
break;
}
+ case PRIVATE_METHOD: {
+ BuildThrowPrivateMethodWriteError(
+ property->key()->AsVariableProxy()->raw_name());
+ break;
+ }
}
// Restore old value for postfix expressions.
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index dda8b15c80..b754d2c296 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -84,6 +84,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Register object,
const AstRawString* name);
static AssignmentLhsData KeyedProperty(Register object, Register key);
+ static AssignmentLhsData PrivateMethod(Register object,
+ const AstRawString* name);
static AssignmentLhsData NamedSuperProperty(
RegisterList super_property_args);
static AssignmentLhsData KeyedSuperProperty(
@@ -99,15 +101,16 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
return object_expr_;
}
Register object() const {
- DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY);
+ DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == KEYED_PROPERTY ||
+ assign_type_ == PRIVATE_METHOD);
return object_;
}
Register key() const {
- DCHECK_EQ(assign_type_, KEYED_PROPERTY);
+ DCHECK(assign_type_ == KEYED_PROPERTY);
return key_;
}
const AstRawString* name() const {
- DCHECK_EQ(assign_type_, NAMED_PROPERTY);
+ DCHECK(assign_type_ == NAMED_PROPERTY || assign_type_ == PRIVATE_METHOD);
return name_;
}
RegisterList super_property_args() const {
@@ -135,7 +138,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
//
// NON_PROPERTY: expr
// NAMED_PROPERTY: object_expr, object, name
- // KEYED_PROPERTY: object, key
+ // KEYED_PROPERTY, PRIVATE_METHOD: object, key
// NAMED_SUPER_PROPERTY: super_property_args
// KEYED_SUPER_PROPERT: super_property_args
Expression* expr_;
@@ -238,8 +241,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Build jump to targets[value], where
// start_index <= value < start_index + size.
- void BuildIndexedJump(Register value, size_t start_index, size_t size,
- ZoneVector<BytecodeLabel>& targets);
+ void BuildIndexedJump(
+ Register value, size_t start_index, size_t size,
+ ZoneVector<BytecodeLabel>& targets); // NOLINT(runtime/references)
void BuildNewLocalActivationContext();
void BuildLocalActivationContextInitialization();
@@ -291,6 +295,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
+ void BuildThrowPrivateMethodWriteError(const AstRawString* name);
void BuildPrivateClassMemberNameAssignment(ClassLiteral::Property* property);
void BuildClassLiteral(ClassLiteral* expr, Register name);
void VisitClassLiteral(ClassLiteral* expr, Register name);
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index db7ed750dd..66b8d1f937 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -5,9 +5,10 @@
#ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
-#include "src/execution/frames.h"
+#include "src/codegen/handler-table.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects/fixed-array.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 0af58b674f..7291ea1c35 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -1265,7 +1265,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
// Make sure we include the current bytecode in the budget calculation.
TNode<Int32T> budget_after_bytecode =
- Signed(Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize())));
+ Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));
Label done(this);
TVARIABLE(Int32T, new_budget);
@@ -1501,9 +1501,9 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
UpdateInterruptBudget(profiling_weight, true);
}
-Node* InterpreterAssembler::LoadOSRNestingLevel() {
+Node* InterpreterAssembler::LoadOsrNestingLevel() {
return LoadObjectField(BytecodeArrayTaggedPointer(),
- BytecodeArray::kOSRNestingLevelOffset,
+ BytecodeArray::kOsrNestingLevelOffset,
MachineType::Int8());
}
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index db4523b744..a135eaacdd 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -237,7 +237,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
void UpdateInterruptBudgetOnReturn();
// Returns the OSR nesting level from the bytecode header.
- compiler::Node* LoadOSRNestingLevel();
+ compiler::Node* LoadOsrNestingLevel();
// Dispatch to the bytecode.
compiler::Node* Dispatch();
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 852aae4482..00ce8eaf68 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -21,9 +21,9 @@
#include "src/interpreter/interpreter-intrinsics-generator.h"
#include "src/objects/cell.h"
#include "src/objects/js-generator.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
+#include "src/objects/source-text-module.h"
#include "src/utils/ostreams.h"
namespace v8 {
@@ -512,17 +512,18 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
// Load receiver.
Node* recv = LoadRegisterAtOperandIndex(0);
- // Load the name.
- // TODO(jgruber): Not needed for monomorphic smi handler constant/field case.
- Node* name = LoadConstantPoolEntryAtOperandIndex(1);
- Node* context = GetContext();
+ // Load the name and context lazily.
+ LazyNode<Name> name = [=] {
+ return CAST(LoadConstantPoolEntryAtOperandIndex(1));
+ };
+ LazyNode<Context> context = [=] { return CAST(GetContext()); };
Label done(this);
Variable var_result(this, MachineRepresentation::kTagged);
ExitPoint exit_point(this, &done, &var_result);
- AccessorAssembler::LoadICParameters params(context, recv, name, smi_slot,
- feedback_vector);
+ AccessorAssembler::LazyLoadICParameters params(context, recv, name, smi_slot,
+ feedback_vector);
AccessorAssembler accessor_asm(state());
accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
@@ -735,7 +736,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
BIND(&if_export);
{
TNode<FixedArray> regular_exports =
- CAST(LoadObjectField(module, Module::kRegularExportsOffset));
+ CAST(LoadObjectField(module, SourceTextModule::kRegularExportsOffset));
// The actual array index is (cell_index - 1).
Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1));
Node* cell = LoadFixedArrayElement(regular_exports, export_index);
@@ -746,7 +747,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
BIND(&if_import);
{
TNode<FixedArray> regular_imports =
- CAST(LoadObjectField(module, Module::kRegularImportsOffset));
+ CAST(LoadObjectField(module, SourceTextModule::kRegularImportsOffset));
// The actual array index is (-cell_index - 1).
Node* import_index = IntPtrSub(IntPtrConstant(-1), cell_index);
Node* cell = LoadFixedArrayElement(regular_imports, import_index);
@@ -777,7 +778,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
BIND(&if_export);
{
TNode<FixedArray> regular_exports =
- CAST(LoadObjectField(module, Module::kRegularExportsOffset));
+ CAST(LoadObjectField(module, SourceTextModule::kRegularExportsOffset));
// The actual array index is (cell_index - 1).
Node* export_index = IntPtrSub(cell_index, IntPtrConstant(1));
Node* cell = LoadFixedArrayElement(regular_exports, export_index);
@@ -2336,7 +2337,7 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
Node* relative_jump = BytecodeOperandUImmWord(0);
Node* loop_depth = BytecodeOperandImm(1);
- Node* osr_level = LoadOSRNestingLevel();
+ Node* osr_level = LoadOsrNestingLevel();
// Check if OSR points at the given {loop_depth} are armed by comparing it to
// the current {osr_level} loaded from the header of the BytecodeArray.
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index 19d17baa52..d581802340 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -12,9 +12,9 @@
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects/js-generator.h"
-#include "src/objects/module.h"
-#include "src/utils/allocation.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/source-text-module.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -324,7 +324,7 @@ Node* IntrinsicsGenerator::GetImportMetaObject(
Node* const module =
__ LoadContextElement(module_context, Context::EXTENSION_INDEX);
Node* const import_meta =
- __ LoadObjectField(module, Module::kImportMetaOffset);
+ __ LoadObjectField(module, SourceTextModule::kImportMetaOffset);
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 9e06d95fde..eb91ae06a4 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -112,7 +112,7 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) {
CHECK(code_entry == kNullAddress ||
InstructionStream::PcIsOffHeap(isolate_, code_entry));
}
-#endif // ENABLE_SLOW_DCHECKS
+#endif // DEBUG
return;
}
@@ -230,12 +230,12 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
return SUCCEEDED;
}
-UnoptimizedCompilationJob* Interpreter::NewCompilationJob(
+std::unique_ptr<UnoptimizedCompilationJob> Interpreter::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals) {
- return new InterpreterCompilationJob(parse_info, literal, allocator,
- eager_inner_literals);
+ return base::make_unique<InterpreterCompilationJob>(
+ parse_info, literal, allocator, eager_inner_literals);
}
void Interpreter::ForEachBytecode(
@@ -290,14 +290,9 @@ bool Interpreter::IsDispatchTableInitialized() const {
}
const char* Interpreter::LookupNameOfBytecodeHandler(const Code code) {
-#ifdef ENABLE_DISASSEMBLER
-#define RETURN_NAME(Name, ...) \
- if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code.entry()) { \
- return #Name; \
+ if (code.kind() == Code::BYTECODE_HANDLER) {
+ return Builtins::name(code.builtin_index());
}
- BYTECODE_LIST(RETURN_NAME)
-#undef RETURN_NAME
-#endif // ENABLE_DISASSEMBLER
return nullptr;
}
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 8c6216b6a6..e8c494a6ce 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -43,7 +43,7 @@ class Interpreter {
// Creates a compilation job which will generate bytecode for |literal|.
// Additionally, if |eager_inner_literals| is not null, adds any eagerly
// compilable inner FunctionLiterals to this list.
- static UnoptimizedCompilationJob* NewCompilationJob(
+ static std::unique_ptr<UnoptimizedCompilationJob> NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
std::vector<FunctionLiteral*>* eager_inner_literals);
@@ -60,8 +60,8 @@ class Interpreter {
// GC support.
void IterateDispatchTable(RootVisitor* v);
- // Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
- const char* LookupNameOfBytecodeHandler(const Code code);
+ // Disassembler support.
+ V8_EXPORT_PRIVATE const char* LookupNameOfBytecodeHandler(const Code code);
V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
diff --git a/deps/v8/src/json/OWNERS b/deps/v8/src/json/OWNERS
index 9a078e6d10..bc56882a9a 100644
--- a/deps/v8/src/json/OWNERS
+++ b/deps/v8/src/json/OWNERS
@@ -1,3 +1,6 @@
ishell@chromium.org
+jkummerow@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
index 83bacc81a6..fa2118af1e 100644
--- a/deps/v8/src/json/json-parser.cc
+++ b/deps/v8/src/json/json-parser.cc
@@ -4,8 +4,8 @@
#include "src/json/json-parser.h"
+#include "src/common/message-template.h"
#include "src/debug/debug.h"
-#include "src/execution/message-template.h"
#include "src/numbers/conversions.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/field-type.h"
@@ -499,7 +499,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
Representation expected_representation = details.representation();
if (!value->FitsRepresentation(expected_representation)) {
- Representation representation = value->OptimalRepresentation();
+ Representation representation = value->OptimalRepresentation(isolate());
representation = representation.generalize(expected_representation);
if (!expected_representation.CanBeInPlaceChangedTo(representation)) {
map = ParentOfDescriptorOwner(isolate_, map, target, descriptor);
diff --git a/deps/v8/src/json/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index 2280292332..a021fbbc1b 100644
--- a/deps/v8/src/json/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -4,7 +4,7 @@
#include "src/json/json-stringifier.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/numbers/conversions.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
@@ -75,7 +75,8 @@ class JsonStringifier {
return SerializeDouble(object->value());
}
- Result SerializeJSValue(Handle<JSValue> object, Handle<Object> key);
+ Result SerializeJSPrimitiveWrapper(Handle<JSPrimitiveWrapper> object,
+ Handle<Object> key);
V8_INLINE Result SerializeJSArray(Handle<JSArray> object, Handle<Object> key);
V8_INLINE Result SerializeJSObject(Handle<JSObject> object,
@@ -257,8 +258,9 @@ bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
if (element->IsNumber() || element->IsString()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, key, Object::ToString(isolate_, element), false);
- } else if (element->IsJSValue()) {
- Handle<Object> value(Handle<JSValue>::cast(element)->value(), isolate_);
+ } else if (element->IsJSPrimitiveWrapper()) {
+ Handle<Object> value(Handle<JSPrimitiveWrapper>::cast(element)->value(),
+ isolate_);
if (value->IsNumber() || value->IsString()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, key, Object::ToString(isolate_, element), false);
@@ -281,8 +283,9 @@ bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
bool JsonStringifier::InitializeGap(Handle<Object> gap) {
DCHECK_NULL(gap_);
HandleScope scope(isolate_);
- if (gap->IsJSValue()) {
- Handle<Object> value(Handle<JSValue>::cast(gap)->value(), isolate_);
+ if (gap->IsJSPrimitiveWrapper()) {
+ Handle<Object> value(Handle<JSPrimitiveWrapper>::cast(gap)->value(),
+ isolate_);
if (value->IsString()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap,
Object::ToString(isolate_, gap), false);
@@ -558,9 +561,10 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
case JS_ARRAY_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeJSArray(Handle<JSArray>::cast(object), key);
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSValue(Handle<JSValue>::cast(object), key);
+ return SerializeJSPrimitiveWrapper(
+ Handle<JSPrimitiveWrapper>::cast(object), key);
case SYMBOL_TYPE:
return UNCHANGED;
default:
@@ -583,8 +587,8 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
UNREACHABLE();
}
-JsonStringifier::Result JsonStringifier::SerializeJSValue(
- Handle<JSValue> object, Handle<Object> key) {
+JsonStringifier::Result JsonStringifier::SerializeJSPrimitiveWrapper(
+ Handle<JSPrimitiveWrapper> object, Handle<Object> key) {
Object raw = object->value();
if (raw.IsString()) {
Handle<Object> value;
diff --git a/deps/v8/src/libplatform/tracing/OWNERS b/deps/v8/src/libplatform/tracing/OWNERS
new file mode 100644
index 0000000000..507f904088
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/OWNERS
@@ -0,0 +1 @@
+petermarshall@chromium.org
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
index 99db86a7d1..94b74ef255 100644
--- a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
+++ b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
@@ -8,6 +8,8 @@
#include "base/trace_event/common/trace_event_common.h"
#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
+#include "perfetto/trace/trace.pb.h"
+#include "perfetto/tracing.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -111,7 +113,7 @@ void JSONTraceEventListener::AppendArgValue(
}
void JSONTraceEventListener::ProcessPacket(
- const ::perfetto::protos::ChromeTracePacket& packet) {
+ const ::perfetto::protos::TracePacket& packet) {
for (const ::perfetto::protos::ChromeTraceEvent& event :
packet.chrome_events().trace_events()) {
if (append_comma_) *stream_ << ",";
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
index fc4979f14c..d13332871f 100644
--- a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
+++ b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
@@ -26,10 +26,9 @@ class JSONTraceEventListener final : public TraceEventListener {
explicit JSONTraceEventListener(std::ostream* stream);
~JSONTraceEventListener() override;
- private:
- void ProcessPacket(
- const ::perfetto::protos::ChromeTracePacket& packet) override;
+ void ProcessPacket(const ::perfetto::protos::TracePacket& packet) override;
+ private:
// Internal implementation
void AppendJSONString(const char* str);
void AppendArgValue(const ::perfetto::protos::ChromeTraceEvent_Arg& arg);
diff --git a/deps/v8/src/libplatform/tracing/perfetto-consumer.cc b/deps/v8/src/libplatform/tracing/perfetto-consumer.cc
deleted file mode 100644
index 8071fe52d5..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-consumer.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-consumer.h"
-
-#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
-#include "perfetto/tracing/core/trace_packet.h"
-#include "src/base/macros.h"
-#include "src/base/platform/semaphore.h"
-#include "src/libplatform/tracing/trace-event-listener.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-PerfettoConsumer::PerfettoConsumer(base::Semaphore* finished)
- : finished_semaphore_(finished) {}
-
-void PerfettoConsumer::OnTraceData(std::vector<::perfetto::TracePacket> packets,
- bool has_more) {
- for (const ::perfetto::TracePacket& packet : packets) {
- perfetto::protos::ChromeTracePacket proto_packet;
- bool success = packet.Decode(&proto_packet);
- USE(success);
- DCHECK(success);
-
- for (TraceEventListener* listener : listeners_) {
- listener->ProcessPacket(proto_packet);
- }
- }
- // PerfettoTracingController::StopTracing() waits on this sempahore. This is
- // so that we can ensure that this consumer has finished consuming all of the
- // trace events from the buffer before the buffer is destroyed.
- if (!has_more) finished_semaphore_->Signal();
-}
-
-void PerfettoConsumer::AddTraceEventListener(TraceEventListener* listener) {
- listeners_.push_back(listener);
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-consumer.h b/deps/v8/src/libplatform/tracing/perfetto-consumer.h
deleted file mode 100644
index 83d0c48c1b..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-consumer.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
-
-#include <memory>
-
-#include "perfetto/tracing/core/consumer.h"
-#include "perfetto/tracing/core/tracing_service.h"
-#include "src/base/logging.h"
-
-namespace perfetto {
-namespace protos {
-class ChromeTracePacket;
-} // namespace protos
-} // namespace perfetto
-
-namespace v8 {
-
-namespace base {
-class Semaphore;
-}
-
-namespace platform {
-namespace tracing {
-
-class TraceEventListener;
-
-// A Perfetto Consumer gets streamed trace events from the Service via
-// OnTraceData(). A Consumer can be configured (via
-// service_endpoint()->EnableTracing()) to listen to various different types of
-// trace events. The Consumer is responsible for producing whatever tracing
-// output the system should have.
-
-// Implements the V8-specific logic for interacting with the tracing controller
-// and directs trace events to the added TraceEventListeners.
-class PerfettoConsumer final : public ::perfetto::Consumer {
- public:
- explicit PerfettoConsumer(base::Semaphore* finished);
-
- using ServiceEndpoint = ::perfetto::TracingService::ConsumerEndpoint;
-
- // Register a trace event listener that will receive trace events from this
- // consumer. This can be called multiple times to register multiple listeners,
- // but must be called before starting tracing.
- void AddTraceEventListener(TraceEventListener* listener);
-
- ServiceEndpoint* service_endpoint() const { return service_endpoint_.get(); }
- void set_service_endpoint(std::unique_ptr<ServiceEndpoint> endpoint) {
- service_endpoint_ = std::move(endpoint);
- }
-
- private:
- // ::perfetto::Consumer implementation
- void OnConnect() override {}
- void OnDisconnect() override {}
- void OnTracingDisabled() override {}
- void OnTraceData(std::vector<::perfetto::TracePacket> packets,
- bool has_more) override;
- void OnDetach(bool success) override {}
- void OnAttach(bool success, const ::perfetto::TraceConfig&) override {}
- void OnTraceStats(bool success, const ::perfetto::TraceStats&) override {
- UNREACHABLE();
- }
- void OnObservableEvents(const ::perfetto::ObservableEvents&) override {
- UNREACHABLE();
- }
-
- std::unique_ptr<ServiceEndpoint> service_endpoint_;
- base::Semaphore* finished_semaphore_;
- std::vector<TraceEventListener*> listeners_;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-producer.cc b/deps/v8/src/libplatform/tracing/perfetto-producer.cc
deleted file mode 100644
index 814dca6b59..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-producer.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-producer.h"
-
-#include "perfetto/tracing/core/data_source_config.h"
-#include "perfetto/tracing/core/data_source_descriptor.h"
-#include "perfetto/tracing/core/trace_writer.h"
-#include "src/libplatform/tracing/perfetto-tasks.h"
-#include "src/libplatform/tracing/perfetto-tracing-controller.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-void PerfettoProducer::OnConnect() {
- ::perfetto::DataSourceDescriptor ds_desc;
- ds_desc.set_name("v8.trace_events");
- service_endpoint_->RegisterDataSource(ds_desc);
-}
-
-void PerfettoProducer::StartDataSource(
- ::perfetto::DataSourceInstanceID, const ::perfetto::DataSourceConfig& cfg) {
- target_buffer_ = cfg.target_buffer();
- tracing_controller_->OnProducerReady();
-}
-
-void PerfettoProducer::StopDataSource(::perfetto::DataSourceInstanceID) {
- target_buffer_ = 0;
-}
-
-std::unique_ptr<::perfetto::TraceWriter> PerfettoProducer::CreateTraceWriter()
- const {
- CHECK_NE(0, target_buffer_);
- return service_endpoint_->CreateTraceWriter(target_buffer_);
-}
-
-PerfettoProducer::PerfettoProducer(
- PerfettoTracingController* tracing_controller)
- : tracing_controller_(tracing_controller) {}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-producer.h b/deps/v8/src/libplatform/tracing/perfetto-producer.h
deleted file mode 100644
index 2a363e8bf8..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-producer.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
-
-#include <atomic>
-#include <memory>
-
-#include "perfetto/tracing/core/producer.h"
-#include "perfetto/tracing/core/tracing_service.h"
-#include "src/base/logging.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-class PerfettoTracingController;
-
-class PerfettoProducer final : public ::perfetto::Producer {
- public:
- using ServiceEndpoint = ::perfetto::TracingService::ProducerEndpoint;
-
- explicit PerfettoProducer(PerfettoTracingController* tracing_controller);
-
- ServiceEndpoint* service_endpoint() const { return service_endpoint_.get(); }
- void set_service_endpoint(std::unique_ptr<ServiceEndpoint> endpoint) {
- service_endpoint_ = std::move(endpoint);
- }
-
- // Create a TraceWriter for the calling thread. The TraceWriter is a
- // thread-local object that writes data into a buffer which is shared between
- // all TraceWriters for a given PerfettoProducer instance. Can only be called
- // after the StartDataSource() callback has been received from the service, as
- // this provides the buffer.
- std::unique_ptr<::perfetto::TraceWriter> CreateTraceWriter() const;
-
- private:
- // ::perfetto::Producer implementation
- void OnConnect() override;
- void OnDisconnect() override {}
- void OnTracingSetup() override {}
- void SetupDataSource(::perfetto::DataSourceInstanceID,
- const ::perfetto::DataSourceConfig&) override {}
- void StartDataSource(::perfetto::DataSourceInstanceID,
- const ::perfetto::DataSourceConfig& cfg) override;
- void StopDataSource(::perfetto::DataSourceInstanceID) override;
- // TODO(petermarshall): Implement Flush(). A final flush happens when the
- // TraceWriter object for each thread is destroyed, but this will be more
- // efficient.
- void Flush(::perfetto::FlushRequestID,
- const ::perfetto::DataSourceInstanceID*, size_t) override {}
-
- void ClearIncrementalState(
- const ::perfetto::DataSourceInstanceID* data_source_ids,
- size_t num_data_sources) override {
- UNREACHABLE();
- }
-
- std::unique_ptr<ServiceEndpoint> service_endpoint_;
- uint32_t target_buffer_ = 0;
- PerfettoTracingController* tracing_controller_;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc b/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc
deleted file mode 100644
index 6c31c05070..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-shared-memory.h"
-
-#include "src/base/platform/platform.h"
-#include "src/base/template-utils.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-PerfettoSharedMemory::PerfettoSharedMemory(size_t size)
- : size_(size),
- paged_memory_(::perfetto::base::PagedMemory::Allocate(size)) {
- // TODO(956543): Find a cross-platform solution.
- // TODO(petermarshall): Don't assume that size is page-aligned.
-}
-
-std::unique_ptr<::perfetto::SharedMemory>
-PerfettoSharedMemoryFactory::CreateSharedMemory(size_t size) {
- return base::make_unique<PerfettoSharedMemory>(size);
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h b/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h
deleted file mode 100644
index 7a987cc7f0..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
-
-#include "perfetto/tracing/core/shared_memory.h"
-
-#include "third_party/perfetto/include/perfetto/base/paged_memory.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-// Perfetto requires a shared memory implementation for multi-process embedders
-// but V8 is single process. We implement it here using PagedMemory from
-// perfetto.
-class PerfettoSharedMemory : public ::perfetto::SharedMemory {
- public:
- explicit PerfettoSharedMemory(size_t size);
-
- // The PagedMemory destructor will free the underlying memory when this object
- // is destroyed.
-
- void* start() const override { return paged_memory_.Get(); }
- size_t size() const override { return size_; }
-
- private:
- size_t size_;
- ::perfetto::base::PagedMemory paged_memory_;
-};
-
-class PerfettoSharedMemoryFactory : public ::perfetto::SharedMemory::Factory {
- public:
- ~PerfettoSharedMemoryFactory() override = default;
- std::unique_ptr<::perfetto::SharedMemory> CreateSharedMemory(
- size_t size) override;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tasks.cc b/deps/v8/src/libplatform/tracing/perfetto-tasks.cc
deleted file mode 100644
index 70d00ed626..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-tasks.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-tasks.h"
-
-#include "src/base/platform/semaphore.h"
-#include "src/base/platform/time.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-PerfettoTaskRunner::PerfettoTaskRunner() : runner_(1, DefaultTimeFunction) {}
-
-PerfettoTaskRunner::~PerfettoTaskRunner() { runner_.Terminate(); }
-
-// static
-double PerfettoTaskRunner::DefaultTimeFunction() {
- return (base::TimeTicks::HighResolutionNow() - base::TimeTicks())
- .InSecondsF();
-}
-
-void PerfettoTaskRunner::PostTask(std::function<void()> f) {
- runner_.PostTask(base::make_unique<TracingTask>(std::move(f)));
-}
-
-void PerfettoTaskRunner::PostDelayedTask(std::function<void()> f,
- uint32_t delay_ms) {
- double delay_in_seconds =
- delay_ms / static_cast<double>(base::Time::kMillisecondsPerSecond);
- runner_.PostDelayedTask(base::make_unique<TracingTask>(std::move(f)),
- delay_in_seconds);
-}
-
-bool PerfettoTaskRunner::RunsTasksOnCurrentThread() const {
- return runner_.RunsTasksOnCurrentThread();
-}
-
-void PerfettoTaskRunner::FinishImmediateTasks() {
- DCHECK(!RunsTasksOnCurrentThread());
- base::Semaphore semaphore(0);
- // PostTask has guaranteed ordering so this will be the last task executed.
- runner_.PostTask(
- base::make_unique<TracingTask>([&semaphore] { semaphore.Signal(); }));
-
- semaphore.Wait();
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tasks.h b/deps/v8/src/libplatform/tracing/perfetto-tasks.h
deleted file mode 100644
index 054a9e157a..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-tasks.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
-
-#include <functional>
-
-#include "include/v8-platform.h"
-#include "perfetto/base/task_runner.h"
-#include "src/libplatform/default-worker-threads-task-runner.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-class TracingTask : public Task {
- public:
- explicit TracingTask(std::function<void()> f) : f_(std::move(f)) {}
-
- void Run() override { f_(); }
-
- private:
- std::function<void()> f_;
-};
-
-class PerfettoTaskRunner : public ::perfetto::base::TaskRunner {
- public:
- PerfettoTaskRunner();
- ~PerfettoTaskRunner() override;
-
- // ::perfetto::base::TaskRunner implementation
- void PostTask(std::function<void()> f) override;
- void PostDelayedTask(std::function<void()> f, uint32_t delay_ms) override;
- void AddFileDescriptorWatch(int fd, std::function<void()>) override {
- UNREACHABLE();
- }
- void RemoveFileDescriptorWatch(int fd) override { UNREACHABLE(); }
- bool RunsTasksOnCurrentThread() const override;
-
- // PerfettoTaskRunner implementation
- void FinishImmediateTasks();
-
- private:
- static double DefaultTimeFunction();
-
- DefaultWorkerThreadsTaskRunner runner_;
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc b/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc
deleted file mode 100644
index 9b62c2ae78..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/libplatform/tracing/perfetto-tracing-controller.h"
-
-#include "perfetto/tracing/core/trace_config.h"
-#include "perfetto/tracing/core/trace_writer.h"
-#include "perfetto/tracing/core/tracing_service.h"
-#include "src/libplatform/tracing/perfetto-consumer.h"
-#include "src/libplatform/tracing/perfetto-producer.h"
-#include "src/libplatform/tracing/perfetto-shared-memory.h"
-#include "src/libplatform/tracing/perfetto-tasks.h"
-#include "src/libplatform/tracing/trace-event-listener.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-PerfettoTracingController::PerfettoTracingController()
- : writer_key_(base::Thread::CreateThreadLocalKey()),
- producer_ready_semaphore_(0),
- consumer_finished_semaphore_(0) {}
-
-void PerfettoTracingController::StartTracing(
- const ::perfetto::TraceConfig& trace_config) {
- DCHECK(!task_runner_);
- task_runner_ = base::make_unique<PerfettoTaskRunner>();
- // The Perfetto service expects calls on the task runner thread which is why
- // the setup below occurs in posted tasks.
- task_runner_->PostTask([&trace_config, this] {
- std::unique_ptr<::perfetto::SharedMemory::Factory> shmem_factory =
- base::make_unique<PerfettoSharedMemoryFactory>();
-
- service_ = ::perfetto::TracingService::CreateInstance(
- std::move(shmem_factory), task_runner_.get());
- // This allows Perfetto to recover trace events that were written by
- // TraceWriters which have not yet been deleted. This allows us to keep
- // TraceWriters alive past the end of tracing, rather than having to delete
- // them all when tracing stops which would require synchronization on every
- // trace event. Eventually we will delete TraceWriters when threads die, but
- // for now we just leak all TraceWriters.
- service_->SetSMBScrapingEnabled(true);
- producer_ = base::make_unique<PerfettoProducer>(this);
- consumer_ =
- base::make_unique<PerfettoConsumer>(&consumer_finished_semaphore_);
-
- for (TraceEventListener* listener : listeners_) {
- consumer_->AddTraceEventListener(listener);
- }
-
- producer_->set_service_endpoint(service_->ConnectProducer(
- producer_.get(), 0, "v8.perfetto-producer", 0, true));
-
- consumer_->set_service_endpoint(
- service_->ConnectConsumer(consumer_.get(), 0));
-
- // We need to wait for the OnConnected() callbacks of the producer and
- // consumer to be called.
- consumer_->service_endpoint()->EnableTracing(trace_config);
- });
-
- producer_ready_semaphore_.Wait();
-}
-
-void PerfettoTracingController::StopTracing() {
- // Finish all of the tasks such as existing AddTraceEvent calls. These
- // require the data structures below to work properly, so keep them alive
- // until the tasks are done.
- task_runner_->FinishImmediateTasks();
-
- task_runner_->PostTask([this] {
- // Trigger shared memory buffer scraping which will get all pending trace
- // events that have been written by still-living TraceWriters.
- consumer_->service_endpoint()->DisableTracing();
- // Trigger the consumer to finish. This can trigger multiple calls to
- // PerfettoConsumer::OnTraceData(), with the final call passing has_more
- // as false.
- consumer_->service_endpoint()->ReadBuffers();
- });
-
- // Wait until the final OnTraceData() call with has_more=false has completed.
- consumer_finished_semaphore_.Wait();
-
- task_runner_->PostTask([this] {
- consumer_.reset();
- producer_.reset();
- service_.reset();
- });
-
- // Finish the above task, and any callbacks that were triggered.
- task_runner_->FinishImmediateTasks();
- task_runner_.reset();
-}
-
-void PerfettoTracingController::AddTraceEventListener(
- TraceEventListener* listener) {
- listeners_.push_back(listener);
-}
-
-PerfettoTracingController::~PerfettoTracingController() {
- base::Thread::DeleteThreadLocalKey(writer_key_);
-}
-
-::perfetto::TraceWriter*
-PerfettoTracingController::GetOrCreateThreadLocalWriter() {
- // TODO(petermarshall): Use some form of thread-local destructor so that
- // repeatedly created threads don't cause excessive leaking of TraceWriters.
- if (base::Thread::HasThreadLocal(writer_key_)) {
- return static_cast<::perfetto::TraceWriter*>(
- base::Thread::GetExistingThreadLocal(writer_key_));
- }
-
- // We leak the TraceWriter objects created for each thread. Perfetto has a
- // way of getting events from leaked TraceWriters and we can avoid needing a
- // lock on every trace event this way.
- std::unique_ptr<::perfetto::TraceWriter> tw = producer_->CreateTraceWriter();
- ::perfetto::TraceWriter* writer = tw.release();
-
- base::Thread::SetThreadLocal(writer_key_, writer);
- return writer;
-}
-
-void PerfettoTracingController::OnProducerReady() {
- producer_ready_semaphore_.Signal();
-}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h b/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h
deleted file mode 100644
index 67a3c26cef..0000000000
--- a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
-#define V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
-
-#include <atomic>
-#include <fstream>
-#include <memory>
-#include <vector>
-
-#include "src/base/platform/platform.h"
-#include "src/base/platform/semaphore.h"
-
-namespace perfetto {
-class TraceConfig;
-class TraceWriter;
-class TracingService;
-} // namespace perfetto
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-class PerfettoConsumer;
-class PerfettoProducer;
-class PerfettoTaskRunner;
-class TraceEventListener;
-
-// This is the top-level interface for performing tracing with perfetto. The
-// user of this class should call StartTracing() to start tracing, and
-// StopTracing() to stop it. To write trace events, the user can obtain a
-// thread-local TraceWriter object using GetOrCreateThreadLocalWriter().
-class PerfettoTracingController {
- public:
- PerfettoTracingController();
-
- // Blocks and sets up all required data structures for tracing. It is safe to
- // call GetOrCreateThreadLocalWriter() to obtain thread-local TraceWriters for
- // writing trace events once this call returns. Tracing output will be sent to
- // the TraceEventListeners registered via AddTraceEventListener().
- void StartTracing(const ::perfetto::TraceConfig& trace_config);
-
- // Blocks and finishes all existing AddTraceEvent tasks. Stops the tracing
- // thread.
- void StopTracing();
-
- // Register a trace event listener that will receive trace events. This can be
- // called multiple times to register multiple listeners, but must be called
- // before starting tracing.
- void AddTraceEventListener(TraceEventListener* listener);
-
- ~PerfettoTracingController();
-
- // Each thread that wants to trace should call this to get their TraceWriter.
- // PerfettoTracingController creates and owns the TraceWriter.
- ::perfetto::TraceWriter* GetOrCreateThreadLocalWriter();
-
- private:
- // Signals the producer_ready_semaphore_.
- void OnProducerReady();
-
- // PerfettoProducer is the only class allowed to call OnProducerReady().
- friend class PerfettoProducer;
-
- std::unique_ptr<::perfetto::TracingService> service_;
- std::unique_ptr<PerfettoProducer> producer_;
- std::unique_ptr<PerfettoConsumer> consumer_;
- std::unique_ptr<PerfettoTaskRunner> task_runner_;
- std::vector<TraceEventListener*> listeners_;
- base::Thread::LocalStorageKey writer_key_;
- // A semaphore that is signalled when StartRecording is called. StartTracing
- // waits on this semaphore to be notified when the tracing service is ready to
- // receive trace events.
- base::Semaphore producer_ready_semaphore_;
- base::Semaphore consumer_finished_semaphore_;
-
- DISALLOW_COPY_AND_ASSIGN(PerfettoTracingController);
-};
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
diff --git a/deps/v8/src/libplatform/tracing/trace-event-listener.cc b/deps/v8/src/libplatform/tracing/trace-event-listener.cc
new file mode 100644
index 0000000000..8224221228
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-event-listener.cc
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/trace-event-listener.h"
+
+#include "perfetto/trace/trace.pb.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+void TraceEventListener::ParseFromArray(const std::vector<char>& array) {
+ perfetto::protos::Trace trace;
+ CHECK(trace.ParseFromArray(array.data(), static_cast<int>(array.size())));
+
+ for (int i = 0; i < trace.packet_size(); i++) {
+ // TODO(petermarshall): ChromeTracePacket instead.
+ const perfetto::protos::TracePacket& packet = trace.packet(i);
+ ProcessPacket(packet);
+ }
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/trace-event-listener.h b/deps/v8/src/libplatform/tracing/trace-event-listener.h
index 4acdb2935b..6a535c79c5 100644
--- a/deps/v8/src/libplatform/tracing/trace-event-listener.h
+++ b/deps/v8/src/libplatform/tracing/trace-event-listener.h
@@ -5,9 +5,11 @@
#ifndef V8_LIBPLATFORM_TRACING_TRACE_EVENT_LISTENER_H_
#define V8_LIBPLATFORM_TRACING_TRACE_EVENT_LISTENER_H_
+#include <vector>
+
namespace perfetto {
namespace protos {
-class ChromeTracePacket;
+class TracePacket;
} // namespace protos
} // namespace perfetto
@@ -23,8 +25,9 @@ namespace tracing {
class TraceEventListener {
public:
virtual ~TraceEventListener() = default;
- virtual void ProcessPacket(
- const ::perfetto::protos::ChromeTracePacket& packet) = 0;
+ virtual void ProcessPacket(const ::perfetto::protos::TracePacket& packet) = 0;
+
+ void ParseFromArray(const std::vector<char>& array);
};
} // namespace tracing
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 91d042ba1e..0700e34825 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -16,12 +16,25 @@
#include "base/trace_event/common/trace_event_common.h"
#include "perfetto/trace/chrome/chrome_trace_event.pbzero.h"
#include "perfetto/trace/trace_packet.pbzero.h"
-#include "perfetto/tracing/core/data_source_config.h"
-#include "perfetto/tracing/core/trace_config.h"
-#include "perfetto/tracing/core/trace_packet.h"
-#include "perfetto/tracing/core/trace_writer.h"
+#include "perfetto/tracing.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
#include "src/libplatform/tracing/json-trace-event-listener.h"
-#include "src/libplatform/tracing/perfetto-tracing-controller.h"
+#endif // V8_USE_PERFETTO
+
+#ifdef V8_USE_PERFETTO
+class V8DataSource : public perfetto::DataSource<V8DataSource> {
+ public:
+ void OnSetup(const SetupArgs&) override {}
+ void OnStart(const StartArgs&) override { started_.Signal(); }
+ void OnStop(const StopArgs&) override {}
+
+ static v8::base::Semaphore started_;
+};
+
+v8::base::Semaphore V8DataSource::started_{0};
+
+PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(V8DataSource);
#endif // V8_USE_PERFETTO
namespace v8 {
@@ -79,6 +92,7 @@ void TracingController::InitializeForPerfetto(std::ostream* output_stream) {
output_stream_ = output_stream;
DCHECK_NOT_NULL(output_stream);
DCHECK(output_stream->good());
+ mutex_.reset(new base::Mutex());
}
void TracingController::SetTraceEventListenerForTesting(
@@ -133,7 +147,9 @@ void AddArgsToTraceProto(
case TRACE_VALUE_TYPE_POINTER:
arg->set_pointer_value(arg_value.as_uint);
break;
- // TODO(petermarshall): Treat copy strings specially.
+ // There is no difference between copy strings and regular strings for
+ // Perfetto; the set_string_value(const char*) API will copy the string
+ // into the protobuf by default.
case TRACE_VALUE_TYPE_COPY_STRING:
case TRACE_VALUE_TYPE_STRING:
arg->set_string_value(arg_value.as_string);
@@ -171,44 +187,40 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
int64_t cpu_now_us = CurrentCpuTimestampMicroseconds();
#ifdef V8_USE_PERFETTO
- if (perfetto_recording_.load()) {
// Don't use COMPLETE events with perfetto - instead transform them into
// BEGIN/END pairs. This avoids the need for a thread-local stack of pending
// trace events as perfetto does not support handles into the trace buffer.
if (phase == TRACE_EVENT_PHASE_COMPLETE) phase = TRACE_EVENT_PHASE_BEGIN;
- ::perfetto::TraceWriter* writer =
- perfetto_tracing_controller_->GetOrCreateThreadLocalWriter();
- // TODO(petermarshall): We shouldn't start one packet for each event.
- // We should try to bundle them together in one bundle.
- auto packet = writer->NewTracePacket();
- auto* trace_event_bundle = packet->set_chrome_events();
- auto* trace_event = trace_event_bundle->add_trace_events();
-
- trace_event->set_name(name);
- trace_event->set_timestamp(timestamp);
- trace_event->set_phase(phase);
- trace_event->set_thread_id(base::OS::GetCurrentThreadId());
- trace_event->set_duration(0);
- trace_event->set_thread_duration(0);
- if (scope) trace_event->set_scope(scope);
- trace_event->set_id(id);
- trace_event->set_flags(flags);
- if (category_enabled_flag) {
- const char* category_group_name =
- GetCategoryGroupName(category_enabled_flag);
- DCHECK_NOT_NULL(category_group_name);
- trace_event->set_category_group_name(category_group_name);
- }
- trace_event->set_process_id(base::OS::GetCurrentProcessId());
- trace_event->set_thread_timestamp(cpu_now_us);
- trace_event->set_bind_id(bind_id);
- AddArgsToTraceProto(trace_event, num_args, arg_names, arg_types, arg_values,
- arg_convertables);
+ V8DataSource::Trace([&](V8DataSource::TraceContext ctx) {
+ auto packet = ctx.NewTracePacket();
+ auto* trace_event_bundle = packet->set_chrome_events();
+ auto* trace_event = trace_event_bundle->add_trace_events();
+
+ trace_event->set_name(name);
+ trace_event->set_timestamp(timestamp);
+ trace_event->set_phase(phase);
+ trace_event->set_thread_id(base::OS::GetCurrentThreadId());
+ trace_event->set_duration(0);
+ trace_event->set_thread_duration(0);
+ if (scope) trace_event->set_scope(scope);
+ trace_event->set_id(id);
+ trace_event->set_flags(flags);
+ if (category_enabled_flag) {
+ const char* category_group_name =
+ GetCategoryGroupName(category_enabled_flag);
+ DCHECK_NOT_NULL(category_group_name);
+ trace_event->set_category_group_name(category_group_name);
+ }
+ trace_event->set_process_id(base::OS::GetCurrentProcessId());
+ trace_event->set_thread_timestamp(cpu_now_us);
+ trace_event->set_bind_id(bind_id);
- packet->Finalize();
- }
-#endif // V8_USE_PERFETTO
+ AddArgsToTraceProto(trace_event, num_args, arg_names, arg_types,
+ arg_values, arg_convertables);
+ });
+ return 0;
+#else
uint64_t handle = 0;
if (recording_.load(std::memory_order_acquire)) {
@@ -224,6 +236,7 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
}
}
return handle;
+#endif // V8_USE_PERFETTO
}
void TracingController::UpdateTraceEventDuration(
@@ -232,15 +245,8 @@ void TracingController::UpdateTraceEventDuration(
int64_t cpu_now_us = CurrentCpuTimestampMicroseconds();
#ifdef V8_USE_PERFETTO
- // TODO(petermarshall): Should we still record the end of unfinished events
- // when tracing has stopped?
- if (perfetto_recording_.load()) {
- // TODO(petermarshall): We shouldn't start one packet for each event. We
- // should try to bundle them together in one bundle.
- ::perfetto::TraceWriter* writer =
- perfetto_tracing_controller_->GetOrCreateThreadLocalWriter();
-
- auto packet = writer->NewTracePacket();
+ V8DataSource::Trace([&](V8DataSource::TraceContext ctx) {
+ auto packet = ctx.NewTracePacket();
auto* trace_event_bundle = packet->set_chrome_events();
auto* trace_event = trace_event_bundle->add_trace_events();
@@ -249,14 +255,13 @@ void TracingController::UpdateTraceEventDuration(
trace_event->set_timestamp(now_us);
trace_event->set_process_id(base::OS::GetCurrentProcessId());
trace_event->set_thread_timestamp(cpu_now_us);
-
- packet->Finalize();
- }
-#endif // V8_USE_PERFETTO
+ });
+#else
TraceObject* trace_object = trace_buffer_->GetEventByHandle(handle);
if (!trace_object) return;
trace_object->UpdateDuration(now_us, cpu_now_us);
+#endif // V8_USE_PERFETTO
}
const char* TracingController::GetCategoryGroupName(
@@ -277,24 +282,27 @@ const char* TracingController::GetCategoryGroupName(
void TracingController::StartTracing(TraceConfig* trace_config) {
#ifdef V8_USE_PERFETTO
- perfetto_tracing_controller_ = base::make_unique<PerfettoTracingController>();
-
- if (listener_for_testing_) {
- perfetto_tracing_controller_->AddTraceEventListener(listener_for_testing_);
- }
DCHECK_NOT_NULL(output_stream_);
DCHECK(output_stream_->good());
json_listener_ = base::make_unique<JSONTraceEventListener>(output_stream_);
- perfetto_tracing_controller_->AddTraceEventListener(json_listener_.get());
- ::perfetto::TraceConfig perfetto_trace_config;
+ // TODO(petermarshall): Set other the params for the config.
+ ::perfetto::TraceConfig perfetto_trace_config;
perfetto_trace_config.add_buffers()->set_size_kb(4096);
auto* ds_config = perfetto_trace_config.add_data_sources()->mutable_config();
ds_config->set_name("v8.trace_events");
- // TODO(petermarshall): Set all the params from |perfetto_trace_config|.
- perfetto_tracing_controller_->StartTracing(perfetto_trace_config);
- perfetto_recording_.store(true);
+ perfetto::DataSourceDescriptor dsd;
+ dsd.set_name("v8.trace_events");
+ V8DataSource::Register(dsd);
+
+ tracing_session_ =
+ perfetto::Tracing::NewTrace(perfetto::BackendType::kUnspecifiedBackend);
+ tracing_session_->Setup(perfetto_trace_config);
+ // TODO(petermarshall): Switch to StartBlocking when available.
+ tracing_session_->Start();
+ V8DataSource::started_.Wait();
+
#endif // V8_USE_PERFETTO
trace_config_.reset(trace_config);
@@ -315,7 +323,6 @@ void TracingController::StopTracing() {
if (!recording_.compare_exchange_strong(expected, false)) {
return;
}
- DCHECK(trace_buffer_);
UpdateCategoryGroupEnabledFlags();
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
@@ -327,16 +334,24 @@ void TracingController::StopTracing() {
}
#ifdef V8_USE_PERFETTO
- perfetto_recording_.store(false);
- perfetto_tracing_controller_->StopTracing();
- perfetto_tracing_controller_.reset();
+ base::Semaphore stopped_{0};
+ tracing_session_->SetOnStopCallback([&stopped_]() { stopped_.Signal(); });
+ tracing_session_->Stop();
+ stopped_.Wait();
+
+ std::vector<char> trace = tracing_session_->ReadTraceBlocking();
+ json_listener_->ParseFromArray(trace);
+ if (listener_for_testing_) listener_for_testing_->ParseFromArray(trace);
+
json_listener_.reset();
-#endif // V8_USE_PERFETTO
+#else
{
base::MutexGuard lock(mutex_.get());
+ DCHECK(trace_buffer_);
trace_buffer_->Flush();
}
+#endif // V8_USE_PERFETTO
}
void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
diff --git a/deps/v8/src/libsampler/OWNERS b/deps/v8/src/libsampler/OWNERS
index 87c96616bc..7ab7c063da 100644
--- a/deps/v8/src/libsampler/OWNERS
+++ b/deps/v8/src/libsampler/OWNERS
@@ -1 +1,4 @@
alph@chromium.org
+petermarshall@chromium.org
+
+# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/logging/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 298d8d4446..3d517e29fc 100644
--- a/deps/v8/src/logging/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -13,7 +13,6 @@ namespace internal {
HR(background_marking, V8.GCBackgroundMarking, 0, 10000, 101) \
HR(background_scavenger, V8.GCBackgroundScavenger, 0, 10000, 101) \
HR(background_sweeping, V8.GCBackgroundSweeping, 0, 10000, 101) \
- HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
@@ -89,7 +88,13 @@ namespace internal {
0, 100, 32) \
/* number of code GCs triggered per native module, collected on code GC */ \
HR(wasm_module_num_triggered_code_gcs, \
- V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20)
+ V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20) \
+ /* number of code spaces reserved per wasm module */ \
+ HR(wasm_module_num_code_spaces, V8.WasmModuleNumberOfCodeSpaces, 1, 128, 20) \
+ /* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
+ HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21) \
+ /* Ticks observed in a single Turbofan compilation, in 1K */ \
+ HR(turbofan_ticks, V8.TurboFan1KTicks, 0, 100000, 200)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index bfe52f45ac..1efa7105cd 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -893,6 +893,7 @@ class RuntimeCallTimer final {
V(ArrayLengthSetter) \
V(BoundFunctionLengthGetter) \
V(BoundFunctionNameGetter) \
+ V(CodeGenerationFromStringsCallbacks) \
V(CompileAnalyse) \
V(CompileBackgroundAnalyse) \
V(CompileBackgroundCompileTask) \
@@ -941,7 +942,6 @@ class RuntimeCallTimer final {
V(Invoke) \
V(InvokeApiFunction) \
V(InvokeApiInterruptCallbacks) \
- V(InvokeFunctionCallback) \
V(JS_Execution) \
V(Map_SetPrototype) \
V(Map_TransitionToAccessorProperty) \
@@ -1140,7 +1140,7 @@ class WorkerThreadRuntimeCallStats final {
// when it is destroyed.
class WorkerThreadRuntimeCallStatsScope final {
public:
- WorkerThreadRuntimeCallStatsScope(
+ explicit WorkerThreadRuntimeCallStatsScope(
WorkerThreadRuntimeCallStats* off_thread_stats);
~WorkerThreadRuntimeCallStatsScope();
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 9f8cf82d36..ecf4de6767 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -1123,10 +1123,10 @@ void Logger::SetterCallbackEvent(Name name, Address entry_point) {
namespace {
-void AppendCodeCreateHeader(Log::MessageBuilder& msg,
- CodeEventListener::LogEventsAndTags tag,
- AbstractCode::Kind kind, uint8_t* address, int size,
- base::ElapsedTimer* timer) {
+void AppendCodeCreateHeader(
+ Log::MessageBuilder& msg, // NOLINT(runtime/references)
+ CodeEventListener::LogEventsAndTags tag, AbstractCode::Kind kind,
+ uint8_t* address, int size, base::ElapsedTimer* timer) {
msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
<< Logger::kNext << kLogEventsNames[tag] << Logger::kNext << kind
<< Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
@@ -1134,9 +1134,10 @@ void AppendCodeCreateHeader(Log::MessageBuilder& msg,
<< Logger::kNext;
}
-void AppendCodeCreateHeader(Log::MessageBuilder& msg,
- CodeEventListener::LogEventsAndTags tag,
- AbstractCode code, base::ElapsedTimer* timer) {
+void AppendCodeCreateHeader(
+ Log::MessageBuilder& msg, // NOLINT(runtime/references)
+ CodeEventListener::LogEventsAndTags tag, AbstractCode code,
+ base::ElapsedTimer* timer) {
AppendCodeCreateHeader(msg, tag, code.kind(),
reinterpret_cast<uint8_t*>(code.InstructionStart()),
code.InstructionSize(), timer);
@@ -1336,8 +1337,9 @@ void Logger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
namespace {
-void CodeLinePosEvent(JitLogger* jit_logger, Address code_start,
- SourcePositionTableIterator& iter) {
+void CodeLinePosEvent(
+ JitLogger* jit_logger, Address code_start,
+ SourcePositionTableIterator& iter) { // NOLINT(runtime/references)
if (jit_logger) {
void* jit_handler_data = jit_logger->StartCodePosInfoEvent();
for (; !iter.done(); iter.Advance()) {
@@ -1415,9 +1417,10 @@ void Logger::SuspectReadEvent(Name name, Object obj) {
}
namespace {
-void AppendFunctionMessage(Log::MessageBuilder& msg, const char* reason,
- int script_id, double time_delta, int start_position,
- int end_position, base::ElapsedTimer* timer) {
+void AppendFunctionMessage(
+ Log::MessageBuilder& msg, // NOLINT(runtime/references)
+ const char* reason, int script_id, double time_delta, int start_position,
+ int end_position, base::ElapsedTimer* timer) {
msg << "function" << Logger::kNext << reason << Logger::kNext << script_id
<< Logger::kNext << start_position << Logger::kNext << end_position
<< Logger::kNext << time_delta << Logger::kNext
@@ -1665,14 +1668,14 @@ static void AddFunctionAndCode(SharedFunctionInfo sfi, AbstractCode code_object,
static int EnumerateCompiledFunctions(Heap* heap,
Handle<SharedFunctionInfo>* sfis,
Handle<AbstractCode>* code_objects) {
- HeapIterator iterator(heap);
+ HeapObjectIterator iterator(heap);
DisallowHeapAllocation no_gc;
int compiled_funcs_count = 0;
// Iterate the heap to find shared function info objects and record
// the unoptimized code for them.
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
if (sfi.is_compiled() && (!sfi.script().IsScript() ||
@@ -1706,12 +1709,12 @@ static int EnumerateCompiledFunctions(Heap* heap,
static int EnumerateWasmModuleObjects(
Heap* heap, Handle<WasmModuleObject>* module_objects) {
- HeapIterator iterator(heap);
+ HeapObjectIterator iterator(heap);
DisallowHeapAllocation no_gc;
int module_objects_count = 0;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsWasmModuleObject()) {
WasmModuleObject module = WasmModuleObject::cast(obj);
if (module_objects != nullptr) {
@@ -1741,10 +1744,10 @@ void Logger::LogCompiledFunctions() {
void Logger::LogAccessorCallbacks() {
Heap* heap = isolate_->heap();
- HeapIterator iterator(heap);
+ HeapObjectIterator iterator(heap);
DisallowHeapAllocation no_gc;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsAccessorInfo()) continue;
AccessorInfo ai = AccessorInfo::cast(obj);
if (!ai.name().IsName()) continue;
@@ -1769,9 +1772,9 @@ void Logger::LogAccessorCallbacks() {
void Logger::LogAllMaps() {
DisallowHeapAllocation no_gc;
Heap* heap = isolate_->heap();
- HeapIterator iterator(heap);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(heap);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsMap()) continue;
Map map = Map::cast(obj);
MapCreate(map);
@@ -1999,10 +2002,10 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
void ExistingCodeLogger::LogCodeObjects() {
Heap* heap = isolate_->heap();
- HeapIterator iterator(heap);
+ HeapObjectIterator iterator(heap);
DisallowHeapAllocation no_gc;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsCode()) LogCodeObject(obj);
if (obj.IsBytecodeArray()) LogCodeObject(obj);
}
diff --git a/deps/v8/src/numbers/OWNERS b/deps/v8/src/numbers/OWNERS
index 097b008121..df62d01730 100644
--- a/deps/v8/src/numbers/OWNERS
+++ b/deps/v8/src/numbers/OWNERS
@@ -1,5 +1,7 @@
-ahaas@chromium.org
-bmeurer@chromium.org
clemensh@chromium.org
+jgruber@chromium.org
jkummerow@chromium.org
sigurds@chromium.org
+verwaest@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/numbers/conversions.cc b/deps/v8/src/numbers/conversions.cc
index cb424a1ded..faf3e33df3 100644
--- a/deps/v8/src/numbers/conversions.cc
+++ b/deps/v8/src/numbers/conversions.cc
@@ -1245,7 +1245,7 @@ char* DoubleToRadixCString(double value, int radix) {
double delta = 0.5 * (Double(value).NextDouble() - value);
delta = std::max(Double(0.0).NextDouble(), delta);
DCHECK_GT(delta, 0.0);
- if (fraction > delta) {
+ if (fraction >= delta) {
// Insert decimal point.
buffer[fraction_cursor++] = '.';
do {
@@ -1280,7 +1280,7 @@ char* DoubleToRadixCString(double value, int radix) {
break;
}
}
- } while (fraction > delta);
+ } while (fraction >= delta);
}
// Compute integer digits. Fill unrepresented digits with zero.
diff --git a/deps/v8/src/objects/OWNERS b/deps/v8/src/objects/OWNERS
new file mode 100644
index 0000000000..450423f878
--- /dev/null
+++ b/deps/v8/src/objects/OWNERS
@@ -0,0 +1,3 @@
+file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 041247637a..c327a35746 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -13,6 +13,7 @@
#include "src/objects/js-objects-inl.h"
#include "src/objects/name.h"
#include "src/objects/templates.h"
+#include "torque-generated/class-definitions-tq-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,12 +24,12 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(AccessCheckInfo, Struct)
OBJECT_CONSTRUCTORS_IMPL(AccessorInfo, Struct)
OBJECT_CONSTRUCTORS_IMPL(InterceptorInfo, Struct)
-OBJECT_CONSTRUCTORS_IMPL(CallHandlerInfo, Tuple3)
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(CallHandlerInfo)
CAST_ACCESSOR(AccessorInfo)
CAST_ACCESSOR(AccessCheckInfo)
CAST_ACCESSOR(InterceptorInfo)
-CAST_ACCESSOR(CallHandlerInfo)
ACCESSORS(AccessorInfo, name, Name, kNameOffset)
SMI_ACCESSORS(AccessorInfo, flags, kFlagsOffset)
@@ -119,9 +120,6 @@ BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
BOOL_ACCESSORS(InterceptorInfo, flags, is_named, kNamed)
BOOL_ACCESSORS(InterceptorInfo, flags, has_no_side_effect, kHasNoSideEffect)
-ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
-ACCESSORS(CallHandlerInfo, js_callback, Object, kJsCallbackOffset)
-ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
bool CallHandlerInfo::IsSideEffectFreeCallHandlerInfo() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
diff --git a/deps/v8/src/objects/api-callbacks.h b/deps/v8/src/objects/api-callbacks.h
index 1d8b456a8e..518339f7d4 100644
--- a/deps/v8/src/objects/api-callbacks.h
+++ b/deps/v8/src/objects/api-callbacks.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_API_CALLBACKS_H_
#include "src/objects/struct.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -159,14 +160,9 @@ class InterceptorInfo : public Struct {
OBJECT_CONSTRUCTORS(InterceptorInfo, Struct);
};
-class CallHandlerInfo : public Tuple3 {
+class CallHandlerInfo
+ : public TorqueGeneratedCallHandlerInfo<CallHandlerInfo, Struct> {
public:
- DECL_ACCESSORS(callback, Object)
- DECL_ACCESSORS(js_callback, Object)
- DECL_ACCESSORS(data, Object)
-
- DECL_CAST(CallHandlerInfo)
-
inline bool IsSideEffectFreeCallHandlerInfo() const;
inline bool IsSideEffectCallHandlerInfo() const;
inline void SetNextCallHasNoSideEffect();
@@ -180,11 +176,7 @@ class CallHandlerInfo : public Tuple3 {
Address redirected_callback() const;
- static const int kCallbackOffset = kValue1Offset;
- static const int kJsCallbackOffset = kValue2Offset;
- static const int kDataOffset = kValue3Offset;
-
- OBJECT_CONSTRUCTORS(CallHandlerInfo, Tuple3);
+ TQ_OBJECT_CONSTRUCTORS(CallHandlerInfo)
};
} // namespace internal
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index c2ef59a896..2931c5b0a0 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -29,12 +29,14 @@ CAST_ACCESSOR(JSArgumentsObject)
SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot,
kAliasedContextSlotOffset)
-Context SloppyArgumentsElements::context() {
- return Context::cast(get(kContextIndex));
+DEF_GETTER(SloppyArgumentsElements, context, Context) {
+ return TaggedField<Context>::load(isolate, *this,
+ OffsetOfElementAt(kContextIndex));
}
-FixedArray SloppyArgumentsElements::arguments() {
- return FixedArray::cast(get(kArgumentsIndex));
+DEF_GETTER(SloppyArgumentsElements, arguments, FixedArray) {
+ return TaggedField<FixedArray>::load(isolate, *this,
+ OffsetOfElementAt(kArgumentsIndex));
}
void SloppyArgumentsElements::set_arguments(FixedArray arguments) {
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index a1d39f1f36..79d2e604bd 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -102,8 +102,8 @@ class SloppyArgumentsElements : public FixedArray {
static const int kArgumentsIndex = 1;
static const uint32_t kParameterMapStart = 2;
- inline Context context();
- inline FixedArray arguments();
+ DECL_GETTER(context, Context)
+ DECL_GETTER(arguments, FixedArray)
inline void set_arguments(FixedArray arguments);
inline uint32_t parameter_map_length();
inline Object get_mapped_entry(uint32_t entry);
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 92b78f8821..b02c0f29d6 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -46,6 +46,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static MaybeHandle<BigInt> MakeImmutable(MaybeHandle<MutableBigInt> maybe);
static Handle<BigInt> MakeImmutable(Handle<MutableBigInt> result);
+ static void Canonicalize(MutableBigInt result);
+
// Allocation helpers.
static MaybeHandle<MutableBigInt> New(
Isolate* isolate, int length,
@@ -64,6 +66,10 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
SLOW_DCHECK(bigint->IsBigInt());
return Handle<MutableBigInt>::cast(bigint);
}
+ static MutableBigInt cast(Object o) {
+ SLOW_DCHECK(o.IsBigInt());
+ return MutableBigInt(o.ptr());
+ }
static MutableBigInt unchecked_cast(Object o) {
return MutableBigInt(o.ptr());
}
@@ -87,8 +93,13 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static MaybeHandle<BigInt> AbsoluteAdd(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y, bool result_sign);
+
+ static void AbsoluteAdd(MutableBigInt result, BigInt x, BigInt y);
+
static Handle<BigInt> AbsoluteSub(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y, bool result_sign);
+ static void AbsoluteSub(MutableBigInt result, BigInt x, BigInt y);
+
static MaybeHandle<MutableBigInt> AbsoluteAddOne(
Isolate* isolate, Handle<BigIntBase> x, bool sign,
MutableBigInt result_storage = MutableBigInt());
@@ -120,6 +131,8 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static int AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y);
+ static int AbsoluteCompare(BigIntBase x, BigIntBase y);
+
static void MultiplyAccumulate(Handle<BigIntBase> multiplicand,
digit_t multiplier,
Handle<MutableBigInt> accumulator,
@@ -223,11 +236,24 @@ NEVER_READ_ONLY_SPACE_IMPL(MutableBigInt)
#include "src/objects/object-macros-undef.h"
+template <typename T>
+MaybeHandle<T> ThrowBigIntTooBig(Isolate* isolate) {
+ // If the result of a BigInt computation is truncated to 64 bit, Turbofan
+ // can sometimes truncate intermediate results already, which can prevent
+ // those from exceeding the maximum length, effectively preventing a
+ // RangeError from being thrown. As this is a performance optimization, this
+ // behavior is accepted. To prevent the correctness fuzzer from detecting this
+ // difference, we crash the program.
+ if (FLAG_correctness_fuzzer_suppressions) {
+ FATAL("Aborting on invalid BigInt length");
+ }
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig), T);
+}
+
MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length,
AllocationType allocation) {
if (length > BigInt::kMaxLength) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- MutableBigInt);
+ return ThrowBigIntTooBig<MutableBigInt>(isolate);
}
Handle<MutableBigInt> result =
Cast(isolate->factory()->NewBigInt(length, allocation));
@@ -347,32 +373,36 @@ MaybeHandle<BigInt> MutableBigInt::MakeImmutable(
}
Handle<BigInt> MutableBigInt::MakeImmutable(Handle<MutableBigInt> result) {
+ MutableBigInt::Canonicalize(*result);
+ return Handle<BigInt>::cast(result);
+}
+
+void MutableBigInt::Canonicalize(MutableBigInt result) {
// Check if we need to right-trim any leading zero-digits.
- int old_length = result->length();
+ int old_length = result.length();
int new_length = old_length;
- while (new_length > 0 && result->digit(new_length - 1) == 0) new_length--;
+ while (new_length > 0 && result.digit(new_length - 1) == 0) new_length--;
int to_trim = old_length - new_length;
if (to_trim != 0) {
- int size_delta = to_trim * kDigitSize;
- Address new_end = result->address() + BigInt::SizeFor(new_length);
- Heap* heap = result->GetHeap();
- if (!heap->IsLargeObject(*result)) {
+ int size_delta = to_trim * MutableBigInt::kDigitSize;
+ Address new_end = result.address() + BigInt::SizeFor(new_length);
+ Heap* heap = result.GetHeap();
+ if (!heap->IsLargeObject(result)) {
// We do not create a filler for objects in large object space.
// TODO(hpayer): We should shrink the large object page if the size
// of the object changed significantly.
heap->CreateFillerObjectAt(new_end, size_delta, ClearRecordedSlots::kNo);
}
- result->synchronized_set_length(new_length);
+ result.synchronized_set_length(new_length);
// Canonicalize -0n.
if (new_length == 0) {
- result->set_sign(false);
+ result.set_sign(false);
// TODO(jkummerow): If we cache a canonical 0n, return that here.
}
}
- DCHECK_IMPLIES(result->length() > 0,
- result->digit(result->length() - 1) != 0); // MSD is non-zero.
- return Handle<BigInt>(result.location());
+ DCHECK_IMPLIES(result.length() > 0,
+ result.digit(result.length() - 1) != 0); // MSD is non-zero.
}
Handle<BigInt> BigInt::Zero(Isolate* isolate) {
@@ -428,14 +458,12 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Isolate* isolate, Handle<BigInt> base,
// results.
STATIC_ASSERT(kMaxLengthBits < std::numeric_limits<digit_t>::max());
if (exponent->length() > 1) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
digit_t exp_value = exponent->digit(0);
if (exp_value == 1) return base;
if (exp_value >= kMaxLengthBits) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
STATIC_ASSERT(kMaxLengthBits <= kMaxInt);
int n = static_cast<int>(exp_value);
@@ -1130,6 +1158,26 @@ void BigInt::BigIntShortPrint(std::ostream& os) {
// Internal helpers.
+void MutableBigInt::AbsoluteAdd(MutableBigInt result, BigInt x, BigInt y) {
+ DisallowHeapAllocation no_gc;
+ digit_t carry = 0;
+ int i = 0;
+ for (; i < y.length(); i++) {
+ digit_t new_carry = 0;
+ digit_t sum = digit_add(x.digit(i), y.digit(i), &new_carry);
+ sum = digit_add(sum, carry, &new_carry);
+ result.set_digit(i, sum);
+ carry = new_carry;
+ }
+ for (; i < x.length(); i++) {
+ digit_t new_carry = 0;
+ digit_t sum = digit_add(x.digit(i), carry, &new_carry);
+ result.set_digit(i, sum);
+ carry = new_carry;
+ }
+ result.set_digit(i, carry);
+}
+
MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Isolate* isolate,
Handle<BigInt> x,
Handle<BigInt> y,
@@ -1146,22 +1194,9 @@ MaybeHandle<BigInt> MutableBigInt::AbsoluteAdd(Isolate* isolate,
if (!New(isolate, x->length() + 1).ToHandle(&result)) {
return MaybeHandle<BigInt>();
}
- digit_t carry = 0;
- int i = 0;
- for (; i < y->length(); i++) {
- digit_t new_carry = 0;
- digit_t sum = digit_add(x->digit(i), y->digit(i), &new_carry);
- sum = digit_add(sum, carry, &new_carry);
- result->set_digit(i, sum);
- carry = new_carry;
- }
- for (; i < x->length(); i++) {
- digit_t new_carry = 0;
- digit_t sum = digit_add(x->digit(i), carry, &new_carry);
- result->set_digit(i, sum);
- carry = new_carry;
- }
- result->set_digit(i, carry);
+
+ AbsoluteAdd(*result, *x, *y);
+
result->set_sign(result_sign);
return MakeImmutable(result);
}
@@ -1178,24 +1213,31 @@ Handle<BigInt> MutableBigInt::AbsoluteSub(Isolate* isolate, Handle<BigInt> x,
return result_sign == x->sign() ? x : BigInt::UnaryMinus(isolate, x);
}
Handle<MutableBigInt> result = New(isolate, x->length()).ToHandleChecked();
+
+ AbsoluteSub(*result, *x, *y);
+
+ result->set_sign(result_sign);
+ return MakeImmutable(result);
+}
+
+void MutableBigInt::AbsoluteSub(MutableBigInt result, BigInt x, BigInt y) {
+ DisallowHeapAllocation no_gc;
digit_t borrow = 0;
int i = 0;
- for (; i < y->length(); i++) {
+ for (; i < y.length(); i++) {
digit_t new_borrow = 0;
- digit_t difference = digit_sub(x->digit(i), y->digit(i), &new_borrow);
+ digit_t difference = digit_sub(x.digit(i), y.digit(i), &new_borrow);
difference = digit_sub(difference, borrow, &new_borrow);
- result->set_digit(i, difference);
+ result.set_digit(i, difference);
borrow = new_borrow;
}
- for (; i < x->length(); i++) {
+ for (; i < x.length(); i++) {
digit_t new_borrow = 0;
- digit_t difference = digit_sub(x->digit(i), borrow, &new_borrow);
- result->set_digit(i, difference);
+ digit_t difference = digit_sub(x.digit(i), borrow, &new_borrow);
+ result.set_digit(i, difference);
borrow = new_borrow;
}
DCHECK_EQ(0, borrow);
- result->set_sign(result_sign);
- return MakeImmutable(result);
}
// Adds 1 to the absolute value of {x} and sets the result's sign to {sign}.
@@ -1375,12 +1417,17 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteXor(Isolate* isolate,
// Returns a positive value if abs(x) > abs(y), a negative value if
// abs(x) < abs(y), or zero if abs(x) == abs(y).
int MutableBigInt::AbsoluteCompare(Handle<BigIntBase> x, Handle<BigIntBase> y) {
- int diff = x->length() - y->length();
+ return MutableBigInt::AbsoluteCompare(*x, *y);
+}
+
+int MutableBigInt::AbsoluteCompare(BigIntBase x, BigIntBase y) {
+ DisallowHeapAllocation no_gc;
+ int diff = x.length() - y.length();
if (diff != 0) return diff;
- int i = x->length() - 1;
- while (i >= 0 && x->digit(i) == y->digit(i)) i--;
+ int i = x.length() - 1;
+ while (i >= 0 && x.digit(i) == y.digit(i)) i--;
if (i < 0) return 0;
- return x->digit(i) > y->digit(i) ? 1 : -1;
+ return x.digit(i) > y.digit(i) ? 1 : -1;
}
// Multiplies {multiplicand} with {multiplier} and adds the result to
@@ -1716,8 +1763,7 @@ MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
Handle<BigIntBase> y) {
Maybe<digit_t> maybe_shift = ToShiftAmount(y);
if (maybe_shift.IsNothing()) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
digit_t shift = maybe_shift.FromJust();
int digit_shift = static_cast<int>(shift / kDigitBits);
@@ -1727,8 +1773,7 @@ MaybeHandle<BigInt> MutableBigInt::LeftShiftByAbsolute(Isolate* isolate,
(x->digit(length - 1) >> (kDigitBits - bits_shift)) != 0;
int result_length = length + digit_shift + grow;
if (result_length > kMaxLength) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
Handle<MutableBigInt> result;
if (!New(isolate, result_length).ToHandle(&result)) {
@@ -1887,8 +1932,7 @@ MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
}
// All the overflow/maximum checks above fall through to here.
if (should_throw == kThrowOnError) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- FreshlyAllocatedBigInt);
+ return ThrowBigIntTooBig<FreshlyAllocatedBigInt>(isolate);
} else {
return MaybeHandle<FreshlyAllocatedBigInt>();
}
@@ -2155,10 +2199,6 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
// the raw characters pointer (as the string might have moved).
chars = result->GetChars(no_gc);
}
- if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
- return MaybeHandle<String>();
- }
}
} while (nonzero_digit > 0);
last_digit = rest->digit(0);
@@ -2250,8 +2290,7 @@ MaybeHandle<BigInt> BigInt::AsUintN(Isolate* isolate, uint64_t n,
// If {x} is negative, simulate two's complement representation.
if (x->sign()) {
if (n > kMaxLengthBits) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
return MutableBigInt::TruncateAndSubFromPowerOfTwo(
isolate, static_cast<int>(n), x, false);
@@ -2395,8 +2434,7 @@ MaybeHandle<BigInt> BigInt::FromWords64(Isolate* isolate, int sign_bit,
int words64_count,
const uint64_t* words) {
if (words64_count < 0 || words64_count > kMaxLength / (64 / kDigitBits)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
+ return ThrowBigIntTooBig<BigInt>(isolate);
}
if (words64_count == 0) return MutableBigInt::Zero(isolate);
STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
@@ -2674,5 +2712,32 @@ void BigInt::BigIntPrint(std::ostream& os) {
}
#endif // OBJECT_PRINT
+void MutableBigInt_AbsoluteAddAndCanonicalize(Address result_addr,
+ Address x_addr, Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ MutableBigInt::AbsoluteAdd(result, x, y);
+ MutableBigInt::Canonicalize(result);
+}
+
+int32_t MutableBigInt_AbsoluteCompare(Address x_addr, Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+
+ return MutableBigInt::AbsoluteCompare(x, y);
+}
+
+void MutableBigInt_AbsoluteSubAndCanonicalize(Address result_addr,
+ Address x_addr, Address y_addr) {
+ BigInt x = BigInt::cast(Object(x_addr));
+ BigInt y = BigInt::cast(Object(y_addr));
+ MutableBigInt result = MutableBigInt::cast(Object(result_addr));
+
+ MutableBigInt::AbsoluteSub(result, x, y);
+ MutableBigInt::Canonicalize(result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 3f5d35878b..a5ca514867 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -16,6 +16,12 @@
namespace v8 {
namespace internal {
+void MutableBigInt_AbsoluteAddAndCanonicalize(Address result_addr,
+ Address x_addr, Address y_addr);
+int32_t MutableBigInt_AbsoluteCompare(Address x_addr, Address y_addr);
+void MutableBigInt_AbsoluteSubAndCanonicalize(Address result_addr,
+ Address x_addr, Address y_addr);
+
class BigInt;
class ValueDeserializer;
class ValueSerializer;
@@ -66,6 +72,10 @@ class BigIntBase : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, BIGINT_FIELDS)
#undef BIGINT_FIELDS
+ static constexpr bool HasOptionalPadding() {
+ return FIELD_SIZE(kOptionalPaddingOffset) > 0;
+ }
+
private:
friend class ::v8::internal::BigInt; // MSVC wants full namespace.
friend class MutableBigInt;
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 0877746d11..e6f00b0fb2 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/code.h"
+#include "src/base/memory.h"
#include "src/codegen/code-desc.h"
-#include "src/common/v8memory.h"
#include "src/execution/isolate.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/dictionary.h"
@@ -29,7 +29,7 @@ OBJECT_CONSTRUCTORS_IMPL(BytecodeArray, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakFixedArray)
OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache)
NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
@@ -39,12 +39,6 @@ CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DeoptimizationData)
-CAST_ACCESSOR(SourcePositionTableWithFrameCache)
-
-ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
- kSourcePositionTableOffset)
-ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
- SimpleNumberDictionary, kStackFrameCacheOffset)
int AbstractCode::raw_instruction_size() {
if (IsCode()) {
@@ -331,7 +325,9 @@ int Code::SizeIncludingMetadata() const {
}
ByteArray Code::unchecked_relocation_info() const {
- return ByteArray::unchecked_cast(READ_FIELD(*this, kRelocationInfoOffset));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return ByteArray::unchecked_cast(
+ TaggedField<HeapObject, kRelocationInfoOffset>::load(isolate, *this));
}
byte* Code::relocation_start() const {
@@ -575,7 +571,7 @@ Code Code::GetCodeFromTargetAddress(Address address) {
}
Code Code::GetObjectFromEntryAddress(Address location_of_address) {
- Address code_entry = Memory<Address>(location_of_address);
+ Address code_entry = base::Memory<Address>(location_of_address);
HeapObject code = HeapObject::FromAddress(code_entry - Code::kHeaderSize);
// Unchecked cast because we can't rely on the map currently
// not being a forwarding pointer.
@@ -622,32 +618,32 @@ void BytecodeArray::set(int index, byte value) {
WriteField<byte>(kHeaderSize + index * kCharSize, value);
}
-void BytecodeArray::set_frame_size(int frame_size) {
+void BytecodeArray::set_frame_size(int32_t frame_size) {
DCHECK_GE(frame_size, 0);
DCHECK(IsAligned(frame_size, kSystemPointerSize));
- WriteField<int>(kFrameSizeOffset, frame_size);
+ WriteField<int32_t>(kFrameSizeOffset, frame_size);
}
-int BytecodeArray::frame_size() const {
- return ReadField<int>(kFrameSizeOffset);
+int32_t BytecodeArray::frame_size() const {
+ return ReadField<int32_t>(kFrameSizeOffset);
}
int BytecodeArray::register_count() const {
- return frame_size() / kSystemPointerSize;
+ return static_cast<int>(frame_size()) / kSystemPointerSize;
}
-void BytecodeArray::set_parameter_count(int number_of_parameters) {
+void BytecodeArray::set_parameter_count(int32_t number_of_parameters) {
DCHECK_GE(number_of_parameters, 0);
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- WriteField<int>(kParameterSizeOffset,
+ WriteField<int32_t>(kParameterSizeOffset,
(number_of_parameters << kSystemPointerSizeLog2));
}
interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
const {
- int register_operand =
- ReadField<int>(kIncomingNewTargetOrGeneratorRegisterOffset);
+ int32_t register_operand =
+ ReadField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset);
if (register_operand == 0) {
return interpreter::Register::invalid_value();
} else {
@@ -658,24 +654,24 @@ interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
void BytecodeArray::set_incoming_new_target_or_generator_register(
interpreter::Register incoming_new_target_or_generator_register) {
if (!incoming_new_target_or_generator_register.is_valid()) {
- WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset, 0);
+ WriteField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset, 0);
} else {
DCHECK(incoming_new_target_or_generator_register.index() <
register_count());
DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
- WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset,
+ WriteField<int32_t>(kIncomingNewTargetOrGeneratorRegisterOffset,
incoming_new_target_or_generator_register.ToOperand());
}
}
int BytecodeArray::osr_loop_nesting_level() const {
- return ReadField<int8_t>(kOSRNestingLevelOffset);
+ return ReadField<int8_t>(kOsrNestingLevelOffset);
}
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- WriteField<int8_t>(kOSRNestingLevelOffset, depth);
+ WriteField<int8_t>(kOsrNestingLevelOffset, depth);
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
@@ -691,10 +687,10 @@ void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
RELAXED_WRITE_INT8_FIELD(*this, kBytecodeAgeOffset, static_cast<int8_t>(age));
}
-int BytecodeArray::parameter_count() const {
+int32_t BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- return ReadField<int>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
+ return ReadField<int32_t>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
}
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
@@ -745,7 +741,9 @@ ByteArray BytecodeArray::SourcePositionTableIfCollected() const {
void BytecodeArray::ClearFrameCacheFromSourcePositionTable() {
Object maybe_table = source_position_table();
- if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return;
+ if (maybe_table.IsUndefined() || maybe_table.IsByteArray() ||
+ maybe_table.IsException())
+ return;
DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table)
.source_position_table());
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index 89180693a5..a51a8c5b79 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -352,7 +352,8 @@ bool Code::Inlines(SharedFunctionInfo sfi) {
Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
isolate_ = isolate;
Object list = isolate->heap()->native_contexts_list();
- next_context_ = list.IsUndefined(isolate_) ? Context() : Context::cast(list);
+ next_context_ =
+ list.IsUndefined(isolate_) ? NativeContext() : NativeContext::cast(list);
}
Code Code::OptimizedCodeIterator::Next() {
@@ -366,8 +367,8 @@ Code Code::OptimizedCodeIterator::Next() {
next = next_context_.OptimizedCodeListHead();
Object next_context = next_context_.next_context_link();
next_context_ = next_context.IsUndefined(isolate_)
- ? Context()
- : Context::cast(next_context);
+ ? NativeContext()
+ : NativeContext::cast(next_context);
} else {
// Exhausted contexts.
return Code();
@@ -830,7 +831,8 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << reinterpret_cast<const void*>(current_address) << " @ "
<< std::setw(4) << iterator.current_offset() << " : ";
interpreter::BytecodeDecoder::Decode(
- os, reinterpret_cast<byte*>(current_address), parameter_count());
+ os, reinterpret_cast<byte*>(current_address),
+ static_cast<int>(parameter_count()));
if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
Address jump_target = base_address + iterator.GetJumpTargetOffset();
os << " (" << reinterpret_cast<void*>(jump_target) << " @ "
@@ -856,7 +858,7 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Constant pool (size = " << constant_pool().length() << ")\n";
#ifdef OBJECT_PRINT
if (constant_pool().length() > 0) {
- constant_pool().Print();
+ constant_pool().Print(os);
}
#endif
@@ -1084,5 +1086,15 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
UNREACHABLE();
}
+bool BytecodeArray::IsBytecodeEqual(const BytecodeArray other) const {
+ if (length() != other.length()) return false;
+
+ for (int i = 0; i < length(); ++i) {
+ if (get(i) != other.get(i)) return false;
+ }
+
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index a950261103..2f85d4ac7b 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -476,7 +476,7 @@ class Code::OptimizedCodeIterator {
Code Next();
private:
- Context next_context_;
+ NativeContext next_context_;
Code current_code_;
Isolate* isolate_;
@@ -741,15 +741,15 @@ class BytecodeArray : public FixedArrayBase {
inline Address GetFirstBytecodeAddress();
// Accessors for frame size.
- inline int frame_size() const;
- inline void set_frame_size(int frame_size);
+ inline int32_t frame_size() const;
+ inline void set_frame_size(int32_t frame_size);
// Accessor for register count (derived from frame_size).
inline int register_count() const;
// Accessors for parameter count (including implicit 'this' receiver).
- inline int parameter_count() const;
- inline void set_parameter_count(int number_of_parameters);
+ inline int32_t parameter_count() const;
+ inline void set_parameter_count(int32_t number_of_parameters);
// Register used to pass the incoming new.target or generator object from the
// fucntion call.
@@ -828,28 +828,15 @@ class BytecodeArray : public FixedArrayBase {
// Compares only the bytecode array but not any of the header fields.
bool IsBytecodeEqual(const BytecodeArray other) const;
-// Layout description.
-#define BYTECODE_ARRAY_FIELDS(V) \
- /* Pointer fields. */ \
- V(kConstantPoolOffset, kTaggedSize) \
- V(kHandlerTableOffset, kTaggedSize) \
- V(kSourcePositionTableOffset, kTaggedSize) \
- V(kFrameSizeOffset, kIntSize) \
- V(kParameterSizeOffset, kIntSize) \
- V(kIncomingNewTargetOrGeneratorRegisterOffset, kIntSize) \
- V(kOSRNestingLevelOffset, kCharSize) \
- V(kBytecodeAgeOffset, kCharSize) \
- /* Total size. */ \
- V(kHeaderSize, 0)
-
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
- BYTECODE_ARRAY_FIELDS)
-#undef BYTECODE_ARRAY_FIELDS
+ TORQUE_GENERATED_BYTECODE_ARRAY_FIELDS)
+ static constexpr int kHeaderSize = kSize;
// InterpreterEntryTrampoline expects these fields to be next to each other
// and writes a 16-bit value to reset them.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
- kOSRNestingLevelOffset + kCharSize);
+ kOsrNestingLevelOffset + kCharSize);
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
@@ -948,22 +935,11 @@ class DeoptimizationData : public FixedArray {
OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray);
};
-class SourcePositionTableWithFrameCache : public Struct {
+class SourcePositionTableWithFrameCache
+ : public TorqueGeneratedSourcePositionTableWithFrameCache<
+ SourcePositionTableWithFrameCache, Struct> {
public:
- DECL_ACCESSORS(source_position_table, ByteArray)
- DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)
-
- DECL_CAST(SourcePositionTableWithFrameCache)
-
- DECL_PRINTER(SourcePositionTableWithFrameCache)
- DECL_VERIFIER(SourcePositionTableWithFrameCache)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- Struct::kHeaderSize,
- TORQUE_GENERATED_SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_FIELDS)
-
- OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Struct);
+ TQ_OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache)
};
} // namespace internal
diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h
index b08bc938e5..a93814fee2 100644
--- a/deps/v8/src/objects/compressed-slots-inl.h
+++ b/deps/v8/src/objects/compressed-slots-inl.h
@@ -22,6 +22,12 @@ namespace internal {
CompressedObjectSlot::CompressedObjectSlot(Object* object)
: SlotBase(reinterpret_cast<Address>(&object->ptr_)) {}
+bool CompressedObjectSlot::contains_value(Address raw_value) const {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
+ return static_cast<uint32_t>(value) ==
+ static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
+}
+
Object CompressedObjectSlot::operator*() const {
Tagged_t value = *location();
return Object(DecompressTaggedAny(address(), value));
@@ -61,54 +67,6 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
}
//
-// CompressedMapWordSlot implementation.
-//
-
-bool CompressedMapWordSlot::contains_value(Address raw_value) const {
- AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return static_cast<uint32_t>(value) ==
- static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
-}
-
-Object CompressedMapWordSlot::operator*() const {
- Tagged_t value = *location();
- return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::store(Object value) const {
- *location() = CompressTagged(value.ptr());
-}
-
-Object CompressedMapWordSlot::Relaxed_Load() const {
- AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location());
- return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::Relaxed_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value.ptr());
- AsAtomicTagged::Relaxed_Store(location(), ptr);
-}
-
-Object CompressedMapWordSlot::Acquire_Load() const {
- AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
- return Object(DecompressTaggedPointer(address(), value));
-}
-
-void CompressedMapWordSlot::Release_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value.ptr());
- AsAtomicTagged::Release_Store(location(), ptr);
-}
-
-Object CompressedMapWordSlot::Release_CompareAndSwap(Object old,
- Object target) const {
- Tagged_t old_ptr = CompressTagged(old.ptr());
- Tagged_t target_ptr = CompressTagged(target.ptr());
- Tagged_t result =
- AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
- return Object(DecompressTaggedPointer(address(), result));
-}
-
-//
// CompressedMaybeObjectSlot implementation.
//
diff --git a/deps/v8/src/objects/compressed-slots.h b/deps/v8/src/objects/compressed-slots.h
index 45df733caf..07660b1961 100644
--- a/deps/v8/src/objects/compressed-slots.h
+++ b/deps/v8/src/objects/compressed-slots.h
@@ -34,32 +34,6 @@ class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
explicit CompressedObjectSlot(SlotBase<T, TData, kSlotDataAlignment> slot)
: SlotBase(slot.address()) {}
- inline Object operator*() const;
- inline void store(Object value) const;
-
- inline Object Acquire_Load() const;
- inline Object Relaxed_Load() const;
- inline void Relaxed_Store(Object value) const;
- inline void Release_Store(Object value) const;
- inline Object Release_CompareAndSwap(Object old, Object target) const;
-};
-
-// A CompressedMapWordSlot instance describes a kTaggedSize-sized map-word field
-// ("slot") of heap objects holding a compressed tagged pointer or a Smi
-// representing forwaring pointer value.
-// This slot kind is similar to CompressedObjectSlot but decompression of
-// forwarding pointer is different.
-// Its address() is the address of the slot.
-// The slot's contents can be read and written using operator* and store().
-class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
- public:
- using TObject = Object;
-
- static constexpr bool kCanBeWeak = false;
-
- CompressedMapWordSlot() : SlotBase(kNullAddress) {}
- explicit CompressedMapWordSlot(Address ptr) : SlotBase(ptr) {}
-
// Compares memory representation of a value stored in the slot with given
// raw value without decompression.
inline bool contains_value(Address raw_value) const;
@@ -67,10 +41,9 @@ class CompressedMapWordSlot : public SlotBase<CompressedMapWordSlot, Tagged_t> {
inline Object operator*() const;
inline void store(Object value) const;
+ inline Object Acquire_Load() const;
inline Object Relaxed_Load() const;
inline void Relaxed_Store(Object value) const;
-
- inline Object Acquire_Load() const;
inline void Release_Store(Object value) const;
inline Object Release_CompareAndSwap(Object old, Object target) const;
};
diff --git a/deps/v8/src/objects/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index bb861a1d1e..0c566dd081 100644
--- a/deps/v8/src/objects/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -52,9 +52,15 @@ SMI_ACCESSORS(Context, length, kLengthOffset)
CAST_ACCESSOR(NativeContext)
Object Context::get(int index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+Object Context::get(Isolate* isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
- return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
+ return TaggedField<Object>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
void Context::set(int index, Object value) {
diff --git a/deps/v8/src/objects/contexts.cc b/deps/v8/src/objects/contexts.cc
index cddbcb98c0..861e06d87f 100644
--- a/deps/v8/src/objects/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -44,7 +44,7 @@ bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
DCHECK(context.IsScriptContext());
int slot_index = ScopeInfo::ContextSlotIndex(
context.scope_info(), name, &result->mode, &result->init_flag,
- &result->maybe_assigned_flag);
+ &result->maybe_assigned_flag, &result->requires_brand_check);
if (slot_index >= 0) {
result->context_index = i;
@@ -105,12 +105,12 @@ ScopeInfo Context::scope_info() {
return ScopeInfo::cast(get(SCOPE_INFO_INDEX));
}
-Module Context::module() {
+SourceTextModule Context::module() {
Context current = *this;
while (!current.IsModuleContext()) {
current = current.previous();
}
- return Module::cast(current.extension());
+ return SourceTextModule::cast(current.extension());
}
JSGlobalObject Context::global_object() {
@@ -287,8 +287,10 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check;
int slot_index = ScopeInfo::ContextSlotIndex(scope_info, *name, &mode,
- &flag, &maybe_assigned_flag);
+ &flag, &maybe_assigned_flag,
+ &requires_brand_check);
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
if (FLAG_trace_contexts) {
@@ -338,8 +340,8 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
*index = cell_index;
*variable_mode = mode;
*init_flag = flag;
- *attributes = ModuleDescriptor::GetCellIndexKind(cell_index) ==
- ModuleDescriptor::kExport
+ *attributes = SourceTextModuleDescriptor::GetCellIndexKind(
+ cell_index) == SourceTextModuleDescriptor::kExport
? GetAttributesForMode(mode)
: READ_ONLY;
return handle(context->module(), isolate);
@@ -394,31 +396,26 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
return Handle<Object>::null();
}
-void Context::AddOptimizedCode(Code code) {
- DCHECK(IsNativeContext());
+void NativeContext::AddOptimizedCode(Code code) {
DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(code.next_code_link().IsUndefined());
code.set_next_code_link(get(OPTIMIZED_CODE_LIST));
set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
}
-void Context::SetOptimizedCodeListHead(Object head) {
- DCHECK(IsNativeContext());
+void NativeContext::SetOptimizedCodeListHead(Object head) {
set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
-Object Context::OptimizedCodeListHead() {
- DCHECK(IsNativeContext());
+Object NativeContext::OptimizedCodeListHead() {
return get(OPTIMIZED_CODE_LIST);
}
-void Context::SetDeoptimizedCodeListHead(Object head) {
- DCHECK(IsNativeContext());
+void NativeContext::SetDeoptimizedCodeListHead(Object head) {
set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
}
-Object Context::DeoptimizedCodeListHead() {
- DCHECK(IsNativeContext());
+Object NativeContext::DeoptimizedCodeListHead() {
return get(DEOPTIMIZED_CODE_LIST);
}
@@ -474,19 +471,14 @@ bool Context::IsBootstrappingOrValidParentContext(Object object,
#endif
-void Context::ResetErrorsThrown() {
- DCHECK(IsNativeContext());
- set_errors_thrown(Smi::FromInt(0));
-}
-
-void Context::IncrementErrorsThrown() {
- DCHECK(IsNativeContext());
+void NativeContext::ResetErrorsThrown() { set_errors_thrown(Smi::FromInt(0)); }
+void NativeContext::IncrementErrorsThrown() {
int previous_value = errors_thrown().value();
set_errors_thrown(Smi::FromInt(previous_value + 1));
}
-int Context::GetErrorsThrown() { return errors_thrown().value(); }
+int NativeContext::GetErrorsThrown() { return errors_thrown().value(); }
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
STATIC_ASSERT(NativeContext::kScopeInfoOffset ==
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index d83e351550..0c00aba08e 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -38,9 +38,6 @@ enum ContextLookupFlags {
// Factory::NewContext.
#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
- async_function_promise_create) \
- V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
V(MAKE_ERROR_INDEX, JSFunction, make_error) \
V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
@@ -48,20 +45,10 @@ enum ContextLookupFlags {
V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
V(OBJECT_CREATE, JSFunction, object_create) \
- V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
- V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
- V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
- V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
- V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
- V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
- V(OBJECT_KEYS, JSFunction, object_keys) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
- V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
promise_internal_constructor) \
V(IS_PROMISE_INDEX, JSFunction, is_promise) \
@@ -193,8 +180,10 @@ enum ContextLookupFlags {
V(JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_MAP_INDEX, Map, \
js_finalization_group_cleanup_iterator_map) \
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
- V(JS_WEAK_REF_MAP_INDEX, Map, js_weak_ref_map) \
V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
+ V(JS_WEAK_REF_FUNCTION_INDEX, JSFunction, js_weak_ref_fun) \
+ V(JS_FINALIZATION_GROUP_FUNCTION_INDEX, JSFunction, \
+ js_finalization_group_fun) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_KEY_ITERATOR_MAP_INDEX, Map, map_key_iterator_map) \
V(MAP_KEY_VALUE_ITERATOR_MAP_INDEX, Map, map_key_value_iterator_map) \
@@ -238,12 +227,14 @@ enum ContextLookupFlags {
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \
+ V(REGEXP_PROTOTYPE_INDEX, JSObject, regexp_prototype) \
V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
+ V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
+ V(REGEXP_SPECIES_PROTECTOR_INDEX, PropertyCell, regexp_species_protector) \
V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_regexp_string_iterator_prototype_map) \
- V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
- V(REGEXP_PROTOTYPE_INDEX, JSObject, regexp_prototype) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
+ V(SCRIPT_EXECUTION_CALLBACK_INDEX, Object, script_execution_callback) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SERIALIZED_OBJECTS, FixedArray, serialized_objects) \
V(SET_VALUE_ITERATOR_MAP_INDEX, Map, set_value_iterator_map) \
@@ -302,7 +293,6 @@ enum ContextLookupFlags {
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(WASM_EXPORTED_FUNCTION_MAP_INDEX, Map, wasm_exported_function_map) \
V(WASM_EXCEPTION_CONSTRUCTOR_INDEX, JSFunction, wasm_exception_constructor) \
- V(WASM_FUNCTION_CONSTRUCTOR_INDEX, JSFunction, wasm_function_constructor) \
V(WASM_GLOBAL_CONSTRUCTOR_INDEX, JSFunction, wasm_global_constructor) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
@@ -366,6 +356,7 @@ class ScriptContextTable : public FixedArray {
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
+ RequiresBrandCheckFlag requires_brand_check;
};
inline int used() const;
@@ -453,6 +444,7 @@ class Context : public HeapObject {
// Setter and getter for elements.
V8_INLINE Object get(int index) const;
+ V8_INLINE Object get(Isolate* isolate, int index) const;
V8_INLINE void set(int index, Object value);
// Setter with explicit barrier mode.
V8_INLINE void set(int index, Object value, WriteBarrierMode mode);
@@ -531,10 +523,6 @@ class Context : public HeapObject {
static const int kNoContext = 0;
static const int kInvalidContext = 1;
- void ResetErrorsThrown();
- void IncrementErrorsThrown();
- int GetErrorsThrown();
-
// Direct slot access.
inline void set_scope_info(ScopeInfo scope_info);
@@ -553,7 +541,7 @@ class Context : public HeapObject {
// Find the module context (assuming there is one) and return the associated
// module object.
- Module module();
+ SourceTextModule module();
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
@@ -591,14 +579,6 @@ class Context : public HeapObject {
inline bool HasSameSecurityTokenAs(Context that) const;
- // The native context also stores a list of all optimized code and a
- // list of all deoptimized code, which are needed by the deoptimizer.
- V8_EXPORT_PRIVATE void AddOptimizedCode(Code code);
- void SetOptimizedCodeListHead(Object head);
- Object OptimizedCodeListHead();
- void SetDeoptimizedCodeListHead(Object head);
- Object DeoptimizedCodeListHead();
-
Handle<Object> ErrorMessageForCodeGenerationFromStrings();
static int IntrinsicIndexForName(Handle<String> name);
@@ -703,6 +683,18 @@ class NativeContext : public Context {
class BodyDescriptor;
+ // The native context stores a list of all optimized code and a list of all
+ // deoptimized code, which are needed by the deoptimizer.
+ V8_EXPORT_PRIVATE void AddOptimizedCode(Code code);
+ void SetOptimizedCodeListHead(Object head);
+ Object OptimizedCodeListHead();
+ void SetDeoptimizedCodeListHead(Object head);
+ Object DeoptimizedCodeListHead();
+
+ void ResetErrorsThrown();
+ void IncrementErrorsThrown();
+ int GetErrorsThrown();
+
private:
STATIC_ASSERT(OffsetOfElementAt(EMBEDDER_DATA_INDEX) ==
Internals::kNativeContextEmbedderDataOffset);
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index 1cd64c1bf1..e2805d795a 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -25,13 +25,9 @@ namespace v8 {
namespace internal {
OBJECT_CONSTRUCTORS_IMPL(DescriptorArray, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(EnumCache, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(EnumCache)
CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(EnumCache)
-
-ACCESSORS(EnumCache, keys, FixedArray, kKeysOffset)
-ACCESSORS(EnumCache, indices, FixedArray, kIndicesOffset)
ACCESSORS(DescriptorArray, enum_cache, EnumCache, kEnumCacheOffset)
RELAXED_INT16_ACCESSORS(DescriptorArray, number_of_all_descriptors,
@@ -106,17 +102,22 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
return RawField(OffsetOfDescriptorAt(descriptor));
}
-ObjectSlot DescriptorArray::GetKeySlot(int descriptor) {
- DCHECK_LE(descriptor, number_of_all_descriptors());
- ObjectSlot slot = GetDescriptorSlot(descriptor) + kEntryKeyIndex;
- DCHECK((*slot).IsObject());
- return slot;
+Name DescriptorArray::GetKey(int descriptor_number) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetKey(isolate, descriptor_number);
}
-Name DescriptorArray::GetKey(int descriptor_number) const {
- DCHECK(descriptor_number < number_of_descriptors());
- return Name::cast(
- get(ToKeyIndex(descriptor_number))->GetHeapObjectAssumeStrong());
+Name DescriptorArray::GetKey(Isolate* isolate, int descriptor_number) const {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ return Name::cast(EntryKeyField::Relaxed_Load(isolate, *this, entry_offset));
+}
+
+void DescriptorArray::SetKey(int descriptor_number, Name key) {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ EntryKeyField::Relaxed_Store(*this, entry_offset, key);
+ WRITE_BARRIER(*this, entry_offset + kEntryKeyOffset, key);
}
int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
@@ -124,38 +125,59 @@ int DescriptorArray::GetSortedKeyIndex(int descriptor_number) {
}
Name DescriptorArray::GetSortedKey(int descriptor_number) {
- return GetKey(GetSortedKeyIndex(descriptor_number));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetSortedKey(isolate, descriptor_number);
}
-void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
- PropertyDetails details = GetDetails(descriptor_index);
- set(ToDetailsIndex(descriptor_index),
- MaybeObject::FromObject(details.set_pointer(pointer).AsSmi()));
+Name DescriptorArray::GetSortedKey(Isolate* isolate, int descriptor_number) {
+ return GetKey(isolate, GetSortedKeyIndex(descriptor_number));
}
-MaybeObjectSlot DescriptorArray::GetValueSlot(int descriptor) {
- DCHECK_LT(descriptor, number_of_descriptors());
- return MaybeObjectSlot(GetDescriptorSlot(descriptor) + kEntryValueIndex);
+void DescriptorArray::SetSortedKey(int descriptor_number, int pointer) {
+ PropertyDetails details = GetDetails(descriptor_number);
+ SetDetails(descriptor_number, details.set_pointer(pointer));
}
Object DescriptorArray::GetStrongValue(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- return get(ToValueIndex(descriptor_number))->cast<Object>();
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetStrongValue(isolate, descriptor_number);
+}
+
+Object DescriptorArray::GetStrongValue(Isolate* isolate,
+ int descriptor_number) {
+ return GetValue(isolate, descriptor_number).cast<Object>();
}
-void DescriptorArray::SetValue(int descriptor_index, Object value) {
- set(ToValueIndex(descriptor_index), MaybeObject::FromObject(value));
+void DescriptorArray::SetValue(int descriptor_number, MaybeObject value) {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ EntryValueField::Relaxed_Store(*this, entry_offset, value);
+ WEAK_WRITE_BARRIER(*this, entry_offset + kEntryValueOffset, value);
}
MaybeObject DescriptorArray::GetValue(int descriptor_number) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetValue(isolate, descriptor_number);
+}
+
+MaybeObject DescriptorArray::GetValue(Isolate* isolate, int descriptor_number) {
DCHECK_LT(descriptor_number, number_of_descriptors());
- return get(ToValueIndex(descriptor_number));
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ return EntryValueField::Relaxed_Load(isolate, *this, entry_offset);
}
PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
- DCHECK(descriptor_number < number_of_descriptors());
- MaybeObject details = get(ToDetailsIndex(descriptor_number));
- return PropertyDetails(details->ToSmi());
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ Smi details = EntryDetailsField::Relaxed_Load(*this, entry_offset);
+ return PropertyDetails(details);
+}
+
+void DescriptorArray::SetDetails(int descriptor_number,
+ PropertyDetails details) {
+ DCHECK_LT(descriptor_number, number_of_descriptors());
+ int entry_offset = OffsetOfDescriptorAt(descriptor_number);
+ EntryDetailsField::Relaxed_Store(*this, entry_offset, details.AsSmi());
}
int DescriptorArray::GetFieldIndex(int descriptor_number) {
@@ -164,19 +186,22 @@ int DescriptorArray::GetFieldIndex(int descriptor_number) {
}
FieldType DescriptorArray::GetFieldType(int descriptor_number) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return GetFieldType(isolate, descriptor_number);
+}
+
+FieldType DescriptorArray::GetFieldType(Isolate* isolate,
+ int descriptor_number) {
DCHECK_EQ(GetDetails(descriptor_number).location(), kField);
- MaybeObject wrapped_type = GetValue(descriptor_number);
+ MaybeObject wrapped_type = GetValue(isolate, descriptor_number);
return Map::UnwrapFieldType(wrapped_type);
}
void DescriptorArray::Set(int descriptor_number, Name key, MaybeObject value,
PropertyDetails details) {
- // Range check.
- DCHECK(descriptor_number < number_of_descriptors());
- set(ToKeyIndex(descriptor_number), MaybeObject::FromObject(key));
- set(ToValueIndex(descriptor_number), value);
- set(ToDetailsIndex(descriptor_number),
- MaybeObject::FromObject(details.AsSmi()));
+ SetKey(descriptor_number, key);
+ SetDetails(descriptor_number, details);
+ SetValue(descriptor_number, value);
}
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
@@ -211,21 +236,6 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
SetSortedKey(second, first_key);
}
-int DescriptorArray::length() const {
- return number_of_all_descriptors() * kEntrySize;
-}
-
-MaybeObject DescriptorArray::get(int index) const {
- DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_WEAK_FIELD(*this, offset(index));
-}
-
-void DescriptorArray::set(int index, MaybeObject value) {
- DCHECK(index >= 0 && index < this->length());
- RELAXED_WRITE_WEAK_FIELD(*this, offset(index), value);
- WEAK_WRITE_BARRIER(*this, offset(index), value);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 3c1fa98a37..0f17cd22ea 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -22,21 +22,11 @@ class Handle;
class Isolate;
// An EnumCache is a pair used to hold keys and indices caches.
-class EnumCache : public Struct {
+class EnumCache : public TorqueGeneratedEnumCache<EnumCache, Struct> {
public:
- DECL_ACCESSORS(keys, FixedArray)
- DECL_ACCESSORS(indices, FixedArray)
-
- DECL_CAST(EnumCache)
-
- DECL_PRINTER(EnumCache)
DECL_VERIFIER(EnumCache)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- TORQUE_GENERATED_ENUM_CACHE_FIELDS)
-
- OBJECT_CONSTRUCTORS(EnumCache, Struct);
+ TQ_OBJECT_CONSTRUCTORS(EnumCache)
};
// A DescriptorArray is a custom array that holds instance descriptors.
@@ -73,14 +63,18 @@ class DescriptorArray : public HeapObject {
// Accessors for fetching instance descriptor at descriptor number.
inline Name GetKey(int descriptor_number) const;
+ inline Name GetKey(Isolate* isolate, int descriptor_number) const;
inline Object GetStrongValue(int descriptor_number);
- inline void SetValue(int descriptor_number, Object value);
+ inline Object GetStrongValue(Isolate* isolate, int descriptor_number);
inline MaybeObject GetValue(int descriptor_number);
+ inline MaybeObject GetValue(Isolate* isolate, int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
inline FieldType GetFieldType(int descriptor_number);
+ inline FieldType GetFieldType(Isolate* isolate, int descriptor_number);
inline Name GetSortedKey(int descriptor_number);
+ inline Name GetSortedKey(Isolate* isolate, int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
@@ -153,15 +147,13 @@ class DescriptorArray : public HeapObject {
int16_t number_of_marked_descriptors);
static constexpr int SizeFor(int number_of_all_descriptors) {
- return offset(number_of_all_descriptors * kEntrySize);
+ return OffsetOfDescriptorAt(number_of_all_descriptors);
}
static constexpr int OffsetOfDescriptorAt(int descriptor) {
- return offset(descriptor * kEntrySize);
+ return kHeaderSize + descriptor * kEntrySize * kTaggedSize;
}
inline ObjectSlot GetFirstPointerSlot();
inline ObjectSlot GetDescriptorSlot(int descriptor);
- inline ObjectSlot GetKeySlot(int descriptor);
- inline MaybeObjectSlot GetValueSlot(int descriptor);
static_assert(kEndOfStrongFieldsOffset == kStartOfWeakFieldsOffset,
"Weak fields follow strong fields.");
@@ -178,6 +170,10 @@ class DescriptorArray : public HeapObject {
static const int kEntryValueIndex = 2;
static const int kEntrySize = 3;
+ static const int kEntryKeyOffset = kEntryKeyIndex * kTaggedSize;
+ static const int kEntryDetailsOffset = kEntryDetailsIndex * kTaggedSize;
+ static const int kEntryValueOffset = kEntryValueIndex * kTaggedSize;
+
// Print all the descriptors.
void PrintDescriptors(std::ostream& os);
void PrintDescriptorDetails(std::ostream& os, int descriptor,
@@ -207,15 +203,16 @@ class DescriptorArray : public HeapObject {
return (descriptor_number * kEntrySize) + kEntryValueIndex;
}
+ using EntryKeyField = TaggedField<HeapObject, kEntryKeyOffset>;
+ using EntryDetailsField = TaggedField<Smi, kEntryDetailsOffset>;
+ using EntryValueField = TaggedField<MaybeObject, kEntryValueOffset>;
+
private:
DECL_INT16_ACCESSORS(filler16bits)
- // Low-level per-element accessors.
- static constexpr int offset(int index) {
- return kHeaderSize + index * kTaggedSize;
- }
- inline int length() const;
- inline MaybeObject get(int index) const;
- inline void set(int index, MaybeObject value);
+
+ inline void SetKey(int descriptor_number, Name key);
+ inline void SetValue(int descriptor_number, MaybeObject value);
+ inline void SetDetails(int descriptor_number, PropertyDetails details);
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index a1692978f3..92c1d0940f 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -98,15 +98,27 @@ RootIndex GlobalDictionaryShape::GetMapRootIndex() {
return RootIndex::kGlobalDictionaryMap;
}
-Name NameDictionary::NameAt(int entry) { return Name::cast(KeyAt(entry)); }
+Name NameDictionary::NameAt(int entry) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return NameAt(isolate, entry);
+}
+
+Name NameDictionary::NameAt(Isolate* isolate, int entry) {
+ return Name::cast(KeyAt(isolate, entry));
+}
RootIndex NameDictionaryShape::GetMapRootIndex() {
return RootIndex::kNameDictionaryMap;
}
PropertyCell GlobalDictionary::CellAt(int entry) {
- DCHECK(KeyAt(entry).IsPropertyCell());
- return PropertyCell::cast(KeyAt(entry));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return CellAt(isolate, entry);
+}
+
+PropertyCell GlobalDictionary::CellAt(Isolate* isolate, int entry) {
+ DCHECK(KeyAt(isolate, entry).IsPropertyCell(isolate));
+ return PropertyCell::cast(KeyAt(isolate, entry));
}
bool GlobalDictionaryShape::IsLive(ReadOnlyRoots roots, Object k) {
@@ -118,8 +130,23 @@ bool GlobalDictionaryShape::IsKey(ReadOnlyRoots roots, Object k) {
return IsLive(roots, k) && !PropertyCell::cast(k).value().IsTheHole(roots);
}
-Name GlobalDictionary::NameAt(int entry) { return CellAt(entry).name(); }
-Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry).value(); }
+Name GlobalDictionary::NameAt(int entry) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return NameAt(isolate, entry);
+}
+
+Name GlobalDictionary::NameAt(Isolate* isolate, int entry) {
+ return CellAt(isolate, entry).name(isolate);
+}
+
+Object GlobalDictionary::ValueAt(int entry) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return ValueAt(isolate, entry);
+}
+
+Object GlobalDictionary::ValueAt(Isolate* isolate, int entry) {
+ return CellAt(isolate, entry).value(isolate);
+}
void GlobalDictionary::SetEntry(Isolate* isolate, int entry, Object key,
Object value, PropertyDetails details) {
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index ca709f34d8..fe6001f58c 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -32,7 +32,11 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
using Key = typename Shape::Key;
// Returns the value at entry.
Object ValueAt(int entry) {
- return this->get(DerivedHashTable::EntryToIndex(entry) + 1);
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return ValueAt(isolate, entry);
+ }
+ Object ValueAt(Isolate* isolate, int entry) {
+ return this->get(isolate, DerivedHashTable::EntryToIndex(entry) + 1);
}
// Set the value for entry.
@@ -208,6 +212,8 @@ class V8_EXPORT_PRIVATE NameDictionary
static const int kInitialCapacity = 2;
inline Name NameAt(int entry);
+ inline Name NameAt(Isolate* isolate, int entry);
+
inline void set_hash(int hash);
inline int hash() const;
@@ -246,10 +252,13 @@ class V8_EXPORT_PRIVATE GlobalDictionary
DECL_CAST(GlobalDictionary)
inline Object ValueAt(int entry);
+ inline Object ValueAt(Isolate* isolate, int entry);
inline PropertyCell CellAt(int entry);
+ inline PropertyCell CellAt(Isolate* isolate, int entry);
inline void SetEntry(Isolate* isolate, int entry, Object key, Object value,
PropertyDetails details);
inline Name NameAt(int entry);
+ inline Name NameAt(Isolate* isolate, int entry);
inline void ValueAtPut(int entry, Object value);
OBJECT_CONSTRUCTORS(
diff --git a/deps/v8/src/objects/elements.cc b/deps/v8/src/objects/elements.cc
index e1232a0d5b..4bdfba052d 100644
--- a/deps/v8/src/objects/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -4,10 +4,10 @@
#include "src/objects/elements.h"
+#include "src/common/message-template.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For MaxNumberToStringCacheSize.
#include "src/heap/heap-write-barrier-inl.h"
@@ -141,6 +141,12 @@ WriteBarrierMode GetWriteBarrierMode(ElementsKind kind) {
return UPDATE_WRITE_BARRIER;
}
+// If kCopyToEndAndInitializeToHole is specified as the copy_size to
+// CopyElements, it copies all of elements from source after source_start to
+// destination array, padding any remaining uninitialized elements in the
+// destination array with the hole.
+constexpr int kCopyToEndAndInitializeToHole = -1;
+
void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
ElementsKind from_kind, uint32_t from_start,
FixedArrayBase to_base, ElementsKind to_kind,
@@ -150,17 +156,14 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
Min(from_base.length() - from_start, to_base.length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- int start = to_start + copy_size;
- int length = to_base.length() - start;
- if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
- roots.the_hole_value(), length);
- }
+ int start = to_start + copy_size;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ roots.the_hole_value(), length);
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
@@ -179,24 +182,21 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
write_barrier_mode);
}
-static void CopyDictionaryToObjectElements(
- Isolate* isolate, FixedArrayBase from_base, uint32_t from_start,
- FixedArrayBase to_base, ElementsKind to_kind, uint32_t to_start,
- int raw_copy_size) {
+void CopyDictionaryToObjectElements(Isolate* isolate, FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ ElementsKind to_kind, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
NumberDictionary from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size = from.max_number_key() + 1 - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- int start = to_start + copy_size;
- int length = to_base.length() - start;
- if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
- ReadOnlyRoots(isolate).the_hole_value(), length);
- }
+ int start = to_start + copy_size;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
DCHECK(to_base != from_base);
@@ -223,28 +223,23 @@ static void CopyDictionaryToObjectElements(
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
// See ElementsAccessorBase::CopyElements() for details.
-static void CopyDoubleToObjectElements(Isolate* isolate,
- FixedArrayBase from_base,
- uint32_t from_start,
- FixedArrayBase to_base,
- uint32_t to_start, int raw_copy_size) {
+void CopyDoubleToObjectElements(Isolate* isolate, FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
DisallowHeapAllocation no_allocation;
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
Min(from_base.length() - from_start, to_base.length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- // Also initialize the area that will be copied over since HeapNumber
- // allocation below can cause an incremental marking step, requiring all
- // existing heap objects to be propertly initialized.
- int start = to_start;
- int length = to_base.length() - start;
- if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
- ReadOnlyRoots(isolate).the_hole_value(), length);
- }
+ // Also initialize the area that will be copied over since HeapNumber
+ // allocation below can cause an incremental marking step, requiring all
+ // existing heap objects to be propertly initialized.
+ int start = to_start;
+ int length = to_base.length() - start;
+ if (length > 0) {
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
+ ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
@@ -272,21 +267,17 @@ static void CopyDoubleToObjectElements(Isolate* isolate,
}
}
-static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
- uint32_t from_start,
- FixedArrayBase to_base,
- uint32_t to_start, int raw_copy_size) {
+void CopyDoubleToDoubleElements(FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size =
Min(from_base.length() - from_start, to_base.length() - to_start);
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base.length(); ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
@@ -312,19 +303,16 @@ static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
#endif
}
-static void CopySmiToDoubleElements(FixedArrayBase from_base,
- uint32_t from_start, FixedArrayBase to_base,
- uint32_t to_start, int raw_copy_size) {
+void CopySmiToDoubleElements(FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size = from_base.length() - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base.length(); ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
@@ -344,25 +332,19 @@ static void CopySmiToDoubleElements(FixedArrayBase from_base,
}
}
-static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
- uint32_t from_start,
- FixedArrayBase to_base,
- uint32_t to_start, int packed_size,
- int raw_copy_size) {
+void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ uint32_t to_start, int packed_size,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
uint32_t to_end;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size = packed_size - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- to_end = to_base.length();
- for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
- } else {
- to_end = to_start + static_cast<uint32_t>(copy_size);
+ to_end = to_base.length();
+ for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
@@ -382,20 +364,16 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
}
}
-static void CopyObjectToDoubleElements(FixedArrayBase from_base,
- uint32_t from_start,
- FixedArrayBase to_base,
- uint32_t to_start, int raw_copy_size) {
+void CopyObjectToDoubleElements(FixedArrayBase from_base, uint32_t from_start,
+ FixedArrayBase to_base, uint32_t to_start,
+ int raw_copy_size) {
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
- DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
- raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, raw_copy_size);
copy_size = from_base.length() - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base.length(); ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
@@ -415,20 +393,17 @@ static void CopyObjectToDoubleElements(FixedArrayBase from_base,
}
}
-static void CopyDictionaryToDoubleElements(
- Isolate* isolate, FixedArrayBase from_base, uint32_t from_start,
- FixedArrayBase to_base, uint32_t to_start, int raw_copy_size) {
+void CopyDictionaryToDoubleElements(Isolate* isolate, FixedArrayBase from_base,
+ uint32_t from_start, FixedArrayBase to_base,
+ uint32_t to_start, int raw_copy_size) {
DisallowHeapAllocation no_allocation;
NumberDictionary from = NumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (copy_size < 0) {
- DCHECK(copy_size == ElementsAccessor::kCopyToEnd ||
- copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ DCHECK_EQ(kCopyToEndAndInitializeToHole, copy_size);
copy_size = from.max_number_key() + 1 - from_start;
- if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base.length(); ++i) {
- FixedDoubleArray::cast(to_base).set_the_hole(i);
- }
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
if (copy_size == 0) return;
@@ -447,17 +422,16 @@ static void CopyDictionaryToDoubleElements(
}
}
-static void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
- uint32_t sort_size) {
+void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
+ uint32_t sort_size) {
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
AtomicSlot start(indices->GetFirstElementAddress());
AtomicSlot end(start + sort_size);
std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
#ifdef V8_COMPRESS_POINTERS
- DEFINE_ROOT_VALUE(isolate);
- Object a(DecompressTaggedAny(ROOT_VALUE, elementA));
- Object b(DecompressTaggedAny(ROOT_VALUE, elementB));
+ Object a(DecompressTaggedAny(isolate, elementA));
+ Object b(DecompressTaggedAny(isolate, elementB));
#else
Object a(elementA);
Object b(elementB);
@@ -474,10 +448,9 @@ static void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
ObjectSlot(end));
}
-static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
- Handle<JSObject> receiver,
- Handle<Object> value,
- uint32_t start_from, uint32_t length) {
+Maybe<bool> IncludesValueSlowPath(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length) {
bool search_for_hole = value->IsUndefined(isolate);
for (uint32_t k = start_from; k < length; ++k) {
LookupIterator it(isolate, receiver, k);
@@ -495,11 +468,9 @@ static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
return Just(false);
}
-static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
- Handle<JSObject> receiver,
- Handle<Object> value,
- uint32_t start_from,
- uint32_t length) {
+Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length) {
for (uint32_t k = start_from; k < length; ++k) {
LookupIterator it(isolate, receiver, k);
if (!it.IsFound()) {
@@ -595,23 +566,6 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return true;
}
- static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
- if (!IsHoleyElementsKind(kind())) return;
- Handle<FixedArrayBase> backing_store(array->elements(),
- array->GetIsolate());
- int length = Smi::ToInt(array->length());
- if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) return;
-
- ElementsKind packed_kind = GetPackedElementsKind(kind());
- Handle<Map> new_map =
- JSObject::GetElementsTransitionMap(array, packed_kind);
- JSObject::MigrateToMap(array, new_map);
- if (FLAG_trace_elements_transitions) {
- JSObject::PrintElementsTransition(stdout, array, kind(), backing_store,
- packed_kind, backing_store);
- }
- }
-
bool HasElement(JSObject holder, uint32_t index, FixedArrayBase backing_store,
PropertyFilter filter) final {
return Subclass::HasElementImpl(holder.GetIsolate(), holder, index,
@@ -804,22 +758,14 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static Handle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity) {
- return ConvertElementsWithCapacity(
- object, old_elements, from_kind, capacity, 0, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
- }
-
- static Handle<FixedArrayBase> ConvertElementsWithCapacity(
- Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
- ElementsKind from_kind, uint32_t capacity, int copy_size) {
return ConvertElementsWithCapacity(object, old_elements, from_kind,
- capacity, 0, 0, copy_size);
+ capacity, 0, 0);
}
static Handle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity, uint32_t src_index,
- uint32_t dst_index, int copy_size) {
+ uint32_t dst_index) {
Isolate* isolate = object->GetIsolate();
Handle<FixedArrayBase> new_elements;
if (IsDoubleElementsKind(kind())) {
@@ -834,14 +780,16 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
Subclass::CopyElementsImpl(isolate, *old_elements, src_index, *new_elements,
- from_kind, dst_index, packed_size, copy_size);
+ from_kind, dst_index, packed_size,
+ kCopyToEndAndInitializeToHole);
return new_elements;
}
static void TransitionElementsKindImpl(Handle<JSObject> object,
Handle<Map> to_map) {
- Handle<Map> from_map = handle(object->map(), object->GetIsolate());
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> from_map = handle(object->map(), isolate);
ElementsKind from_kind = from_map->elements_kind();
ElementsKind to_kind = to_map->elements_kind();
if (IsHoleyElementsKind(from_kind)) {
@@ -853,14 +801,12 @@ class ElementsAccessorBase : public InternalElementsAccessor {
DCHECK(IsFastElementsKind(to_kind));
DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
- Handle<FixedArrayBase> from_elements(object->elements(),
- object->GetIsolate());
- if (object->elements() ==
- object->GetReadOnlyRoots().empty_fixed_array() ||
+ Handle<FixedArrayBase> from_elements(object->elements(), isolate);
+ if (object->elements() == ReadOnlyRoots(isolate).empty_fixed_array() ||
IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
- JSObject::MigrateToMap(object, to_map);
+ JSObject::MigrateToMap(isolate, object, to_map);
} else {
DCHECK(
(IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
@@ -871,9 +817,9 @@ class ElementsAccessorBase : public InternalElementsAccessor {
JSObject::SetMapAndElements(object, to_map, elements);
}
if (FLAG_trace_elements_transitions) {
- JSObject::PrintElementsTransition(
- stdout, object, from_kind, from_elements, to_kind,
- handle(object->elements(), object->GetIsolate()));
+ JSObject::PrintElementsTransition(stdout, object, from_kind,
+ from_elements, to_kind,
+ handle(object->elements(), isolate));
}
}
}
@@ -2394,7 +2340,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// Copy over all objects to a new backing_store.
backing_store = Subclass::ConvertElementsWithCapacity(
receiver, backing_store, KindTraits::Kind, capacity, 0,
- copy_dst_index, ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_dst_index);
receiver->set_elements(*backing_store);
} else if (add_position == AT_START) {
// If the backing store has enough capacity and we add elements to the
@@ -2639,7 +2585,7 @@ class FastSealedObjectElementsAccessor
"SlowCopyForSetLengthImpl");
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- JSObject::MigrateToMap(array, new_map);
+ JSObject::MigrateToMap(isolate, array, new_map);
if (!new_element_dictionary.is_null()) {
array->set_elements(*new_element_dictionary);
@@ -2955,7 +2901,7 @@ class TypedElementsAccessor
// fields (external pointers, doubles and BigInt data) are only
// kTaggedSize aligned so we have to use unaligned pointer friendly way of
// accessing them in order to avoid undefined behavior in C++ code.
- WriteUnalignedValue<ElementType>(
+ base::WriteUnalignedValue<ElementType>(
reinterpret_cast<Address>(data_ptr + entry), value);
} else {
data_ptr[entry] = value;
@@ -2995,7 +2941,7 @@ class TypedElementsAccessor
// fields (external pointers, doubles and BigInt data) are only
// kTaggedSize aligned so we have to use unaligned pointer friendly way of
// accessing them in order to avoid undefined behavior in C++ code.
- result = ReadUnalignedValue<ElementType>(
+ result = base::ReadUnalignedValue<ElementType>(
reinterpret_cast<Address>(data_ptr + entry));
} else {
result = data_ptr[entry];
@@ -3664,10 +3610,7 @@ Handle<Object> TypedElementsAccessor<UINT32_ELEMENTS, uint32_t>::ToHandle(
// static
template <>
float TypedElementsAccessor<FLOAT32_ELEMENTS, float>::FromScalar(double value) {
- using limits = std::numeric_limits<float>;
- if (value > limits::max()) return limits::infinity();
- if (value < limits::lowest()) return -limits::infinity();
- return static_cast<float>(value);
+ return DoubleToFloat32(value);
}
// static
@@ -4377,7 +4320,7 @@ class FastSloppyArgumentsElementsAccessor
ConvertElementsWithCapacity(object, old_arguments, from_kind, capacity);
Handle<Map> new_map = JSObject::GetElementsTransitionMap(
object, FAST_SLOPPY_ARGUMENTS_ELEMENTS);
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
elements->set_arguments(FixedArray::cast(*arguments));
JSObject::ValidateElements(*object);
}
@@ -4549,8 +4492,8 @@ class StringWrapperElementsAccessor
private:
static String GetString(JSObject holder) {
- DCHECK(holder.IsJSValue());
- JSValue js_value = JSValue::cast(holder);
+ DCHECK(holder.IsJSPrimitiveWrapper());
+ JSPrimitiveWrapper js_value = JSPrimitiveWrapper::cast(holder);
DCHECK(js_value.value().IsString());
return String::cast(js_value.value());
}
diff --git a/deps/v8/src/objects/elements.h b/deps/v8/src/objects/elements.h
index 844cd2ed94..a72a6b068e 100644
--- a/deps/v8/src/objects/elements.h
+++ b/deps/v8/src/objects/elements.h
@@ -66,15 +66,6 @@ class ElementsAccessor {
// element that is non-deletable.
virtual void SetLength(Handle<JSArray> holder, uint32_t new_length) = 0;
- // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
- // of elements from source after source_start to the destination array.
- static const int kCopyToEnd = -1;
- // If kCopyToEndAndInitializeToHole is specified as the copy_size to
- // CopyElements, it copies all of elements from source after source_start to
- // destination array, padding any remaining uninitialized elements in the
- // destination array with the hole.
- static const int kCopyToEndAndInitializeToHole = -2;
-
// Copy all indices that have elements from |object| into the given
// KeyAccumulator. For Dictionary-based element-kinds we filter out elements
// whose PropertyAttribute match |filter|.
@@ -210,7 +201,7 @@ class ElementsAccessor {
uint32_t destination_start, int copy_size) = 0;
private:
- static ElementsAccessor** elements_accessors_;
+ V8_EXPORT_PRIVATE static ElementsAccessor** elements_accessors_;
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index 6830a4d22e..78189ba381 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/embedder-data-slot.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/embedder-data-array.h"
#include "src/objects/js-objects-inl.h"
@@ -77,7 +77,7 @@ bool EmbedderDataSlot::ToAlignedPointer(void** out_pointer) const {
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
// aligned so we have to use unaligned pointer friendly way of accessing them
// in order to avoid undefined behavior in C++ code.
- Address raw_value = ReadUnalignedValue<Address>(address());
+ Address raw_value = base::ReadUnalignedValue<Address>(address());
#else
Address raw_value = *location();
#endif
@@ -103,7 +103,7 @@ EmbedderDataSlot::RawData EmbedderDataSlot::load_raw(
// fields (external pointers, doubles and BigInt data) are only kTaggedSize
// aligned so we have to use unaligned pointer friendly way of accessing them
// in order to avoid undefined behavior in C++ code.
- return ReadUnalignedValue<Address>(address());
+ return base::ReadUnalignedValue<Address>(address());
#else
return *location();
#endif
diff --git a/deps/v8/src/objects/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 6b1fdcc1e5..9cdc03b5c2 100644
--- a/deps/v8/src/objects/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -155,13 +155,22 @@ FeedbackSlot FeedbackVector::ToSlot(int index) {
}
MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
- return get(GetIndex(slot));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return Get(isolate, slot);
+}
+
+MaybeObject FeedbackVector::Get(Isolate* isolate, FeedbackSlot slot) const {
+ return get(isolate, GetIndex(slot));
}
MaybeObject FeedbackVector::get(int index) const {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kFeedbackSlotsOffset + index * kTaggedSize;
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+MaybeObject FeedbackVector::get(Isolate* isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ int offset = OffsetOfElementAt(index);
return RELAXED_READ_WEAK_FIELD(*this, offset);
}
@@ -180,7 +189,7 @@ void FeedbackVector::Set(FeedbackSlot slot, MaybeObject value,
void FeedbackVector::set(int index, MaybeObject value, WriteBarrierMode mode) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
- int offset = kFeedbackSlotsOffset + index * kTaggedSize;
+ int offset = OffsetOfElementAt(index);
RELAXED_WRITE_WEAK_FIELD(*this, offset, value);
CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode);
}
diff --git a/deps/v8/src/objects/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index 0393a55f69..4f4826eab3 100644
--- a/deps/v8/src/objects/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -374,6 +374,7 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
}
bool FeedbackVector::ClearSlots(Isolate* isolate) {
+ if (!shared_function_info().HasFeedbackMetadata()) return false;
MaybeObject uninitialized_sentinel = MaybeObject::FromObject(
FeedbackVector::RawUninitializedSentinel(isolate));
@@ -943,6 +944,7 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
+ DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
MaybeObject feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
@@ -986,19 +988,22 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
return 0;
}
-MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
+int FeedbackNexus::ExtractMapsAndHandlers(MapHandles* maps,
+ MaybeObjectHandles* handlers) const {
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
- IsKeyedHasICKind(kind()));
+ IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
- MaybeObject feedback = GetFeedback();
+ DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
+ MaybeObject feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object.IsWeakFixedArray()) ||
is_named_feedback) {
+ int found = 0;
WeakFixedArray array;
if (is_named_feedback) {
array =
@@ -1011,36 +1016,39 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
for (int i = 0; i < array.length(); i += increment) {
DCHECK(array.Get(i)->IsWeakOrCleared());
if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) {
- Map array_map = Map::cast(heap_object);
- if (array_map == *map && !array.Get(i + increment - 1)->IsCleared()) {
- MaybeObject handler = array.Get(i + increment - 1);
+ MaybeObject handler = array.Get(i + 1);
+ if (!handler->IsCleared()) {
DCHECK(IC::IsHandler(handler));
- return handle(handler, isolate);
+ Map map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ handlers->push_back(handle(handler, isolate));
+ found++;
}
}
}
+ return found;
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- Map cell_map = Map::cast(heap_object);
- if (cell_map == *map && !GetFeedbackExtra()->IsCleared()) {
- MaybeObject handler = GetFeedbackExtra();
+ MaybeObject handler = GetFeedbackExtra();
+ if (!handler->IsCleared()) {
DCHECK(IC::IsHandler(handler));
- return handle(handler, isolate);
+ Map map = Map::cast(heap_object);
+ maps->push_back(handle(map, isolate));
+ handlers->push_back(handle(handler, isolate));
+ return 1;
}
}
- return MaybeObjectHandle();
+ return 0;
}
-bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
- int length) const {
+MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
- IsStoreInArrayLiteralICKind(kind()) || IsKeyedHasICKind(kind()));
+ IsKeyedHasICKind(kind()));
MaybeObject feedback = GetFeedback();
Isolate* isolate = GetIsolate();
- int count = 0;
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
@@ -1056,25 +1064,26 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
const int increment = 2;
HeapObject heap_object;
for (int i = 0; i < array.length(); i += increment) {
- // Be sure to skip handlers whose maps have been cleared.
DCHECK(array.Get(i)->IsWeakOrCleared());
- if (array.Get(i)->GetHeapObjectIfWeak(&heap_object) &&
- !array.Get(i + increment - 1)->IsCleared()) {
- MaybeObject handler = array.Get(i + increment - 1);
- DCHECK(IC::IsHandler(handler));
- code_list->push_back(handle(handler, isolate));
- count++;
+ if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) {
+ Map array_map = Map::cast(heap_object);
+ if (array_map == *map && !array.Get(i + increment - 1)->IsCleared()) {
+ MaybeObject handler = array.Get(i + increment - 1);
+ DCHECK(IC::IsHandler(handler));
+ return handle(handler, isolate);
+ }
}
}
} else if (feedback->GetHeapObjectIfWeak(&heap_object)) {
- MaybeObject extra = GetFeedbackExtra();
- if (!extra->IsCleared()) {
- DCHECK(IC::IsHandler(extra));
- code_list->push_back(handle(extra, isolate));
- count++;
+ Map cell_map = Map::cast(heap_object);
+ if (cell_map == *map && !GetFeedbackExtra()->IsCleared()) {
+ MaybeObject handler = GetFeedbackExtra();
+ DCHECK(IC::IsHandler(handler));
+ return handle(handler, isolate);
}
}
- return count == length;
+
+ return MaybeObjectHandle();
}
Name FeedbackNexus::GetName() const {
@@ -1095,8 +1104,7 @@ KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
if (GetKeyType() == PROPERTY) return STANDARD_LOAD;
- ExtractMaps(&maps);
- FindHandlers(&handlers, static_cast<int>(maps.size()));
+ ExtractMapsAndHandlers(&maps, &handlers);
for (MaybeObjectHandle const& handler : handlers) {
KeyedAccessLoadMode mode = LoadHandler::GetKeyedAccessLoadMode(*handler);
if (mode != STANDARD_LOAD) return mode;
@@ -1179,8 +1187,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
if (GetKeyType() == PROPERTY) return mode;
- ExtractMaps(&maps);
- FindHandlers(&handlers, static_cast<int>(maps.size()));
+ ExtractMapsAndHandlers(&maps, &handlers);
for (const MaybeObjectHandle& maybe_code_handler : handlers) {
// The first handler that isn't the slow handler will have the bits we need.
Handle<Code> handler;
diff --git a/deps/v8/src/objects/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 89e0b9e6aa..af03bb4130 100644
--- a/deps/v8/src/objects/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -233,7 +233,9 @@ class FeedbackVector : public HeapObject {
// Conversion from an integer index to the underlying array to a slot.
static inline FeedbackSlot ToSlot(int index);
inline MaybeObject Get(FeedbackSlot slot) const;
+ inline MaybeObject Get(Isolate* isolate, FeedbackSlot slot) const;
inline MaybeObject get(int index) const;
+ inline MaybeObject get(Isolate* isolate, int index) const;
inline void Set(FeedbackSlot slot, MaybeObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void set(int index, MaybeObject value,
@@ -322,11 +324,13 @@ class FeedbackVector : public HeapObject {
class BodyDescriptor;
- // Garbage collection support.
- static constexpr int SizeFor(int length) {
- return kFeedbackSlotsOffset + length * kTaggedSize;
+ static constexpr int OffsetOfElementAt(int index) {
+ return kFeedbackSlotsOffset + index * kTaggedSize;
}
+ // Garbage collection support.
+ static constexpr int SizeFor(int length) { return OffsetOfElementAt(length); }
+
private:
static void AddToVectorsForProfilingTools(Isolate* isolate,
Handle<FeedbackVector> vector);
@@ -561,6 +565,7 @@ class FeedbackMetadata : public HeapObject {
// Verify that an empty hash field looks like a tagged object, but can't
// possibly be confused with a pointer.
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
STATIC_ASSERT(Name::kEmptyHashField == 0x3);
// Verify that a set hash field will not look like a tagged object.
@@ -646,8 +651,9 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
Map GetFirstMap() const;
int ExtractMaps(MapHandles* maps) const;
+ int ExtractMapsAndHandlers(MapHandles* maps,
+ MaybeObjectHandles* handlers) const;
MaybeObjectHandle FindHandlerForMap(Handle<Map> map) const;
- bool FindHandlers(MaybeObjectHandles* code_list, int length = -1) const;
bool IsCleared() const {
InlineCacheState state = ic_state();
diff --git a/deps/v8/src/objects/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index be60fb54a2..997cd68c32 100644
--- a/deps/v8/src/objects/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -19,7 +19,7 @@ FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding) {
return FieldIndex(true, offset, encoding, 0, 0);
}
-FieldIndex FieldIndex::ForPropertyIndex(const Map map, int property_index,
+FieldIndex FieldIndex::ForPropertyIndex(Map map, int property_index,
Representation representation) {
DCHECK(map.instance_type() >= FIRST_NONSTRING_TYPE);
int inobject_properties = map.GetInObjectProperties();
@@ -60,9 +60,15 @@ int FieldIndex::GetLoadByFieldIndex() const {
return is_double() ? (result | 1) : result;
}
-FieldIndex FieldIndex::ForDescriptor(const Map map, int descriptor_index) {
+FieldIndex FieldIndex::ForDescriptor(Map map, int descriptor_index) {
+ Isolate* isolate = GetIsolateForPtrCompr(map);
+ return ForDescriptor(isolate, map, descriptor_index);
+}
+
+FieldIndex FieldIndex::ForDescriptor(Isolate* isolate, Map map,
+ int descriptor_index) {
PropertyDetails details =
- map.instance_descriptors().GetDetails(descriptor_index);
+ map.instance_descriptors(isolate).GetDetails(descriptor_index);
int field_index = details.field_index();
return ForPropertyIndex(map, field_index, details.representation());
}
diff --git a/deps/v8/src/objects/field-index.h b/deps/v8/src/objects/field-index.h
index f352ef6800..a6657634c8 100644
--- a/deps/v8/src/objects/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -24,10 +24,12 @@ class FieldIndex final {
FieldIndex() : bit_field_(0) {}
static inline FieldIndex ForPropertyIndex(
- const Map map, int index,
+ Map map, int index,
Representation representation = Representation::Tagged());
static inline FieldIndex ForInObjectOffset(int offset, Encoding encoding);
- static inline FieldIndex ForDescriptor(const Map map, int descriptor_index);
+ static inline FieldIndex ForDescriptor(Map map, int descriptor_index);
+ static inline FieldIndex ForDescriptor(Isolate* isolate, Map map,
+ int descriptor_index);
inline int GetLoadByFieldIndex() const;
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index 6d2b42edbf..79c29a6eeb 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -90,51 +90,57 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
}
Object FixedArray::get(int index) const {
- DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_FIELD(*this, kHeaderSize + index * kTaggedSize);
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+Object FixedArray::get(Isolate* isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ return TaggedField<Object>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
Handle<Object> FixedArray::get(FixedArray array, int index, Isolate* isolate) {
- return handle(array.get(index), isolate);
+ return handle(array.get(isolate, index), isolate);
}
bool FixedArray::is_the_hole(Isolate* isolate, int index) {
- return get(index).IsTheHole(isolate);
+ return get(isolate, index).IsTheHole(isolate);
}
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(index, this->length());
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
DCHECK(Object(value).IsSmi());
- int offset = kHeaderSize + index * kTaggedSize;
+ int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
DCHECK(IsFixedArray());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kTaggedSize;
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
WRITE_BARRIER(*this, offset, value);
}
void FixedArray::set(int index, Object value, WriteBarrierMode mode) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, this->length());
- int offset = kHeaderSize + index * kTaggedSize;
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
+// static
void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
DCHECK_NE(array.map(), array.GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, array.length());
+ DCHECK_LT(static_cast<unsigned>(index),
+ static_cast<unsigned>(array.length()));
DCHECK(!ObjectInYoungGeneration(value));
- RELAXED_WRITE_FIELD(array, kHeaderSize + index * kTaggedSize, value);
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(array, offset, value);
}
void FixedArray::set_undefined(int index) {
@@ -323,7 +329,7 @@ uint64_t FixedDoubleArray::get_representation(int index) {
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kDoubleSize;
// Bug(v8:8875): Doubles may be unaligned.
- return ReadUnalignedValue<uint64_t>(field_address(offset));
+ return base::ReadUnalignedValue<uint64_t>(field_address(offset));
}
Handle<Object> FixedDoubleArray::get(FixedDoubleArray array, int index,
@@ -355,7 +361,7 @@ void FixedDoubleArray::set_the_hole(int index) {
DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
- WriteUnalignedValue<uint64_t>(field_address(offset), kHoleNanInt64);
+ base::WriteUnalignedValue<uint64_t>(field_address(offset), kHoleNanInt64);
}
bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
@@ -382,8 +388,14 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
MaybeObject WeakFixedArray::Get(int index) const {
- DCHECK(index >= 0 && index < this->length());
- return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return Get(isolate, index);
+}
+
+MaybeObject WeakFixedArray::Get(Isolate* isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
+ return TaggedField<MaybeObject>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
void WeakFixedArray::Set(int index, MaybeObject value) {
@@ -424,8 +436,14 @@ void WeakFixedArray::CopyElements(Isolate* isolate, int dst_index,
}
MaybeObject WeakArrayList::Get(int index) const {
- DCHECK(index >= 0 && index < this->capacity());
- return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index));
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return Get(isolate, index);
+}
+
+MaybeObject WeakArrayList::Get(Isolate* isolate, int index) const {
+ DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(capacity()));
+ return TaggedField<MaybeObject>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
@@ -478,6 +496,10 @@ Object ArrayList::Get(int index) const {
return FixedArray::cast(*this).get(kFirstIndex + index);
}
+Object ArrayList::Get(Isolate* isolate, int index) const {
+ return FixedArray::cast(*this).get(isolate, kFirstIndex + index);
+}
+
ObjectSlot ArrayList::Slot(int index) {
return RawField(OffsetOfElementAt(kFirstIndex + index));
}
@@ -538,6 +560,16 @@ void ByteArray::set_uint32(int index, uint32_t value) {
WriteField<uint32_t>(kHeaderSize + index * kUInt32Size, value);
}
+uint32_t ByteArray::get_uint32_relaxed(int index) const {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ return RELAXED_READ_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size);
+}
+
+void ByteArray::set_uint32_relaxed(int index, uint32_t value) {
+ DCHECK(index >= 0 && index < this->length() / kUInt32Size);
+ RELAXED_WRITE_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size, value);
+}
+
void ByteArray::clear_padding() {
int data_size = length() + kHeaderSize;
memset(reinterpret_cast<void*>(address() + data_size), 0, Size() - data_size);
@@ -589,6 +621,10 @@ Object TemplateList::get(int index) const {
return FixedArray::cast(*this).get(kFirstElementIndex + index);
}
+Object TemplateList::get(Isolate* isolate, int index) const {
+ return FixedArray::cast(*this).get(isolate, kFirstElementIndex + index);
+}
+
void TemplateList::set(int index, Object value) {
FixedArray::cast(*this).set(kFirstElementIndex + index, value);
}
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 02f26502b2..ca6f06e83c 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -72,16 +72,15 @@ enum FixedArraySubInstanceType {
class FixedArrayBase : public HeapObject {
public:
// [length]: length of the array.
- inline int length() const;
- inline void set_length(int value);
+ DECL_INT_ACCESSORS(length)
// Get and set the length using acquire loads and release stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length)
inline Object unchecked_synchronized_length() const;
DECL_CAST(FixedArrayBase)
+ DECL_VERIFIER(FixedArrayBase)
static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
@@ -113,6 +112,8 @@ class FixedArray : public FixedArrayBase {
public:
// Setter and getter for elements.
inline Object get(int index) const;
+ inline Object get(Isolate* isolate, int index) const;
+
static inline Handle<Object> get(FixedArray array, int index,
Isolate* isolate);
@@ -267,6 +268,7 @@ class WeakFixedArray : public HeapObject {
DECL_CAST(WeakFixedArray)
inline MaybeObject Get(int index) const;
+ inline MaybeObject Get(Isolate* isolate, int index) const;
// Setter that uses write barrier.
inline void Set(int index, MaybeObject value);
@@ -281,8 +283,7 @@ class WeakFixedArray : public HeapObject {
DECL_INT_ACCESSORS(length)
// Get and set the length using acquire loads and release stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length)
// Gives access to raw memory which stores the array's data.
inline MaybeObjectSlot data_start();
@@ -336,6 +337,7 @@ class WeakArrayList : public HeapObject {
const MaybeObjectHandle& value);
inline MaybeObject Get(int index) const;
+ inline MaybeObject Get(Isolate* isolate, int index) const;
// Set the element at index to obj. The underlying array must be large enough.
// If you need to grow the WeakArrayList, use the static AddToEnd() method
@@ -359,19 +361,12 @@ class WeakArrayList : public HeapObject {
DECL_INT_ACCESSORS(length)
// Get and set the capacity using acquire loads and release stores.
- inline int synchronized_capacity() const;
- inline void synchronized_set_capacity(int value);
-
+ DECL_SYNCHRONIZED_INT_ACCESSORS(capacity)
// Layout description.
-#define WEAK_ARRAY_LIST_FIELDS(V) \
- V(kCapacityOffset, kTaggedSize) \
- V(kLengthOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WEAK_ARRAY_LIST_FIELDS)
-#undef WEAK_ARRAY_LIST_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WEAK_ARRAY_LIST_FIELDS)
+ static constexpr int kHeaderSize = kSize;
using BodyDescriptor = WeakArrayBodyDescriptor;
@@ -442,6 +437,7 @@ class ArrayList : public FixedArray {
// storage capacity, i.e., length().
inline void SetLength(int length);
inline Object Get(int index) const;
+ inline Object Get(Isolate* isolate, int index) const;
inline ObjectSlot Slot(int index);
// Set the element at index to obj. The underlying array must be large enough.
@@ -492,6 +488,9 @@ class ByteArray : public FixedArrayBase {
inline uint32_t get_uint32(int index) const;
inline void set_uint32(int index, uint32_t value);
+ inline uint32_t get_uint32_relaxed(int index) const;
+ inline void set_uint32_relaxed(int index, uint32_t value);
+
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
inline void clear_padding();
@@ -552,9 +551,9 @@ class PodArray : public ByteArray {
static Handle<PodArray<T>> New(
Isolate* isolate, int length,
AllocationType allocation = AllocationType::kYoung);
- void copy_out(int index, T* result) {
+ void copy_out(int index, T* result, int length) {
ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
- sizeof(T));
+ length * sizeof(T));
}
void copy_in(int index, const T* buffer, int length) {
@@ -562,9 +561,14 @@ class PodArray : public ByteArray {
length * sizeof(T));
}
+ bool matches(const T* buffer, int length) {
+ DCHECK_LE(length, this->length());
+ return memcmp(GetDataStartAddress(), buffer, length * sizeof(T)) == 0;
+ }
+
T get(int index) {
T result;
- copy_out(index, &result);
+ copy_out(index, &result, 1);
return result;
}
@@ -581,6 +585,7 @@ class TemplateList : public FixedArray {
static Handle<TemplateList> New(Isolate* isolate, int size);
inline int length() const;
inline Object get(int index) const;
+ inline Object get(Isolate* isolate, int index) const;
inline void set(int index, Object value);
static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
Handle<Object> value);
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index bea8257515..848b8202ae 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -26,29 +26,14 @@ RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
int FreeSpace::Size() { return size(); }
FreeSpace FreeSpace::next() {
-#ifdef DEBUG
- Heap* heap = GetHeapFromWritableObject(*this);
- Object free_space_map =
- Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
- !heap->deserialization_complete() &&
- map_slot().contains_value(kNullAddress));
-#endif
- DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
- return FreeSpace::unchecked_cast(*ObjectSlot(address() + kNextOffset));
+ DCHECK(IsValid());
+ return FreeSpace::unchecked_cast(
+ TaggedField<Object, kNextOffset>::load(*this));
}
void FreeSpace::set_next(FreeSpace next) {
-#ifdef DEBUG
- Heap* heap = GetHeapFromWritableObject(*this);
- Object free_space_map =
- Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
- !heap->deserialization_complete() &&
- map_slot().contains_value(kNullAddress));
-#endif
- DCHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
- ObjectSlot(address() + kNextOffset).Relaxed_Store(next);
+ DCHECK(IsValid());
+ RELAXED_WRITE_FIELD(*this, kNextOffset, next);
}
FreeSpace FreeSpace::cast(HeapObject o) {
@@ -61,6 +46,17 @@ FreeSpace FreeSpace::unchecked_cast(const Object o) {
return bit_cast<FreeSpace>(o);
}
+bool FreeSpace::IsValid() {
+ Heap* heap = GetHeapFromWritableObject(*this);
+ Object free_space_map =
+ Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
+ CHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
+ !heap->deserialization_complete() &&
+ map_slot().contains_value(kNullAddress));
+ CHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index 38f5794646..5714727036 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -44,6 +44,9 @@ class FreeSpace : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_FREE_SPACE_FIELDS)
+ private:
+ inline bool IsValid();
+
OBJECT_CONSTRUCTORS(FreeSpace, HeapObject);
};
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index 77453721ae..b807851d85 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -71,14 +71,19 @@ void EphemeronHashTable::set_key(int index, Object value,
}
int HashTableBase::NumberOfElements() const {
- return Smi::ToInt(get(kNumberOfElementsIndex));
+ int offset = OffsetOfElementAt(kNumberOfElementsIndex);
+ return TaggedField<Smi>::load(*this, offset).value();
}
int HashTableBase::NumberOfDeletedElements() const {
- return Smi::ToInt(get(kNumberOfDeletedElementsIndex));
+ int offset = OffsetOfElementAt(kNumberOfDeletedElementsIndex);
+ return TaggedField<Smi>::load(*this, offset).value();
}
-int HashTableBase::Capacity() const { return Smi::ToInt(get(kCapacityIndex)); }
+int HashTableBase::Capacity() const {
+ int offset = OffsetOfElementAt(kCapacityIndex);
+ return TaggedField<Smi>::load(*this, offset).value();
+}
void HashTableBase::ElementAdded() {
SetNumberOfElements(NumberOfElements() + 1);
@@ -165,6 +170,15 @@ bool HashTable<Derived, Shape>::ToKey(ReadOnlyRoots roots, int entry,
}
template <typename Derived, typename Shape>
+bool HashTable<Derived, Shape>::ToKey(Isolate* isolate, int entry,
+ Object* out_k) {
+ Object k = KeyAt(isolate, entry);
+ if (!IsKey(GetReadOnlyRoots(isolate), k)) return false;
+ *out_k = Shape::Unwrap(k);
+ return true;
+}
+
+template <typename Derived, typename Shape>
void HashTable<Derived, Shape>::set_key(int index, Object value) {
DCHECK(!IsEphemeronHashTable());
FixedArray::set(index, value);
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 610dc9d28e..54d8ce0d2a 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -160,9 +160,16 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
static bool IsKey(ReadOnlyRoots roots, Object k);
inline bool ToKey(ReadOnlyRoots roots, int entry, Object* out_k);
+ inline bool ToKey(Isolate* isolate, int entry, Object* out_k);
// Returns the key at entry.
- Object KeyAt(int entry) { return get(EntryToIndex(entry) + kEntryKeyIndex); }
+ Object KeyAt(int entry) {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return KeyAt(isolate, entry);
+ }
+ Object KeyAt(Isolate* isolate, int entry) {
+ return get(isolate, EntryToIndex(entry) + kEntryKeyIndex);
+ }
static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
static const int kEntrySize = Shape::kEntrySize;
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index 3986e9146c..3d70d71c89 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -31,11 +31,11 @@ void HeapNumberBase::set_value(double value) {
uint64_t HeapNumberBase::value_as_bits() const {
// Bug(v8:8875): HeapNumber's double may be unaligned.
- return ReadUnalignedValue<uint64_t>(field_address(kValueOffset));
+ return base::ReadUnalignedValue<uint64_t>(field_address(kValueOffset));
}
void HeapNumberBase::set_value_as_bits(uint64_t bits) {
- WriteUnalignedValue<uint64_t>(field_address(kValueOffset), bits);
+ base::WriteUnalignedValue<uint64_t>(field_address(kValueOffset), bits);
}
int HeapNumberBase::get_exponent() {
diff --git a/deps/v8/src/objects/heap-object-inl.h b/deps/v8/src/objects/heap-object-inl.h
index 3d5deeff63..88c0011bdf 100644
--- a/deps/v8/src/objects/heap-object-inl.h
+++ b/deps/v8/src/objects/heap-object-inl.h
@@ -7,10 +7,6 @@
#include "src/objects/heap-object.h"
-#include "src/heap/heap-write-barrier-inl.h"
-// TODO(jkummerow): Get rid of this by moving NROSO::GetIsolate elsewhere.
-#include "src/execution/isolate.h"
-
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -24,16 +20,6 @@ HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi)
IsHeapObject());
}
-// static
-Heap* NeverReadOnlySpaceObject::GetHeap(const HeapObject object) {
- return GetHeapFromWritableObject(object);
-}
-
-// static
-Isolate* NeverReadOnlySpaceObject::GetIsolate(const HeapObject object) {
- return Isolate::FromHeap(GetHeap(object));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index 9ca51bdda1..ad5475c9e8 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -9,6 +9,7 @@
#include "src/roots/roots.h"
#include "src/objects/objects.h"
+#include "src/objects/tagged-field.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,27 +23,30 @@ class Heap;
// objects.
class HeapObject : public Object {
public:
- bool is_null() const { return ptr() == kNullAddress; }
+ bool is_null() const {
+ return static_cast<Tagged_t>(ptr()) == static_cast<Tagged_t>(kNullAddress);
+ }
// [map]: Contains a map which contains the object's reflective
// information.
- inline Map map() const;
+ DECL_GETTER(map, Map)
inline void set_map(Map value);
- inline MapWordSlot map_slot() const;
+ inline ObjectSlot map_slot() const;
// The no-write-barrier version. This is OK if the object is white and in
// new space, or if the value is an immortal immutable object, like the maps
// of primitive (non-JS) objects like strings, heap numbers etc.
inline void set_map_no_write_barrier(Map value);
- // Get the map using acquire load.
- inline Map synchronized_map() const;
- inline MapWord synchronized_map_word() const;
-
- // Set the map using release store
+ // Access the map using acquire load and release store.
+ DECL_GETTER(synchronized_map, Map)
inline void synchronized_set_map(Map value);
- inline void synchronized_set_map_word(MapWord map_word);
+
+ // Compare-and-swaps map word using release store, returns true if the map
+ // word was actually swapped.
+ inline bool synchronized_compare_and_swap_map_word(MapWord old_map_word,
+ MapWord new_map_word);
// Initialize the map immediately after the object is allocated.
// Do not use this outside Heap.
@@ -51,18 +55,29 @@ class HeapObject : public Object {
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
- inline MapWord map_word() const;
+ DECL_GETTER(map_word, MapWord)
inline void set_map_word(MapWord map_word);
+ // Access the map word using acquire load and release store.
+ DECL_GETTER(synchronized_map_word, MapWord)
+ inline void synchronized_set_map_word(MapWord map_word);
+
// TODO(v8:7464): Once RO_SPACE is shared between isolates, this method can be
// removed as ReadOnlyRoots will be accessible from a global variable. For now
// this method exists to help remove GetIsolate/GetHeap from HeapObject, in a
// way that doesn't require passing Isolate/Heap down huge call chains or to
// places where it might not be safe to access it.
inline ReadOnlyRoots GetReadOnlyRoots() const;
+ // This version is intended to be used for the isolate values produced by
+ // i::GetIsolateForPtrCompr(HeapObject) function which may return nullptr.
+ inline ReadOnlyRoots GetReadOnlyRoots(Isolate* isolate) const;
-#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
+#define IS_TYPE_FUNCTION_DECL(Type) \
+ V8_INLINE bool Is##Type() const; \
+ V8_INLINE bool Is##Type(Isolate* isolate) const;
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+ IS_TYPE_FUNCTION_DECL(HashTableBase)
+ IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
#undef IS_TYPE_FUNCTION_DECL
bool IsExternal(Isolate* isolate) const;
@@ -74,13 +89,12 @@ class HeapObject : public Object {
V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
+ IS_TYPE_FUNCTION_DECL(NullOrUndefined, /* unused */)
#undef IS_TYPE_FUNCTION_DECL
- V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
- V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
- V8_INLINE bool IsNullOrUndefined() const;
-
-#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
+ V8_INLINE bool Is##Name() const; \
+ V8_INLINE bool Is##Name(Isolate* isolate) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -189,6 +203,8 @@ class HeapObject : public Object {
STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
+ using MapField = TaggedField<MapWord, HeapObject::kMapOffset>;
+
inline Address GetFieldAddress(int field_offset) const;
protected:
@@ -203,16 +219,6 @@ class HeapObject : public Object {
OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
CAST_ACCESSOR(HeapObject)
-// Helper class for objects that can never be in RO space.
-class NeverReadOnlySpaceObject {
- public:
- // The Heap the object was allocated in. Used also to access Isolate.
- static inline Heap* GetHeap(const HeapObject object);
-
- // Convenience method to get current isolate.
- static inline Isolate* GetIsolate(const HeapObject object);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index 559ed34784..79c953aa87 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -11,6 +11,8 @@
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
+#include "torque-generated/instance-types-tq.h"
+
namespace v8 {
namespace internal {
@@ -32,11 +34,16 @@ enum StringRepresentationTag {
};
const uint32_t kIsIndirectStringMask = 1 << 0;
const uint32_t kIsIndirectStringTag = 1 << 0;
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kSlicedStringTag & kIsIndirectStringMask) ==
kIsIndirectStringTag);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
// For strings, bit 3 indicates whether the string consists of two-byte
@@ -141,6 +148,7 @@ enum InstanceType : uint16_t {
ACCESSOR_PAIR_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
ALLOCATION_MEMENTO_TYPE,
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
ASM_WASM_DATA_TYPE,
ASYNC_GENERATOR_REQUEST_TYPE,
CLASS_POSITIONS_TYPE,
@@ -150,24 +158,23 @@ enum InstanceType : uint16_t {
FUNCTION_TEMPLATE_RARE_DATA_TYPE,
INTERCEPTOR_INFO_TYPE,
INTERPRETER_DATA_TYPE,
- MODULE_INFO_ENTRY_TYPE,
- MODULE_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
PROMISE_CAPABILITY_TYPE,
PROMISE_REACTION_TYPE,
PROTOTYPE_INFO_TYPE,
SCRIPT_TYPE,
SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE,
+ SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE,
STACK_FRAME_INFO_TYPE,
STACK_TRACE_FRAME_TYPE,
TEMPLATE_OBJECT_DESCRIPTION_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
- ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
WASM_CAPI_FUNCTION_DATA_TYPE,
WASM_DEBUG_INFO_TYPE,
WASM_EXCEPTION_TAG_TYPE,
WASM_EXPORTED_FUNCTION_DATA_TYPE,
+ WASM_INDIRECT_FUNCTION_TABLE_TYPE,
WASM_JS_FUNCTION_DATA_TYPE,
CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
@@ -177,6 +184,14 @@ enum InstanceType : uint16_t {
PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE, // LAST_MICROTASK_TYPE
+#define MAKE_TORQUE_INSTANCE_TYPE(V) V,
+ TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_INSTANCE_TYPE)
+#undef MAKE_TORQUE_INSTANCE_TYPE
+
+ // Modules
+ SOURCE_TEXT_MODULE_TYPE, // FIRST_MODULE_TYPE
+ SYNTHETIC_MODULE_TYPE, // LAST_MODULE_TYPE
+
ALLOCATION_SITE_TYPE,
EMBEDDER_DATA_ARRAY_TYPE,
// FixedArrays.
@@ -246,7 +261,7 @@ enum InstanceType : uint16_t {
// Like JS_API_OBJECT_TYPE, but requires access checks and/or has
// interceptors.
JS_SPECIAL_API_OBJECT_TYPE = 0x0410, // LAST_SPECIAL_RECEIVER_TYPE
- JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
+ JS_PRIMITIVE_WRAPPER_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
// Like JS_OBJECT_TYPE, but created from API function.
JS_API_OBJECT_TYPE = 0x0420,
JS_OBJECT_TYPE,
@@ -332,6 +347,9 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of Microtask.
FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
LAST_MICROTASK_TYPE = FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE,
+ // Boundaries of module record types
+ FIRST_MODULE_TYPE = SOURCE_TEXT_MODULE_TYPE,
+ LAST_MODULE_TYPE = SYNTHETIC_MODULE_TYPE,
// Boundary for promotion to old space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
@@ -349,7 +367,7 @@ enum InstanceType : uint16_t {
// Boundary case for testing JSReceivers that may have elements while having
// an empty fixed array as elements backing store. This is true for string
// wrappers.
- LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
+ LAST_CUSTOM_ELEMENTS_RECEIVER = JS_PRIMITIVE_WRAPPER_TYPE,
FIRST_SET_ITERATOR_TYPE = JS_SET_KEY_VALUE_ITERATOR_TYPE,
LAST_SET_ITERATOR_TYPE = JS_SET_VALUE_ITERATOR_TYPE,
@@ -364,6 +382,7 @@ enum InstanceType : uint16_t {
constexpr InstanceType LAST_STRING_TYPE =
static_cast<InstanceType>(FIRST_NONSTRING_TYPE - 1);
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((FIRST_NONSTRING_TYPE & kIsNotStringMask) != kStringTag);
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
@@ -420,12 +439,16 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(JSDataView, JS_DATA_VIEW_TYPE) \
V(JSDate, JS_DATE_TYPE) \
V(JSError, JS_ERROR_TYPE) \
+ V(JSFinalizationGroup, JS_FINALIZATION_GROUP_TYPE) \
+ V(JSFinalizationGroupCleanupIterator, \
+ JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \
V(JSFunction, JS_FUNCTION_TYPE) \
V(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE) \
V(JSGlobalProxy, JS_GLOBAL_PROXY_TYPE) \
V(JSMap, JS_MAP_TYPE) \
V(JSMessageObject, JS_MESSAGE_OBJECT_TYPE) \
V(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE) \
+ V(JSPrimitiveWrapper, JS_PRIMITIVE_WRAPPER_TYPE) \
V(JSPromise, JS_PROMISE_TYPE) \
V(JSProxy, JS_PROXY_TYPE) \
V(JSRegExp, JS_REGEXP_TYPE) \
@@ -434,10 +457,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(JSSet, JS_SET_TYPE) \
V(JSStringIterator, JS_STRING_ITERATOR_TYPE) \
V(JSTypedArray, JS_TYPED_ARRAY_TYPE) \
- V(JSValue, JS_VALUE_TYPE) \
- V(JSFinalizationGroup, JS_FINALIZATION_GROUP_TYPE) \
- V(JSFinalizationGroupCleanupIterator, \
- JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE) \
V(JSWeakMap, JS_WEAK_MAP_TYPE) \
V(JSWeakRef, JS_WEAK_REF_TYPE) \
V(JSWeakSet, JS_WEAK_SET_TYPE) \
@@ -462,9 +481,11 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE) \
V(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE) \
V(SmallOrderedNameDictionary, SMALL_ORDERED_NAME_DICTIONARY_TYPE) \
+ V(SourceTextModule, SOURCE_TEXT_MODULE_TYPE) \
V(StoreHandler, STORE_HANDLER_TYPE) \
V(StringTable, STRING_TABLE_TYPE) \
V(Symbol, SYMBOL_TYPE) \
+ V(SyntheticModule, SYNTHETIC_MODULE_TYPE) \
V(TransitionArray, TRANSITION_ARRAY_TYPE) \
V(UncompiledDataWithoutPreparseData, \
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
@@ -505,6 +526,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \
V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \
V(Microtask, FIRST_MICROTASK_TYPE, LAST_MICROTASK_TYPE) \
+ V(Module, FIRST_MODULE_TYPE, LAST_MODULE_TYPE) \
V(Name, FIRST_NAME_TYPE, LAST_NAME_TYPE) \
V(String, FIRST_STRING_TYPE, LAST_STRING_TYPE) \
V(WeakFixedArray, FIRST_WEAK_FIXED_ARRAY_TYPE, LAST_WEAK_FIXED_ARRAY_TYPE)
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index f2bc87ebac..dbf212aaf8 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -34,9 +34,11 @@
#include "unicode/formattedvalue.h"
#include "unicode/locid.h"
#include "unicode/normalizer2.h"
+#include "unicode/numberformatter.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
#include "unicode/timezone.h"
+#include "unicode/ures.h"
#include "unicode/ustring.h"
#include "unicode/uvernum.h" // U_ICU_VERSION_MAJOR_NUM
@@ -52,9 +54,8 @@ namespace v8 {
namespace internal {
namespace {
-inline bool IsASCIIUpper(uint16_t ch) { return ch >= 'A' && ch <= 'Z'; }
-const uint8_t kToLower[256] = {
+constexpr uint8_t kToLower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
@@ -79,20 +80,17 @@ const uint8_t kToLower[256] = {
0xFC, 0xFD, 0xFE, 0xFF,
};
-inline uint16_t ToLatin1Lower(uint16_t ch) {
+inline constexpr uint16_t ToLatin1Lower(uint16_t ch) {
return static_cast<uint16_t>(kToLower[ch]);
}
-inline uint16_t ToASCIIUpper(uint16_t ch) {
- return ch & ~((ch >= 'a' && ch <= 'z') << 5);
-}
-
// Does not work for U+00DF (sharp-s), U+00B5 (micron), U+00FF.
-inline uint16_t ToLatin1Upper(uint16_t ch) {
+inline constexpr uint16_t ToLatin1Upper(uint16_t ch) {
+#if V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
+#endif
return ch &
- ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
- << 5);
+ ~((IsAsciiLower(ch) || (((ch & 0xE0) == 0xE0) && ch != 0xF7)) << 5);
}
template <typename Char>
@@ -104,7 +102,7 @@ bool ToUpperFastASCII(const Vector<const Char>& src,
for (auto it = src.begin(); it != src.end(); ++it) {
uint16_t ch = static_cast<uint16_t>(*it);
ored |= ch;
- result->SeqOneByteStringSet(index++, ToASCIIUpper(ch));
+ result->SeqOneByteStringSet(index++, ToAsciiUpper(ch));
}
return !(ored & ~0x7F);
}
@@ -155,7 +153,7 @@ void ToUpperWithSharpS(const Vector<const Char>& src,
inline int FindFirstUpperOrNonAscii(String s, int length) {
for (int index = 0; index < length; ++index) {
uint16_t ch = s.Get(index);
- if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
+ if (V8_UNLIKELY(IsAsciiUpper(ch) || ch & ~0x7F)) {
return index;
}
}
@@ -180,12 +178,11 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
template <typename T>
MaybeHandle<T> New(Isolate* isolate, Handle<JSFunction> constructor,
Handle<Object> locales, Handle<Object> options) {
- Handle<JSObject> result;
+ Handle<Map> map;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- JSObject::New(constructor, constructor, Handle<AllocationSite>::null()),
- T);
- return T::Initialize(isolate, Handle<T>::cast(result), locales, options);
+ isolate, map,
+ JSFunction::GetDerivedMap(isolate, constructor, constructor), T);
+ return T::New(isolate, map, locales, options);
}
} // namespace
@@ -212,6 +209,24 @@ icu::UnicodeString Intl::ToICUUnicodeString(Isolate* isolate,
return icu::UnicodeString(uchar_buffer, length);
}
+icu::StringPiece Intl::ToICUStringPiece(Isolate* isolate,
+ Handle<String> string) {
+ DCHECK(string->IsFlat());
+ DisallowHeapAllocation no_gc;
+
+ const String::FlatContent& flat = string->GetFlatContent(no_gc);
+ if (!flat.IsOneByte()) return icu::StringPiece(nullptr, 0);
+
+ int32_t length = string->length();
+ const char* char_buffer =
+ reinterpret_cast<const char*>(flat.ToOneByteVector().begin());
+ if (!String::IsAscii(char_buffer, length)) {
+ return icu::StringPiece(nullptr, 0);
+ }
+
+ return icu::StringPiece(char_buffer, length);
+}
+
namespace {
MaybeHandle<String> LocaleConvertCase(Isolate* isolate, Handle<String> s,
bool is_to_upper, const char* lang) {
@@ -506,23 +521,59 @@ bool RemoveLocaleScriptTag(const std::string& icu_locale,
return true;
}
+bool ValidateResource(const icu::Locale locale, const char* path,
+ const char* key) {
+ bool result = false;
+ UErrorCode status = U_ZERO_ERROR;
+ UResourceBundle* bundle = ures_open(path, locale.getName(), &status);
+ if (bundle != nullptr && status == U_ZERO_ERROR) {
+ if (key == nullptr) {
+ result = true;
+ } else {
+ UResourceBundle* key_bundle =
+ ures_getByKey(bundle, key, nullptr, &status);
+ result = key_bundle != nullptr && (status == U_ZERO_ERROR);
+ ures_close(key_bundle);
+ }
+ }
+ ures_close(bundle);
+ if (!result) {
+ if ((locale.getCountry()[0] != '\0') && (locale.getScript()[0] != '\0')) {
+ // Fallback to try without country.
+ std::string without_country(locale.getLanguage());
+ without_country = without_country.append("-").append(locale.getScript());
+ return ValidateResource(without_country.c_str(), path, key);
+ } else if ((locale.getCountry()[0] != '\0') ||
+ (locale.getScript()[0] != '\0')) {
+ // Fallback to try with only language.
+ std::string language(locale.getLanguage());
+ return ValidateResource(language.c_str(), path, key);
+ }
+ }
+ return result;
+}
+
} // namespace
std::set<std::string> Intl::BuildLocaleSet(
- const icu::Locale* icu_available_locales, int32_t count) {
+ const icu::Locale* icu_available_locales, int32_t count, const char* path,
+ const char* validate_key) {
std::set<std::string> locales;
for (int32_t i = 0; i < count; ++i) {
std::string locale =
Intl::ToLanguageTag(icu_available_locales[i]).FromJust();
+ if (path != nullptr || validate_key != nullptr) {
+ if (!ValidateResource(icu_available_locales[i], path, validate_key)) {
+ continue;
+ }
+ }
locales.insert(locale);
-
std::string shortened_locale;
if (RemoveLocaleScriptTag(locale, &shortened_locale)) {
std::replace(shortened_locale.begin(), shortened_locale.end(), '_', '-');
locales.insert(shortened_locale);
}
}
-
return locales;
}
@@ -683,19 +734,10 @@ V8_WARN_UNUSED_RESULT Maybe<bool> Intl::GetBoolOption(
namespace {
-char AsciiToLower(char c) {
- if (c < 'A' || c > 'Z') {
- return c;
- }
- return c | (1 << 5);
-}
-
-bool IsLowerAscii(char c) { return c >= 'a' && c < 'z'; }
-
bool IsTwoLetterLanguage(const std::string& locale) {
// Two letters, both in range 'a'-'z'...
- return locale.length() == 2 && IsLowerAscii(locale[0]) &&
- IsLowerAscii(locale[1]);
+ return locale.length() == 2 && IsAsciiLower(locale[0]) &&
+ IsAsciiLower(locale[1]);
}
bool IsDeprecatedLanguage(const std::string& locale) {
@@ -770,7 +812,7 @@ Maybe<std::string> Intl::CanonicalizeLanguageTag(Isolate* isolate,
// Because per BCP 47 2.1.1 language tags are case-insensitive, lowercase
// the input before any more check.
- std::transform(locale.begin(), locale.end(), locale.begin(), AsciiToLower);
+ std::transform(locale.begin(), locale.end(), locale.begin(), ToAsciiLower);
// ICU maps a few grandfathered tags to what looks like a regular language
// tag even though IANA language tag registry does not have a preferred
@@ -1020,6 +1062,16 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
UCollationResult result;
UErrorCode status = U_ZERO_ERROR;
+ icu::StringPiece string_piece1 = Intl::ToICUStringPiece(isolate, string1);
+ if (!string_piece1.empty()) {
+ icu::StringPiece string_piece2 = Intl::ToICUStringPiece(isolate, string2);
+ if (!string_piece2.empty()) {
+ result = icu_collator.compareUTF8(string_piece1, string_piece2, status);
+ DCHECK(U_SUCCESS(status));
+ return factory->NewNumberFromInt(result);
+ }
+ }
+
icu::UnicodeString string_val1 = Intl::ToICUUnicodeString(isolate, string1);
icu::UnicodeString string_val2 = Intl::ToICUUnicodeString(isolate, string2);
result = icu_collator.compare(string_val1, string_val2, status);
@@ -1116,10 +1168,12 @@ Maybe<int> DefaultNumberOption(Isolate* isolate, Handle<Object> value, int min,
return Just(FastD2I(floor(value_num->Number())));
}
+} // namespace
+
// ecma402/#sec-getnumberoption
-Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
- Handle<String> property, int min, int max,
- int fallback) {
+Maybe<int> Intl::GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
+ Handle<String> property, int min, int max,
+ int fallback) {
// 1. Let value be ? Get(options, property).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -1130,62 +1184,70 @@ Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
return DefaultNumberOption(isolate, value, min, max, fallback, property);
}
-Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
- const char* property, int min, int max,
- int fallback) {
- Handle<String> property_str =
- isolate->factory()->NewStringFromAsciiChecked(property);
- return GetNumberOption(isolate, options, property_str, min, max, fallback);
-}
-
-} // namespace
-
Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
Isolate* isolate, Handle<JSReceiver> options, int mnfd_default,
- int mxfd_default) {
+ int mxfd_default, bool notation_is_compact) {
+ Factory* factory = isolate->factory();
Intl::NumberFormatDigitOptions digit_options;
// 5. Let mnid be ? GetNumberOption(options, "minimumIntegerDigits,", 1, 21,
// 1).
- int mnid;
- if (!GetNumberOption(isolate, options, "minimumIntegerDigits", 1, 21, 1)
+ int mnid = 1;
+ if (!Intl::GetNumberOption(isolate, options,
+ factory->minimumIntegerDigits_string(), 1, 21, 1)
.To(&mnid)) {
return Nothing<NumberFormatDigitOptions>();
}
- // 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20,
- // mnfdDefault).
- int mnfd;
- if (!GetNumberOption(isolate, options, "minimumFractionDigits", 0, 20,
- mnfd_default)
- .To(&mnfd)) {
- return Nothing<NumberFormatDigitOptions>();
- }
+ int mnfd = 0;
+ int mxfd = 0;
+ Handle<Object> mnfd_obj;
+ Handle<Object> mxfd_obj;
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // 6. Let mnfd be ? Get(options, "minimumFractionDigits").
+ Handle<String> mnfd_str = factory->minimumFractionDigits_string();
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, mnfd_obj, JSReceiver::GetProperty(isolate, options, mnfd_str),
+ Nothing<NumberFormatDigitOptions>());
+
+ // 8. Let mnfd be ? Get(options, "maximumFractionDigits").
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, mxfd_obj, JSReceiver::GetProperty(isolate, options, mxfd_str),
+ Nothing<NumberFormatDigitOptions>());
+ } else {
+ // 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20,
+ // mnfdDefault).
+ if (!Intl::GetNumberOption(isolate, options,
+ factory->minimumFractionDigits_string(), 0, 20,
+ mnfd_default)
+ .To(&mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
- // 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
- int mxfd_actual_default = std::max(mnfd, mxfd_default);
+ // 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
+ int mxfd_actual_default = std::max(mnfd, mxfd_default);
- // 8. Let mxfd be ? GetNumberOption(options,
- // "maximumFractionDigits", mnfd, 20, mxfdActualDefault).
- int mxfd;
- if (!GetNumberOption(isolate, options, "maximumFractionDigits", mnfd, 20,
- mxfd_actual_default)
- .To(&mxfd)) {
- return Nothing<NumberFormatDigitOptions>();
+ // 8. Let mxfd be ? GetNumberOption(options,
+ // "maximumFractionDigits", mnfd, 20, mxfdActualDefault).
+ if (!Intl::GetNumberOption(isolate, options,
+ factory->maximumFractionDigits_string(), mnfd,
+ 20, mxfd_actual_default)
+ .To(&mxfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
}
// 9. Let mnsd be ? Get(options, "minimumSignificantDigits").
Handle<Object> mnsd_obj;
- Handle<String> mnsd_str =
- isolate->factory()->minimumSignificantDigits_string();
+ Handle<String> mnsd_str = factory->minimumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mnsd_obj, JSReceiver::GetProperty(isolate, options, mnsd_str),
Nothing<NumberFormatDigitOptions>());
// 10. Let mxsd be ? Get(options, "maximumSignificantDigits").
Handle<Object> mxsd_obj;
- Handle<String> mxsd_str =
- isolate->factory()->maximumSignificantDigits_string();
+ Handle<String> mxsd_str = factory->maximumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mxsd_obj, JSReceiver::GetProperty(isolate, options, mxsd_str),
Nothing<NumberFormatDigitOptions>());
@@ -1222,8 +1284,50 @@ Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
} else {
digit_options.minimum_significant_digits = 0;
digit_options.maximum_significant_digits = 0;
- }
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // 15. Else If mnfd is not undefined or mxfd is not undefined, then
+ if (!mnfd_obj->IsUndefined(isolate) || !mxfd_obj->IsUndefined(isolate)) {
+ // 15. b. Let mnfd be ? DefaultNumberOption(mnfd, 0, 20, mnfdDefault).
+ Handle<String> mnfd_str = factory->minimumFractionDigits_string();
+ if (!DefaultNumberOption(isolate, mnfd_obj, 0, 20, mnfd_default,
+ mnfd_str)
+ .To(&mnfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
+
+ // 15. c. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
+ int mxfd_actual_default = std::max(mnfd, mxfd_default);
+
+ // 15. d. Let mxfd be ? DefaultNumberOption(mxfd, mnfd, 20,
+ // mxfdActualDefault).
+ Handle<String> mxfd_str = factory->maximumFractionDigits_string();
+ if (!DefaultNumberOption(isolate, mxfd_obj, mnfd, 20,
+ mxfd_actual_default, mxfd_str)
+ .To(&mxfd)) {
+ return Nothing<NumberFormatDigitOptions>();
+ }
+ // 15. e. Set intlObj.[[MinimumFractionDigits]] to mnfd.
+ digit_options.minimum_fraction_digits = mnfd;
+
+ // 15. f. Set intlObj.[[MaximumFractionDigits]] to mxfd.
+ digit_options.maximum_fraction_digits = mxfd;
+ // Else If intlObj.[[Notation]] is "compact", then
+ } else if (notation_is_compact) {
+ // a. Set intlObj.[[RoundingType]] to "compact-rounding".
+ // Set minimum_significant_digits to -1 to represent roundingtype is
+ // "compact-rounding".
+ digit_options.minimum_significant_digits = -1;
+ // 17. Else,
+ } else {
+ // 17. b. Set intlObj.[[MinimumFractionDigits]] to mnfdDefault.
+ digit_options.minimum_fraction_digits = mnfd_default;
+
+ // 17. c. Set intlObj.[[MaximumFractionDigits]] to mxfdDefault.
+ digit_options.maximum_fraction_digits = mxfd_default;
+ }
+ }
+ }
return Just(digit_options);
}
@@ -1678,7 +1782,7 @@ Intl::ResolvedLocale Intl::ResolveLocale(
return Intl::ResolvedLocale{canonicalized_locale, icu_locale, extensions};
}
-Managed<icu::UnicodeString> Intl::SetTextToBreakIterator(
+Handle<Managed<icu::UnicodeString>> Intl::SetTextToBreakIterator(
Isolate* isolate, Handle<String> text, icu::BreakIterator* break_iterator) {
text = String::Flatten(isolate, text);
icu::UnicodeString* u_text =
@@ -1688,7 +1792,7 @@ Managed<icu::UnicodeString> Intl::SetTextToBreakIterator(
Managed<icu::UnicodeString>::FromRawPtr(isolate, 0, u_text);
break_iterator->setText(*u_text);
- return *new_u_text;
+ return new_u_text;
}
// ecma262 #sec-string.prototype.normalize
@@ -1927,8 +2031,18 @@ const std::set<std::string>& Intl::GetAvailableLocalesForLocale() {
return available_locales.Pointer()->Get();
}
+namespace {
+
+struct CheckCalendar {
+ static const char* key() { return "calendar"; }
+ static const char* path() { return nullptr; }
+};
+
+} // namespace
+
const std::set<std::string>& Intl::GetAvailableLocalesForDateFormat() {
- static base::LazyInstance<Intl::AvailableLocales<icu::DateFormat>>::type
+ static base::LazyInstance<
+ Intl::AvailableLocales<icu::DateFormat, CheckCalendar>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
@@ -1966,16 +2080,17 @@ Handle<String> Intl::NumberFieldToType(Isolate* isolate,
: isolate->factory()->plusSign_string();
} else {
double number = numeric_obj->Number();
- return number < 0 ? isolate->factory()->minusSign_string()
- : isolate->factory()->plusSign_string();
+ return std::signbit(number) ? isolate->factory()->minusSign_string()
+ : isolate->factory()->plusSign_string();
}
case UNUM_EXPONENT_SYMBOL_FIELD:
+ return isolate->factory()->exponentSeparator_string();
+
case UNUM_EXPONENT_SIGN_FIELD:
+ return isolate->factory()->exponentMinusSign_string();
+
case UNUM_EXPONENT_FIELD:
- // We should never get these because we're not using any scientific
- // formatter.
- UNREACHABLE();
- return Handle<String>();
+ return isolate->factory()->exponentInteger_string();
case UNUM_PERMILL_FIELD:
// We're not creating any permill formatter, and it's not even clear how
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 1274fa0549..4d4d3245fd 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -49,7 +49,8 @@ class Intl {
// script; eg, pa_Guru_IN (language=Panjabi, script=Gurmukhi, country-India)
// would include pa_IN.
static std::set<std::string> BuildLocaleSet(
- const icu::Locale* icu_available_locales, int32_t count);
+ const icu::Locale* icu_available_locales, int32_t count, const char* path,
+ const char* validate_key);
static Maybe<std::string> ToLanguageTag(const icu::Locale& locale);
@@ -126,6 +127,10 @@ class Intl {
Isolate* isolate, Handle<JSReceiver> options, const char* property,
const char* service, bool* result);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<int> GetNumberOption(
+ Isolate* isolate, Handle<JSReceiver> options, Handle<String> property,
+ int min, int max, int fallback);
+
// Canonicalize the locale.
// https://tc39.github.io/ecma402/#sec-canonicalizelanguagetag,
// including type check and structural validity check.
@@ -180,7 +185,8 @@ class Intl {
};
V8_WARN_UNUSED_RESULT static Maybe<NumberFormatDigitOptions>
SetNumberFormatDigitOptions(Isolate* isolate, Handle<JSReceiver> options,
- int mnfd_default, int mxfd_default);
+ int mnfd_default, int mxfd_default,
+ bool notation_is_compact);
static icu::Locale CreateICULocale(const std::string& bcp47_locale);
@@ -277,20 +283,26 @@ class Intl {
// A helper template to implement the GetAvailableLocales
// Usage in src/objects/js-XXX.cc
- //
// const std::set<std::string>& JSXxx::GetAvailableLocales() {
// static base::LazyInstance<Intl::AvailableLocales<icu::YYY>>::type
// available_locales = LAZY_INSTANCE_INITIALIZER;
// return available_locales.Pointer()->Get();
// }
- template <typename T>
+
+ struct SkipResourceCheck {
+ static const char* key() { return nullptr; }
+ static const char* path() { return nullptr; }
+ };
+
+ template <typename T, typename C = SkipResourceCheck>
class AvailableLocales {
public:
AvailableLocales() {
int32_t num_locales = 0;
const icu::Locale* icu_available_locales =
T::getAvailableLocales(num_locales);
- set = Intl::BuildLocaleSet(icu_available_locales, num_locales);
+ set = Intl::BuildLocaleSet(icu_available_locales, num_locales, C::path(),
+ C::key());
}
virtual ~AvailableLocales() {}
const std::set<std::string>& Get() const { return set; }
@@ -300,7 +312,7 @@ class Intl {
};
// Utility function to set text to BreakIterator.
- static Managed<icu::UnicodeString> SetTextToBreakIterator(
+ static Handle<Managed<icu::UnicodeString>> SetTextToBreakIterator(
Isolate* isolate, Handle<String> text,
icu::BreakIterator* break_iterator);
@@ -313,6 +325,10 @@ class Intl {
static icu::UnicodeString ToICUUnicodeString(Isolate* isolate,
Handle<String> string);
+ // Convert a Handle<String> to icu::StringPiece
+ static icu::StringPiece ToICUStringPiece(Isolate* isolate,
+ Handle<String> string);
+
static const uint8_t* ToLatin1LowerTable();
static String ConvertOneByteToLower(String src, String dst);
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
index 67d8537feb..d91df566c3 100644
--- a/deps/v8/src/objects/intl-objects.tq
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include 'src/objects/js-break-iterator.h'
+#include 'src/objects/js-collator.h'
#include 'src/objects/js-number-format.h'
#include 'src/objects/js-objects.h'
#include 'src/objects/js-plural-rules.h'
@@ -37,8 +39,9 @@ extern class JSNumberFormat extends JSObject {
extern class JSPluralRules extends JSObject {
locale: String;
flags: Smi;
- icu_plural_rules: Foreign; // Managed<icu::PluralRules>
- icu_decimal_format: Foreign; // Managed<icu::DecimalFormat>
+ icu_plural_rules: Foreign; // Managed<icu::PluralRules>
+ icu_number_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberFormatter>
}
extern class JSRelativeTimeFormat extends JSObject {
@@ -62,3 +65,20 @@ extern class JSSegmentIterator extends JSObject {
unicode_string: Foreign; // Managed<icu::UnicodeString>
flags: Smi;
}
+
+extern class JSV8BreakIterator extends JSObject {
+ locale: String;
+ break_iterator: Foreign; // Managed<icu::BreakIterator>;
+ unicode_string: Foreign; // Managed<icu::UnicodeString>;
+ bound_adopt_text: Undefined | JSFunction;
+ bound_first: Undefined | JSFunction;
+ bound_next: Undefined | JSFunction;
+ bound_current: Undefined | JSFunction;
+ bound_break_type: Undefined | JSFunction;
+ break_iterator_type: Smi;
+}
+
+extern class JSCollator extends JSObject {
+ icu_collator: Foreign; // Managed<icu::Collator>
+ bound_compare: Undefined | JSFunction;
+}
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 061fec10f7..9151be6da4 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -91,11 +91,11 @@ void JSArrayBuffer::clear_padding() {
}
void JSArrayBuffer::set_bit_field(uint32_t bits) {
- WriteField<uint32_t>(kBitFieldOffset, bits);
+ RELAXED_WRITE_UINT32_FIELD(*this, kBitFieldOffset, bits);
}
uint32_t JSArrayBuffer::bit_field() const {
- return ReadField<uint32_t>(kBitFieldOffset);
+ return RELAXED_READ_UINT32_FIELD(*this, kBitFieldOffset);
}
// |bit_field| fields.
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index b22001f04a..7bf2e1ae94 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -243,6 +243,12 @@ class JSTypedArray : public JSArrayBufferView {
class BodyDescriptor;
+#ifdef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
+ static constexpr size_t kMaxSizeInHeap = V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP;
+#else
+ static constexpr size_t kMaxSizeInHeap = 64;
+#endif
+
private:
static Handle<JSArrayBuffer> MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array);
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 335fabba86..1ff7dcb123 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -61,13 +61,14 @@ bool JSArray::HasArrayPrototype(Isolate* isolate) {
ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
+SMI_ACCESSORS(JSArrayIterator, raw_kind, kKindOffset)
+
IterationKind JSArrayIterator::kind() const {
- return static_cast<IterationKind>(
- Smi::cast(READ_FIELD(*this, kKindOffset)).value());
+ return static_cast<IterationKind>(raw_kind());
}
void JSArrayIterator::set_kind(IterationKind kind) {
- WRITE_FIELD(*this, kKindOffset, Smi::FromInt(static_cast<int>(kind)));
+ set_raw_kind(static_cast<int>(kind));
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 4bc296e31e..eb581c104e 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -132,7 +132,8 @@ class JSArray : public JSObject {
OBJECT_CONSTRUCTORS(JSArray, JSObject);
};
-Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
+ Handle<Context> native_context,
Handle<Map> initial_map);
// The JSArrayIterator describes JavaScript Array Iterators Objects, as
@@ -179,6 +180,9 @@ class JSArrayIterator : public JSObject {
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JSARRAY_ITERATOR_FIELDS)
+ private:
+ DECL_INT_ACCESSORS(raw_kind)
+
OBJECT_CONSTRUCTORS(JSArrayIterator, JSObject);
};
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 177d9d352b..86e87ddb0d 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -20,14 +20,12 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSV8BreakIterator, JSObject)
-inline void JSV8BreakIterator::set_type(Type type) {
- DCHECK_GT(JSV8BreakIterator::Type::COUNT, type);
- WRITE_FIELD(*this, kTypeOffset, Smi::FromInt(static_cast<int>(type)));
+inline JSV8BreakIterator::Type JSV8BreakIterator::type() const {
+ return static_cast<JSV8BreakIterator::Type>(raw_type());
}
-inline JSV8BreakIterator::Type JSV8BreakIterator::type() const {
- Object value = READ_FIELD(*this, kTypeOffset);
- return static_cast<JSV8BreakIterator::Type>(Smi::ToInt(value));
+inline void JSV8BreakIterator::set_type(Type type) {
+ set_raw_type(static_cast<int>(type));
}
ACCESSORS(JSV8BreakIterator, locale, String, kLocaleOffset)
@@ -41,6 +39,8 @@ ACCESSORS(JSV8BreakIterator, bound_next, Object, kBoundNextOffset)
ACCESSORS(JSV8BreakIterator, bound_current, Object, kBoundCurrentOffset)
ACCESSORS(JSV8BreakIterator, bound_break_type, Object, kBoundBreakTypeOffset)
+SMI_ACCESSORS(JSV8BreakIterator, raw_type, kBreakIteratorTypeOffset)
+
CAST_ACCESSOR(JSV8BreakIterator)
} // namespace internal
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 4879fb41a4..31ed3f8611 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -15,9 +15,9 @@
namespace v8 {
namespace internal {
-MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::Initialize(
- Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
- Handle<Object> locales, Handle<Object> options_obj) {
+MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options_obj) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
@@ -96,8 +96,13 @@ MaybeHandle<JSV8BreakIterator> JSV8BreakIterator::Initialize(
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- break_iterator_holder->set_locale(*locale_str);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSV8BreakIterator> break_iterator_holder =
+ Handle<JSV8BreakIterator>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ break_iterator_holder->set_locale(*locale_str);
break_iterator_holder->set_type(type_enum);
break_iterator_holder->set_break_iterator(*managed_break_iterator);
break_iterator_holder->set_unicode_string(*managed_unicode_string);
@@ -126,9 +131,9 @@ void JSV8BreakIterator::AdoptText(
icu::BreakIterator* break_iterator =
break_iterator_holder->break_iterator().raw();
CHECK_NOT_NULL(break_iterator);
- Managed<icu::UnicodeString> unicode_string =
+ Handle<Managed<icu::UnicodeString>> unicode_string =
Intl::SetTextToBreakIterator(isolate, text, break_iterator);
- break_iterator_holder->set_unicode_string(unicode_string);
+ break_iterator_holder->set_unicode_string(*unicode_string);
}
Handle<String> JSV8BreakIterator::TypeAsString() const {
@@ -141,9 +146,8 @@ Handle<String> JSV8BreakIterator::TypeAsString() const {
return GetReadOnlyRoots().sentence_string_handle();
case Type::LINE:
return GetReadOnlyRoots().line_string_handle();
- case Type::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
Handle<Object> JSV8BreakIterator::Current(
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index fe94c177c4..4b40192c81 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -15,6 +15,7 @@
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
#include "src/objects/objects.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -28,9 +29,9 @@ namespace internal {
class JSV8BreakIterator : public JSObject {
public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> Initialize(
- Isolate* isolate, Handle<JSV8BreakIterator> break_iterator,
- Handle<Object> input_locales, Handle<Object> input_options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSV8BreakIterator> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> input_locales,
+ Handle<Object> input_options);
static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator);
@@ -50,7 +51,7 @@ class JSV8BreakIterator : public JSObject {
static String BreakType(Isolate* isolate,
Handle<JSV8BreakIterator> break_iterator);
- enum class Type { CHARACTER, WORD, SENTENCE, LINE, COUNT };
+ enum class Type { CHARACTER, WORD, SENTENCE, LINE };
inline void set_type(Type type);
inline Type type() const;
@@ -69,23 +70,12 @@ class JSV8BreakIterator : public JSObject {
DECL_ACCESSORS(bound_current, Object)
DECL_ACCESSORS(bound_break_type, Object)
-// Layout description.
-#define BREAK_ITERATOR_FIELDS(V) \
- /* Pointer fields. */ \
- V(kLocaleOffset, kTaggedSize) \
- V(kTypeOffset, kTaggedSize) \
- V(kBreakIteratorOffset, kTaggedSize) \
- V(kUnicodeStringOffset, kTaggedSize) \
- V(kBoundAdoptTextOffset, kTaggedSize) \
- V(kBoundFirstOffset, kTaggedSize) \
- V(kBoundNextOffset, kTaggedSize) \
- V(kBoundCurrentOffset, kTaggedSize) \
- V(kBoundBreakTypeOffset, kTaggedSize) \
- /* Total Size */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, BREAK_ITERATOR_FIELDS)
-#undef BREAK_ITERATOR_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSV8BREAK_ITERATOR_FIELDS)
+
+ private:
+ DECL_INT_ACCESSORS(raw_type)
OBJECT_CONSTRUCTORS(JSV8BreakIterator, JSObject);
};
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index e82351993d..a8d3893316 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -20,7 +20,7 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSCollator, JSObject)
-ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kICUCollatorOffset)
+ACCESSORS(JSCollator, icu_collator, Managed<icu::Collator>, kIcuCollatorOffset)
ACCESSORS(JSCollator, bound_compare, Object, kBoundCompareOffset)
CAST_ACCESSOR(JSCollator)
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index 4a1e857403..0413e2acd1 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -15,7 +15,9 @@
#include "unicode/locid.h"
#include "unicode/strenum.h"
#include "unicode/ucol.h"
+#include "unicode/udata.h"
#include "unicode/uloc.h"
+#include "unicode/utypes.h"
namespace v8 {
namespace internal {
@@ -239,10 +241,9 @@ void SetCaseFirstOption(icu::Collator* icu_collator,
} // anonymous namespace
// static
-MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
- Handle<JSCollator> collator,
- Handle<Object> locales,
- Handle<Object> options_obj) {
+MaybeHandle<JSCollator> JSCollator::New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> options_obj) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -465,15 +466,31 @@ MaybeHandle<JSCollator> JSCollator::Initialize(Isolate* isolate,
Handle<Managed<icu::Collator>> managed_collator =
Managed<icu::Collator>::FromUniquePtr(isolate, 0,
std::move(icu_collator));
+
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSCollator> collator = Handle<JSCollator>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
collator->set_icu_collator(*managed_collator);
// 29. Return collator.
return collator;
}
+namespace {
+
+struct CheckColl {
+ static const char* key() { return nullptr; }
+#define U_ICUDATA_COLL U_ICUDATA_NAME U_TREE_SEPARATOR_STRING "coll"
+ static const char* path() { return U_ICUDATA_COLL; }
+#undef U_ICUDATA_COLL
+};
+
+} // namespace
+
const std::set<std::string>& JSCollator::GetAvailableLocales() {
- static base::LazyInstance<Intl::AvailableLocales<icu::Collator>>::type
- available_locales = LAZY_INSTANCE_INITIALIZER;
+ static base::LazyInstance<Intl::AvailableLocales<icu::Collator, CheckColl>>::
+ type available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index 2bedbf811a..e9114afeb1 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -32,8 +32,8 @@ namespace internal {
class JSCollator : public JSObject {
public:
// ecma402/#sec-initializecollator
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> Initialize(
- Isolate* isolate, Handle<JSCollator> collator, Handle<Object> locales,
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSCollator> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
Handle<Object> options);
// ecma402/#sec-intl.collator.prototype.resolvedoptions
@@ -47,14 +47,8 @@ class JSCollator : public JSObject {
DECL_VERIFIER(JSCollator)
// Layout description.
-#define JS_COLLATOR_FIELDS(V) \
- V(kICUCollatorOffset, kTaggedSize) \
- V(kBoundCompareOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_COLLATOR_FIELDS)
-#undef JS_COLLATOR_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSCOLLATOR_FIELDS)
DECL_ACCESSORS(icu_collator, Managed<icu::Collator>)
DECL_ACCESSORS(bound_compare, Object)
diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h
index 4952f04a72..c002294b01 100644
--- a/deps/v8/src/objects/js-collection-iterator.h
+++ b/deps/v8/src/objects/js-collection-iterator.h
@@ -25,6 +25,7 @@ class JSCollectionIterator : public JSObject {
DECL_ACCESSORS(index, Object)
void JSCollectionIteratorPrint(std::ostream& os, const char* name);
+ DECL_VERIFIER(JSCollectionIterator)
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JSCOLLECTION_ITERATOR_FIELDS)
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 6dfde352ca..0a856ca062 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -30,6 +30,8 @@ class JSCollection : public JSObject {
static const int kAddFunctionDescriptorIndex = 3;
+ DECL_VERIFIER(JSCollection)
+
OBJECT_CONSTRUCTORS(JSCollection, JSObject);
};
@@ -114,6 +116,8 @@ class JSWeakCollection : public JSObject {
static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
int max_entries);
+ DECL_VERIFIER(JSWeakCollection)
+
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JSWEAK_COLLECTION_FIELDS)
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 8730e0a39b..db7ba27312 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -56,13 +56,13 @@ class PatternItem {
std::vector<const char*> allowed_values;
};
-static const std::vector<PatternItem> BuildPatternItems() {
+static std::vector<PatternItem> BuildPatternItems() {
const std::vector<const char*> kLongShort = {"long", "short"};
const std::vector<const char*> kNarrowLongShort = {"narrow", "long", "short"};
const std::vector<const char*> k2DigitNumeric = {"2-digit", "numeric"};
const std::vector<const char*> kNarrowLongShort2DigitNumeric = {
"narrow", "long", "short", "2-digit", "numeric"};
- const std::vector<PatternItem> kPatternItems = {
+ std::vector<PatternItem> items = {
PatternItem("weekday",
{{"EEEEE", "narrow"},
{"EEEE", "long"},
@@ -75,38 +75,59 @@ static const std::vector<PatternItem> BuildPatternItems() {
{{"GGGGG", "narrow"}, {"GGGG", "long"}, {"GGG", "short"}},
kNarrowLongShort),
PatternItem("year", {{"yy", "2-digit"}, {"y", "numeric"}},
- k2DigitNumeric),
- // Sometimes we get L instead of M for month - standalone name.
- PatternItem("month",
- {{"MMMMM", "narrow"},
- {"MMMM", "long"},
- {"MMM", "short"},
- {"MM", "2-digit"},
- {"M", "numeric"},
- {"LLLLL", "narrow"},
- {"LLLL", "long"},
- {"LLL", "short"},
- {"LL", "2-digit"},
- {"L", "numeric"}},
- kNarrowLongShort2DigitNumeric),
- PatternItem("day", {{"dd", "2-digit"}, {"d", "numeric"}}, k2DigitNumeric),
- PatternItem("hour",
- {{"HH", "2-digit"},
- {"H", "numeric"},
- {"hh", "2-digit"},
- {"h", "numeric"},
- {"kk", "2-digit"},
- {"k", "numeric"},
- {"KK", "2-digit"},
- {"K", "numeric"}},
- k2DigitNumeric),
- PatternItem("minute", {{"mm", "2-digit"}, {"m", "numeric"}},
- k2DigitNumeric),
- PatternItem("second", {{"ss", "2-digit"}, {"s", "numeric"}},
- k2DigitNumeric),
- PatternItem("timeZoneName", {{"zzzz", "long"}, {"z", "short"}},
- kLongShort)};
- return kPatternItems;
+ k2DigitNumeric)};
+ if (FLAG_harmony_intl_dateformat_quarter) {
+ items.push_back(PatternItem("quarter",
+ {{"QQQQQ", "narrow"},
+ {"QQQQ", "long"},
+ {"QQQ", "short"},
+ {"qqqqq", "narrow"},
+ {"qqqq", "long"},
+ {"qqq", "short"}},
+ kNarrowLongShort));
+ }
+ // Sometimes we get L instead of M for month - standalone name.
+ items.push_back(PatternItem("month",
+ {{"MMMMM", "narrow"},
+ {"MMMM", "long"},
+ {"MMM", "short"},
+ {"MM", "2-digit"},
+ {"M", "numeric"},
+ {"LLLLL", "narrow"},
+ {"LLLL", "long"},
+ {"LLL", "short"},
+ {"LL", "2-digit"},
+ {"L", "numeric"}},
+ kNarrowLongShort2DigitNumeric));
+ items.push_back(PatternItem("day", {{"dd", "2-digit"}, {"d", "numeric"}},
+ k2DigitNumeric));
+ if (FLAG_harmony_intl_dateformat_day_period) {
+ items.push_back(PatternItem("dayPeriod",
+ {{"BBBBB", "narrow"},
+ {"bbbbb", "narrow"},
+ {"BBBB", "long"},
+ {"bbbb", "long"},
+ {"B", "short"},
+ {"b", "short"}},
+ kNarrowLongShort));
+ }
+ items.push_back(PatternItem("hour",
+ {{"HH", "2-digit"},
+ {"H", "numeric"},
+ {"hh", "2-digit"},
+ {"h", "numeric"},
+ {"kk", "2-digit"},
+ {"k", "numeric"},
+ {"KK", "2-digit"},
+ {"K", "numeric"}},
+ k2DigitNumeric));
+ items.push_back(PatternItem("minute", {{"mm", "2-digit"}, {"m", "numeric"}},
+ k2DigitNumeric));
+ items.push_back(PatternItem("second", {{"ss", "2-digit"}, {"s", "numeric"}},
+ k2DigitNumeric));
+ items.push_back(PatternItem("timeZoneName",
+ {{"zzzz", "long"}, {"z", "short"}}, kLongShort));
+ return items;
}
class PatternItems {
@@ -348,6 +369,16 @@ Handle<String> DateTimeStyleAsString(Isolate* isolate,
}
}
+int FractionalSecondDigitsFromPattern(const std::string& pattern) {
+ int result = 0;
+ for (size_t i = 0; i < pattern.length() && result < 3; i++) {
+ if (pattern[i] == 'S') {
+ result++;
+ }
+ }
+ return result;
+}
+
} // namespace
// ecma402 #sec-intl.datetimeformat.prototype.resolvedoptions
@@ -532,6 +563,13 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
Just(kDontThrow))
.FromJust());
}
+ if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
+ int fsd = FractionalSecondDigitsFromPattern(pattern);
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->fractionalSecondDigits_string(),
+ factory->NewNumberFromInt(fsd), Just(kDontThrow))
+ .FromJust());
+ }
return options;
}
@@ -643,17 +681,14 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
JSFunction::cast(
isolate->context().native_context().intl_date_time_format_function()),
isolate);
- Handle<JSObject> obj;
+ Handle<Map> map;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, obj,
- JSObject::New(constructor, constructor, Handle<AllocationSite>::null()),
- String);
+ isolate, map,
+ JSFunction::GetDerivedMap(isolate, constructor, constructor), String);
Handle<JSDateTimeFormat> date_time_format;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, date_time_format,
- JSDateTimeFormat::Initialize(isolate, Handle<JSDateTimeFormat>::cast(obj),
- locales, internal_options),
- String);
+ JSDateTimeFormat::New(isolate, map, locales, internal_options), String);
if (can_cache) {
isolate->set_icu_object_in_cache(
@@ -669,27 +704,23 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
namespace {
Maybe<bool> IsPropertyUndefined(Isolate* isolate, Handle<JSObject> options,
- const char* property) {
- Factory* factory = isolate->factory();
+ Handle<String> property) {
// i. Let prop be the property name.
// ii. Let value be ? Get(options, prop).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value,
- Object::GetPropertyOrElement(
- isolate, options, factory->NewStringFromAsciiChecked(property)),
+ isolate, value, Object::GetPropertyOrElement(isolate, options, property),
Nothing<bool>());
return Just(value->IsUndefined(isolate));
}
Maybe<bool> NeedsDefault(Isolate* isolate, Handle<JSObject> options,
- const std::vector<std::string>& props) {
+ const std::vector<Handle<String>>& props) {
bool needs_default = true;
for (const auto& prop : props) {
// i. Let prop be the property name.
// ii. Let value be ? Get(options, prop)
- Maybe<bool> maybe_undefined =
- IsPropertyUndefined(isolate, options, prop.c_str());
+ Maybe<bool> maybe_undefined = IsPropertyUndefined(isolate, options, prop);
MAYBE_RETURN(maybe_undefined, Nothing<bool>());
// iii. If value is not undefined, let needDefaults be false.
if (!maybe_undefined.FromJust()) {
@@ -741,8 +772,15 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
// 4. If required is "date" or "any", then
if (required == RequiredOption::kAny || required == RequiredOption::kDate) {
- // a. For each of the property names "weekday", "year", "month", "day", do
- const std::vector<std::string> list({"weekday", "year", "month", "day"});
+ // a. For each of the property names "weekday", "year", "quarter", "month",
+ // "day", do
+ std::vector<Handle<String>> list(
+ {factory->weekday_string(), factory->year_string()});
+ if (FLAG_harmony_intl_dateformat_quarter) {
+ list.push_back(factory->quarter_string());
+ }
+ list.push_back(factory->month_string());
+ list.push_back(factory->day_string());
Maybe<bool> maybe_needs_default = NeedsDefault(isolate, options, list);
MAYBE_RETURN(maybe_needs_default, Handle<JSObject>());
needs_default = maybe_needs_default.FromJust();
@@ -750,8 +788,18 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
// 5. If required is "time" or "any", then
if (required == RequiredOption::kAny || required == RequiredOption::kTime) {
- // a. For each of the property names "hour", "minute", "second", do
- const std::vector<std::string> list({"hour", "minute", "second"});
+ // a. For each of the property names "dayPeriod", "hour", "minute",
+ // "second", "fractionalSecondDigits", do
+ std::vector<Handle<String>> list;
+ if (FLAG_harmony_intl_dateformat_day_period) {
+ list.push_back(factory->dayPeriod_string());
+ }
+ list.push_back(factory->hour_string());
+ list.push_back(factory->minute_string());
+ list.push_back(factory->second_string());
+ if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
+ list.push_back(factory->fractionalSecondDigits_string());
+ }
Maybe<bool> maybe_needs_default = NeedsDefault(isolate, options, list);
MAYBE_RETURN(maybe_needs_default, Handle<JSObject>());
needs_default &= maybe_needs_default.FromJust();
@@ -890,7 +938,7 @@ icu::Calendar* CreateCalendar(Isolate* isolate, const icu::Locale& icu_locale,
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) {
+ icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
// See https://github.com/tc39/ecma402/issues/225 . The best pattern
// generation needs to be done in the base locale according to the
// current spec however odd it may be. See also crbug.com/826549 .
@@ -920,9 +968,9 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormat(
class DateFormatCache {
public:
- icu::SimpleDateFormat* Create(const icu::Locale& icu_locale,
- const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) {
+ icu::SimpleDateFormat* Create(
+ const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
+ icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
std::string key;
skeleton.toUTF8String<std::string>(key);
key += ":";
@@ -951,7 +999,7 @@ class DateFormatCache {
std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache(
const icu::Locale& icu_locale, const icu::UnicodeString& skeleton,
- icu::DateTimePatternGenerator& generator) {
+ icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
static base::LazyInstance<DateFormatCache>::type cache =
LAZY_INSTANCE_INITIALIZER;
return std::unique_ptr<icu::SimpleDateFormat>(
@@ -1087,7 +1135,8 @@ icu::UnicodeString ReplaceSkeleton(const icu::UnicodeString input,
std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
JSDateTimeFormat::DateTimeStyle date_style,
JSDateTimeFormat::DateTimeStyle time_style, const icu::Locale& icu_locale,
- Intl::HourCycle hc, icu::DateTimePatternGenerator& generator) {
+ Intl::HourCycle hc,
+ icu::DateTimePatternGenerator& generator) { // NOLINT(runtime/references)
std::unique_ptr<icu::SimpleDateFormat> result;
if (date_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
if (time_style != JSDateTimeFormat::DateTimeStyle::kUndefined) {
@@ -1156,10 +1205,9 @@ class DateTimePatternGeneratorCache {
enum FormatMatcherOption { kBestFit, kBasic };
// ecma402/#sec-initializedatetimeformat
-MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
- Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
- Handle<Object> locales, Handle<Object> input_options) {
- date_time_format->set_flags(0);
+MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> input_options) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
@@ -1347,7 +1395,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
}
}
}
- date_time_format->set_hour_cycle(hc);
DateTimeStyle date_style = DateTimeStyle::kUndefined;
DateTimeStyle time_style = DateTimeStyle::kUndefined;
@@ -1367,9 +1414,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// 29. If dateStyle is not undefined, set dateTimeFormat.[[DateStyle]] to
// dateStyle.
date_style = maybe_date_style.FromJust();
- if (date_style != DateTimeStyle::kUndefined) {
- date_time_format->set_date_style(date_style);
- }
// 30. Let timeStyle be ? GetOption(options, "timeStyle", "string", «
// "full", "long", "medium", "short" »).
@@ -1385,9 +1429,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// 31. If timeStyle is not undefined, set dateTimeFormat.[[TimeStyle]] to
// timeStyle.
time_style = maybe_time_style.FromJust();
- if (time_style != DateTimeStyle::kUndefined) {
- date_time_format->set_time_style(time_style);
- }
// 32. If dateStyle or timeStyle are not undefined, then
if (date_style != DateTimeStyle::kUndefined ||
@@ -1419,6 +1460,16 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
skeleton += item.map.find(input.get())->second;
}
}
+ if (FLAG_harmony_intl_dateformat_fractional_second_digits) {
+ Maybe<int> maybe_fsd = Intl::GetNumberOption(
+ isolate, options, factory->fractionalSecondDigits_string(), 0, 3, 0);
+ MAYBE_RETURN(maybe_fsd, MaybeHandle<JSDateTimeFormat>());
+ // Convert fractionalSecondDigits to skeleton.
+ int fsd = maybe_fsd.FromJust();
+ for (int i = 0; i < fsd; i++) {
+ skeleton += "S";
+ }
+ }
enum FormatMatcherOption { kBestFit, kBasic };
// We implement only best fit algorithm, but still need to check
@@ -1451,7 +1502,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// g. If dateTimeFormat.[[Hour]] is not undefined, then
if (!has_hour_option) {
// h. Else, i. Set dateTimeFormat.[[HourCycle]] to undefined.
- date_time_format->set_hour_cycle(Intl::HourCycle::kUndefined);
+ hc = Intl::HourCycle::kUndefined;
}
}
@@ -1477,8 +1528,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
maybe_hour_cycle.FromJust() != Intl::HourCycle::kUndefined) {
auto hc_extension_it = r.extensions.find("hc");
if (hc_extension_it != r.extensions.end()) {
- if (date_time_format->hour_cycle() !=
- Intl::ToHourCycle(hc_extension_it->second.c_str())) {
+ if (hc != Intl::ToHourCycle(hc_extension_it->second.c_str())) {
// Remove -hc- if it does not agree with what we used.
UErrorCode status = U_ZERO_ERROR;
icu_locale.setUnicodeKeywordValue("hc", nullptr, status);
@@ -1490,16 +1540,28 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
Handle<Managed<icu::Locale>> managed_locale =
Managed<icu::Locale>::FromRawPtr(isolate, 0, icu_locale.clone());
- date_time_format->set_icu_locale(*managed_locale);
Handle<Managed<icu::SimpleDateFormat>> managed_format =
Managed<icu::SimpleDateFormat>::FromUniquePtr(isolate, 0,
std::move(icu_date_format));
- date_time_format->set_icu_simple_date_format(*managed_format);
Handle<Managed<icu::DateIntervalFormat>> managed_interval_format =
Managed<icu::DateIntervalFormat>::FromRawPtr(isolate, 0, nullptr);
- date_time_format->set_icu_date_interval_format(*managed_interval_format);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSDateTimeFormat> date_time_format = Handle<JSDateTimeFormat>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ date_time_format->set_flags(0);
+ date_time_format->set_hour_cycle(hc);
+ if (date_style != DateTimeStyle::kUndefined) {
+ date_time_format->set_date_style(date_style);
+ }
+ if (time_style != DateTimeStyle::kUndefined) {
+ date_time_format->set_time_style(time_style);
+ }
+ date_time_format->set_icu_locale(*managed_locale);
+ date_time_format->set_icu_simple_date_format(*managed_format);
+ date_time_format->set_icu_date_interval_format(*managed_interval_format);
return date_time_format;
}
@@ -1516,6 +1578,9 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
case UDAT_EXTENDED_YEAR_FIELD:
case UDAT_YEAR_NAME_FIELD:
return isolate->factory()->year_string();
+ case UDAT_QUARTER_FIELD:
+ case UDAT_STANDALONE_QUARTER_FIELD:
+ return isolate->factory()->quarter_string();
case UDAT_MONTH_FIELD:
case UDAT_STANDALONE_MONTH_FIELD:
return isolate->factory()->month_string();
@@ -1535,6 +1600,8 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
case UDAT_STANDALONE_DAY_FIELD:
return isolate->factory()->weekday_string();
case UDAT_AM_PM_FIELD:
+ case UDAT_AM_PM_MIDNIGHT_NOON_FIELD:
+ case UDAT_FLEXIBLE_DAY_PERIOD_FIELD:
return isolate->factory()->dayPeriod_string();
case UDAT_TIMEZONE_FIELD:
case UDAT_TIMEZONE_RFC_FIELD:
@@ -1546,6 +1613,8 @@ Handle<String> IcuDateFieldIdToDateType(int32_t field_id, Isolate* isolate) {
return isolate->factory()->timeZoneName_string();
case UDAT_ERA_FIELD:
return isolate->factory()->era_string();
+ case UDAT_FRACTIONAL_SECOND_FIELD:
+ return isolate->factory()->fractionalSecond_string();
default:
// Other UDAT_*_FIELD's cannot show up because there is no way to specify
// them via options of Intl.DateTimeFormat.
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index 664ccdcdf7..f4a8ccc8f5 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -32,9 +32,9 @@ namespace internal {
class JSDateTimeFormat : public JSObject {
public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat> Initialize(
- Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
- Handle<Object> locales, Handle<Object> options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSDateTimeFormat> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format);
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index 96e61c2205..6a1529ad33 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -27,7 +27,7 @@ ACCESSORS(JSListFormat, icu_formatter, Managed<icu::ListFormatter>,
SMI_ACCESSORS(JSListFormat, flags, kFlagsOffset)
inline void JSListFormat::set_style(Style style) {
- DCHECK_GT(Style::COUNT, style);
+ DCHECK_GE(StyleBits::kMax, style);
int hints = flags();
hints = StyleBits::update(hints, style);
set_flags(hints);
@@ -38,7 +38,7 @@ inline JSListFormat::Style JSListFormat::style() const {
}
inline void JSListFormat::set_type(Type type) {
- DCHECK_GT(Type::COUNT, type);
+ DCHECK_GE(TypeBits::kMax, type);
int hints = flags();
hints = TypeBits::update(hints, type);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 84691194ec..4f303b1874 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -50,8 +50,6 @@ const char* GetIcuStyleString(JSListFormat::Style style,
return kStandardShort;
case JSListFormat::Style::NARROW:
return kStandardNarrow;
- case JSListFormat::Style::COUNT:
- UNREACHABLE();
}
case JSListFormat::Type::DISJUNCTION:
switch (style) {
@@ -61,8 +59,6 @@ const char* GetIcuStyleString(JSListFormat::Style style,
return kOrShort;
case JSListFormat::Style::NARROW:
return kOrNarrow;
- case JSListFormat::Style::COUNT:
- UNREACHABLE();
}
case JSListFormat::Type::UNIT:
switch (style) {
@@ -72,12 +68,9 @@ const char* GetIcuStyleString(JSListFormat::Style style,
return kUnitShort;
case JSListFormat::Style::NARROW:
return kUnitNarrow;
- case JSListFormat::Style::COUNT:
- UNREACHABLE();
}
- case JSListFormat::Type::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
} // namespace
@@ -114,11 +107,9 @@ JSListFormat::Type get_type(const char* str) {
UNREACHABLE();
}
-MaybeHandle<JSListFormat> JSListFormat::Initialize(
- Isolate* isolate, Handle<JSListFormat> list_format, Handle<Object> locales,
- Handle<Object> input_options) {
- list_format->set_flags(0);
-
+MaybeHandle<JSListFormat> JSListFormat::New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> input_options) {
Handle<JSReceiver> options;
// 3. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
@@ -156,11 +147,8 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
Intl::ResolvedLocale r =
Intl::ResolveLocale(isolate, JSListFormat::GetAvailableLocales(),
requested_locales, matcher, {});
-
- // 11. Set listFormat.[[Locale]] to r.[[Locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- list_format->set_locale(*locale_str);
// 12. Let t be GetOption(options, "type", "string", «"conjunction",
// "disjunction", "unit"», "conjunction").
@@ -171,9 +159,6 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
MAYBE_RETURN(maybe_type, MaybeHandle<JSListFormat>());
Type type_enum = maybe_type.FromJust();
- // 13. Set listFormat.[[Type]] to t.
- list_format->set_type(type_enum);
-
// 14. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
@@ -182,9 +167,6 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
MAYBE_RETURN(maybe_style, MaybeHandle<JSListFormat>());
Style style_enum = maybe_style.FromJust();
- // 15. Set listFormat.[[Style]] to s.
- list_format->set_style(style_enum);
-
icu::Locale icu_locale = r.icu_locale;
UErrorCode status = U_ZERO_ERROR;
icu::ListFormatter* formatter = icu::ListFormatter::createInstance(
@@ -198,7 +180,22 @@ MaybeHandle<JSListFormat> JSListFormat::Initialize(
Handle<Managed<icu::ListFormatter>> managed_formatter =
Managed<icu::ListFormatter>::FromRawPtr(isolate, 0, formatter);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSListFormat> list_format = Handle<JSListFormat>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ list_format->set_flags(0);
list_format->set_icu_formatter(*managed_formatter);
+
+ // 11. Set listFormat.[[Locale]] to r.[[Locale]].
+ list_format->set_locale(*locale_str);
+
+ // 13. Set listFormat.[[Type]] to t.
+ list_format->set_type(type_enum);
+
+ // 15. Set listFormat.[[Style]] to s.
+ list_format->set_style(style_enum);
+
return list_format;
}
@@ -234,9 +231,8 @@ Handle<String> JSListFormat::StyleAsString() const {
return GetReadOnlyRoots().short_string_handle();
case Style::NARROW:
return GetReadOnlyRoots().narrow_string_handle();
- case Style::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
Handle<String> JSListFormat::TypeAsString() const {
@@ -247,9 +243,8 @@ Handle<String> JSListFormat::TypeAsString() const {
return GetReadOnlyRoots().disjunction_string_handle();
case Type::UNIT:
return GetReadOnlyRoots().unit_string_handle();
- case Type::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
namespace {
@@ -375,11 +370,20 @@ MaybeHandle<JSArray> JSListFormat::FormatListToParts(
FormattedListToJSArray);
}
+namespace {
+
+struct CheckListPattern {
+ static const char* key() { return "listPattern"; }
+ static const char* path() { return nullptr; }
+};
+
+} // namespace
+
const std::set<std::string>& JSListFormat::GetAvailableLocales() {
- // Since ListFormatter does not have a method to list all supported
- // locales, use the one in icu::Locale per comments in
- // ICU FR at https://unicode-org.atlassian.net/browse/ICU-20015
- return Intl::GetAvailableLocalesForLocale();
+ static base::LazyInstance<
+ Intl::AvailableLocales<icu::Locale, CheckListPattern>>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index 0284d05d42..df937722e6 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -30,11 +30,11 @@ namespace internal {
class JSListFormat : public JSObject {
public:
- // Initializes relative time format object with properties derived from input
+ // Creates relative time format object with properties derived from input
// locales and options.
- static MaybeHandle<JSListFormat> Initialize(
- Isolate* isolate, Handle<JSListFormat> list_format_holder,
- Handle<Object> locales, Handle<Object> options);
+ static MaybeHandle<JSListFormat> New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> options);
static Handle<JSObject> ResolvedOptions(Isolate* isolate,
Handle<JSListFormat> format_holder);
@@ -64,10 +64,9 @@ class JSListFormat : public JSObject {
//
// ecma402/#sec-properties-of-intl-listformat-instances
enum class Style {
- LONG, // Everything spelled out.
- SHORT, // Abbreviations used when possible.
- NARROW, // Use the shortest possible form.
- COUNT
+ LONG, // Everything spelled out.
+ SHORT, // Abbreviations used when possible.
+ NARROW // Use the shortest possible form.
};
inline void set_style(Style style);
inline Style style() const;
@@ -78,8 +77,7 @@ class JSListFormat : public JSObject {
enum class Type {
CONJUNCTION, // for "and"-based lists (e.g., "A, B and C")
DISJUNCTION, // for "or"-based lists (e.g., "A, B or C"),
- UNIT, // for lists of values with units (e.g., "5 pounds, 12 ounces").
- COUNT
+ UNIT // for lists of values with units (e.g., "5 pounds, 12 ounces").
};
inline void set_type(Type type);
inline Type type() const;
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 509f9a3069..4a66ea9eca 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -313,10 +313,9 @@ Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
} // namespace
-MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
- Handle<JSLocale> locale,
- Handle<String> locale_str,
- Handle<JSReceiver> options) {
+MaybeHandle<JSLocale> JSLocale::New(Isolate* isolate, Handle<Map> map,
+ Handle<String> locale_str,
+ Handle<JSReceiver> options) {
icu::LocaleBuilder builder;
Maybe<bool> maybe_apply =
ApplyOptionsToTag(isolate, locale_str, options, &builder);
@@ -341,8 +340,12 @@ MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
// 31. Set locale.[[Locale]] to r.[[locale]].
Handle<Managed<icu::Locale>> managed_locale =
Managed<icu::Locale>::FromRawPtr(isolate, 0, icu_locale.clone());
- locale->set_icu_locale(*managed_locale);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSLocale> locale = Handle<JSLocale>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ locale->set_icu_locale(*managed_locale);
return locale;
}
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 1a833e0e18..e1806e6b7f 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -27,12 +27,11 @@ namespace internal {
class JSLocale : public JSObject {
public:
- // Initializes locale object with properties derived from input locale string
+ // Creates locale object with properties derived from input locale string
// and options.
- static MaybeHandle<JSLocale> Initialize(Isolate* isolate,
- Handle<JSLocale> locale_holder,
- Handle<String> locale,
- Handle<JSReceiver> options);
+ static MaybeHandle<JSLocale> New(Isolate* isolate, Handle<Map> map,
+ Handle<String> locale,
+ Handle<JSReceiver> options);
static Handle<String> Maximize(Isolate* isolate, String locale);
static Handle<String> Minimize(Isolate* isolate, String locale);
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index bd76dfe556..afdfef89f2 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -66,6 +66,17 @@ inline void JSNumberFormat::set_maximum_fraction_digits(int digits) {
set_flags(hints);
}
+inline void JSNumberFormat::set_style(Style style) {
+ DCHECK_GE(StyleBits::kMax, style);
+ int hints = flags();
+ hints = StyleBits::update(hints, style);
+ set_flags(hints);
+}
+
+inline JSNumberFormat::Style JSNumberFormat::style() const {
+ return StyleBits::decode(flags());
+}
+
CAST_ACCESSOR(JSNumberFormat)
} // namespace internal
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index 67d545e0be..d1e3ef4d0c 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -31,19 +31,9 @@ namespace internal {
namespace {
-// [[Style]] is one of the values "decimal", "percent", "currency",
-// or "unit" identifying the style of the number format.
-// Note: "unit" is added in proposal-unified-intl-numberformat
-enum class Style {
- DECIMAL,
- PERCENT,
- CURRENCY,
- UNIT,
-};
-
// [[CurrencyDisplay]] is one of the values "code", "symbol", "name",
-// or "narrow-symbol" identifying the display of the currency number format.
-// Note: "narrow-symbol" is added in proposal-unified-intl-numberformat
+// or "narrowSymbol" identifying the display of the currency number format.
+// Note: "narrowSymbol" is added in proposal-unified-intl-numberformat
enum class CurrencyDisplay {
CODE,
SYMBOL,
@@ -62,8 +52,8 @@ enum class CurrencySign {
// [[UnitDisplay]] is one of the String values "short", "narrow", or "long",
// specifying whether to display the unit as a symbol, narrow symbol, or
-// localized long name if formatting with the "unit" or "percent" style. It is
-// only used when [[Style]] has the value "unit" or "percent".
+// localized long name if formatting with the "unit" style. It is
+// only used when [[Style]] has the value "unit".
enum class UnitDisplay {
SHORT,
NARROW,
@@ -95,7 +85,7 @@ enum class CompactDisplay {
};
// [[SignDisplay]] is one of the String values "auto", "always", "never", or
-// "except-zero", specifying whether to show the sign on negative numbers
+// "exceptZero", specifying whether to show the sign on negative numbers
// only, positive and negative numbers including zero, neither positive nor
// negative numbers, or positive and negative numbers but not zero.
enum class SignDisplay {
@@ -164,7 +154,9 @@ icu::number::Notation ToICUNotation(Notation notation,
return icu::number::Notation::scientific();
case Notation::ENGINEERING:
return icu::number::Notation::engineering();
+ // 29. If notation is "compact", then
case Notation::COMPACT:
+ // 29. a. Set numberFormat.[[CompactDisplay]] to compactDisplay.
if (compact_display == CompactDisplay::SHORT) {
return icu::number::Notation::compactShort();
}
@@ -195,7 +187,9 @@ std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
CHECK(U_SUCCESS(status));
std::map<const std::string, icu::MeasureUnit> map;
for (auto it = units.begin(); it != units.end(); ++it) {
- if (sanctioned.count(it->getSubtype()) > 0) {
+ // Need to skip none/percent
+ if (sanctioned.count(it->getSubtype()) > 0 &&
+ strcmp("none", it->getType()) != 0) {
map[it->getSubtype()] = *it;
}
}
@@ -304,38 +298,16 @@ bool IsWellFormedCurrencyCode(const std::string& currency) {
return (IsAToZ(currency[0]) && IsAToZ(currency[1]) && IsAToZ(currency[2]));
}
-// Parse the 'style' from the skeleton.
-Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
- // Ex: skeleton as
- // "percent precision-integer rounding-mode-half-up scale/100"
- if (skeleton.indexOf("percent") >= 0 && skeleton.indexOf("scale/100") >= 0) {
- return Style::PERCENT;
- }
- // Ex: skeleton as "currency/TWD .00 rounding-mode-half-up"
- if (skeleton.indexOf("currency") >= 0) {
- return Style::CURRENCY;
- }
- // Ex: skeleton as
- // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow"
- // or special case for "percent .### rounding-mode-half-up"
- if (skeleton.indexOf("measure-unit") >= 0 ||
- skeleton.indexOf("percent") >= 0) {
- return Style::UNIT;
- }
- // Ex: skeleton as ".### rounding-mode-half-up"
- return Style::DECIMAL;
-}
-
// Return the style as a String.
-Handle<String> StyleAsString(Isolate* isolate, Style style) {
+Handle<String> StyleAsString(Isolate* isolate, JSNumberFormat::Style style) {
switch (style) {
- case Style::PERCENT:
+ case JSNumberFormat::Style::PERCENT:
return ReadOnlyRoots(isolate).percent_string_handle();
- case Style::CURRENCY:
+ case JSNumberFormat::Style::CURRENCY:
return ReadOnlyRoots(isolate).currency_string_handle();
- case Style::UNIT:
+ case JSNumberFormat::Style::UNIT:
return ReadOnlyRoots(isolate).unit_string_handle();
- case Style::DECIMAL:
+ case JSNumberFormat::Style::DECIMAL:
return ReadOnlyRoots(isolate).decimal_string_handle();
}
UNREACHABLE();
@@ -357,7 +329,7 @@ Handle<String> CurrencyDisplayString(Isolate* isolate,
// Ex: skeleton as
// "currency/TWD .00 rounding-mode-half-up unit-width-narrow;
if (skeleton.indexOf("unit-width-narrow") >= 0) {
- return ReadOnlyRoots(isolate).narrow_symbol_string_handle();
+ return ReadOnlyRoots(isolate).narrowSymbol_string_handle();
}
// Ex: skeleton as "currency/TWD .00 rounding-mode-half-up"
return ReadOnlyRoots(isolate).symbol_string_handle();
@@ -480,11 +452,13 @@ Handle<String> SignDisplayString(Isolate* isolate,
// "currency/TWD .00 rounding-mode-half-up sign-except-zero"
if (skeleton.indexOf("sign-accounting-except-zero") >= 0 ||
skeleton.indexOf("sign-except-zero") >= 0) {
- return ReadOnlyRoots(isolate).except_zero_string_handle();
+ return ReadOnlyRoots(isolate).exceptZero_string_handle();
}
return ReadOnlyRoots(isolate).auto_string_handle();
}
+} // anonymous namespace
+
// Return the minimum integer digits by counting the number of '0' after
// "integer-width/+" in the skeleton.
// Ex: Return 15 for skeleton as
@@ -492,7 +466,8 @@ Handle<String> SignDisplayString(Isolate* isolate,
// 1
// 123456789012345
// Return default value as 1 if there are no "integer-width/+".
-int32_t MinimumIntegerDigitsFromSkeleton(const icu::UnicodeString& skeleton) {
+int32_t JSNumberFormat::MinimumIntegerDigitsFromSkeleton(
+ const icu::UnicodeString& skeleton) {
// count the number of 0 after "integer-width/+"
icu::UnicodeString search("integer-width/+");
int32_t index = skeleton.indexOf(search);
@@ -515,8 +490,8 @@ int32_t MinimumIntegerDigitsFromSkeleton(const icu::UnicodeString& skeleton) {
// 123
// 4567
// Set The minimum as 3 and maximum as 7.
-bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton,
- int32_t* minimum, int32_t* maximum) {
+bool JSNumberFormat::FractionDigitsFromSkeleton(
+ const icu::UnicodeString& skeleton, int32_t* minimum, int32_t* maximum) {
icu::UnicodeString search(".");
int32_t index = skeleton.indexOf(search);
if (index < 0) return false;
@@ -542,8 +517,8 @@ bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton,
// 12345
// 6789012
// Set The minimum as 5 and maximum as 12.
-bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
- int32_t* minimum, int32_t* maximum) {
+bool JSNumberFormat::SignificantDigitsFromSkeleton(
+ const icu::UnicodeString& skeleton, int32_t* minimum, int32_t* maximum) {
icu::UnicodeString search("@");
int32_t index = skeleton.indexOf(search);
if (index < 0) return false;
@@ -561,6 +536,8 @@ bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
return true;
}
+namespace {
+
// Ex: percent .### rounding-mode-half-up
// Special case for "percent"
// Ex: "measure-unit/length-kilometer per-measure-unit/duration-hour .###
@@ -630,6 +607,34 @@ std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
} // anonymous namespace
+icu::number::LocalizedNumberFormatter
+JSNumberFormat::SetDigitOptionsToFormatter(
+ const icu::number::LocalizedNumberFormatter& icu_number_formatter,
+ const Intl::NumberFormatDigitOptions& digit_options) {
+ icu::number::LocalizedNumberFormatter result = icu_number_formatter;
+ if (digit_options.minimum_integer_digits > 1) {
+ result = result.integerWidth(icu::number::IntegerWidth::zeroFillTo(
+ digit_options.minimum_integer_digits));
+ }
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // Value -1 of minimum_significant_digits represent the roundingtype is
+ // "compact-rounding".
+ if (digit_options.minimum_significant_digits < 0) {
+ return result;
+ }
+ }
+ icu::number::Precision precision =
+ (digit_options.minimum_significant_digits > 0)
+ ? icu::number::Precision::minMaxSignificantDigits(
+ digit_options.minimum_significant_digits,
+ digit_options.maximum_significant_digits)
+ : icu::number::Precision::minMaxFraction(
+ digit_options.minimum_fraction_digits,
+ digit_options.maximum_fraction_digits);
+
+ return result.precision(precision);
+}
+
// static
// ecma402 #sec-intl.numberformat.prototype.resolvedoptions
Handle<JSObject> JSNumberFormat::ResolvedOptions(
@@ -642,9 +647,6 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
icu::UnicodeString skeleton = icu_number_formatter->toSkeleton(status);
CHECK(U_SUCCESS(status));
- std::string s_str;
- s_str = skeleton.toUTF8String<std::string>(s_str);
-
// 4. Let options be ! ObjectCreate(%ObjectPrototype%).
Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
@@ -680,7 +682,7 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
Just(kDontThrow))
.FromJust());
}
- Style style = StyleFromSkeleton(skeleton);
+ JSNumberFormat::Style style = number_format->style();
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->style_string(),
StyleAsString(isolate, style), Just(kDontThrow))
@@ -706,15 +708,15 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
}
if (FLAG_harmony_intl_numberformat_unified) {
- std::string unit = UnitFromSkeleton(skeleton);
- if (!unit.empty()) {
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->unit_string(),
- isolate->factory()->NewStringFromAsciiChecked(unit.c_str()),
- Just(kDontThrow))
- .FromJust());
- }
- if (style == Style::UNIT || style == Style::PERCENT) {
+ if (style == JSNumberFormat::Style::UNIT) {
+ std::string unit = UnitFromSkeleton(skeleton);
+ if (!unit.empty()) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->unit_string(),
+ isolate->factory()->NewStringFromAsciiChecked(unit.c_str()),
+ Just(kDontThrow))
+ .FromJust());
+ }
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->unitDisplay_string(),
UnitDisplayString(isolate, skeleton), Just(kDontThrow))
@@ -827,10 +829,10 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
}
// static
-MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
- Isolate* isolate, Handle<JSNumberFormat> number_format,
- Handle<Object> locales, Handle<Object> options_obj) {
- number_format->set_flags(0);
+MaybeHandle<JSNumberFormat> JSNumberFormat::New(Isolate* isolate,
+ Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> options_obj) {
Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
@@ -898,7 +900,6 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
// 9. Set numberFormat.[[Locale]] to r.[[locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- number_format->set_locale(*locale_str);
// 11. Let dataLocale be r.[[dataLocale]].
@@ -911,17 +912,19 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
const char* service = "Intl.NumberFormat";
std::vector<const char*> style_str_values({"decimal", "percent", "currency"});
- std::vector<Style> style_enum_values(
- {Style::DECIMAL, Style::PERCENT, Style::CURRENCY});
+ std::vector<JSNumberFormat::Style> style_enum_values(
+ {JSNumberFormat::Style::DECIMAL, JSNumberFormat::Style::PERCENT,
+ JSNumberFormat::Style::CURRENCY});
if (FLAG_harmony_intl_numberformat_unified) {
style_str_values.push_back("unit");
- style_enum_values.push_back(Style::UNIT);
+ style_enum_values.push_back(JSNumberFormat::Style::UNIT);
}
- Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
- isolate, options, "style", service, style_str_values, style_enum_values,
- Style::DECIMAL);
+ Maybe<JSNumberFormat::Style> maybe_style =
+ Intl::GetStringOption<JSNumberFormat::Style>(
+ isolate, options, "style", service, style_str_values,
+ style_enum_values, JSNumberFormat::Style::DECIMAL);
MAYBE_RETURN(maybe_style, MaybeHandle<JSNumberFormat>());
- Style style = maybe_style.FromJust();
+ JSNumberFormat::Style style = maybe_style.FromJust();
// 13. Set numberFormat.[[Style]] to style.
@@ -952,14 +955,14 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
// 16. If style is "currency" and currency is undefined, throw a TypeError
// exception.
- if (style == Style::CURRENCY && !found_currency.FromJust()) {
+ if (style == JSNumberFormat::Style::CURRENCY && !found_currency.FromJust()) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCurrencyCode),
JSNumberFormat);
}
// 17. If style is "currency", then
int c_digits = 0;
icu::UnicodeString currency_ustr;
- if (style == Style::CURRENCY) {
+ if (style == JSNumberFormat::Style::CURRENCY) {
// a. Let currency be the result of converting currency to upper case as
// specified in 6.1
std::transform(currency.begin(), currency.end(), currency.begin(), toupper);
@@ -975,7 +978,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
std::vector<CurrencyDisplay> currency_display_enum_values(
{CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL, CurrencyDisplay::NAME});
if (FLAG_harmony_intl_numberformat_unified) {
- currency_display_str_values.push_back("narrow-symbol");
+ currency_display_str_values.push_back("narrowSymbol");
currency_display_enum_values.push_back(CurrencyDisplay::NARROW_SYMBOL);
}
Maybe<CurrencyDisplay> maybe_currency_display =
@@ -1020,13 +1023,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>());
UnitDisplay unit_display = maybe_unit_display.FromJust();
- // If style is "percent", then
- if (style == Style::PERCENT) {
- // Let unit be "concentr-percent".
- unit = "percent";
- }
- // If style is "unit" or "percent", then
- if (style == Style::PERCENT || style == Style::UNIT) {
+ // If style is "unit", then
+ if (style == JSNumberFormat::Style::UNIT) {
// If unit is undefined, throw a TypeError exception.
if (unit == "") {
THROW_NEW_ERROR(
@@ -1070,12 +1068,12 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
}
}
- if (style == Style::PERCENT) {
+ if (style == JSNumberFormat::Style::PERCENT) {
icu_number_formatter = icu_number_formatter.unit(icu::NoUnit::percent())
.scale(icu::number::Scale::powerOfTen(2));
}
- if (style == Style::CURRENCY) {
+ if (style == JSNumberFormat::Style::CURRENCY) {
// 19. If style is "currency", set numberFormat.[[CurrencyDisplay]] to
// currencyDisplay.
@@ -1099,19 +1097,19 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
}
}
- // 20. If style is "currency", then
+ // 23. If style is "currency", then
int mnfd_default, mxfd_default;
- if (style == Style::CURRENCY) {
+ if (style == JSNumberFormat::Style::CURRENCY) {
// a. Let mnfdDefault be cDigits.
// b. Let mxfdDefault be cDigits.
mnfd_default = c_digits;
mxfd_default = c_digits;
+ // 24. Else,
} else {
- // 21. Else,
// a. Let mnfdDefault be 0.
mnfd_default = 0;
// b. If style is "percent", then
- if (style == Style::PERCENT) {
+ if (style == JSNumberFormat::Style::PERCENT) {
// i. Let mxfdDefault be 0.
mxfd_default = 0;
} else {
@@ -1120,51 +1118,11 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
mxfd_default = 3;
}
}
- // 22. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
- // mnfdDefault, mxfdDefault).
- Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
- Intl::SetNumberFormatDigitOptions(isolate, options, mnfd_default,
- mxfd_default);
- MAYBE_RETURN(maybe_digit_options, Handle<JSNumberFormat>());
- Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
-
- icu::number::Precision precision =
- (digit_options.minimum_significant_digits > 0)
- ? icu::number::Precision::minMaxSignificantDigits(
- digit_options.minimum_significant_digits,
- digit_options.maximum_significant_digits)
- : icu::number::Precision::minMaxFraction(
- digit_options.minimum_fraction_digits,
- digit_options.maximum_fraction_digits);
-
- if (digit_options.minimum_significant_digits > 0) {
- // Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits
- // uncondictionally while the unified number proposal eventually will only
- // record either (Min|Max)imumFractionDigits or
- // (Min|Max)imumSignaficantDigits Since LocalizedNumberFormatter can only
- // remember one set, and during 2019-1-17 ECMA402 meeting that the committee
- // decide not to take a PR to address that prior to the unified number
- // proposal, we have to add these two 5 bits int into flags to remember the
- // (Min|Max)imumFractionDigits while (Min|Max)imumSignaficantDigits is
- // present.
- // TODO(ftang) remove the following two lines once we ship
- // int-number-format-unified
- number_format->set_minimum_fraction_digits(
- digit_options.minimum_fraction_digits);
- number_format->set_maximum_fraction_digits(
- digit_options.maximum_fraction_digits);
- }
-
- icu_number_formatter = icu_number_formatter.precision(precision);
- if (digit_options.minimum_integer_digits > 1) {
- icu_number_formatter =
- icu_number_formatter.integerWidth(icu::number::IntegerWidth::zeroFillTo(
- digit_options.minimum_integer_digits));
- }
+ Notation notation = Notation::STANDARD;
if (FLAG_harmony_intl_numberformat_unified) {
- // Let notation be ? GetOption(options, "notation", "string", « "standard",
- // "scientific", "engineering", "compact" », "standard").
+ // 25. Let notation be ? GetOption(options, "notation", "string", «
+ // "standard", "scientific", "engineering", "compact" », "standard").
Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>(
isolate, options, "notation", service,
{"standard", "scientific", "engineering", "compact"},
@@ -1172,10 +1130,23 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Notation::COMPACT},
Notation::STANDARD);
MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>());
- Notation notation = maybe_notation.FromJust();
+ notation = maybe_notation.FromJust();
+ }
+
+ // 27. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
+ // mnfdDefault, mxfdDefault).
+ Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
+ Intl::SetNumberFormatDigitOptions(isolate, options, mnfd_default,
+ mxfd_default,
+ notation == Notation::COMPACT);
+ MAYBE_RETURN(maybe_digit_options, Handle<JSNumberFormat>());
+ Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
+ icu_number_formatter = JSNumberFormat::SetDigitOptionsToFormatter(
+ icu_number_formatter, digit_options);
- // Let compactDisplay be ? GetOption(options, "compactDisplay", "string", «
- // "short", "long" », "short").
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // 28. Let compactDisplay be ? GetOption(options, "compactDisplay",
+ // "string", « "short", "long" », "short").
Maybe<CompactDisplay> maybe_compact_display =
Intl::GetStringOption<CompactDisplay>(
isolate, options, "compactDisplay", service, {"short", "long"},
@@ -1184,6 +1155,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
CompactDisplay compact_display = maybe_compact_display.FromJust();
+ // 26. Set numberFormat.[[Notation]] to notation.
// The default notation in ICU is Simple, which mapped from STANDARD
// so we can skip setting it.
if (notation != Notation::STANDARD) {
@@ -1191,30 +1163,31 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
ToICUNotation(notation, compact_display));
}
}
- // 23. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
+ // 30. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
// undefined, true).
bool use_grouping = true;
Maybe<bool> found_use_grouping = Intl::GetBoolOption(
isolate, options, "useGrouping", service, &use_grouping);
MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
- // 24. Set numberFormat.[[UseGrouping]] to useGrouping.
+ // 31. Set numberFormat.[[UseGrouping]] to useGrouping.
if (!use_grouping) {
icu_number_formatter = icu_number_formatter.grouping(
UNumberGroupingStrategy::UNUM_GROUPING_OFF);
}
if (FLAG_harmony_intl_numberformat_unified) {
- // Let signDisplay be ? GetOption(options, "signDisplay", "string", «
- // "auto", "never", "always", "except-zero" », "auto").
+ // 32. Let signDisplay be ? GetOption(options, "signDisplay", "string", «
+ // "auto", "never", "always", "exceptZero" », "auto").
Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>(
isolate, options, "signDisplay", service,
- {"auto", "never", "always", "except-zero"},
+ {"auto", "never", "always", "exceptZero"},
{SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
SignDisplay::EXCEPT_ZERO},
SignDisplay::AUTO);
MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>());
SignDisplay sign_display = maybe_sign_display.FromJust();
+ // 33. Set numberFormat.[[SignDisplay]] to signDisplay.
// The default sign in ICU is UNUM_SIGN_AUTO which is mapped from
// SignDisplay::AUTO and CurrencySign::STANDARD so we can skip setting
// under that values for optimization.
@@ -1244,6 +1217,33 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Managed<icu::number::LocalizedNumberFormatter>::FromRawPtr(
isolate, 0,
new icu::number::LocalizedNumberFormatter(icu_number_formatter));
+
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSNumberFormat> number_format = Handle<JSNumberFormat>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ number_format->set_flags(0);
+ number_format->set_style(style);
+ number_format->set_locale(*locale_str);
+
+ if (digit_options.minimum_significant_digits > 0) {
+ // The current ECMA 402 spec mandates recording (Min|Max)imumFractionDigits
+ // unconditionally, while the unified number proposal eventually will only
+ // record either (Min|Max)imumFractionDigits or
+ // (Min|Max)imumSignificantDigits. Since LocalizedNumberFormatter can only
+ // remember one set, and during 2019-1-17 ECMA402 meeting the committee
+ // decided not to take a PR to address that prior to the unified number
+ // proposal, we have to add these two 5-bit ints into flags to remember the
+ // (Min|Max)imumFractionDigits while (Min|Max)imumSignificantDigits is
+ // present.
+ // TODO(ftang) remove the following two lines once we ship
+ // int-number-format-unified
+ number_format->set_minimum_fraction_digits(
+ digit_options.minimum_fraction_digits);
+ number_format->set_maximum_fraction_digits(
+ digit_options.maximum_fraction_digits);
+ }
+
number_format->set_icu_number_formatter(*managed_number_formatter);
number_format->set_bound_format(*factory->undefined_value());
@@ -1417,7 +1417,7 @@ namespace {
Maybe<int> ConstructParts(Isolate* isolate, const icu::UnicodeString& formatted,
icu::FieldPositionIterator* fp_iter,
Handle<JSArray> result, int start_index,
- Handle<Object> numeric_obj, Handle<String> unit) {
+ Handle<Object> numeric_obj, bool style_is_unit) {
DCHECK(numeric_obj->IsNumeric());
int32_t length = formatted.length();
int index = start_index;
@@ -1442,21 +1442,23 @@ Maybe<int> ConstructParts(Isolate* isolate, const icu::UnicodeString& formatted,
for (auto it = parts.begin(); it < parts.end(); it++) {
NumberFormatSpan part = *it;
- Handle<String> field_type_string =
- part.field_id == -1
- ? isolate->factory()->literal_string()
- : Intl::NumberFieldToType(isolate, numeric_obj, part.field_id);
+ Handle<String> field_type_string = isolate->factory()->literal_string();
+ if (part.field_id != -1) {
+ if (style_is_unit && static_cast<UNumberFormatFields>(part.field_id) ==
+ UNUM_PERCENT_FIELD) {
+ // Special case when style is unit.
+ field_type_string = isolate->factory()->unit_string();
+ } else {
+ field_type_string =
+ Intl::NumberFieldToType(isolate, numeric_obj, part.field_id);
+ }
+ }
Handle<String> substring;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, substring,
Intl::ToString(isolate, formatted, part.begin_pos, part.end_pos),
Nothing<int>());
- if (unit.is_null()) {
- Intl::AddElement(isolate, result, index, field_type_string, substring);
- } else {
- Intl::AddElement(isolate, result, index, field_type_string, substring,
- isolate->factory()->unit_string(), unit);
- }
+ Intl::AddElement(isolate, result, index, field_type_string, substring);
++index;
}
JSObject::ValidateElements(*result);
@@ -1480,16 +1482,26 @@ MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
MAYBE_RETURN(maybe_format, Handle<JSArray>());
Handle<JSArray> result = factory->NewJSArray(0);
- Maybe<int> maybe_format_to_parts =
- ConstructParts(isolate, maybe_format.FromJust(), &fp_iter, result, 0,
- numeric_obj, Handle<String>());
+ Maybe<int> maybe_format_to_parts = ConstructParts(
+ isolate, maybe_format.FromJust(), &fp_iter, result, 0, numeric_obj,
+ number_format->style() == JSNumberFormat::Style::UNIT);
MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
return result;
}
+namespace {
+
+struct CheckNumberElements {
+ static const char* key() { return "NumberElements"; }
+ static const char* path() { return nullptr; }
+};
+
+} // namespace
+
const std::set<std::string>& JSNumberFormat::GetAvailableLocales() {
- static base::LazyInstance<Intl::AvailableLocales<icu::NumberFormat>>::type
+ static base::LazyInstance<
+ Intl::AvailableLocales<icu::NumberFormat, CheckNumberElements>>::type
available_locales = LAZY_INSTANCE_INITIALIZER;
return available_locales.Pointer()->Get();
}
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 6c59e76f7a..2979ab10f4 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -17,14 +17,15 @@
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
#include "src/objects/objects.h"
-#include "unicode/numberformatter.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace U_ICU_NAMESPACE {
-class NumberFormat;
class UnicodeString;
+namespace number {
+class LocalizedNumberFormatter;
+} // namespace number
} // namespace U_ICU_NAMESPACE
namespace v8 {
@@ -33,9 +34,9 @@ namespace internal {
class JSNumberFormat : public JSObject {
public:
// ecma402/#sec-initializenumberformat
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> Initialize(
- Isolate* isolate, Handle<JSNumberFormat> number_format,
- Handle<Object> locales, Handle<Object> options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
// ecma402/#sec-unwrapnumberformat
V8_WARN_UNUSED_RESULT static MaybeHandle<JSNumberFormat> UnwrapNumberFormat(
@@ -56,6 +57,17 @@ class JSNumberFormat : public JSObject {
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
+ // Helper functions shared with JSPluralRules.
+ static int32_t MinimumIntegerDigitsFromSkeleton(
+ const icu::UnicodeString& skeleton);
+ static bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton,
+ int32_t* minimum, int32_t* maximum);
+ static bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
+ int32_t* minimum, int32_t* maximum);
+ static icu::number::LocalizedNumberFormatter SetDigitOptionsToFormatter(
+ const icu::number::LocalizedNumberFormatter& icu_number_formatter,
+ const Intl::NumberFormatDigitOptions& digit_options);
+
DECL_CAST(JSNumberFormat)
DECL_PRINTER(JSNumberFormat)
DECL_VERIFIER(JSNumberFormat)
@@ -80,6 +92,14 @@ class JSNumberFormat : public JSObject {
inline int maximum_fraction_digits() const;
inline void set_maximum_fraction_digits(int digits);
+ // [[Style]] is one of the values "decimal", "percent", "currency",
+ // or "unit" identifying the style of the number format.
+ // Note: "unit" is added in proposal-unified-intl-numberformat
+ enum class Style { DECIMAL, PERCENT, CURRENCY, UNIT };
+
+ inline void set_style(Style style);
+ inline Style style() const;
+
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_JSNUMBER_FORMAT_FIELDS)
@@ -87,13 +107,18 @@ class JSNumberFormat : public JSObject {
// Bit positions in |flags|.
#define FLAGS_BIT_FIELDS(V, _) \
V(MinimumFractionDigitsBits, int, 5, _) \
- V(MaximumFractionDigitsBits, int, 5, _)
+ V(MaximumFractionDigitsBits, int, 5, _) \
+ V(StyleBits, Style, 2, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
STATIC_ASSERT(20 <= MinimumFractionDigitsBits::kMax);
STATIC_ASSERT(20 <= MaximumFractionDigitsBits::kMax);
+ STATIC_ASSERT(Style::DECIMAL <= StyleBits::kMax);
+ STATIC_ASSERT(Style::PERCENT <= StyleBits::kMax);
+ STATIC_ASSERT(Style::CURRENCY <= StyleBits::kMax);
+ STATIC_ASSERT(Style::UNIT <= StyleBits::kMax);
DECL_ACCESSORS(locale, String)
DECL_ACCESSORS(icu_number_formatter,
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 6b7a7d72f0..10672d4443 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -8,6 +8,7 @@
#include "src/objects/js-objects.h"
#include "src/heap/heap-write-barrier.h"
+#include "src/objects/elements.h"
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/feedback-vector.h"
@@ -29,17 +30,17 @@ namespace v8 {
namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(JSObject, JSReceiver)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator, JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction, JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSDate, JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSObject)
OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy)
JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {}
OBJECT_CONSTRUCTORS_IMPL(JSMessageObject, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSPrimitiveWrapper)
OBJECT_CONSTRUCTORS_IMPL(JSStringIterator, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSValue, JSObject)
NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
@@ -48,13 +49,10 @@ CAST_ACCESSOR(JSBoundFunction)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSIteratorResult)
CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSStringIterator)
-CAST_ACCESSOR(JSValue)
MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
@@ -130,11 +128,6 @@ bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject object) {
ACCESSORS(JSReceiver, raw_properties_or_hash, Object, kPropertiesOrHashOffset)
-FixedArrayBase JSObject::elements() const {
- Object array = READ_FIELD(*this, kElementsOffset);
- return FixedArrayBase::cast(array);
-}
-
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(*object);
ElementsKind elements_kind = object->map().elements_kind();
@@ -225,39 +218,34 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
void JSObject::SetMapAndElements(Handle<JSObject> object, Handle<Map> new_map,
Handle<FixedArrayBase> value) {
- JSObject::MigrateToMap(object, new_map);
+ Isolate* isolate = object->GetIsolate();
+ JSObject::MigrateToMap(isolate, object, new_map);
DCHECK((object->map().has_fast_smi_or_object_elements() ||
- (*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
+ (*value == ReadOnlyRoots(isolate).empty_fixed_array()) ||
object->map().has_fast_string_wrapper_elements()) ==
- (value->map() == object->GetReadOnlyRoots().fixed_array_map() ||
- value->map() == object->GetReadOnlyRoots().fixed_cow_array_map()));
- DCHECK((*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
+ (value->map() == ReadOnlyRoots(isolate).fixed_array_map() ||
+ value->map() == ReadOnlyRoots(isolate).fixed_cow_array_map()));
+ DCHECK((*value == ReadOnlyRoots(isolate).empty_fixed_array()) ||
(object->map().has_fast_double_elements() ==
value->IsFixedDoubleArray()));
object->set_elements(*value);
}
-void JSObject::set_elements(FixedArrayBase value, WriteBarrierMode mode) {
- WRITE_FIELD(*this, kElementsOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kElementsOffset, value, mode);
-}
-
void JSObject::initialize_elements() {
FixedArrayBase elements = map().GetInitialElements();
- WRITE_FIELD(*this, kElementsOffset, elements);
+ set_elements(elements, SKIP_WRITE_BARRIER);
}
-InterceptorInfo JSObject::GetIndexedInterceptor() {
- return map().GetIndexedInterceptor();
+DEF_GETTER(JSObject, GetIndexedInterceptor, InterceptorInfo) {
+ return map(isolate).GetIndexedInterceptor(isolate);
}
-InterceptorInfo JSObject::GetNamedInterceptor() {
- return map().GetNamedInterceptor();
+DEF_GETTER(JSObject, GetNamedInterceptor, InterceptorInfo) {
+ return map(isolate).GetNamedInterceptor(isolate);
}
-int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
-
-int JSObject::GetHeaderSize(const Map map) {
+// static
+int JSObject::GetHeaderSize(Map map) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
@@ -268,7 +256,7 @@ int JSObject::GetHeaderSize(const Map map) {
}
// static
-int JSObject::GetEmbedderFieldsStartOffset(const Map map) {
+int JSObject::GetEmbedderFieldsStartOffset(Map map) {
// Embedder fields are located after the object header.
return GetHeaderSize(map);
}
@@ -278,7 +266,7 @@ int JSObject::GetEmbedderFieldsStartOffset() {
}
// static
-int JSObject::GetEmbedderFieldCount(const Map map) {
+int JSObject::GetEmbedderFieldCount(Map map) {
int instance_size = map.instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
// Embedder fields are located after the object header, whereas in-object
@@ -314,29 +302,39 @@ void JSObject::SetEmbedderField(int index, Smi value) {
EmbedderDataSlot(*this, index).store_smi(value);
}
-bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
+bool JSObject::IsUnboxedDoubleField(FieldIndex index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return IsUnboxedDoubleField(isolate, index);
+}
+
+bool JSObject::IsUnboxedDoubleField(Isolate* isolate, FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
- return map().IsUnboxedDoubleField(index);
+ return map(isolate).IsUnboxedDoubleField(isolate, index);
}
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
-Object JSObject::RawFastPropertyAt(FieldIndex index) {
- DCHECK(!IsUnboxedDoubleField(index));
+Object JSObject::RawFastPropertyAt(FieldIndex index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return RawFastPropertyAt(isolate, index);
+}
+
+Object JSObject::RawFastPropertyAt(Isolate* isolate, FieldIndex index) const {
+ DCHECK(!IsUnboxedDoubleField(isolate, index));
if (index.is_inobject()) {
- return READ_FIELD(*this, index.offset());
+ return TaggedField<Object>::load(isolate, *this, index.offset());
} else {
- return property_array().get(index.outobject_array_index());
+ return property_array(isolate).get(isolate, index.outobject_array_index());
}
}
-double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
+double JSObject::RawFastDoublePropertyAt(FieldIndex index) const {
DCHECK(IsUnboxedDoubleField(index));
return ReadField<double>(index.offset());
}
-uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
+uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
DCHECK(IsUnboxedDoubleField(index));
return ReadField<uint64_t>(index.offset());
}
@@ -417,7 +415,7 @@ int JSObject::GetInObjectPropertyOffset(int index) {
Object JSObject::InObjectPropertyAt(int index) {
int offset = GetInObjectPropertyOffset(index);
- return READ_FIELD(*this, offset);
+ return TaggedField<Object>::load(*this, offset);
}
Object JSObject::InObjectPropertyAtPut(int index, Object value,
@@ -452,10 +450,6 @@ void JSObject::InitializeBody(Map map, int start_offset,
}
}
-Object JSBoundFunction::raw_bound_target_function() const {
- return READ_FIELD(*this, kBoundTargetFunctionOffset);
-}
-
ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
kBoundTargetFunctionOffset)
ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
@@ -466,8 +460,6 @@ ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSGlobalProxy, kGlobalProxyOffset)
-ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
-
FeedbackVector JSFunction::feedback_vector() const {
DCHECK(has_feedback_vector());
return FeedbackVector::cast(raw_feedback_cell().value());
@@ -564,7 +556,8 @@ void JSFunction::set_code_no_write_barrier(Code value) {
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
}
-SharedFunctionInfo JSFunction::shared() const {
+// TODO(ishell): Why relaxed read but release store?
+DEF_GETTER(JSFunction, shared, SharedFunctionInfo) {
return SharedFunctionInfo::cast(
RELAXED_READ_FIELD(*this, kSharedFunctionInfoOffset));
}
@@ -606,11 +599,11 @@ bool JSFunction::has_closure_feedback_cell_array() const {
}
Context JSFunction::context() {
- return Context::cast(READ_FIELD(*this, kContextOffset));
+ return TaggedField<Context, kContextOffset>::load(*this);
}
bool JSFunction::has_context() const {
- return READ_FIELD(*this, kContextOffset).IsContext();
+ return TaggedField<HeapObject, kContextOffset>::load(*this).IsContext();
}
JSGlobalProxy JSFunction::global_proxy() { return context().global_proxy(); }
@@ -619,65 +612,73 @@ NativeContext JSFunction::native_context() {
return context().native_context();
}
-void JSFunction::set_context(Object value) {
+void JSFunction::set_context(HeapObject value) {
DCHECK(value.IsUndefined() || value.IsContext());
WRITE_FIELD(*this, kContextOffset, value);
WRITE_BARRIER(*this, kContextOffset, value);
}
-ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
+ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, HeapObject,
kPrototypeOrInitialMapOffset, map().has_prototype_slot())
-bool JSFunction::has_prototype_slot() const {
- return map().has_prototype_slot();
+DEF_GETTER(JSFunction, has_prototype_slot, bool) {
+ return map(isolate).has_prototype_slot();
}
-Map JSFunction::initial_map() { return Map::cast(prototype_or_initial_map()); }
+DEF_GETTER(JSFunction, initial_map, Map) {
+ return Map::cast(prototype_or_initial_map(isolate));
+}
-bool JSFunction::has_initial_map() {
- DCHECK(has_prototype_slot());
- return prototype_or_initial_map().IsMap();
+DEF_GETTER(JSFunction, has_initial_map, bool) {
+ DCHECK(has_prototype_slot(isolate));
+ return prototype_or_initial_map(isolate).IsMap(isolate);
}
-bool JSFunction::has_instance_prototype() {
- DCHECK(has_prototype_slot());
- return has_initial_map() || !prototype_or_initial_map().IsTheHole();
+DEF_GETTER(JSFunction, has_instance_prototype, bool) {
+ DCHECK(has_prototype_slot(isolate));
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ return has_initial_map(isolate) ||
+ !prototype_or_initial_map(isolate).IsTheHole(
+ GetReadOnlyRoots(isolate));
}
-bool JSFunction::has_prototype() {
- DCHECK(has_prototype_slot());
- return map().has_non_instance_prototype() || has_instance_prototype();
+DEF_GETTER(JSFunction, has_prototype, bool) {
+ DCHECK(has_prototype_slot(isolate));
+ return map(isolate).has_non_instance_prototype() ||
+ has_instance_prototype(isolate);
}
-bool JSFunction::has_prototype_property() {
- return (has_prototype_slot() && IsConstructor()) ||
- IsGeneratorFunction(shared().kind());
+DEF_GETTER(JSFunction, has_prototype_property, bool) {
+ return (has_prototype_slot(isolate) && IsConstructor(isolate)) ||
+ IsGeneratorFunction(shared(isolate).kind());
}
-bool JSFunction::PrototypeRequiresRuntimeLookup() {
- return !has_prototype_property() || map().has_non_instance_prototype();
+DEF_GETTER(JSFunction, PrototypeRequiresRuntimeLookup, bool) {
+ return !has_prototype_property(isolate) ||
+ map(isolate).has_non_instance_prototype();
}
-HeapObject JSFunction::instance_prototype() {
- DCHECK(has_instance_prototype());
- if (has_initial_map()) return initial_map().prototype();
+DEF_GETTER(JSFunction, instance_prototype, HeapObject) {
+ DCHECK(has_instance_prototype(isolate));
+ if (has_initial_map(isolate)) return initial_map(isolate).prototype(isolate);
// When there is no initial map and the prototype is a JSReceiver, the
// initial map field is used for the prototype field.
- return HeapObject::cast(prototype_or_initial_map());
+ return HeapObject::cast(prototype_or_initial_map(isolate));
}
-Object JSFunction::prototype() {
- DCHECK(has_prototype());
+DEF_GETTER(JSFunction, prototype, Object) {
+ DCHECK(has_prototype(isolate));
// If the function's prototype property has been set to a non-JSReceiver
// value, that value is stored in the constructor field of the map.
- if (map().has_non_instance_prototype()) {
- Object prototype = map().GetConstructor();
+ if (map(isolate).has_non_instance_prototype()) {
+ Object prototype = map(isolate).GetConstructor(isolate);
// The map must have a prototype in that field, not a back pointer.
- DCHECK(!prototype.IsMap());
- DCHECK(!prototype.IsFunctionTemplateInfo());
+ DCHECK(!prototype.IsMap(isolate));
+ DCHECK(!prototype.IsFunctionTemplateInfo(isolate));
return prototype;
}
- return instance_prototype();
+ return instance_prototype(isolate);
}
bool JSFunction::is_compiled() const {
@@ -711,8 +712,6 @@ void JSFunction::ResetIfBytecodeFlushed() {
}
}
-ACCESSORS(JSValue, value, Object, kValueOffset)
-
ACCESSORS(JSDate, value, Object, kValueOffset)
ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
ACCESSORS(JSDate, year, Object, kYearOffset)
@@ -738,12 +737,11 @@ int JSMessageObject::GetEndPosition() const {
}
MessageTemplate JSMessageObject::type() const {
- Object value = READ_FIELD(*this, kMessageTypeOffset);
- return MessageTemplateFromInt(Smi::ToInt(value));
+ return MessageTemplateFromInt(raw_type());
}
void JSMessageObject::set_type(MessageTemplate value) {
- WRITE_FIELD(*this, kMessageTypeOffset, Smi::FromInt(static_cast<int>(value)));
+ set_raw_type(static_cast<int>(value));
}
ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
@@ -754,143 +752,154 @@ ACCESSORS(JSMessageObject, bytecode_offset, Smi, kBytecodeOffsetOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
+SMI_ACCESSORS(JSMessageObject, raw_type, kMessageTypeOffset)
-ElementsKind JSObject::GetElementsKind() const {
- ElementsKind kind = map().elements_kind();
+DEF_GETTER(JSObject, GetElementsKind, ElementsKind) {
+ ElementsKind kind = map(isolate).elements_kind();
#if VERIFY_HEAP && DEBUG
- FixedArrayBase fixed_array =
- FixedArrayBase::unchecked_cast(READ_FIELD(*this, kElementsOffset));
+ FixedArrayBase fixed_array = FixedArrayBase::unchecked_cast(
+ TaggedField<HeapObject, kElementsOffset>::load(isolate, *this));
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
- if (ElementsAreSafeToExamine()) {
- Map map = fixed_array.map();
+ if (ElementsAreSafeToExamine(isolate)) {
+ Map map = fixed_array.map(isolate);
if (IsSmiOrObjectElementsKind(kind)) {
- DCHECK(map == GetReadOnlyRoots().fixed_array_map() ||
- map == GetReadOnlyRoots().fixed_cow_array_map());
+ DCHECK(map == GetReadOnlyRoots(isolate).fixed_array_map() ||
+ map == GetReadOnlyRoots(isolate).fixed_cow_array_map());
} else if (IsDoubleElementsKind(kind)) {
- DCHECK(fixed_array.IsFixedDoubleArray() ||
- fixed_array == GetReadOnlyRoots().empty_fixed_array());
+ DCHECK(fixed_array.IsFixedDoubleArray(isolate) ||
+ fixed_array == GetReadOnlyRoots(isolate).empty_fixed_array());
} else if (kind == DICTIONARY_ELEMENTS) {
- DCHECK(fixed_array.IsFixedArray());
- DCHECK(fixed_array.IsNumberDictionary());
+ DCHECK(fixed_array.IsFixedArray(isolate));
+ DCHECK(fixed_array.IsNumberDictionary(isolate));
} else {
DCHECK(kind > DICTIONARY_ELEMENTS || IsFrozenOrSealedElementsKind(kind));
}
- DCHECK(!IsSloppyArgumentsElementsKind(kind) ||
- (elements().IsFixedArray() && elements().length() >= 2));
+ DCHECK(
+ !IsSloppyArgumentsElementsKind(kind) ||
+ (elements(isolate).IsFixedArray() && elements(isolate).length() >= 2));
}
#endif
return kind;
}
-bool JSObject::HasObjectElements() {
- return IsObjectElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, GetElementsAccessor, ElementsAccessor*) {
+ return ElementsAccessor::ForKind(GetElementsKind(isolate));
+}
+
+DEF_GETTER(JSObject, HasObjectElements, bool) {
+ return IsObjectElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasSmiElements() { return IsSmiElementsKind(GetElementsKind()); }
+DEF_GETTER(JSObject, HasSmiElements, bool) {
+ return IsSmiElementsKind(GetElementsKind(isolate));
+}
-bool JSObject::HasSmiOrObjectElements() {
- return IsSmiOrObjectElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasSmiOrObjectElements, bool) {
+ return IsSmiOrObjectElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasDoubleElements() {
- return IsDoubleElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasDoubleElements, bool) {
+ return IsDoubleElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasHoleyElements() {
- return IsHoleyElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasHoleyElements, bool) {
+ return IsHoleyElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasFastElements() {
- return IsFastElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasFastElements, bool) {
+ return IsFastElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasFastPackedElements() {
- return IsFastPackedElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasFastPackedElements, bool) {
+ return IsFastPackedElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasDictionaryElements() {
- return GetElementsKind() == DICTIONARY_ELEMENTS;
+DEF_GETTER(JSObject, HasDictionaryElements, bool) {
+ return GetElementsKind(isolate) == DICTIONARY_ELEMENTS;
}
-bool JSObject::HasPackedElements() {
- return GetElementsKind() == PACKED_ELEMENTS;
+DEF_GETTER(JSObject, HasPackedElements, bool) {
+ return GetElementsKind(isolate) == PACKED_ELEMENTS;
}
-bool JSObject::HasFrozenOrSealedElements() {
- return IsFrozenOrSealedElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasFrozenOrSealedElements, bool) {
+ return IsFrozenOrSealedElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasSealedElements() {
- return IsSealedElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasSealedElements, bool) {
+ return IsSealedElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasFastArgumentsElements() {
- return GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+DEF_GETTER(JSObject, HasFastArgumentsElements, bool) {
+ return GetElementsKind(isolate) == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
-bool JSObject::HasSlowArgumentsElements() {
- return GetElementsKind() == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+DEF_GETTER(JSObject, HasSlowArgumentsElements, bool) {
+ return GetElementsKind(isolate) == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
}
-bool JSObject::HasSloppyArgumentsElements() {
- return IsSloppyArgumentsElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasSloppyArgumentsElements, bool) {
+ return IsSloppyArgumentsElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasStringWrapperElements() {
- return IsStringWrapperElementsKind(GetElementsKind());
+DEF_GETTER(JSObject, HasStringWrapperElements, bool) {
+ return IsStringWrapperElementsKind(GetElementsKind(isolate));
}
-bool JSObject::HasFastStringWrapperElements() {
- return GetElementsKind() == FAST_STRING_WRAPPER_ELEMENTS;
+DEF_GETTER(JSObject, HasFastStringWrapperElements, bool) {
+ return GetElementsKind(isolate) == FAST_STRING_WRAPPER_ELEMENTS;
}
-bool JSObject::HasSlowStringWrapperElements() {
- return GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS;
+DEF_GETTER(JSObject, HasSlowStringWrapperElements, bool) {
+ return GetElementsKind(isolate) == SLOW_STRING_WRAPPER_ELEMENTS;
}
-bool JSObject::HasTypedArrayElements() {
- DCHECK(!elements().is_null());
- return map().has_typed_array_elements();
+DEF_GETTER(JSObject, HasTypedArrayElements, bool) {
+ DCHECK(!elements(isolate).is_null());
+ return map(isolate).has_typed_array_elements();
}
#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
- bool JSObject::HasFixed##Type##Elements() { \
- return map().elements_kind() == TYPE##_ELEMENTS; \
+ DEF_GETTER(JSObject, HasFixed##Type##Elements, bool) { \
+ return map(isolate).elements_kind() == TYPE##_ELEMENTS; \
}
TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
#undef FIXED_TYPED_ELEMENTS_CHECK
-bool JSObject::HasNamedInterceptor() { return map().has_named_interceptor(); }
+DEF_GETTER(JSObject, HasNamedInterceptor, bool) {
+ return map(isolate).has_named_interceptor();
+}
-bool JSObject::HasIndexedInterceptor() {
- return map().has_indexed_interceptor();
+DEF_GETTER(JSObject, HasIndexedInterceptor, bool) {
+ return map(isolate).has_indexed_interceptor();
}
-void JSGlobalObject::set_global_dictionary(GlobalDictionary dictionary) {
- DCHECK(IsJSGlobalObject());
- set_raw_properties_or_hash(dictionary);
+DEF_GETTER(JSGlobalObject, global_dictionary, GlobalDictionary) {
+ DCHECK(!HasFastProperties(isolate));
+ DCHECK(IsJSGlobalObject(isolate));
+ return GlobalDictionary::cast(raw_properties_or_hash(isolate));
}
-GlobalDictionary JSGlobalObject::global_dictionary() {
- DCHECK(!HasFastProperties());
+void JSGlobalObject::set_global_dictionary(GlobalDictionary dictionary) {
DCHECK(IsJSGlobalObject());
- return GlobalDictionary::cast(raw_properties_or_hash());
+ set_raw_properties_or_hash(dictionary);
}
-NumberDictionary JSObject::element_dictionary() {
- DCHECK(HasDictionaryElements() || HasSlowStringWrapperElements());
- return NumberDictionary::cast(elements());
+DEF_GETTER(JSObject, element_dictionary, NumberDictionary) {
+ DCHECK(HasDictionaryElements(isolate) ||
+ HasSlowStringWrapperElements(isolate));
+ return NumberDictionary::cast(elements(isolate));
}
-void JSReceiver::initialize_properties() {
- ReadOnlyRoots roots = GetReadOnlyRoots();
+void JSReceiver::initialize_properties(Isolate* isolate) {
+ ReadOnlyRoots roots(isolate);
DCHECK(!ObjectInYoungGeneration(roots.empty_fixed_array()));
DCHECK(!ObjectInYoungGeneration(roots.empty_property_dictionary()));
- if (map().is_dictionary_map()) {
+ if (map(isolate).is_dictionary_map()) {
WRITE_FIELD(*this, kPropertiesOrHashOffset,
roots.empty_property_dictionary());
} else {
@@ -898,36 +907,36 @@ void JSReceiver::initialize_properties() {
}
}
-bool JSReceiver::HasFastProperties() const {
- DCHECK(raw_properties_or_hash().IsSmi() ||
- ((raw_properties_or_hash().IsGlobalDictionary() ||
- raw_properties_or_hash().IsNameDictionary()) ==
- map().is_dictionary_map()));
- return !map().is_dictionary_map();
+DEF_GETTER(JSReceiver, HasFastProperties, bool) {
+ DCHECK(raw_properties_or_hash(isolate).IsSmi() ||
+ ((raw_properties_or_hash(isolate).IsGlobalDictionary(isolate) ||
+ raw_properties_or_hash(isolate).IsNameDictionary(isolate)) ==
+ map(isolate).is_dictionary_map()));
+ return !map(isolate).is_dictionary_map();
}
-NameDictionary JSReceiver::property_dictionary() const {
- DCHECK(!IsJSGlobalObject());
- DCHECK(!HasFastProperties());
-
- Object prop = raw_properties_or_hash();
+DEF_GETTER(JSReceiver, property_dictionary, NameDictionary) {
+ DCHECK(!IsJSGlobalObject(isolate));
+ DCHECK(!HasFastProperties(isolate));
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ Object prop = raw_properties_or_hash(isolate);
if (prop.IsSmi()) {
- return GetReadOnlyRoots().empty_property_dictionary();
+ return GetReadOnlyRoots(isolate).empty_property_dictionary();
}
-
return NameDictionary::cast(prop);
}
// TODO(gsathya): Pass isolate directly to this function and access
// the heap from this.
-PropertyArray JSReceiver::property_array() const {
- DCHECK(HasFastProperties());
-
- Object prop = raw_properties_or_hash();
- if (prop.IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
- return GetReadOnlyRoots().empty_property_array();
+DEF_GETTER(JSReceiver, property_array, PropertyArray) {
+ DCHECK(HasFastProperties(isolate));
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ Object prop = raw_properties_or_hash(isolate);
+ if (prop.IsSmi() || prop == GetReadOnlyRoots(isolate).empty_fixed_array()) {
+ return GetReadOnlyRoots(isolate).empty_property_array();
}
-
return PropertyArray::cast(prop);
}
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 317837a99f..5c4db16206 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -8,6 +8,7 @@
#include "src/codegen/compiler.h"
#include "src/date/date.h"
#include "src/execution/arguments.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/handles/maybe-handles.h"
@@ -194,15 +195,16 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
return Just(!source->IsString() || String::cast(*source).length() == 0);
}
+ Isolate* isolate = target->GetIsolate();
+
// If the target is deprecated, the object will be updated on first store. If
// the source for that store equals the target, this will invalidate the
// cached representation of the source. Preventively upgrade the target.
// Do this on each iteration since any property load could cause deprecation.
if (target->map().is_deprecated()) {
- JSObject::MigrateInstance(Handle<JSObject>::cast(target));
+ JSObject::MigrateInstance(isolate, Handle<JSObject>::cast(target));
}
- Isolate* isolate = target->GetIsolate();
Handle<Map> map(JSReceiver::cast(*source).map(), isolate);
if (!map->IsJSObjectMap()) return Just(false);
@@ -374,8 +376,8 @@ String JSReceiver::class_name() {
TYPED_ARRAYS(SWITCH_KIND)
#undef SWITCH_KIND
}
- if (IsJSValue()) {
- Object value = JSValue::cast(*this).value();
+ if (IsJSPrimitiveWrapper()) {
+ Object value = JSPrimitiveWrapper::cast(*this).value();
if (value.IsBoolean()) return roots.Boolean_string();
if (value.IsString()) return roots.String_string();
if (value.IsNumber()) return roots.Number_string();
@@ -1092,7 +1094,8 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
Maybe<bool> DefinePropertyWithInterceptorInternal(
LookupIterator* it, Handle<InterceptorInfo> interceptor,
- Maybe<ShouldThrow> should_throw, PropertyDescriptor& desc) {
+ Maybe<ShouldThrow> should_throw,
+ PropertyDescriptor& desc) { // NOLINT(runtime/references)
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -1827,6 +1830,13 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
int number_of_own_elements =
object->GetElementsAccessor()->GetCapacity(*object, object->elements());
+
+ if (number_of_own_elements >
+ FixedArray::kMaxLength - number_of_own_descriptors) {
+ isolate->Throw(*isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength));
+ return Nothing<bool>();
+ }
Handle<FixedArray> values_or_entries = isolate->factory()->NewFixedArray(
number_of_own_descriptors + number_of_own_elements);
int count = 0;
@@ -1918,7 +1928,8 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
int length = 0;
for (int i = 0; i < keys->length(); ++i) {
- Handle<Name> key = Handle<Name>::cast(handle(keys->get(i), isolate));
+ Handle<Name> key =
+ Handle<Name>::cast(handle(keys->get(isolate, i), isolate));
if (filter & ONLY_ENUMERABLE) {
PropertyDescriptor descriptor;
@@ -2002,13 +2013,9 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, initial_map,
JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
- Handle<JSObject> result = isolate->factory()->NewJSObjectFromMap(
- initial_map, AllocationType::kYoung, site);
- if (initial_map->is_dictionary_map()) {
- Handle<NameDictionary> dictionary =
- NameDictionary::New(isolate, NameDictionary::kInitialCapacity);
- result->SetProperties(*dictionary);
- }
+ Handle<JSObject> result = isolate->factory()->NewFastOrSlowJSObjectFromMap(
+ initial_map, NameDictionary::kInitialCapacity, AllocationType::kYoung,
+ site);
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
return result;
@@ -2026,13 +2033,7 @@ MaybeHandle<JSObject> JSObject::ObjectCreate(Isolate* isolate,
Map::GetObjectCreateMap(isolate, Handle<HeapObject>::cast(prototype));
// Actually allocate the object.
- Handle<JSObject> object;
- if (map->is_dictionary_map()) {
- object = isolate->factory()->NewSlowJSObjectFromMap(map);
- } else {
- object = isolate->factory()->NewJSObjectFromMap(map);
- }
- return object;
+ return isolate->factory()->NewFastOrSlowJSObjectFromMap(map);
}
void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
@@ -2072,8 +2073,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSBoundFunction::kSize;
case JS_FUNCTION_TYPE:
return JSFunction::GetHeaderSize(function_has_prototype_slot);
- case JS_VALUE_TYPE:
- return JSValue::kSize;
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ return JSPrimitiveWrapper::kSize;
case JS_DATE_TYPE:
return JSDate::kSize;
case JS_ARRAY_TYPE:
@@ -2423,7 +2424,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
// All other JSObjects are rather similar to each other (JSObject,
- // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSValue).
+ // JSGlobalProxy, JSGlobalObject, JSUndetectable, JSPrimitiveWrapper).
default: {
Map map_of_this = map();
Heap* heap = GetHeap();
@@ -2457,9 +2458,9 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JS%sObject", global_object ? "Global " : "");
}
}
- if (IsJSValue()) {
+ if (IsJSPrimitiveWrapper()) {
accumulator->Add(" value = ");
- JSValue::cast(*this).value().ShortPrint(accumulator);
+ JSPrimitiveWrapper::cast(*this).value().ShortPrint(accumulator);
}
accumulator->Put('>');
break;
@@ -2595,11 +2596,11 @@ namespace {
// to temporarily store the inobject properties.
// * If there are properties left in the backing store, install the backing
// store.
-void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
- Isolate* isolate = object->GetIsolate();
+void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
+ Handle<Map> new_map) {
Handle<Map> old_map(object->map(), isolate);
// In case of a regular transition.
- if (new_map->GetBackPointer() == *old_map) {
+ if (new_map->GetBackPointer(isolate) == *old_map) {
// If the map does not add named properties, simply set the map.
if (old_map->NumberOfOwnDescriptors() ==
new_map->NumberOfOwnDescriptors()) {
@@ -2608,7 +2609,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
// If the map adds a new kDescriptor property, simply set the map.
- PropertyDetails details = new_map->GetLastDescriptorDetails();
+ PropertyDetails details = new_map->GetLastDescriptorDetails(isolate);
if (details.location() == kDescriptor) {
object->synchronized_set_map(*new_map);
return;
@@ -2618,14 +2619,14 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// can also simply set the map (modulo a special case for mutable
// double boxes).
FieldIndex index =
- FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
- if (index.is_inobject() ||
- index.outobject_array_index() < object->property_array().length()) {
+ FieldIndex::ForDescriptor(isolate, *new_map, new_map->LastAdded());
+ if (index.is_inobject() || index.outobject_array_index() <
+ object->property_array(isolate).length()) {
// We still need to allocate MutableHeapNumbers for double fields
// if either double field unboxing is disabled or the double field
// is in the PropertyArray backing store (where we don't support
// double field unboxing).
- if (index.is_double() && !new_map->IsUnboxedDoubleField(index)) {
+ if (index.is_double() && !new_map->IsUnboxedDoubleField(isolate, index)) {
auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
object->RawFastPropertyAtPut(index, *value);
}
@@ -2636,7 +2637,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// This migration is a transition from a map that has run out of property
// space. Extend the backing store.
int grow_by = new_map->UnusedPropertyFields() + 1;
- Handle<PropertyArray> old_storage(object->property_array(), isolate);
+ Handle<PropertyArray> old_storage(object->property_array(isolate), isolate);
Handle<PropertyArray> new_storage =
isolate->factory()->CopyPropertyArrayAndGrow(old_storage, grow_by);
@@ -2682,10 +2683,10 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Handle<FixedArray> inobject_props =
isolate->factory()->NewFixedArray(inobject);
- Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors(),
- isolate);
- Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors(),
- isolate);
+ Handle<DescriptorArray> old_descriptors(
+ old_map->instance_descriptors(isolate), isolate);
+ Handle<DescriptorArray> new_descriptors(
+ new_map->instance_descriptors(isolate), isolate);
int old_nof = old_map->NumberOfOwnDescriptors();
int new_nof = new_map->NumberOfOwnDescriptors();
@@ -2713,13 +2714,13 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
} else {
DCHECK_EQ(kData, old_details.kind());
- value = handle(old_descriptors->GetStrongValue(i), isolate);
+ value = handle(old_descriptors->GetStrongValue(isolate, i), isolate);
DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
}
} else {
DCHECK_EQ(kField, old_details.location());
- FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
- if (object->IsUnboxedDoubleField(index)) {
+ FieldIndex index = FieldIndex::ForDescriptor(isolate, *old_map, i);
+ if (object->IsUnboxedDoubleField(isolate, index)) {
uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
if (representation.IsDouble()) {
value = isolate->factory()->NewMutableHeapNumberFromBits(old_bits);
@@ -2727,7 +2728,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
value = isolate->factory()->NewHeapNumberFromBits(old_bits);
}
} else {
- value = handle(object->RawFastPropertyAt(index), isolate);
+ value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (!old_representation.IsDouble() && representation.IsDouble()) {
DCHECK_IMPLIES(old_representation.IsNone(),
value->IsUninitialized(isolate));
@@ -2779,11 +2780,11 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- Object value = inobject_props->get(i);
+ Object value = inobject_props->get(isolate, i);
// Can't use JSObject::FastPropertyAtPut() because proper map was not set
// yet.
- if (new_map->IsUnboxedDoubleField(index)) {
- DCHECK(value.IsMutableHeapNumber());
+ if (new_map->IsUnboxedDoubleField(isolate, index)) {
+ DCHECK(value.IsMutableHeapNumber(isolate));
// Ensure that all bits of the double value are preserved.
object->RawFastDoublePropertyAsBitsAtPut(
index, MutableHeapNumber::cast(value).value_as_bits());
@@ -2818,19 +2819,19 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
object->synchronized_set_map(*new_map);
}
-void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
+void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
+ Handle<Map> new_map,
int expected_additional_properties) {
// The global object is always normalized.
- DCHECK(!object->IsJSGlobalObject());
+ DCHECK(!object->IsJSGlobalObject(isolate));
// JSGlobalProxy must never be normalized
- DCHECK(!object->IsJSGlobalProxy());
+ DCHECK(!object->IsJSGlobalProxy(isolate));
DCHECK_IMPLIES(new_map->is_prototype_map(),
Map::IsPrototypeChainInvalidated(*new_map));
- Isolate* isolate = object->GetIsolate();
HandleScope scope(isolate);
- Handle<Map> map(object->map(), isolate);
+ Handle<Map> map(object->map(isolate), isolate);
// Allocate new content.
int real_size = map->NumberOfOwnDescriptors();
@@ -2844,33 +2845,33 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
Handle<NameDictionary> dictionary =
NameDictionary::New(isolate, property_count);
- Handle<DescriptorArray> descs(map->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descs(map->instance_descriptors(isolate), isolate);
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
- Handle<Name> key(descs->GetKey(i), isolate);
+ Handle<Name> key(descs->GetKey(isolate, i), isolate);
Handle<Object> value;
if (details.location() == kField) {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ FieldIndex index = FieldIndex::ForDescriptor(isolate, *map, i);
if (details.kind() == kData) {
- if (object->IsUnboxedDoubleField(index)) {
+ if (object->IsUnboxedDoubleField(isolate, index)) {
double old_value = object->RawFastDoublePropertyAt(index);
value = isolate->factory()->NewHeapNumber(old_value);
} else {
- value = handle(object->RawFastPropertyAt(index), isolate);
+ value = handle(object->RawFastPropertyAt(isolate, index), isolate);
if (details.representation().IsDouble()) {
- DCHECK(value->IsMutableHeapNumber());
+ DCHECK(value->IsMutableHeapNumber(isolate));
double old_value = Handle<MutableHeapNumber>::cast(value)->value();
value = isolate->factory()->NewHeapNumber(old_value);
}
}
} else {
DCHECK_EQ(kAccessor, details.kind());
- value = handle(object->RawFastPropertyAt(index), isolate);
+ value = handle(object->RawFastPropertyAt(isolate, index), isolate);
}
} else {
DCHECK_EQ(kDescriptor, details.location());
- value = handle(descs->GetStrongValue(i), isolate);
+ value = handle(descs->GetStrongValue(isolate, i), isolate);
}
DCHECK(!value.is_null());
PropertyDetails d(details.kind(), details.attributes(),
@@ -2932,11 +2933,12 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
} // namespace
-void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
+void JSObject::MigrateToMap(Isolate* isolate, Handle<JSObject> object,
+ Handle<Map> new_map,
int expected_additional_properties) {
- if (object->map() == *new_map) return;
- Handle<Map> old_map(object->map(), object->GetIsolate());
- NotifyMapChange(old_map, new_map, object->GetIsolate());
+ if (object->map(isolate) == *new_map) return;
+ Handle<Map> old_map(object->map(isolate), isolate);
+ NotifyMapChange(old_map, new_map, isolate);
if (old_map->is_dictionary_map()) {
// For slow-to-fast migrations JSObject::MigrateSlowToFast()
@@ -2946,7 +2948,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// Slow-to-slow migration is trivial.
object->synchronized_set_map(*new_map);
} else if (!new_map->is_dictionary_map()) {
- MigrateFastToFast(object, new_map);
+ MigrateFastToFast(isolate, object, new_map);
if (old_map->is_prototype_map()) {
DCHECK(!old_map->is_stable());
DCHECK(new_map->is_stable());
@@ -2958,13 +2960,12 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
old_map->set_owns_descriptors(false);
DCHECK(old_map->is_abandoned_prototype_map());
// Ensure that no transition was inserted for prototype migrations.
- DCHECK_EQ(0, TransitionsAccessor(object->GetIsolate(), old_map)
- .NumberOfTransitions());
- DCHECK(new_map->GetBackPointer().IsUndefined());
- DCHECK(object->map() != *old_map);
+ DCHECK_EQ(0, TransitionsAccessor(isolate, old_map).NumberOfTransitions());
+ DCHECK(new_map->GetBackPointer(isolate).IsUndefined(isolate));
+ DCHECK(object->map(isolate) != *old_map);
}
} else {
- MigrateFastToSlow(object, new_map, expected_additional_properties);
+ MigrateFastToSlow(isolate, object, new_map, expected_additional_properties);
}
// Careful: Don't allocate here!
@@ -2978,11 +2979,11 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
void JSObject::ForceSetPrototype(Handle<JSObject> object,
Handle<HeapObject> proto) {
// object.__proto__ = proto;
- Handle<Map> old_map = Handle<Map>(object->map(), object->GetIsolate());
- Handle<Map> new_map =
- Map::Copy(object->GetIsolate(), old_map, "ForceSetPrototype");
- Map::SetPrototype(object->GetIsolate(), new_map, proto);
- JSObject::MigrateToMap(object, new_map);
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> old_map = Handle<Map>(object->map(), isolate);
+ Handle<Map> new_map = Map::Copy(isolate, old_map, "ForceSetPrototype");
+ Map::SetPrototype(isolate, new_map, proto);
+ JSObject::MigrateToMap(isolate, object, new_map);
}
Maybe<bool> JSObject::SetPropertyWithInterceptor(
@@ -3068,31 +3069,30 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
object->synchronized_set_map(*map);
}
-void JSObject::MigrateInstance(Handle<JSObject> object) {
- Handle<Map> original_map(object->map(), object->GetIsolate());
- Handle<Map> map = Map::Update(object->GetIsolate(), original_map);
+void JSObject::MigrateInstance(Isolate* isolate, Handle<JSObject> object) {
+ Handle<Map> original_map(object->map(), isolate);
+ Handle<Map> map = Map::Update(isolate, original_map);
map->set_is_migration_target(true);
- MigrateToMap(object, map);
+ JSObject::MigrateToMap(isolate, object, map);
if (FLAG_trace_migration) {
object->PrintInstanceMigration(stdout, *original_map, *map);
}
#if VERIFY_HEAP
if (FLAG_verify_heap) {
- object->JSObjectVerify(object->GetIsolate());
+ object->JSObjectVerify(isolate);
}
#endif
}
// static
-bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
+bool JSObject::TryMigrateInstance(Isolate* isolate, Handle<JSObject> object) {
DisallowDeoptimization no_deoptimization(isolate);
Handle<Map> original_map(object->map(), isolate);
Handle<Map> new_map;
if (!Map::TryUpdate(isolate, original_map).ToHandle(&new_map)) {
return false;
}
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
if (FLAG_trace_migration && *original_map != object->map()) {
object->PrintInstanceMigration(stdout, *original_map, object->map());
}
@@ -3263,16 +3263,18 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
return GetPropertyAttributesWithInterceptorInternal(it, it->GetInterceptor());
}
-void JSObject::NormalizeProperties(Handle<JSObject> object,
+void JSObject::NormalizeProperties(Isolate* isolate, Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties,
const char* reason) {
if (!object->HasFastProperties()) return;
- Handle<Map> map(object->map(), object->GetIsolate());
- Handle<Map> new_map = Map::Normalize(object->GetIsolate(), map, mode, reason);
+ Handle<Map> map(object->map(), isolate);
+ Handle<Map> new_map =
+ Map::Normalize(isolate, map, map->elements_kind(), mode, reason);
- MigrateToMap(object, new_map, expected_additional_properties);
+ JSObject::MigrateToMap(isolate, object, new_map,
+ expected_additional_properties);
}
void JSObject::MigrateSlowToFast(Handle<JSObject> object,
@@ -3475,7 +3477,7 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
: DICTIONARY_ELEMENTS;
Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, target_kind);
// Set the new map first to satify the elements type assert in set_elements().
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
if (is_sloppy_arguments) {
SloppyArgumentsElements::cast(object->elements())
@@ -3710,7 +3712,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
Map::Copy(isolate, handle(object->map(), isolate), "PreventExtensions");
new_map->set_is_extensible(false);
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
DCHECK(!object->map().is_extensible());
return Just(true);
@@ -3756,6 +3758,21 @@ template void JSObject::ApplyAttributesToDictionary(
Isolate* isolate, ReadOnlyRoots roots, Handle<NumberDictionary> dictionary,
const PropertyAttributes attributes);
+Handle<NumberDictionary> CreateElementDictionary(Isolate* isolate,
+ Handle<JSObject> object) {
+ Handle<NumberDictionary> new_element_dictionary;
+ if (!object->HasTypedArrayElements() && !object->HasDictionaryElements() &&
+ !object->HasSlowStringWrapperElements()) {
+ int length = object->IsJSArray()
+ ? Smi::ToInt(Handle<JSArray>::cast(object)->length())
+ : object->elements().length();
+ new_element_dictionary =
+ length == 0 ? isolate->factory()->empty_slow_element_dictionary()
+ : object->GetElementsAccessor()->Normalize(object);
+ }
+ return new_element_dictionary;
+}
+
template <PropertyAttributes attrs>
Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<JSObject> object, ShouldThrow should_throw) {
@@ -3776,10 +3793,12 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
if (attrs == NONE && !object->map().is_extensible()) return Just(true);
- ElementsKind old_elements_kind = object->map().elements_kind();
- if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind))
- return Just(true);
- if (old_elements_kind == PACKED_FROZEN_ELEMENTS) return Just(true);
+ {
+ ElementsKind old_elements_kind = object->map().elements_kind();
+ if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind))
+ return Just(true);
+ if (old_elements_kind == PACKED_FROZEN_ELEMENTS) return Just(true);
+ }
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
@@ -3808,17 +3827,6 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
RETURN_FAILURE(isolate, should_throw, NewTypeError(message));
}
- Handle<NumberDictionary> new_element_dictionary;
- if (!object->HasTypedArrayElements() && !object->HasDictionaryElements() &&
- !object->HasSlowStringWrapperElements()) {
- int length = object->IsJSArray()
- ? Smi::ToInt(Handle<JSArray>::cast(object)->length())
- : object->elements().length();
- new_element_dictionary =
- length == 0 ? isolate->factory()->empty_slow_element_dictionary()
- : object->GetElementsAccessor()->Normalize(object);
- }
-
Handle<Symbol> transition_marker;
if (attrs == NONE) {
transition_marker = isolate->factory()->nonextensible_symbol();
@@ -3829,6 +3837,31 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
transition_marker = isolate->factory()->frozen_symbol();
}
+ // Currently, there are only have sealed/frozen Object element kinds and
+ // Map::MigrateToMap doesn't handle properties' attributes reconfiguring and
+ // elements kind change in one go. If seal or freeze with Smi or Double
+ // elements kind, we will transition to Object elements kind first to make
+ // sure of valid element access.
+ if (FLAG_enable_sealed_frozen_elements_kind &&
+ (attrs == SEALED || attrs == FROZEN)) {
+ switch (object->map().elements_kind()) {
+ case PACKED_SMI_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
+ JSObject::TransitionElementsKind(object, PACKED_ELEMENTS);
+ break;
+ case HOLEY_SMI_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
+ JSObject::TransitionElementsKind(object, HOLEY_ELEMENTS);
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Make sure we only use this element dictionary in case we can't transition
+ // to sealed, frozen elements kind.
+ Handle<NumberDictionary> new_element_dictionary;
+
Handle<Map> old_map(object->map(), isolate);
old_map = Map::Update(isolate, old_map);
TransitionsAccessor transitions(isolate, old_map);
@@ -3840,16 +3873,22 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS ||
transition_map->has_frozen_or_sealed_elements());
DCHECK(!transition_map->is_extensible());
- JSObject::MigrateToMap(object, transition_map);
+ if (!transition_map->has_frozen_or_sealed_elements()) {
+ new_element_dictionary = CreateElementDictionary(isolate, object);
+ }
+ JSObject::MigrateToMap(isolate, object, transition_map);
} else if (transitions.CanHaveMoreTransitions()) {
// Create a new descriptor array with the appropriate property attributes
Handle<Map> new_map = Map::CopyForPreventExtensions(
isolate, old_map, attrs, transition_marker, "CopyForPreventExtensions");
- JSObject::MigrateToMap(object, new_map);
+ if (!new_map->has_frozen_or_sealed_elements()) {
+ new_element_dictionary = CreateElementDictionary(isolate, object);
+ }
+ JSObject::MigrateToMap(isolate, object, new_map);
} else {
DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map());
// Slow path: need to normalize properties for safety
- NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0,
+ NormalizeProperties(isolate, object, CLEAR_INOBJECT_PROPERTIES, 0,
"SlowPreventExtensions");
// Create a new map, since other objects with this map may be extensible.
@@ -3857,6 +3896,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<Map> new_map = Map::Copy(isolate, handle(object->map(), isolate),
"SlowCopyForPreventExtensions");
new_map->set_is_extensible(false);
+ new_element_dictionary = CreateElementDictionary(isolate, object);
if (!new_element_dictionary.is_null()) {
ElementsKind new_kind =
IsStringWrapperElementsKind(old_map->elements_kind())
@@ -3864,7 +3904,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
: DICTIONARY_ELEMENTS;
new_map->set_elements_kind(new_kind);
}
- JSObject::MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
if (attrs != NONE) {
ReadOnlyRoots roots(isolate);
@@ -3883,6 +3923,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
if (object->map().has_frozen_or_sealed_elements()) {
+ DCHECK(new_element_dictionary.is_null());
return Just(true);
}
@@ -3990,7 +4031,7 @@ bool JSObject::HasEnumerableElements() {
return true;
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
- if (String::cast(JSValue::cast(object).value()).length() > 0) {
+ if (String::cast(JSPrimitiveWrapper::cast(object).value()).length() > 0) {
return true;
}
return object.elements().length() > 0;
@@ -4177,10 +4218,11 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
// static
void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
bool enable_setup_mode) {
+ Isolate* isolate = object->GetIsolate();
if (object->IsJSGlobalObject()) return;
if (enable_setup_mode && PrototypeBenefitsFromNormalization(object)) {
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
- JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
+ JSObject::NormalizeProperties(isolate, object, KEEP_INOBJECT_PROPERTIES, 0,
"NormalizeAsPrototype");
}
if (object->map().is_prototype_map()) {
@@ -4189,10 +4231,9 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
}
} else {
- Handle<Map> new_map = Map::Copy(object->GetIsolate(),
- handle(object->map(), object->GetIsolate()),
- "CopyAsPrototype");
- JSObject::MigrateToMap(object, new_map);
+ Handle<Map> new_map =
+ Map::Copy(isolate, handle(object->map(), isolate), "CopyAsPrototype");
+ JSObject::MigrateToMap(isolate, object, new_map);
object->map().set_is_prototype_map(true);
// Replace the pointer to the exact constructor with the Object function
@@ -4451,7 +4492,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
Handle<Map> new_map =
Map::TransitionToPrototype(isolate, map, Handle<HeapObject>::cast(value));
DCHECK(new_map->prototype() == *value);
- JSObject::MigrateToMap(real_receiver, new_map);
+ JSObject::MigrateToMap(isolate, real_receiver, new_map);
DCHECK(size == object->Size());
return Just(true);
@@ -4481,10 +4522,6 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
object, args->slot_at(first_arg + arg_count - 1), arg_count, mode);
}
-ElementsAccessor* JSObject::GetElementsAccessor() {
- return ElementsAccessor::ForKind(GetElementsKind());
-}
-
void JSObject::ValidateElements(JSObject object) {
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -4560,22 +4597,22 @@ static ElementsKind BestFittingFastElementsKind(JSObject object) {
void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
Handle<Object> value,
PropertyAttributes attributes) {
- DCHECK(object->map().is_extensible());
-
Isolate* isolate = object->GetIsolate();
+ DCHECK(object->map(isolate).is_extensible());
+
uint32_t old_length = 0;
uint32_t new_capacity = 0;
- if (object->IsJSArray()) {
+ if (object->IsJSArray(isolate)) {
CHECK(JSArray::cast(*object).length().ToArrayLength(&old_length));
}
- ElementsKind kind = object->GetElementsKind();
- FixedArrayBase elements = object->elements();
+ ElementsKind kind = object->GetElementsKind(isolate);
+ FixedArrayBase elements = object->elements(isolate);
ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
if (IsSloppyArgumentsElementsKind(kind)) {
- elements = SloppyArgumentsElements::cast(elements).arguments();
+ elements = SloppyArgumentsElements::cast(elements).arguments(isolate);
dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
} else if (IsStringWrapperElementsKind(kind)) {
dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
@@ -4583,7 +4620,7 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
if (attributes != NONE) {
kind = dictionary_kind;
- } else if (elements.IsNumberDictionary()) {
+ } else if (elements.IsNumberDictionary(isolate)) {
kind = ShouldConvertToFastElements(
*object, NumberDictionary::cast(elements), index, &new_capacity)
? BestFittingFastElementsKind(*object)
@@ -4594,8 +4631,9 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
kind = dictionary_kind;
}
- ElementsKind to = value->OptimalElementsKind();
- if (IsHoleyElementsKind(kind) || !object->IsJSArray() || index > old_length) {
+ ElementsKind to = value->OptimalElementsKind(isolate);
+ if (IsHoleyElementsKind(kind) || !object->IsJSArray(isolate) ||
+ index > old_length) {
to = GetHoleyElementsKind(to);
kind = GetHoleyElementsKind(kind);
}
@@ -4603,7 +4641,7 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
accessor->Add(object, index, value, attributes, new_capacity);
- if (object->IsJSArray() && index >= old_length) {
+ if (object->IsJSArray(isolate) && index >= old_length) {
Handle<Object> new_length =
isolate->factory()->NewNumberFromUint(index + 1);
JSArray::cast(*object).set_length(*new_length);
@@ -4658,14 +4696,15 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
UpdateAllocationSite(object, to_kind);
- if (object->elements() == object->GetReadOnlyRoots().empty_fixed_array() ||
+ Isolate* isolate = object->GetIsolate();
+ if (object->elements() == ReadOnlyRoots(isolate).empty_fixed_array() ||
IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
// No change is needed to the elements() buffer, the transition
// only requires a map change.
Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
- MigrateToMap(object, new_map);
+ JSObject::MigrateToMap(isolate, object, new_map);
if (FLAG_trace_elements_transitions) {
- Handle<FixedArrayBase> elms(object->elements(), object->GetIsolate());
+ Handle<FixedArrayBase> elms(object->elements(), isolate);
PrintElementsTransition(stdout, object, from_kind, elms, to_kind, elms);
}
} else {
@@ -4946,6 +4985,17 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
// static
void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function) {
Isolate* const isolate = function->GetIsolate();
+
+ if (function->has_feedback_vector()) {
+ // TODO(984344): Make this a CHECK that feedback vectors are identical to
+ // what we expect once we have removed all bytecode generation differences
+ // between eager and lazy compilation. For now just reset if they aren't
+ // identical
+ FeedbackVector vector = function->feedback_vector();
+ if (vector.length() == vector.metadata().slot_count()) return;
+ function->raw_feedback_cell().reset();
+ }
+
bool needs_feedback_vector = !FLAG_lazy_feedback_allocation;
// We need feedback vector for certain log events, collecting type profile
// and more precise code coverage.
@@ -4995,7 +5045,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
native_context->get(Context::ARRAY_FUNCTION_INDEX), isolate);
if (array_function->IsJSFunction() &&
*function == JSFunction::cast(*array_function)) {
- CacheInitialJSArrayMaps(native_context, new_map);
+ CacheInitialJSArrayMaps(isolate, native_context, new_map);
}
}
@@ -5034,7 +5084,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Map> new_map =
Map::Copy(isolate, handle(function->map(), isolate), "SetPrototype");
- JSObject::MigrateToMap(function, new_map);
+ JSObject::MigrateToMap(isolate, function, new_map);
new_map->SetConstructor(*value);
new_map->set_has_non_instance_prototype(true);
@@ -5145,14 +5195,16 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_MESSAGE_OBJECT_TYPE:
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
+ case JS_FINALIZATION_GROUP_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_PROMISE_TYPE:
case JS_REGEXP_TYPE:
case JS_SET_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_TYPED_ARRAY_TYPE:
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_REF_TYPE:
case JS_WEAK_SET_TYPE:
case WASM_GLOBAL_TYPE:
case WASM_INSTANCE_TYPE:
@@ -5530,7 +5582,7 @@ void JSFunction::ClearTypeFeedbackInfo() {
FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate();
if (vector.ClearSlots(isolate)) {
- IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), *this,
+ IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(),
"ClearTypeFeedbackInfo");
}
}
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 5ac1751c48..bcea3a28df 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -8,6 +8,7 @@
#include "src/objects/embedder-data-slot.h"
#include "src/objects/objects.h"
#include "src/objects/property-array.h"
+#include "torque-generated/class-definitions-tq.h"
#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
@@ -27,16 +28,16 @@ class JSReceiver : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
// Returns true if there is no slow (ie, dictionary) backing store.
- inline bool HasFastProperties() const;
+ DECL_GETTER(HasFastProperties, bool)
// Returns the properties array backing store if it
// exists. Otherwise, returns an empty_property_array when there's a
// Smi (hash code) or an empty_fixed_array for a fast properties
// map.
- inline PropertyArray property_array() const;
+ DECL_GETTER(property_array, PropertyArray)
// Gets slow properties for non-global objects.
- inline NameDictionary property_dictionary() const;
+ DECL_GETTER(property_dictionary, NameDictionary)
// Sets the properties backing store and makes sure any existing hash is moved
// to the new properties store. To clear out the properties store, pass in the
@@ -62,12 +63,13 @@ class JSReceiver : public HeapObject {
// above typed getters and setters to access the properties.
DECL_ACCESSORS(raw_properties_or_hash, Object)
- inline void initialize_properties();
+ inline void initialize_properties(Isolate* isolate);
// Deletes an existing named property in a normalized object.
static void DeleteNormalizedProperty(Handle<JSReceiver> object, int entry);
DECL_CAST(JSReceiver)
+ DECL_VERIFIER(JSReceiver)
// ES6 section 7.1.1 ToPrimitive
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
@@ -275,7 +277,7 @@ class JSReceiver : public HeapObject {
// properties.
// Note that the map of JSObject changes during execution to enable inline
// caching.
-class JSObject : public JSReceiver {
+class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
public:
static bool IsUnmodifiedApiObject(FullObjectSlot o);
@@ -290,78 +292,63 @@ class JSObject : public JSReceiver {
static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> ObjectCreate(
Isolate* isolate, Handle<Object> prototype);
- // [elements]: The elements (properties with names that are integers).
- //
- // Elements can be in two general modes: fast and slow. Each mode
- // corresponds to a set of object representations of elements that
- // have something in common.
- //
- // In the fast mode elements is a FixedArray and so each element can be
- // quickly accessed. The elements array can have one of several maps in this
- // mode: fixed_array_map, fixed_double_array_map,
- // sloppy_arguments_elements_map or fixed_cow_array_map (for copy-on-write
- // arrays). In the latter case the elements array may be shared by a few
- // objects and so before writing to any element the array must be copied. Use
- // EnsureWritableFastElements in this case.
- //
- // In the slow mode the elements is either a NumberDictionary or a
- // FixedArray parameter map for a (sloppy) arguments object.
- DECL_ACCESSORS(elements, FixedArrayBase)
inline void initialize_elements();
static inline void SetMapAndElements(Handle<JSObject> object, Handle<Map> map,
Handle<FixedArrayBase> elements);
- inline ElementsKind GetElementsKind() const;
- V8_EXPORT_PRIVATE ElementsAccessor* GetElementsAccessor();
+ DECL_GETTER(GetElementsKind, ElementsKind)
+ DECL_GETTER(GetElementsAccessor, ElementsAccessor*)
+
// Returns true if an object has elements of PACKED_SMI_ELEMENTS or
// HOLEY_SMI_ELEMENTS ElementsKind.
- inline bool HasSmiElements();
+ DECL_GETTER(HasSmiElements, bool)
// Returns true if an object has elements of PACKED_ELEMENTS or
// HOLEY_ELEMENTS ElementsKind.
- inline bool HasObjectElements();
+ DECL_GETTER(HasObjectElements, bool)
// Returns true if an object has elements of PACKED_SMI_ELEMENTS,
// HOLEY_SMI_ELEMENTS, PACKED_ELEMENTS, or HOLEY_ELEMENTS.
- inline bool HasSmiOrObjectElements();
+ DECL_GETTER(HasSmiOrObjectElements, bool)
// Returns true if an object has any of the "fast" elements kinds.
- inline bool HasFastElements();
+ DECL_GETTER(HasFastElements, bool)
// Returns true if an object has any of the PACKED elements kinds.
- inline bool HasFastPackedElements();
+ DECL_GETTER(HasFastPackedElements, bool)
// Returns true if an object has elements of PACKED_DOUBLE_ELEMENTS or
// HOLEY_DOUBLE_ELEMENTS ElementsKind.
- inline bool HasDoubleElements();
+ DECL_GETTER(HasDoubleElements, bool)
// Returns true if an object has elements of HOLEY_SMI_ELEMENTS,
// HOLEY_DOUBLE_ELEMENTS, or HOLEY_ELEMENTS ElementsKind.
- inline bool HasHoleyElements();
- inline bool HasSloppyArgumentsElements();
- inline bool HasStringWrapperElements();
- inline bool HasDictionaryElements();
+ DECL_GETTER(HasHoleyElements, bool)
+ DECL_GETTER(HasSloppyArgumentsElements, bool)
+ DECL_GETTER(HasStringWrapperElements, bool)
+ DECL_GETTER(HasDictionaryElements, bool)
// Returns true if an object has elements of PACKED_ELEMENTS
- inline bool HasPackedElements();
- inline bool HasFrozenOrSealedElements();
- inline bool HasSealedElements();
-
- inline bool HasTypedArrayElements();
-
- inline bool HasFixedUint8ClampedElements();
- inline bool HasFixedArrayElements();
- inline bool HasFixedInt8Elements();
- inline bool HasFixedUint8Elements();
- inline bool HasFixedInt16Elements();
- inline bool HasFixedUint16Elements();
- inline bool HasFixedInt32Elements();
- inline bool HasFixedUint32Elements();
- inline bool HasFixedFloat32Elements();
- inline bool HasFixedFloat64Elements();
- inline bool HasFixedBigInt64Elements();
- inline bool HasFixedBigUint64Elements();
-
- inline bool HasFastArgumentsElements();
- inline bool HasSlowArgumentsElements();
- inline bool HasFastStringWrapperElements();
- inline bool HasSlowStringWrapperElements();
+ DECL_GETTER(HasPackedElements, bool)
+ DECL_GETTER(HasFrozenOrSealedElements, bool)
+ DECL_GETTER(HasSealedElements, bool)
+
+ DECL_GETTER(HasTypedArrayElements, bool)
+
+ DECL_GETTER(HasFixedUint8ClampedElements, bool)
+ DECL_GETTER(HasFixedArrayElements, bool)
+ DECL_GETTER(HasFixedInt8Elements, bool)
+ DECL_GETTER(HasFixedUint8Elements, bool)
+ DECL_GETTER(HasFixedInt16Elements, bool)
+ DECL_GETTER(HasFixedUint16Elements, bool)
+ DECL_GETTER(HasFixedInt32Elements, bool)
+ DECL_GETTER(HasFixedUint32Elements, bool)
+ DECL_GETTER(HasFixedFloat32Elements, bool)
+ DECL_GETTER(HasFixedFloat64Elements, bool)
+ DECL_GETTER(HasFixedBigInt64Elements, bool)
+ DECL_GETTER(HasFixedBigUint64Elements, bool)
+
+ DECL_GETTER(HasFastArgumentsElements, bool)
+ DECL_GETTER(HasSlowArgumentsElements, bool)
+ DECL_GETTER(HasFastStringWrapperElements, bool)
+ DECL_GETTER(HasSlowStringWrapperElements, bool)
bool HasEnumerableElements();
- inline NumberDictionary element_dictionary(); // Gets slow elements.
+ // Gets slow elements.
+ DECL_GETTER(element_dictionary, NumberDictionary)
// Requires: HasFastElements().
static void EnsureWritableFastElements(Handle<JSObject> object);
@@ -431,11 +418,11 @@ class JSObject : public JSReceiver {
// Migrates the given object to a map whose field representations are the
// lowest upper bound of all known representations for that field.
- static void MigrateInstance(Handle<JSObject> instance);
+ static void MigrateInstance(Isolate* isolate, Handle<JSObject> instance);
// Migrates the given object only if the target map is already available,
// or returns false if such a map is not yet available.
- static bool TryMigrateInstance(Handle<JSObject> instance);
+ static bool TryMigrateInstance(Isolate* isolate, Handle<JSObject> instance);
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
@@ -476,8 +463,8 @@ class JSObject : public JSReceiver {
int old_index, int new_index);
// Retrieve interceptors.
- inline InterceptorInfo GetNamedInterceptor();
- inline InterceptorInfo GetIndexedInterceptor();
+ DECL_GETTER(GetNamedInterceptor, InterceptorInfo)
+ DECL_GETTER(GetIndexedInterceptor, InterceptorInfo)
// Used from JSReceiver.
V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
@@ -546,8 +533,8 @@ class JSObject : public JSReceiver {
// Lookup interceptors are used for handling properties controlled by host
// objects.
- inline bool HasNamedInterceptor();
- inline bool HasIndexedInterceptor();
+ DECL_GETTER(HasNamedInterceptor, bool)
+ DECL_GETTER(HasIndexedInterceptor, bool)
// Support functions for v8 api (needed for correct interceptor behavior).
V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedProperty(
@@ -563,13 +550,12 @@ class JSObject : public JSReceiver {
// JSFunction objects.
static int GetHeaderSize(InstanceType instance_type,
bool function_has_prototype_slot = false);
- static inline int GetHeaderSize(const Map map);
- inline int GetHeaderSize() const;
+ static inline int GetHeaderSize(Map map);
- static inline int GetEmbedderFieldsStartOffset(const Map map);
+ static inline int GetEmbedderFieldsStartOffset(Map map);
inline int GetEmbedderFieldsStartOffset();
- static inline int GetEmbedderFieldCount(const Map map);
+ static inline int GetEmbedderFieldCount(Map map);
inline int GetEmbedderFieldCount() const;
inline int GetEmbedderFieldOffset(int index);
inline Object GetEmbedderField(int index);
@@ -596,7 +582,7 @@ class JSObject : public JSReceiver {
// |expected_additional_properties| is only used for fast-to-slow transitions
// and ignored otherwise.
V8_EXPORT_PRIVATE static void MigrateToMap(
- Handle<JSObject> object, Handle<Map> new_map,
+ Isolate* isolate, Handle<JSObject> object, Handle<Map> new_map,
int expected_additional_properties = 0);
// Forces a prototype without any of the checks that the regular SetPrototype
@@ -609,7 +595,7 @@ class JSObject : public JSReceiver {
// added this number can be indicated to have the backing store allocated to
// an initial capacity for holding these properties.
V8_EXPORT_PRIVATE static void NormalizeProperties(
- Handle<JSObject> object, PropertyNormalizationMode mode,
+ Isolate* isolate, Handle<JSObject> object, PropertyNormalizationMode mode,
int expected_additional_properties, const char* reason);
// Convert and update the elements backing store to be a
@@ -624,15 +610,17 @@ class JSObject : public JSReceiver {
int unused_property_fields,
const char* reason);
- inline bool IsUnboxedDoubleField(FieldIndex index);
+ inline bool IsUnboxedDoubleField(FieldIndex index) const;
+ inline bool IsUnboxedDoubleField(Isolate* isolate, FieldIndex index) const;
// Access fast-case object properties at index.
static Handle<Object> FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index);
- inline Object RawFastPropertyAt(FieldIndex index);
- inline double RawFastDoublePropertyAt(FieldIndex index);
- inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
+ inline Object RawFastPropertyAt(FieldIndex index) const;
+ inline Object RawFastPropertyAt(Isolate* isolate, FieldIndex index) const;
+ inline double RawFastDoublePropertyAt(FieldIndex index) const;
+ inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
inline void FastPropertyAtPut(FieldIndex index, Object value);
inline void RawFastPropertyAtPut(
@@ -679,8 +667,6 @@ class JSObject : public JSReceiver {
static bool IsExtensible(Handle<JSObject> object);
- DECL_CAST(JSObject)
-
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
DECL_PRINTER(JSObject)
@@ -727,7 +713,7 @@ class JSObject : public JSReceiver {
// If a GC was caused while constructing this object, the elements pointer
// may point to a one pointer filler map. The object won't be rooted, but
// our heap verification code could stumble across it.
- V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine() const;
+ V8_EXPORT_PRIVATE bool ElementsAreSafeToExamine(Isolate* isolate) const;
#endif
Object SlowReverseLookup(Object value);
@@ -764,15 +750,6 @@ class JSObject : public JSReceiver {
STATIC_ASSERT(kMaxNumberOfDescriptors + kFieldsAdded <=
PropertyArray::kMaxLength);
-// Layout description.
-#define JS_OBJECT_FIELDS(V) \
- V(kElementsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize, JS_OBJECT_FIELDS)
-#undef JS_OBJECT_FIELDS
-
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
static const int kMaxInObjectProperties =
(kMaxInstanceSize - kHeaderSize) >> kTaggedSizeLog2;
@@ -825,7 +802,7 @@ class JSObject : public JSReceiver {
V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensionsWithTransition(
Handle<JSObject> object, ShouldThrow should_throw);
- OBJECT_CONSTRUCTORS(JSObject, JSReceiver);
+ TQ_OBJECT_CONSTRUCTORS(JSObject)
};
// JSAccessorPropertyDescriptor is just a JSObject with a specific initial
@@ -835,9 +812,17 @@ class JSObject : public JSReceiver {
class JSAccessorPropertyDescriptor : public JSObject {
public:
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize,
- TORQUE_GENERATED_JSACCESSOR_PROPERTY_DESCRIPTOR_FIELDS)
+#define JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS(V) \
+ V(kGetOffset, kTaggedSize) \
+ V(kSetOffset, kTaggedSize) \
+ V(kEnumerableOffset, kTaggedSize) \
+ V(kConfigurableOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS)
+#undef JS_ACCESSOR_PROPERTY_DESCRIPTOR_FIELDS
// Indices of in-object properties.
static const int kGetIndex = 0;
@@ -855,8 +840,18 @@ class JSAccessorPropertyDescriptor : public JSObject {
// FromPropertyDescriptor function for regular data properties.
class JSDataPropertyDescriptor : public JSObject {
public:
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JSDATA_PROPERTY_DESCRIPTOR_FIELDS)
+ // Layout description.
+#define JS_DATA_PROPERTY_DESCRIPTOR_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ V(kWritableOffset, kTaggedSize) \
+ V(kEnumerableOffset, kTaggedSize) \
+ V(kConfigurableOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ JS_DATA_PROPERTY_DESCRIPTOR_FIELDS)
+#undef JS_DATA_PROPERTY_DESCRIPTOR_FIELDS
// Indices of in-object properties.
static const int kValueIndex = 0;
@@ -870,7 +865,7 @@ class JSDataPropertyDescriptor : public JSObject {
// JSIteratorResult is just a JSObject with a specific initial map.
// This initial map adds in-object properties for "done" and "value",
-// as specified by ES6 section 25.1.1.3 The IteratorResult Interface
+// as specified by ES6 section 25.1.1.3 The IteratorResult Interface.
class JSIteratorResult : public JSObject {
public:
DECL_ACCESSORS(value, Object)
@@ -878,8 +873,15 @@ class JSIteratorResult : public JSObject {
DECL_ACCESSORS(done, Object)
// Layout description.
+#define JS_ITERATOR_RESULT_FIELDS(V) \
+ V(kValueOffset, kTaggedSize) \
+ V(kDoneOffset, kTaggedSize) \
+ /* Total size. */ \
+ V(kSize, 0)
+
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSITERATOR_RESULT_FIELDS)
+ JS_ITERATOR_RESULT_FIELDS)
+#undef JS_ITERATOR_RESULT_FIELDS
// Indices of in-object properties.
static const int kValueIndex = 0;
@@ -894,7 +896,6 @@ class JSIteratorResult : public JSObject {
class JSBoundFunction : public JSObject {
public:
// [bound_target_function]: The wrapped function object.
- inline Object raw_bound_target_function() const;
DECL_ACCESSORS(bound_target_function, JSReceiver)
// [bound_this]: The value that is always passed as the this value when
@@ -933,7 +934,7 @@ class JSBoundFunction : public JSObject {
class JSFunction : public JSObject {
public:
// [prototype_or_initial_map]:
- DECL_ACCESSORS(prototype_or_initial_map, Object)
+ DECL_ACCESSORS(prototype_or_initial_map, HeapObject)
// [shared]: The information about the function that
// can be shared by instances.
@@ -947,7 +948,7 @@ class JSFunction : public JSObject {
// [context]: The context for this function.
inline Context context();
inline bool has_context() const;
- inline void set_context(Object context);
+ inline void set_context(HeapObject context);
inline JSGlobalProxy global_proxy();
inline NativeContext native_context();
inline int length();
@@ -1055,13 +1056,14 @@ class JSFunction : public JSObject {
inline bool NeedsResetDueToFlushedBytecode();
inline void ResetIfBytecodeFlushed();
- inline bool has_prototype_slot() const;
+ DECL_GETTER(has_prototype_slot, bool)
// The initial map for an object created by this constructor.
- inline Map initial_map();
+ DECL_GETTER(initial_map, Map)
+
static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
Handle<HeapObject> prototype);
- inline bool has_initial_map();
+ DECL_GETTER(has_initial_map, bool)
V8_EXPORT_PRIVATE static void EnsureHasInitialMap(
Handle<JSFunction> function);
@@ -1076,12 +1078,12 @@ class JSFunction : public JSObject {
// function has an initial map the prototype is set on the initial
// map. Otherwise, the prototype is put in the initial map field
// until an initial map is needed.
- inline bool has_prototype();
- inline bool has_instance_prototype();
- inline Object prototype();
- inline HeapObject instance_prototype();
- inline bool has_prototype_property();
- inline bool PrototypeRequiresRuntimeLookup();
+ DECL_GETTER(has_prototype, bool)
+ DECL_GETTER(has_instance_prototype, bool)
+ DECL_GETTER(prototype, Object)
+ DECL_GETTER(instance_prototype, HeapObject)
+ DECL_GETTER(has_prototype_property, bool)
+ DECL_GETTER(PrototypeRequiresRuntimeLookup, bool)
static void SetPrototype(Handle<JSFunction> function, Handle<Object> value);
// Returns if this function has been compiled to native code yet.
@@ -1149,14 +1151,9 @@ class JSFunction : public JSObject {
//
// Accessing a JSGlobalProxy requires security check.
-class JSGlobalProxy : public JSObject {
+class JSGlobalProxy
+ : public TorqueGeneratedJSGlobalProxy<JSGlobalProxy, JSObject> {
public:
- // [native_context]: the owner native context of this global proxy object.
- // It is null value if this object is not used by any context.
- DECL_ACCESSORS(native_context, Object)
-
- DECL_CAST(JSGlobalProxy)
-
inline bool IsDetachedFrom(JSGlobalObject global) const;
static int SizeWithEmbedderFields(int embedder_field_count);
@@ -1165,11 +1162,7 @@ class JSGlobalProxy : public JSObject {
DECL_PRINTER(JSGlobalProxy)
DECL_VERIFIER(JSGlobalProxy)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSGLOBAL_PROXY_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSGlobalProxy, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSGlobalProxy)
};
// JavaScript global object.
@@ -1182,7 +1175,7 @@ class JSGlobalObject : public JSObject {
DECL_ACCESSORS(global_proxy, JSGlobalProxy)
// Gets global object properties.
- inline GlobalDictionary global_dictionary();
+ DECL_GETTER(global_dictionary, GlobalDictionary)
inline void set_global_dictionary(GlobalDictionary dictionary);
static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
@@ -1208,22 +1201,13 @@ class JSGlobalObject : public JSObject {
};
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
-class JSValue : public JSObject {
+class JSPrimitiveWrapper
+ : public TorqueGeneratedJSPrimitiveWrapper<JSPrimitiveWrapper, JSObject> {
public:
- // [value]: the object being wrapped.
- DECL_ACCESSORS(value, Object)
-
- DECL_CAST(JSValue)
-
// Dispatched behavior.
- DECL_PRINTER(JSValue)
- DECL_VERIFIER(JSValue)
+ DECL_PRINTER(JSPrimitiveWrapper)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JSVALUE_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSValue, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSPrimitiveWrapper)
};
class DateCache;
@@ -1367,8 +1351,7 @@ class JSMessageObject : public JSObject {
// EnsureSourcePositionsAvailable must have been called before calling this.
Handle<String> GetSourceLine() const;
- inline int error_level() const;
- inline void set_error_level(int level);
+ DECL_INT_ACCESSORS(error_level)
DECL_CAST(JSMessageObject)
@@ -1384,8 +1367,6 @@ class JSMessageObject : public JSObject {
using BodyDescriptor = FixedBodyDescriptor<HeapObject::kMapOffset,
kPointerFieldsEndOffset, kSize>;
- OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
-
private:
friend class Factory;
@@ -1400,12 +1381,14 @@ class JSMessageObject : public JSObject {
DECL_ACCESSORS(bytecode_offset, Smi)
// [start_position]: the start position in the script for the error message.
- inline int start_position() const;
- inline void set_start_position(int value);
+ DECL_INT_ACCESSORS(start_position)
// [end_position]: the end position in the script for the error message.
- inline int end_position() const;
- inline void set_end_position(int value);
+ DECL_INT_ACCESSORS(end_position)
+
+ DECL_INT_ACCESSORS(raw_type)
+
+ OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
};
// The [Async-from-Sync Iterator] object
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index 1924bdc4ff..b8fe7f50f0 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -25,11 +25,12 @@ ACCESSORS(JSPluralRules, locale, String, kLocaleOffset)
SMI_ACCESSORS(JSPluralRules, flags, kFlagsOffset)
ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
kIcuPluralRulesOffset)
-ACCESSORS(JSPluralRules, icu_decimal_format, Managed<icu::DecimalFormat>,
- kIcuDecimalFormatOffset)
+ACCESSORS(JSPluralRules, icu_number_formatter,
+ Managed<icu::number::LocalizedNumberFormatter>,
+ kIcuNumberFormatterOffset)
inline void JSPluralRules::set_type(Type type) {
- DCHECK_LT(type, Type::COUNT);
+ DCHECK_LE(type, TypeBits::kMax);
int hints = flags();
hints = TypeBits::update(hints, type);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index 8daf5db64a..84fe9b6d52 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -10,11 +10,12 @@
#include "src/execution/isolate-inl.h"
#include "src/objects/intl-objects.h"
+#include "src/objects/js-number-format.h"
#include "src/objects/js-plural-rules-inl.h"
-#include "unicode/decimfmt.h"
#include "unicode/locid.h"
-#include "unicode/numfmt.h"
+#include "unicode/numberformatter.h"
#include "unicode/plurrule.h"
+#include "unicode/unumberformatter.h"
namespace v8 {
namespace internal {
@@ -23,8 +24,7 @@ namespace {
bool CreateICUPluralRules(Isolate* isolate, const icu::Locale& icu_locale,
JSPluralRules::Type type,
- std::unique_ptr<icu::PluralRules>* pl,
- std::unique_ptr<icu::DecimalFormat>* nf) {
+ std::unique_ptr<icu::PluralRules>* pl) {
// Make formatter from options. Numbering system is added
// to the locale as Unicode extension (if it was specified at all).
UErrorCode status = U_ZERO_ERROR;
@@ -43,41 +43,10 @@ bool CreateICUPluralRules(Isolate* isolate, const icu::Locale& icu_locale,
}
CHECK_NOT_NULL(plural_rules.get());
- std::unique_ptr<icu::DecimalFormat> number_format(
- static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, UNUM_DECIMAL, status)));
- if (U_FAILURE(status)) {
- return false;
- }
- CHECK_NOT_NULL(number_format.get());
-
*pl = std::move(plural_rules);
- *nf = std::move(number_format);
-
return true;
}
-void InitializeICUPluralRules(
- Isolate* isolate, const icu::Locale& icu_locale, JSPluralRules::Type type,
- std::unique_ptr<icu::PluralRules>* plural_rules,
- std::unique_ptr<icu::DecimalFormat>* number_format) {
- bool success = CreateICUPluralRules(isolate, icu_locale, type, plural_rules,
- number_format);
- if (!success) {
- // Remove extensions and try again.
- icu::Locale no_extension_locale(icu_locale.getBaseName());
- success = CreateICUPluralRules(isolate, no_extension_locale, type,
- plural_rules, number_format);
-
- if (!success) {
- FATAL("Failed to create ICU PluralRules, are ICU data files missing?");
- }
- }
-
- CHECK_NOT_NULL((*plural_rules).get());
- CHECK_NOT_NULL((*number_format).get());
-}
-
} // namespace
Handle<String> JSPluralRules::TypeAsString() const {
@@ -86,16 +55,14 @@ Handle<String> JSPluralRules::TypeAsString() const {
return GetReadOnlyRoots().cardinal_string_handle();
case Type::ORDINAL:
return GetReadOnlyRoots().ordinal_string_handle();
- case Type::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
// static
-MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
- Isolate* isolate, Handle<JSPluralRules> plural_rules,
- Handle<Object> locales, Handle<Object> options_obj) {
- plural_rules->set_flags(0);
+MaybeHandle<JSPluralRules> JSPluralRules::New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> options_obj) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -135,9 +102,6 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
MAYBE_RETURN(maybe_type, MaybeHandle<JSPluralRules>());
Type type = maybe_type.FromJust();
- // 8. Set pluralRules.[[Type]] to t.
- plural_rules->set_type(type);
-
// Note: The spec says we should do ResolveLocale after performing
// SetNumberFormatDigitOptions but we need the locale to create all
// the ICU data structures.
@@ -150,48 +114,64 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
Intl::ResolvedLocale r =
Intl::ResolveLocale(isolate, JSPluralRules::GetAvailableLocales(),
requested_locales, matcher, {});
-
- // 12. Set pluralRules.[[Locale]] to the value of r.[[locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- plural_rules->set_locale(*locale_str);
+
+ icu::number::LocalizedNumberFormatter icu_number_formatter =
+ icu::number::NumberFormatter::withLocale(r.icu_locale)
+ .roundingMode(UNUM_ROUND_HALFUP);
std::unique_ptr<icu::PluralRules> icu_plural_rules;
- std::unique_ptr<icu::DecimalFormat> icu_decimal_format;
- InitializeICUPluralRules(isolate, r.icu_locale, type, &icu_plural_rules,
- &icu_decimal_format);
+ bool success =
+ CreateICUPluralRules(isolate, r.icu_locale, type, &icu_plural_rules);
+ if (!success) {
+ // Remove extensions and try again.
+ icu::Locale no_extension_locale(r.icu_locale.getBaseName());
+ success = CreateICUPluralRules(isolate, no_extension_locale, type,
+ &icu_plural_rules);
+ icu_number_formatter =
+ icu::number::NumberFormatter::withLocale(no_extension_locale)
+ .roundingMode(UNUM_ROUND_HALFUP);
+
+ if (!success) {
+ FATAL("Failed to create ICU PluralRules, are ICU data files missing?");
+ }
+ }
+
CHECK_NOT_NULL(icu_plural_rules.get());
- CHECK_NOT_NULL(icu_decimal_format.get());
// 9. Perform ? SetNumberFormatDigitOptions(pluralRules, options, 0, 3).
Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
- Intl::SetNumberFormatDigitOptions(isolate, options, 0, 3);
+ Intl::SetNumberFormatDigitOptions(isolate, options, 0, 3, false);
MAYBE_RETURN(maybe_digit_options, MaybeHandle<JSPluralRules>());
Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
-
- icu_decimal_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
- icu_decimal_format->setMinimumIntegerDigits(
- digit_options.minimum_integer_digits);
- icu_decimal_format->setMinimumFractionDigits(
- digit_options.minimum_fraction_digits);
- icu_decimal_format->setMaximumFractionDigits(
- digit_options.maximum_fraction_digits);
- if (digit_options.minimum_significant_digits > 0) {
- icu_decimal_format->setMinimumSignificantDigits(
- digit_options.minimum_significant_digits);
- icu_decimal_format->setMaximumSignificantDigits(
- digit_options.maximum_significant_digits);
- }
+ icu_number_formatter = JSNumberFormat::SetDigitOptionsToFormatter(
+ icu_number_formatter, digit_options);
Handle<Managed<icu::PluralRules>> managed_plural_rules =
Managed<icu::PluralRules>::FromUniquePtr(isolate, 0,
std::move(icu_plural_rules));
- plural_rules->set_icu_plural_rules(*managed_plural_rules);
- Handle<Managed<icu::DecimalFormat>> managed_decimal_format =
- Managed<icu::DecimalFormat>::FromUniquePtr(isolate, 0,
- std::move(icu_decimal_format));
- plural_rules->set_icu_decimal_format(*managed_decimal_format);
+ Handle<Managed<icu::number::LocalizedNumberFormatter>>
+ managed_number_formatter =
+ Managed<icu::number::LocalizedNumberFormatter>::FromRawPtr(
+ isolate, 0,
+ new icu::number::LocalizedNumberFormatter(icu_number_formatter));
+
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSPluralRules> plural_rules = Handle<JSPluralRules>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ plural_rules->set_flags(0);
+
+ // 8. Set pluralRules.[[Type]] to t.
+ plural_rules->set_type(type);
+
+ // 12. Set pluralRules.[[Locale]] to the value of r.[[locale]].
+ plural_rules->set_locale(*locale_str);
+
+ plural_rules->set_icu_plural_rules(*managed_plural_rules);
+ plural_rules->set_icu_number_formatter(*managed_number_formatter);
// 13. Return pluralRules.
return plural_rules;
@@ -202,31 +182,20 @@ MaybeHandle<String> JSPluralRules::ResolvePlural(
icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
CHECK_NOT_NULL(icu_plural_rules);
- icu::DecimalFormat* icu_decimal_format =
- plural_rules->icu_decimal_format().raw();
- CHECK_NOT_NULL(icu_decimal_format);
+ icu::number::LocalizedNumberFormatter* fmt =
+ plural_rules->icu_number_formatter().raw();
+ CHECK_NOT_NULL(fmt);
- // Currently, PluralRules doesn't implement all the options for rounding that
- // the Intl spec provides; format and parse the number to round to the
- // appropriate amount, then apply PluralRules.
- //
- // TODO(littledan): If a future ICU version supports an extended API to avoid
- // this step, then switch to that API. Bug thread:
- // http://bugs.icu-project.org/trac/ticket/12763
- icu::UnicodeString rounded_string;
- icu_decimal_format->format(number, rounded_string);
-
- icu::Formattable formattable;
UErrorCode status = U_ZERO_ERROR;
- icu_decimal_format->parse(rounded_string, formattable, status);
+ icu::number::FormattedNumber formatted_number =
+ fmt->formatDouble(number, status);
CHECK(U_SUCCESS(status));
- double rounded = formattable.getDouble(status);
+ icu::UnicodeString result =
+ icu_plural_rules->select(formatted_number, status);
CHECK(U_SUCCESS(status));
- icu::UnicodeString result = icu_plural_rules->select(rounded);
- return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+ return Intl::ToString(isolate, result);
}
namespace {
@@ -261,36 +230,27 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
CreateDataPropertyForOptions(isolate, options, plural_rules->TypeAsString(),
"type");
- icu::DecimalFormat* icu_decimal_format =
- plural_rules->icu_decimal_format().raw();
- CHECK_NOT_NULL(icu_decimal_format);
-
- // This is a safe upcast as icu::DecimalFormat inherits from
- // icu::NumberFormat.
- icu::NumberFormat* icu_number_format =
- static_cast<icu::NumberFormat*>(icu_decimal_format);
+ UErrorCode status = U_ZERO_ERROR;
+ icu::number::LocalizedNumberFormatter* icu_number_formatter =
+ plural_rules->icu_number_formatter().raw();
+ icu::UnicodeString skeleton = icu_number_formatter->toSkeleton(status);
+ CHECK(U_SUCCESS(status));
- int min_int_digits = icu_number_format->getMinimumIntegerDigits();
- CreateDataPropertyForOptions(isolate, options, min_int_digits,
- "minimumIntegerDigits");
+ CreateDataPropertyForOptions(
+ isolate, options,
+ JSNumberFormat::MinimumIntegerDigitsFromSkeleton(skeleton),
+ "minimumIntegerDigits");
+ int32_t min = 0, max = 0;
+ JSNumberFormat::FractionDigitsFromSkeleton(skeleton, &min, &max);
- int min_fraction_digits = icu_number_format->getMinimumFractionDigits();
- CreateDataPropertyForOptions(isolate, options, min_fraction_digits,
- "minimumFractionDigits");
+ CreateDataPropertyForOptions(isolate, options, min, "minimumFractionDigits");
- int max_fraction_digits = icu_number_format->getMaximumFractionDigits();
- CreateDataPropertyForOptions(isolate, options, max_fraction_digits,
- "maximumFractionDigits");
+ CreateDataPropertyForOptions(isolate, options, max, "maximumFractionDigits");
- if (icu_decimal_format->areSignificantDigitsUsed()) {
- int min_significant_digits =
- icu_decimal_format->getMinimumSignificantDigits();
- CreateDataPropertyForOptions(isolate, options, min_significant_digits,
+ if (JSNumberFormat::SignificantDigitsFromSkeleton(skeleton, &min, &max)) {
+ CreateDataPropertyForOptions(isolate, options, min,
"minimumSignificantDigits");
-
- int max_significant_digits =
- icu_decimal_format->getMaximumSignificantDigits();
- CreateDataPropertyForOptions(isolate, options, max_significant_digits,
+ CreateDataPropertyForOptions(isolate, options, max,
"maximumSignificantDigits");
}
@@ -299,7 +259,6 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
CHECK_NOT_NULL(icu_plural_rules);
- UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::StringEnumeration> categories(
icu_plural_rules->getKeywords(status));
CHECK(U_SUCCESS(status));
@@ -329,13 +288,39 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
return options;
}
+namespace {
+
+class PluralRulesAvailableLocales {
+ public:
+ PluralRulesAvailableLocales() {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> locales(
+ icu::PluralRules::getAvailableLocales(status));
+ CHECK(U_SUCCESS(status));
+ int32_t len = 0;
+ const char* locale = nullptr;
+ while ((locale = locales->next(&len, status)) != nullptr &&
+ U_SUCCESS(status)) {
+ std::string str(locale);
+ if (len > 3) {
+ std::replace(str.begin(), str.end(), '_', '-');
+ }
+ set_.insert(std::move(str));
+ }
+ }
+ const std::set<std::string>& Get() const { return set_; }
+
+ private:
+ std::set<std::string> set_;
+};
+
+} // namespace
+
const std::set<std::string>& JSPluralRules::GetAvailableLocales() {
- // TODO(ftang): For PluralRules, filter out locales that
- // don't support PluralRules.
- // PluralRules is missing an appropriate getAvailableLocales method,
- // so we should filter from all locales, but it's not clear how; see
- // https://ssl.icu-project.org/trac/ticket/12756
- return Intl::GetAvailableLocalesForLocale();
+ static base::LazyInstance<PluralRulesAvailableLocales>::type
+ available_locales = LAZY_INSTANCE_INITIALIZER;
+ return available_locales.Pointer()->Get();
+ // return Intl::GetAvailableLocalesForLocale();
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index 249090bdf6..840efb07ed 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -22,8 +22,10 @@
#include "src/objects/object-macros.h"
namespace U_ICU_NAMESPACE {
-class DecimalFormat;
class PluralRules;
+namespace number {
+class LocalizedNumberFormatter;
+} // namespace number
} // namespace U_ICU_NAMESPACE
namespace v8 {
@@ -31,9 +33,9 @@ namespace internal {
class JSPluralRules : public JSObject {
public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSPluralRules> Initialize(
- Isolate* isolate, Handle<JSPluralRules> plural_rules,
- Handle<Object> locales, Handle<Object> options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSPluralRules> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
static Handle<JSObject> ResolvedOptions(Isolate* isolate,
Handle<JSPluralRules> plural_rules);
@@ -45,12 +47,7 @@ class JSPluralRules : public JSObject {
// [[Type]] is one of the values "cardinal" or "ordinal",
// identifying the plural rules used.
- enum class Type {
- CARDINAL,
- ORDINAL,
-
- COUNT
- };
+ enum class Type { CARDINAL, ORDINAL };
inline void set_type(Type type);
inline Type type() const;
@@ -76,7 +73,8 @@ class JSPluralRules : public JSObject {
DECL_ACCESSORS(locale, String)
DECL_INT_ACCESSORS(flags)
DECL_ACCESSORS(icu_plural_rules, Managed<icu::PluralRules>)
- DECL_ACCESSORS(icu_decimal_format, Managed<icu::DecimalFormat>)
+ DECL_ACCESSORS(icu_number_formatter,
+ Managed<icu::number::LocalizedNumberFormatter>)
OBJECT_CONSTRUCTORS(JSPluralRules, JSObject);
};
diff --git a/deps/v8/src/objects/js-proxy-inl.h b/deps/v8/src/objects/js-proxy-inl.h
index f33628b5c2..0683cfeec8 100644
--- a/deps/v8/src/objects/js-proxy-inl.h
+++ b/deps/v8/src/objects/js-proxy-inl.h
@@ -15,12 +15,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSProxy, JSReceiver)
-
-CAST_ACCESSOR(JSProxy)
-
-ACCESSORS(JSProxy, target, Object, kTargetOffset)
-ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSProxy)
bool JSProxy::IsRevoked() const { return !handler().IsJSReceiver(); }
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index c4f98927e9..8e29c08bc1 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -15,21 +15,14 @@ namespace v8 {
namespace internal {
// The JSProxy describes EcmaScript Harmony proxies
-class JSProxy : public JSReceiver {
+class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
public:
V8_WARN_UNUSED_RESULT static MaybeHandle<JSProxy> New(Isolate* isolate,
Handle<Object>,
Handle<Object>);
- // [handler]: The handler property.
- DECL_ACCESSORS(handler, Object)
- // [target]: The target property.
- DECL_ACCESSORS(target, Object)
-
static MaybeHandle<NativeContext> GetFunctionRealm(Handle<JSProxy> proxy);
- DECL_CAST(JSProxy)
-
V8_INLINE bool IsRevoked() const;
static void Revoke(Handle<JSProxy> proxy);
@@ -70,6 +63,10 @@ class JSProxy : public JSReceiver {
V8_WARN_UNUSED_RESULT static Maybe<bool> CheckHasTrap(
Isolate* isolate, Handle<Name> name, Handle<JSReceiver> target);
+ // ES6 9.5.10
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CheckDeleteTrap(
+ Isolate* isolate, Handle<Name> name, Handle<JSReceiver> target);
+
// ES6 9.5.8
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
@@ -106,10 +103,6 @@ class JSProxy : public JSReceiver {
static const int kMaxIterationLimit = 100 * 1024;
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSReceiver::kHeaderSize,
- TORQUE_GENERATED_JSPROXY_FIELDS)
-
// kTargetOffset aliases with the elements of JSObject. The fact that
// JSProxy::target is a Javascript value which cannot be confused with an
// elements backing store is exploited by loading from this offset from an
@@ -125,7 +118,7 @@ class JSProxy : public JSReceiver {
PropertyDescriptor* desc,
Maybe<ShouldThrow> should_throw);
- OBJECT_CONSTRUCTORS(JSProxy, JSReceiver);
+ TQ_OBJECT_CONSTRUCTORS(JSProxy)
};
// JSProxyRevocableResult is just a JSObject with a specific initial map.
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index e525c66e3e..18355079f8 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -37,13 +37,13 @@ class JSRegExp : public JSObject {
// IRREGEXP: Compiled with Irregexp.
enum Type { NOT_COMPILED, ATOM, IRREGEXP };
struct FlagShiftBit {
- static const int kGlobal = 0;
- static const int kIgnoreCase = 1;
- static const int kMultiline = 2;
- static const int kSticky = 3;
- static const int kUnicode = 4;
- static const int kDotAll = 5;
- static const int kInvalid = 7;
+ static constexpr int kGlobal = 0;
+ static constexpr int kIgnoreCase = 1;
+ static constexpr int kMultiline = 2;
+ static constexpr int kSticky = 3;
+ static constexpr int kUnicode = 4;
+ static constexpr int kDotAll = 5;
+ static constexpr int kInvalid = 6;
};
enum Flag : uint8_t {
kNone = 0,
@@ -57,28 +57,31 @@ class JSRegExp : public JSObject {
kInvalid = 1 << FlagShiftBit::kInvalid, // Not included in FlagCount.
};
using Flags = base::Flags<Flag>;
- static constexpr int FlagCount() { return 6; }
-
- static int FlagShiftBits(Flag flag) {
- switch (flag) {
- case kGlobal:
- return FlagShiftBit::kGlobal;
- case kIgnoreCase:
- return FlagShiftBit::kIgnoreCase;
- case kMultiline:
- return FlagShiftBit::kMultiline;
- case kSticky:
- return FlagShiftBit::kSticky;
- case kUnicode:
- return FlagShiftBit::kUnicode;
- case kDotAll:
- return FlagShiftBit::kDotAll;
- default:
- STATIC_ASSERT(FlagCount() == 6);
- UNREACHABLE();
- }
+
+ static constexpr int kFlagCount = 6;
+
+ static constexpr Flag FlagFromChar(char c) {
+ STATIC_ASSERT(kFlagCount == 6);
+ // clang-format off
+ return c == 'g' ? kGlobal
+ : c == 'i' ? kIgnoreCase
+ : c == 'm' ? kMultiline
+ : c == 'y' ? kSticky
+ : c == 'u' ? kUnicode
+ : c == 's' ? kDotAll
+ : kInvalid;
+ // clang-format on
}
+ STATIC_ASSERT(static_cast<int>(kNone) == v8::RegExp::kNone);
+ STATIC_ASSERT(static_cast<int>(kGlobal) == v8::RegExp::kGlobal);
+ STATIC_ASSERT(static_cast<int>(kIgnoreCase) == v8::RegExp::kIgnoreCase);
+ STATIC_ASSERT(static_cast<int>(kMultiline) == v8::RegExp::kMultiline);
+ STATIC_ASSERT(static_cast<int>(kSticky) == v8::RegExp::kSticky);
+ STATIC_ASSERT(static_cast<int>(kUnicode) == v8::RegExp::kUnicode);
+ STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
+ STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
+
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(flags, Object)
DECL_ACCESSORS(last_index, Object)
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 1ff66b1a12..fac3439b31 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -27,7 +27,7 @@ ACCESSORS(JSRelativeTimeFormat, icu_formatter,
SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
inline void JSRelativeTimeFormat::set_style(Style style) {
- DCHECK_GT(Style::COUNT, style);
+ DCHECK_GE(StyleBits::kMax, style);
int hints = flags();
hints = StyleBits::update(hints, style);
set_flags(hints);
@@ -38,7 +38,7 @@ inline JSRelativeTimeFormat::Style JSRelativeTimeFormat::style() const {
}
inline void JSRelativeTimeFormat::set_numeric(Numeric numeric) {
- DCHECK_GT(Numeric::COUNT, numeric);
+ DCHECK_GE(NumericBits::kMax, numeric);
int hints = flags();
hints = NumericBits::update(hints, numeric);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 59a3bf7ea0..28f8c757ee 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -34,9 +34,8 @@ UDateRelativeDateTimeFormatterStyle getIcuStyle(
return UDAT_STYLE_SHORT;
case JSRelativeTimeFormat::Style::NARROW:
return UDAT_STYLE_NARROW;
- case JSRelativeTimeFormat::Style::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
} // namespace
@@ -54,11 +53,9 @@ JSRelativeTimeFormat::Numeric JSRelativeTimeFormat::getNumeric(
UNREACHABLE();
}
-MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
- Isolate* isolate, Handle<JSRelativeTimeFormat> relative_time_format_holder,
- Handle<Object> locales, Handle<Object> input_options) {
- relative_time_format_holder->set_flags(0);
-
+MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> input_options) {
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -125,7 +122,6 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Handle<String> locale_str = isolate->factory()->NewStringFromAsciiChecked(
maybe_locale_str.FromJust().c_str());
- relative_time_format_holder->set_locale(*locale_str);
// 15. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
@@ -136,9 +132,6 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_style, MaybeHandle<JSRelativeTimeFormat>());
Style style_enum = maybe_style.FromJust();
- // 16. Set relativeTimeFormat.[[Style]] to s.
- relative_time_format_holder->set_style(style_enum);
-
// 17. Let numeric be ? GetOption(options, "numeric", "string",
// «"always", "auto"», "always").
Maybe<Numeric> maybe_numeric = Intl::GetStringOption<Numeric>(
@@ -147,9 +140,6 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_numeric, MaybeHandle<JSRelativeTimeFormat>());
Numeric numeric_enum = maybe_numeric.FromJust();
- // 18. Set relativeTimeFormat.[[Numeric]] to numeric.
- relative_time_format_holder->set_numeric(numeric_enum);
-
// 19. Let relativeTimeFormat.[[NumberFormat]] be
// ? Construct(%NumberFormat%, « nfLocale, nfOptions »).
icu::NumberFormat* number_format =
@@ -179,6 +169,21 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Managed<icu::RelativeDateTimeFormatter>::FromRawPtr(isolate, 0,
icu_formatter);
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSRelativeTimeFormat> relative_time_format_holder =
+ Handle<JSRelativeTimeFormat>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ relative_time_format_holder->set_flags(0);
+
+ relative_time_format_holder->set_locale(*locale_str);
+
+ // 16. Set relativeTimeFormat.[[Style]] to s.
+ relative_time_format_holder->set_style(style_enum);
+
+ // 18. Set relativeTimeFormat.[[Numeric]] to numeric.
+ relative_time_format_holder->set_numeric(numeric_enum);
+
// 21. Set relativeTimeFormat.[[InitializedRelativeTimeFormat]] to true.
relative_time_format_holder->set_icu_formatter(*managed_formatter);
@@ -214,9 +219,8 @@ Handle<String> JSRelativeTimeFormat::StyleAsString() const {
return GetReadOnlyRoots().short_string_handle();
case Style::NARROW:
return GetReadOnlyRoots().narrow_string_handle();
- case Style::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
Handle<String> JSRelativeTimeFormat::NumericAsString() const {
@@ -225,9 +229,8 @@ Handle<String> JSRelativeTimeFormat::NumericAsString() const {
return GetReadOnlyRoots().always_string_handle();
case Numeric::AUTO:
return GetReadOnlyRoots().auto_string_handle();
- case Numeric::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
namespace {
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 740336c29c..6e405e345e 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -30,12 +30,11 @@ namespace internal {
class JSRelativeTimeFormat : public JSObject {
public:
- // Initializes relative time format object with properties derived from input
+ // Creates relative time format object with properties derived from input
// locales and options.
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSRelativeTimeFormat> Initialize(
- Isolate* isolate,
- Handle<JSRelativeTimeFormat> relative_time_format_holder,
- Handle<Object> locales, Handle<Object> options);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSRelativeTimeFormat> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSRelativeTimeFormat> format_holder);
@@ -67,10 +66,9 @@ class JSRelativeTimeFormat : public JSObject {
// ecma402/#sec-properties-of-intl-relativetimeformat-instances
enum class Style {
- LONG, // Everything spelled out.
- SHORT, // Abbreviations used when possible.
- NARROW, // Use the shortest possible form.
- COUNT
+ LONG, // Everything spelled out.
+ SHORT, // Abbreviations used when possible.
+ NARROW // Use the shortest possible form.
};
inline void set_style(Style style);
inline Style style() const;
@@ -82,9 +80,8 @@ class JSRelativeTimeFormat : public JSObject {
// ecma402/#sec-properties-of-intl-relativetimeformat-instances
enum class Numeric {
ALWAYS, // numerical descriptions are always used ("1 day ago")
- AUTO, // numerical descriptions are used only when no more specific
+ AUTO // numerical descriptions are used only when no more specific
// version is available ("yesterday")
- COUNT
};
inline void set_numeric(Numeric numeric);
inline Numeric numeric() const;
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index 24a827c030..b2d745179a 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -35,7 +35,7 @@ CAST_ACCESSOR(JSSegmentIterator)
inline void JSSegmentIterator::set_granularity(
JSSegmenter::Granularity granularity) {
- DCHECK_GT(JSSegmenter::Granularity::COUNT, granularity);
+ DCHECK_GE(GranularityBits::kMax, granularity);
int hints = flags();
hints = GranularityBits::update(hints, granularity);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-segment-iterator.cc b/deps/v8/src/objects/js-segment-iterator.cc
index 3d2b19ca5c..509db37d44 100644
--- a/deps/v8/src/objects/js-segment-iterator.cc
+++ b/deps/v8/src/objects/js-segment-iterator.cc
@@ -37,9 +37,8 @@ Handle<String> JSSegmentIterator::GranularityAsString() const {
return GetReadOnlyRoots().word_string_handle();
case JSSegmenter::Granularity::SENTENCE:
return GetReadOnlyRoots().sentence_string_handle();
- case JSSegmenter::Granularity::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
MaybeHandle<JSSegmentIterator> JSSegmentIterator::Create(
@@ -49,22 +48,25 @@ MaybeHandle<JSSegmentIterator> JSSegmentIterator::Create(
// 1. Let iterator be ObjectCreate(%SegmentIteratorPrototype%).
Handle<Map> map = Handle<Map>(
isolate->native_context()->intl_segment_iterator_map(), isolate);
- Handle<JSObject> result = isolate->factory()->NewJSObjectFromMap(map);
+ Handle<Managed<icu::BreakIterator>> managed_break_iterator =
+ Managed<icu::BreakIterator>::FromRawPtr(isolate, 0, break_iterator);
+ Handle<Managed<icu::UnicodeString>> unicode_string =
+ Intl::SetTextToBreakIterator(isolate, text, break_iterator);
+
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSObject> result = isolate->factory()->NewJSObjectFromMap(map);
+ DisallowHeapAllocation no_gc;
Handle<JSSegmentIterator> segment_iterator =
Handle<JSSegmentIterator>::cast(result);
segment_iterator->set_flags(0);
segment_iterator->set_granularity(granularity);
// 2. Let iterator.[[SegmentIteratorSegmenter]] be segmenter.
- Handle<Managed<icu::BreakIterator>> managed_break_iterator =
- Managed<icu::BreakIterator>::FromRawPtr(isolate, 0, break_iterator);
segment_iterator->set_icu_break_iterator(*managed_break_iterator);
// 3. Let iterator.[[SegmentIteratorString]] be string.
- Managed<icu::UnicodeString> unicode_string =
- Intl::SetTextToBreakIterator(isolate, text, break_iterator);
- segment_iterator->set_unicode_string(unicode_string);
+ segment_iterator->set_unicode_string(*unicode_string);
// 4. Let iterator.[[SegmentIteratorIndex]] be 0.
// step 4 is stored inside break_iterator.
@@ -119,9 +121,8 @@ Handle<Object> JSSegmentIterator::BreakType() const {
return GetReadOnlyRoots().sep_string_handle();
}
return GetReadOnlyRoots().undefined_value_handle();
- case JSSegmenter::Granularity::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
// ecma402 #sec-segment-iterator-prototype-index
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index b4adf4c8e6..a31de29c25 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -27,7 +27,7 @@ ACCESSORS(JSSegmenter, icu_break_iterator, Managed<icu::BreakIterator>,
SMI_ACCESSORS(JSSegmenter, flags, kFlagsOffset)
inline void JSSegmenter::set_granularity(Granularity granularity) {
- DCHECK_GT(Granularity::COUNT, granularity);
+ DCHECK_GE(GranularityBits::kMax, granularity);
int hints = flags();
hints = GranularityBits::update(hints, granularity);
set_flags(hints);
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index 5321334678..7985cf1c99 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -30,11 +30,9 @@ JSSegmenter::Granularity JSSegmenter::GetGranularity(const char* str) {
UNREACHABLE();
}
-MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
- Isolate* isolate, Handle<JSSegmenter> segmenter_holder,
- Handle<Object> locales, Handle<Object> input_options) {
- segmenter_holder->set_flags(0);
-
+MaybeHandle<JSSegmenter> JSSegmenter::New(Isolate* isolate, Handle<Map> map,
+ Handle<Object> locales,
+ Handle<Object> input_options) {
// 3. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -69,11 +67,8 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
Intl::ResolvedLocale r =
Intl::ResolveLocale(isolate, JSSegmenter::GetAvailableLocales(),
requested_locales, matcher, {});
-
- // 10. Set segmenter.[[Locale]] to the value of r.[[Locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
- segmenter_holder->set_locale(*locale_str);
// 13. Let granularity be ? GetOption(options, "granularity", "string", «
// "grapheme", "word", "sentence" », "grapheme").
@@ -85,9 +80,6 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
MAYBE_RETURN(maybe_granularity, MaybeHandle<JSSegmenter>());
Granularity granularity_enum = maybe_granularity.FromJust();
- // 14. Set segmenter.[[SegmenterGranularity]] to granularity.
- segmenter_holder->set_granularity(granularity_enum);
-
icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
@@ -107,8 +99,6 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
icu_break_iterator.reset(
icu::BreakIterator::createSentenceInstance(icu_locale, status));
break;
- case Granularity::COUNT:
- UNREACHABLE();
}
CHECK(U_SUCCESS(status));
@@ -118,6 +108,18 @@ MaybeHandle<JSSegmenter> JSSegmenter::Initialize(
Managed<icu::BreakIterator>::FromUniquePtr(isolate, 0,
std::move(icu_break_iterator));
+ // Now all properties are ready, so we can allocate the result object.
+ Handle<JSSegmenter> segmenter_holder = Handle<JSSegmenter>::cast(
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(map));
+ DisallowHeapAllocation no_gc;
+ segmenter_holder->set_flags(0);
+
+ // 10. Set segmenter.[[Locale]] to the value of r.[[Locale]].
+ segmenter_holder->set_locale(*locale_str);
+
+ // 14. Set segmenter.[[SegmenterGranularity]] to granularity.
+ segmenter_holder->set_granularity(granularity_enum);
+
segmenter_holder->set_icu_break_iterator(*managed_break_iterator);
return segmenter_holder;
}
@@ -157,9 +159,8 @@ Handle<String> JSSegmenter::GranularityAsString() const {
return GetReadOnlyRoots().word_string_handle();
case Granularity::SENTENCE:
return GetReadOnlyRoots().sentence_string_handle();
- case Granularity::COUNT:
- UNREACHABLE();
}
+ UNREACHABLE();
}
const std::set<std::string>& JSSegmenter::GetAvailableLocales() {
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index 423dd67497..641cf106fb 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -30,11 +30,11 @@ namespace internal {
class JSSegmenter : public JSObject {
public:
- // Initializes segmenter object with properties derived from input
- // locales and options.
- V8_WARN_UNUSED_RESULT static MaybeHandle<JSSegmenter> Initialize(
- Isolate* isolate, Handle<JSSegmenter> segmenter_holder,
- Handle<Object> locales, Handle<Object> options);
+ // Creates segmenter object with properties derived from input locales and
+ // options.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSSegmenter> New(
+ Isolate* isolate, Handle<Map> map, Handle<Object> locales,
+ Handle<Object> options);
V8_WARN_UNUSED_RESULT static Handle<JSObject> ResolvedOptions(
Isolate* isolate, Handle<JSSegmenter> segmenter_holder);
@@ -56,8 +56,7 @@ class JSSegmenter : public JSObject {
enum class Granularity {
GRAPHEME, // for character-breaks
WORD, // for word-breaks
- SENTENCE, // for sentence-breaks
- COUNT
+ SENTENCE // for sentence-breaks
};
inline void set_granularity(Granularity granularity);
inline Granularity granularity() const;
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 6632a31002..46f28e883e 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -97,16 +97,16 @@ void JSFinalizationGroup::Register(
}
}
-void JSFinalizationGroup::Unregister(
- Handle<JSFinalizationGroup> finalization_group, Handle<Object> key,
- Isolate* isolate) {
+bool JSFinalizationGroup::Unregister(
+ Handle<JSFinalizationGroup> finalization_group,
+ Handle<JSReceiver> unregister_token, Isolate* isolate) {
// Iterate through the doubly linked list of WeakCells associated with the
// key. Each WeakCell will be in the "active_cells" or "cleared_cells" list of
// its FinalizationGroup; remove it from there.
if (!finalization_group->key_map().IsUndefined(isolate)) {
Handle<ObjectHashTable> key_map =
handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
- Object value = key_map->Lookup(key);
+ Object value = key_map->Lookup(unregister_token);
Object undefined = ReadOnlyRoots(isolate).undefined_value();
while (value.IsWeakCell()) {
WeakCell weak_cell = WeakCell::cast(value);
@@ -116,9 +116,13 @@ void JSFinalizationGroup::Unregister(
weak_cell.set_key_list_next(undefined);
}
bool was_present;
- key_map = ObjectHashTable::Remove(isolate, key_map, key, &was_present);
+ key_map = ObjectHashTable::Remove(isolate, key_map, unregister_token,
+ &was_present);
finalization_group->set_key_map(*key_map);
+ return was_present;
}
+
+ return false;
}
bool JSFinalizationGroup::NeedsCleanup() const {
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index b846c2e608..6a401fecee 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -41,8 +41,9 @@ class JSFinalizationGroup : public JSObject {
Handle<JSReceiver> target,
Handle<Object> holdings, Handle<Object> key,
Isolate* isolate);
- inline static void Unregister(Handle<JSFinalizationGroup> finalization_group,
- Handle<Object> key, Isolate* isolate);
+ inline static bool Unregister(Handle<JSFinalizationGroup> finalization_group,
+ Handle<JSReceiver> unregister_token,
+ Isolate* isolate);
// Returns true if the cleared_cells list is non-empty.
inline bool NeedsCleanup() const;
@@ -57,24 +58,13 @@ class JSFinalizationGroup : public JSObject {
// Constructs an iterator for the WeakCells in the cleared_cells list and
// calls the user's cleanup function.
- static void Cleanup(Handle<JSFinalizationGroup> finalization_group,
- Isolate* isolate);
-
-// Layout description.
-#define JS_FINALIZATION_GROUP_FIELDS(V) \
- V(kNativeContextOffset, kTaggedSize) \
- V(kCleanupOffset, kTaggedSize) \
- V(kActiveCellsOffset, kTaggedSize) \
- V(kClearedCellsOffset, kTaggedSize) \
- V(kKeyMapOffset, kTaggedSize) \
- V(kNextOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
+ static void Cleanup(Isolate* isolate,
+ Handle<JSFinalizationGroup> finalization_group,
+ Handle<Object> callback);
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_FINALIZATION_GROUP_FIELDS)
-#undef JS_FINALIZATION_GROUP_FIELDS
+ TORQUE_GENERATED_JSFINALIZATION_GROUP_FIELDS)
// Bitfields in flags.
class ScheduledForCleanupField : public BitField<bool, 0, 1> {};
@@ -106,21 +96,9 @@ class WeakCell : public HeapObject {
DECL_ACCESSORS(key_list_prev, Object)
DECL_ACCESSORS(key_list_next, Object)
-// Layout description.
-#define WEAK_CELL_FIELDS(V) \
- V(kFinalizationGroupOffset, kTaggedSize) \
- V(kTargetOffset, kTaggedSize) \
- V(kHoldingsOffset, kTaggedSize) \
- V(kPrevOffset, kTaggedSize) \
- V(kNextOffset, kTaggedSize) \
- V(kKeyOffset, kTaggedSize) \
- V(kKeyListPrevOffset, kTaggedSize) \
- V(kKeyListNextOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WEAK_CELL_FIELDS)
-#undef WEAK_CELL_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WEAK_CELL_FIELDS)
class BodyDescriptor;
@@ -146,14 +124,9 @@ class JSWeakRef : public JSObject {
DECL_ACCESSORS(target, HeapObject)
-// Layout description.
-#define JS_WEAK_REF_FIELDS(V) \
- V(kTargetOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_WEAK_REF_FIELDS)
-#undef JS_WEAK_REF_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_REF_FIELDS)
class BodyDescriptor;
@@ -189,15 +162,10 @@ class JSFinalizationGroupCleanupIterator : public JSObject {
DECL_ACCESSORS(finalization_group, JSFinalizationGroup)
-// Layout description.
-#define JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS(V) \
- V(kFinalizationGroupOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS)
-#undef JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSFINALIZATION_GROUP_CLEANUP_ITERATOR_FIELDS)
OBJECT_CONSTRUCTORS(JSFinalizationGroupCleanupIterator, JSObject);
};
diff --git a/deps/v8/src/objects/keys.cc b/deps/v8/src/objects/keys.cc
index d3a1f6bdc2..18b38ed744 100644
--- a/deps/v8/src/objects/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -395,6 +395,11 @@ MaybeHandle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(
GetKeysConversion keys_conversion) {
+ // TODO(v8:9401): We should extend the fast path of KeyAccumulator::GetKeys to
+ // also use fast path even when filter = SKIP_SYMBOLS. We used to pass wrong
+ // filter to use fast path in cases where we tried to verify all properties
+ // are enumerable. However these checks weren't correct and passing the wrong
+ // filter led to wrong behaviour.
if (filter_ == ENUMERABLE_STRINGS) {
Handle<FixedArray> keys;
if (GetKeysFast(keys_conversion).ToHandle(&keys)) {
diff --git a/deps/v8/src/objects/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
index 49683da267..ad0a058a92 100644
--- a/deps/v8/src/objects/layout-descriptor-inl.h
+++ b/deps/v8/src/objects/layout-descriptor-inl.h
@@ -209,11 +209,11 @@ int LayoutDescriptor::number_of_layout_words() {
}
uint32_t LayoutDescriptor::get_layout_word(int index) const {
- return get_uint32(index);
+ return get_uint32_relaxed(index);
}
void LayoutDescriptor::set_layout_word(int index, uint32_t value) {
- set_uint32(index, value);
+ set_uint32_relaxed(index, value);
}
// LayoutDescriptorHelper is a helper class for querying whether inobject
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index 1ddb333cff..32b43cd8f7 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -15,6 +15,10 @@
namespace v8 {
namespace internal {
+//
+// ObjectBoilerplateDescription
+//
+
OBJECT_CONSTRUCTORS_IMPL(ObjectBoilerplateDescription, FixedArray)
CAST_ACCESSOR(ObjectBoilerplateDescription)
@@ -22,6 +26,70 @@ CAST_ACCESSOR(ObjectBoilerplateDescription)
SMI_ACCESSORS(ObjectBoilerplateDescription, flags,
FixedArray::OffsetOfElementAt(kLiteralTypeOffset))
+Object ObjectBoilerplateDescription::name(int index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return name(isolate, index);
+}
+
+Object ObjectBoilerplateDescription::name(Isolate* isolate, int index) const {
+ // get() already checks for out of bounds access, but we do not want to allow
+ // access to the last element, if it is the number of properties.
+ DCHECK_NE(size(), index);
+ return get(isolate, 2 * index + kDescriptionStartIndex);
+}
+
+Object ObjectBoilerplateDescription::value(int index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return value(isolate, index);
+}
+
+Object ObjectBoilerplateDescription::value(Isolate* isolate, int index) const {
+ return get(isolate, 2 * index + 1 + kDescriptionStartIndex);
+}
+
+void ObjectBoilerplateDescription::set_key_value(int index, Object key,
+ Object value) {
+ DCHECK_LT(index, size());
+ DCHECK_GE(index, 0);
+ set(2 * index + kDescriptionStartIndex, key);
+ set(2 * index + 1 + kDescriptionStartIndex, value);
+}
+
+int ObjectBoilerplateDescription::size() const {
+ DCHECK_EQ(0, (length() - kDescriptionStartIndex -
+ (this->has_number_of_properties() ? 1 : 0)) %
+ 2);
+ // Rounding is intended.
+ return (length() - kDescriptionStartIndex) / 2;
+}
+
+bool ObjectBoilerplateDescription::has_number_of_properties() const {
+ return (length() - kDescriptionStartIndex) % 2 != 0;
+}
+
+int ObjectBoilerplateDescription::backing_store_size() const {
+ if (has_number_of_properties()) {
+ // If present, the last entry contains the number of properties.
+ return Smi::ToInt(this->get(length() - 1));
+ }
+ // If the number is not given explicitly, we assume there are no
+ // properties with computed names.
+ return size();
+}
+
+void ObjectBoilerplateDescription::set_backing_store_size(
+ int backing_store_size) {
+ DCHECK(has_number_of_properties());
+ DCHECK_NE(size(), backing_store_size);
+ CHECK(Smi::IsValid(backing_store_size));
+ // TODO(ishell): move this value to the header
+ set(length() - 1, Smi::FromInt(backing_store_size));
+}
+
+//
+// ClassBoilerplate
+//
+
OBJECT_CONSTRUCTORS_IMPL(ClassBoilerplate, FixedArray)
CAST_ACCESSOR(ClassBoilerplate)
@@ -52,6 +120,10 @@ ACCESSORS(ClassBoilerplate, instance_elements_template, Object,
ACCESSORS(ClassBoilerplate, instance_computed_properties, FixedArray,
FixedArray::OffsetOfElementAt(kPrototypeComputedPropertiesIndex))
+//
+// ArrayBoilerplateDescription
+//
+
OBJECT_CONSTRUCTORS_IMPL(ArrayBoilerplateDescription, Struct)
CAST_ACCESSOR(ArrayBoilerplateDescription)
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index bfdbd9317b..7328c11f31 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -17,56 +17,6 @@
namespace v8 {
namespace internal {
-Object ObjectBoilerplateDescription::name(int index) const {
- // get() already checks for out of bounds access, but we do not want to allow
- // access to the last element, if it is the number of properties.
- DCHECK_NE(size(), index);
- return get(2 * index + kDescriptionStartIndex);
-}
-
-Object ObjectBoilerplateDescription::value(int index) const {
- return get(2 * index + 1 + kDescriptionStartIndex);
-}
-
-void ObjectBoilerplateDescription::set_key_value(int index, Object key,
- Object value) {
- DCHECK_LT(index, size());
- DCHECK_GE(index, 0);
- set(2 * index + kDescriptionStartIndex, key);
- set(2 * index + 1 + kDescriptionStartIndex, value);
-}
-
-int ObjectBoilerplateDescription::size() const {
- DCHECK_EQ(0, (length() - kDescriptionStartIndex -
- (this->has_number_of_properties() ? 1 : 0)) %
- 2);
- // Rounding is intended.
- return (length() - kDescriptionStartIndex) / 2;
-}
-
-int ObjectBoilerplateDescription::backing_store_size() const {
- if (has_number_of_properties()) {
- // If present, the last entry contains the number of properties.
- return Smi::ToInt(this->get(length() - 1));
- }
- // If the number is not given explicitly, we assume there are no
- // properties with computed names.
- return size();
-}
-
-void ObjectBoilerplateDescription::set_backing_store_size(
- Isolate* isolate, int backing_store_size) {
- DCHECK(has_number_of_properties());
- DCHECK_NE(size(), backing_store_size);
- Handle<Object> backing_store_size_obj =
- isolate->factory()->NewNumberFromInt(backing_store_size);
- set(length() - 1, *backing_store_size_obj);
-}
-
-bool ObjectBoilerplateDescription::has_number_of_properties() const {
- return (length() - kDescriptionStartIndex) % 2 != 0;
-}
-
namespace {
inline int EncodeComputedEntry(ClassBoilerplate::ValueKind value_kind,
@@ -306,8 +256,12 @@ class ObjectDescriptor {
void IncPropertiesCount() { ++property_count_; }
void IncElementsCount() { ++element_count_; }
+ explicit ObjectDescriptor(int property_slack)
+ : property_slack_(property_slack) {}
+
bool HasDictionaryProperties() const {
- return computed_count_ > 0 || property_count_ > kMaxNumberOfDescriptors;
+ return computed_count_ > 0 ||
+ (property_count_ + property_slack_) > kMaxNumberOfDescriptors;
}
Handle<Object> properties_template() const {
@@ -324,17 +278,17 @@ class ObjectDescriptor {
return computed_properties_;
}
- void CreateTemplates(Isolate* isolate, int slack) {
+ void CreateTemplates(Isolate* isolate) {
Factory* factory = isolate->factory();
descriptor_array_template_ = factory->empty_descriptor_array();
properties_dictionary_template_ = factory->empty_property_dictionary();
- if (property_count_ || HasDictionaryProperties() || slack) {
+ if (property_count_ || computed_count_ || property_slack_) {
if (HasDictionaryProperties()) {
properties_dictionary_template_ = NameDictionary::New(
- isolate, property_count_ + computed_count_ + slack);
+ isolate, property_count_ + computed_count_ + property_slack_);
} else {
- descriptor_array_template_ =
- DescriptorArray::Allocate(isolate, 0, property_count_ + slack);
+ descriptor_array_template_ = DescriptorArray::Allocate(
+ isolate, 0, property_count_ + property_slack_);
}
}
elements_dictionary_template_ =
@@ -419,6 +373,7 @@ class ObjectDescriptor {
}
private:
+ const int property_slack_;
int property_count_ = 0;
int next_enumeration_index_ = PropertyDetails::kInitialIndex;
int element_count_ = 0;
@@ -454,8 +409,8 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
// in CanonicalHandleScope.
HandleScope scope(isolate);
Factory* factory = isolate->factory();
- ObjectDescriptor static_desc;
- ObjectDescriptor instance_desc;
+ ObjectDescriptor static_desc(kMinimumClassPropertiesCount);
+ ObjectDescriptor instance_desc(kMinimumPrototypePropertiesCount);
for (int i = 0; i < expr->properties()->length(); i++) {
ClassLiteral::Property* property = expr->properties()->at(i);
@@ -475,7 +430,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
//
// Initialize class object template.
//
- static_desc.CreateTemplates(isolate, kMinimumClassPropertiesCount);
+ static_desc.CreateTemplates(isolate);
STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
{
// Add length_accessor.
@@ -509,7 +464,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
//
// Initialize prototype object template.
//
- instance_desc.CreateTemplates(isolate, kMinimumPrototypePropertiesCount);
+ instance_desc.CreateTemplates(isolate);
{
Handle<Object> value(
Smi::FromInt(ClassBoilerplate::kConstructorArgumentIndex), isolate);
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 35ae98a05b..f009a54f8a 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -21,20 +21,23 @@ class ClassLiteral;
// of properties in the backing store. This number includes properties with
// computed names that are not
// in the list.
+// TODO(ishell): Don't derive from FixedArray as it already has its own map.
class ObjectBoilerplateDescription : public FixedArray {
public:
- Object name(int index) const;
- Object value(int index) const;
+ inline Object name(int index) const;
+ inline Object name(Isolate* isolate, int index) const;
- void set_key_value(int index, Object key, Object value);
+ inline Object value(int index) const;
+ inline Object value(Isolate* isolate, int index) const;
+
+ inline void set_key_value(int index, Object key, Object value);
// The number of boilerplate properties.
- int size() const;
+ inline int size() const;
// Number of boilerplate properties and properties with computed names.
- int backing_store_size() const;
-
- void set_backing_store_size(Isolate* isolate, int backing_store_size);
+ inline int backing_store_size() const;
+ inline void set_backing_store_size(int backing_store_size);
// Used to encode ObjectLiteral::Flags for nested object literals
// Stored as the first element of the fixed array
@@ -47,7 +50,7 @@ class ObjectBoilerplateDescription : public FixedArray {
DECL_PRINTER(ObjectBoilerplateDescription)
private:
- bool has_number_of_properties() const;
+ inline bool has_number_of_properties() const;
OBJECT_CONSTRUCTORS(ObjectBoilerplateDescription, FixedArray);
};
diff --git a/deps/v8/src/objects/lookup-inl.h b/deps/v8/src/objects/lookup-inl.h
index 5b2dbff258..648398be5e 100644
--- a/deps/v8/src/objects/lookup-inl.h
+++ b/deps/v8/src/objects/lookup-inl.h
@@ -31,7 +31,7 @@ LookupIterator::LookupIterator(Handle<Object> receiver, Handle<Name> name,
LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
Handle<Name> name, Handle<JSReceiver> holder,
Configuration configuration)
- : configuration_(ComputeConfiguration(configuration, name)),
+ : configuration_(ComputeConfiguration(isolate, configuration, name)),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(isolate),
@@ -90,7 +90,7 @@ Handle<Name> LookupIterator::GetName() {
}
bool LookupIterator::is_dictionary_holder() const {
- return !holder_->HasFastProperties();
+ return !holder_->HasFastProperties(isolate_);
}
Handle<Map> LookupIterator::transition_map() const {
@@ -111,23 +111,23 @@ Handle<T> LookupIterator::GetHolder() const {
bool LookupIterator::ExtendingNonExtensible(Handle<JSReceiver> receiver) {
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
- return !receiver->map().is_extensible() &&
- (IsElement() || !name_->IsPrivate());
+ return !receiver->map(isolate_).is_extensible() &&
+ (IsElement() || !name_->IsPrivate(isolate_));
}
bool LookupIterator::IsCacheableTransition() {
DCHECK_EQ(TRANSITION, state_);
- return transition_->IsPropertyCell() ||
+ return transition_->IsPropertyCell(isolate_) ||
(transition_map()->is_dictionary_map() &&
- !GetStoreTarget<JSReceiver>()->HasFastProperties()) ||
- transition_map()->GetBackPointer().IsMap();
+ !GetStoreTarget<JSReceiver>()->HasFastProperties(isolate_)) ||
+ transition_map()->GetBackPointer(isolate_).IsMap(isolate_);
}
void LookupIterator::UpdateProtector() {
if (IsElement()) return;
// This list must be kept in sync with
// CodeStubAssembler::CheckForAssociatedProtector!
- ReadOnlyRoots roots(heap());
+ ReadOnlyRoots roots(isolate_);
if (*name_ == roots.is_concat_spreadable_symbol() ||
*name_ == roots.constructor_string() || *name_ == roots.next_string() ||
*name_ == roots.species_symbol() || *name_ == roots.iterator_symbol() ||
@@ -139,52 +139,59 @@ void LookupIterator::UpdateProtector() {
int LookupIterator::descriptor_number() const {
DCHECK(!IsElement());
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
return number_;
}
int LookupIterator::dictionary_entry() const {
DCHECK(!IsElement());
DCHECK(has_property_);
- DCHECK(!holder_->HasFastProperties());
+ DCHECK(!holder_->HasFastProperties(isolate_));
return number_;
}
+// static
LookupIterator::Configuration LookupIterator::ComputeConfiguration(
- Configuration configuration, Handle<Name> name) {
- return name->IsPrivate() ? OWN_SKIP_INTERCEPTOR : configuration;
+ Isolate* isolate, Configuration configuration, Handle<Name> name) {
+ return name->IsPrivate(isolate) ? OWN_SKIP_INTERCEPTOR : configuration;
}
+// static
Handle<JSReceiver> LookupIterator::GetRoot(Isolate* isolate,
Handle<Object> receiver,
uint32_t index) {
- if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ if (receiver->IsJSReceiver(isolate))
+ return Handle<JSReceiver>::cast(receiver);
return GetRootForNonJSReceiver(isolate, receiver, index);
}
template <class T>
Handle<T> LookupIterator::GetStoreTarget() const {
- DCHECK(receiver_->IsJSReceiver());
- if (receiver_->IsJSGlobalProxy()) {
- Map map = JSGlobalProxy::cast(*receiver_).map();
- if (map.has_hidden_prototype()) {
- return handle(JSGlobalObject::cast(map.prototype()), isolate_);
+ DCHECK(receiver_->IsJSReceiver(isolate_));
+ if (receiver_->IsJSGlobalProxy(isolate_)) {
+ HeapObject prototype =
+ JSGlobalProxy::cast(*receiver_).map(isolate_).prototype(isolate_);
+ if (prototype.IsJSGlobalObject(isolate_)) {
+ return handle(JSGlobalObject::cast(prototype), isolate_);
}
}
return Handle<T>::cast(receiver_);
}
+// static
template <bool is_element>
-InterceptorInfo LookupIterator::GetInterceptor(JSObject holder) {
- return is_element ? holder.GetIndexedInterceptor()
- : holder.GetNamedInterceptor();
+InterceptorInfo LookupIterator::GetInterceptor(Isolate* isolate,
+ JSObject holder) {
+ return is_element ? holder.GetIndexedInterceptor(isolate)
+ : holder.GetNamedInterceptor(isolate);
}
inline Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
DCHECK_EQ(INTERCEPTOR, state_);
- InterceptorInfo result =
- IsElement() ? GetInterceptor<true>(JSObject::cast(*holder_))
- : GetInterceptor<false>(JSObject::cast(*holder_));
+ JSObject holder = JSObject::cast(*holder_);
+ InterceptorInfo result = IsElement()
+ ? GetInterceptor<true>(isolate_, holder)
+ : GetInterceptor<false>(isolate_, holder);
return handle(result, isolate_);
}
diff --git a/deps/v8/src/objects/lookup.cc b/deps/v8/src/objects/lookup.cc
index 744cf67482..33130aafe5 100644
--- a/deps/v8/src/objects/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -80,50 +80,6 @@ LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
return LookupIterator(isolate, receiver, name, configuration);
}
-// TODO(ishell): Consider removing this way of LookupIterator creation.
-// static
-LookupIterator LookupIterator::ForTransitionHandler(
- Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, MaybeHandle<Map> maybe_transition_map) {
- Handle<Map> transition_map;
- if (!maybe_transition_map.ToHandle(&transition_map) ||
- !transition_map->IsPrototypeValidityCellValid()) {
- // This map is not a valid transition handler, so full lookup is required.
- return LookupIterator(isolate, receiver, name);
- }
-
- PropertyDetails details = PropertyDetails::Empty();
- bool has_property;
- if (transition_map->is_dictionary_map()) {
- details = PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
- has_property = false;
- } else {
- details = transition_map->GetLastDescriptorDetails();
- has_property = true;
- }
-#ifdef DEBUG
- if (name->IsPrivate()) {
- DCHECK_EQ(DONT_ENUM, details.attributes());
- } else {
- DCHECK_EQ(NONE, details.attributes());
- }
-#endif
- LookupIterator it(isolate, receiver, name, transition_map, details,
- has_property);
-
- if (!transition_map->is_dictionary_map()) {
- int descriptor_number = transition_map->LastAdded();
- Handle<Map> new_map =
- Map::PrepareForDataProperty(isolate, transition_map, descriptor_number,
- PropertyConstness::kConst, value);
- // Reload information; this is no-op if nothing changed.
- it.property_details_ =
- new_map->instance_descriptors().GetDetails(descriptor_number);
- it.transition_ = new_map;
- }
- return it;
-}
-
LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
Handle<Name> name, Handle<Map> transition_map,
PropertyDetails details, bool has_property)
@@ -151,7 +107,7 @@ void LookupIterator::Start() {
holder_ = initial_holder_;
JSReceiver holder = *holder_;
- Map map = holder.map();
+ Map map = holder.map(isolate_);
state_ = LookupInHolder<is_element>(map, holder);
if (IsFound()) return;
@@ -169,7 +125,7 @@ void LookupIterator::Next() {
has_property_ = false;
JSReceiver holder = *holder_;
- Map map = holder.map();
+ Map map = holder.map(isolate_);
if (map.IsSpecialReceiverMap()) {
state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
@@ -195,7 +151,7 @@ void LookupIterator::NextInternal(Map map, JSReceiver holder) {
return;
}
holder = maybe_holder;
- map = holder.map();
+ map = holder.map(isolate_);
state_ = LookupInHolder<is_element>(map, holder);
} while (!IsFound());
@@ -218,17 +174,17 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
Isolate* isolate, Handle<Object> receiver, uint32_t index) {
// Strings are the only objects with properties (only elements) directly on
// the wrapper. Hence we can skip generating the wrapper for all other cases.
- if (receiver->IsString() &&
+ if (receiver->IsString(isolate) &&
index < static_cast<uint32_t>(String::cast(*receiver).length())) {
// TODO(verwaest): Speed this up. Perhaps use a cached wrapper on the native
// context, ensuring that we don't leak it into JS?
Handle<JSFunction> constructor = isolate->string_function();
Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
- Handle<JSValue>::cast(result)->set_value(*receiver);
+ Handle<JSPrimitiveWrapper>::cast(result)->set_value(*receiver);
return result;
}
- auto root =
- handle(receiver->GetPrototypeChainRootMap(isolate).prototype(), isolate);
+ auto root = handle(
+ receiver->GetPrototypeChainRootMap(isolate).prototype(isolate), isolate);
if (root->IsNull(isolate)) {
isolate->PushStackTraceAndDie(reinterpret_cast<void*>(receiver->ptr()));
}
@@ -236,8 +192,8 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
}
Handle<Map> LookupIterator::GetReceiverMap() const {
- if (receiver_->IsNumber()) return factory()->heap_number_map();
- return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
+ if (receiver_->IsNumber(isolate_)) return factory()->heap_number_map();
+ return handle(Handle<HeapObject>::cast(receiver_)->map(isolate_), isolate_);
}
bool LookupIterator::HasAccess() const {
@@ -250,13 +206,13 @@ template <bool is_element>
void LookupIterator::ReloadPropertyInformation() {
state_ = BEFORE_PROPERTY;
interceptor_state_ = InterceptorState::kUninitialized;
- state_ = LookupInHolder<is_element>(holder_->map(), *holder_);
- DCHECK(IsFound() || !holder_->HasFastProperties());
+ state_ = LookupInHolder<is_element>(holder_->map(isolate_), *holder_);
+ DCHECK(IsFound() || !holder_->HasFastProperties(isolate_));
}
namespace {
-bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver holder) {
+bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, HeapObject object) {
static uint32_t context_slots[] = {
#define TYPED_ARRAY_CONTEXT_SLOTS(Type, type, TYPE, ctype) \
Context::TYPE##_ARRAY_FUN_INDEX,
@@ -265,91 +221,99 @@ bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver holder) {
#undef TYPED_ARRAY_CONTEXT_SLOTS
};
- if (!holder.IsJSFunction()) return false;
+ if (!object.IsJSFunction(isolate)) return false;
return std::any_of(
std::begin(context_slots), std::end(context_slots),
- [=](uint32_t slot) { return isolate->IsInAnyContext(holder, slot); });
+ [=](uint32_t slot) { return isolate->IsInAnyContext(object, slot); });
}
} // namespace
void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
+ if (!receiver_->IsHeapObject()) return;
+ Handle<HeapObject> receiver = Handle<HeapObject>::cast(receiver_);
- ReadOnlyRoots roots(heap());
+ Handle<NativeContext> native_context = isolate_->native_context();
+
+ ReadOnlyRoots roots(isolate_);
if (*name_ == roots.constructor_string()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
!isolate_->IsPromiseSpeciesLookupChainIntact() &&
- !isolate_->IsRegExpSpeciesLookupChainIntact() &&
+ !isolate_->IsRegExpSpeciesLookupChainIntact(native_context) &&
!isolate_->IsTypedArraySpeciesLookupChainIntact()) {
return;
}
// Setting the constructor property could change an instance's @@species
- if (holder_->IsJSArray()) {
+ if (receiver->IsJSArray(isolate_)) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
return;
- } else if (holder_->IsJSPromise()) {
+ } else if (receiver->IsJSPromise(isolate_)) {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
return;
- } else if (holder_->IsJSRegExp()) {
- if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
- isolate_->InvalidateRegExpSpeciesProtector();
+ } else if (receiver->IsJSRegExp(isolate_)) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
+ isolate_->InvalidateRegExpSpeciesProtector(native_context);
return;
- } else if (holder_->IsJSTypedArray()) {
+ } else if (receiver->IsJSTypedArray(isolate_)) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
return;
}
- if (holder_->map().is_prototype_map()) {
+ if (receiver->map(isolate_).is_prototype_map()) {
DisallowHeapAllocation no_gc;
// Setting the constructor of any prototype with the @@species protector
// (of any realm) also needs to invalidate the protector.
- // For typed arrays, we check a prototype of this holder since TypedArrays
- // have different prototypes for each type, and their parent prototype is
- // pointing the same TYPED_ARRAY_PROTOTYPE.
- if (isolate_->IsInAnyContext(*holder_,
+ // For typed arrays, we check a prototype of this receiver since
+ // TypedArrays have different prototypes for each type, and their parent
+ // prototype is pointing the same TYPED_ARRAY_PROTOTYPE.
+ if (isolate_->IsInAnyContext(*receiver,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayPrototypeConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
- } else if (isolate_->IsInAnyContext(*holder_,
+ } else if (isolate_->IsInAnyContext(*receiver,
Context::PROMISE_PROTOTYPE_INDEX)) {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
- } else if (isolate_->IsInAnyContext(*holder_,
+ } else if (isolate_->IsInAnyContext(*receiver,
Context::REGEXP_PROTOTYPE_INDEX)) {
- if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
- isolate_->InvalidateRegExpSpeciesProtector();
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
+ isolate_->InvalidateRegExpSpeciesProtector(native_context);
} else if (isolate_->IsInAnyContext(
- holder_->map().prototype(),
+ receiver->map(isolate_).prototype(isolate_),
Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
}
}
} else if (*name_ == roots.next_string()) {
- if (isolate_->IsInAnyContext(
- *holder_, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
+ if (receiver->IsJSArrayIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
// Setting the next property of %ArrayIteratorPrototype% also needs to
// invalidate the array iterator protector.
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
isolate_->InvalidateArrayIteratorProtector();
- } else if (isolate_->IsInAnyContext(
- *holder_, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
+ } else if (receiver->IsJSMapIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
if (!isolate_->IsMapIteratorLookupChainIntact()) return;
isolate_->InvalidateMapIteratorProtector();
- } else if (isolate_->IsInAnyContext(
- *holder_, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX)) {
+ } else if (receiver->IsJSSetIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX)) {
if (!isolate_->IsSetIteratorLookupChainIntact()) return;
isolate_->InvalidateSetIteratorProtector();
- } else if (isolate_->IsInAnyContext(
- *receiver_,
+ } else if (receiver->IsJSStringIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver,
Context::INITIAL_STRING_ITERATOR_PROTOTYPE_INDEX)) {
// Setting the next property of %StringIteratorPrototype% invalidates the
// string iterator protector.
@@ -359,26 +323,26 @@ void LookupIterator::InternalUpdateProtector() {
} else if (*name_ == roots.species_symbol()) {
if (!isolate_->IsArraySpeciesLookupChainIntact() &&
!isolate_->IsPromiseSpeciesLookupChainIntact() &&
- !isolate_->IsRegExpSpeciesLookupChainIntact() &&
+ !isolate_->IsRegExpSpeciesLookupChainIntact(native_context) &&
!isolate_->IsTypedArraySpeciesLookupChainIntact()) {
return;
}
// Setting the Symbol.species property of any Array, Promise or TypedArray
// constructor invalidates the @@species protector
- if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
+ if (isolate_->IsInAnyContext(*receiver, Context::ARRAY_FUNCTION_INDEX)) {
if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
isolate_->InvalidateArraySpeciesProtector();
- } else if (isolate_->IsInAnyContext(*holder_,
+ } else if (isolate_->IsInAnyContext(*receiver,
Context::PROMISE_FUNCTION_INDEX)) {
if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
isolate_->InvalidatePromiseSpeciesProtector();
- } else if (isolate_->IsInAnyContext(*holder_,
+ } else if (isolate_->IsInAnyContext(*receiver,
Context::REGEXP_FUNCTION_INDEX)) {
- if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
- isolate_->InvalidateRegExpSpeciesProtector();
- } else if (IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
+ if (!isolate_->IsRegExpSpeciesLookupChainIntact(native_context)) return;
+ isolate_->InvalidateRegExpSpeciesProtector(native_context);
+ } else if (IsTypedArrayFunctionInAnyContext(isolate_, *receiver)) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
}
@@ -386,23 +350,33 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
isolate_->InvalidateIsConcatSpreadableProtector();
} else if (*name_ == roots.iterator_symbol()) {
- if (holder_->IsJSArray()) {
+ if (receiver->IsJSArray(isolate_)) {
if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
isolate_->InvalidateArrayIteratorProtector();
+ } else if (receiver->IsJSSet(isolate_) || receiver->IsJSSetIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_SET_ITERATOR_PROTOTYPE_INDEX) ||
+ isolate_->IsInAnyContext(*receiver,
+ Context::INITIAL_SET_PROTOTYPE_INDEX)) {
+ if (isolate_->IsSetIteratorLookupChainIntact()) {
+ isolate_->InvalidateSetIteratorProtector();
+ }
+ } else if (receiver->IsJSMapIterator() ||
+ isolate_->IsInAnyContext(
+ *receiver, Context::INITIAL_MAP_ITERATOR_PROTOTYPE_INDEX)) {
+ if (isolate_->IsMapIteratorLookupChainIntact()) {
+ isolate_->InvalidateMapIteratorProtector();
+ }
} else if (isolate_->IsInAnyContext(
- *holder_, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX)) {
+ *receiver, Context::INITIAL_ITERATOR_PROTOTYPE_INDEX)) {
if (isolate_->IsMapIteratorLookupChainIntact()) {
isolate_->InvalidateMapIteratorProtector();
}
if (isolate_->IsSetIteratorLookupChainIntact()) {
isolate_->InvalidateSetIteratorProtector();
}
- } else if (isolate_->IsInAnyContext(*holder_,
- Context::INITIAL_SET_PROTOTYPE_INDEX)) {
- if (!isolate_->IsSetIteratorLookupChainIntact()) return;
- isolate_->InvalidateSetIteratorProtector();
} else if (isolate_->IsInAnyContext(
- *receiver_, Context::INITIAL_STRING_PROTOTYPE_INDEX)) {
+ *receiver, Context::INITIAL_STRING_PROTOTYPE_INDEX)) {
// Setting the Symbol.iterator property of String.prototype invalidates
// the string iterator protector. Symbol.iterator can also be set on a
// String wrapper, but not on a primitive string. We only support
@@ -414,7 +388,7 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsPromiseResolveLookupChainIntact()) return;
// Setting the "resolve" property on any %Promise% intrinsic object
// invalidates the Promise.resolve protector.
- if (isolate_->IsInAnyContext(*holder_, Context::PROMISE_FUNCTION_INDEX)) {
+ if (isolate_->IsInAnyContext(*receiver, Context::PROMISE_FUNCTION_INDEX)) {
isolate_->InvalidatePromiseResolveProtector();
}
} else if (*name_ == roots.then_string()) {
@@ -426,10 +400,10 @@ void LookupIterator::InternalUpdateProtector() {
// to guard the fast-path in AsyncGeneratorResolve, where we can skip
// the ResolvePromise step and go directly to FulfillPromise if we
// know that the Object.prototype doesn't contain a "then" method.
- if (holder_->IsJSPromise() ||
- isolate_->IsInAnyContext(*holder_,
+ if (receiver->IsJSPromise(isolate_) ||
+ isolate_->IsInAnyContext(*receiver,
Context::INITIAL_OBJECT_PROTOTYPE_INDEX) ||
- isolate_->IsInAnyContext(*holder_, Context::PROMISE_PROTOTYPE_INDEX)) {
+ isolate_->IsInAnyContext(*receiver, Context::PROMISE_PROTOTYPE_INDEX)) {
isolate_->InvalidatePromiseThenProtector();
}
}
@@ -441,15 +415,16 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
// JSProxy does not have fast properties so we do an early return.
- DCHECK_IMPLIES(holder->IsJSProxy(), !holder->HasFastProperties());
- DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
- if (holder->IsJSProxy()) return;
+ DCHECK_IMPLIES(holder->IsJSProxy(isolate_),
+ !holder->HasFastProperties(isolate_));
+ DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
+ if (holder->IsJSProxy(isolate_)) return;
Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- ElementsKind kind = holder_obj->GetElementsKind();
- ElementsKind to = value->OptimalElementsKind();
+ ElementsKind kind = holder_obj->GetElementsKind(isolate_);
+ ElementsKind to = value->OptimalElementsKind(isolate_);
if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
to = GetMoreGeneralElementsKind(kind, to);
@@ -464,17 +439,18 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
return;
}
- if (holder_obj->IsJSGlobalObject()) {
+ if (holder_obj->IsJSGlobalObject(isolate_)) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj).global_dictionary(), isolate());
- Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()),
+ JSGlobalObject::cast(*holder_obj).global_dictionary(isolate_),
+ isolate());
+ Handle<PropertyCell> cell(dictionary->CellAt(isolate_, dictionary_entry()),
isolate());
property_details_ = cell->property_details();
PropertyCell::PrepareForValue(isolate(), dictionary, dictionary_entry(),
value, property_details_);
return;
}
- if (!holder_obj->HasFastProperties()) return;
+ if (!holder_obj->HasFastProperties(isolate_)) return;
PropertyConstness new_constness = PropertyConstness::kConst;
if (constness() == PropertyConstness::kConst) {
@@ -485,20 +461,28 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
new_constness = PropertyConstness::kMutable;
}
- Handle<Map> old_map(holder_obj->map(), isolate_);
- Handle<Map> new_map = Map::PrepareForDataProperty(
- isolate(), old_map, descriptor_number(), new_constness, value);
+ Handle<Map> old_map(holder_obj->map(isolate_), isolate_);
+ DCHECK(!old_map->is_dictionary_map());
- if (old_map.is_identical_to(new_map)) {
- // Update the property details if the representation was None.
- if (constness() != new_constness || representation().IsNone()) {
- property_details_ =
- new_map->instance_descriptors().GetDetails(descriptor_number());
+ Handle<Map> new_map = Map::Update(isolate_, old_map);
+ if (!new_map->is_dictionary_map()) {
+ new_map = Map::PrepareForDataProperty(
+ isolate(), new_map, descriptor_number(), new_constness, value);
+
+ if (old_map.is_identical_to(new_map)) {
+ // Update the property details if the representation was None.
+ if (constness() != new_constness || representation().IsNone()) {
+ property_details_ = new_map->instance_descriptors(isolate_).GetDetails(
+ descriptor_number());
+ }
+ return;
}
- return;
}
+ // We should only get here if the new_map is different from the old map,
+ // otherwise we would have falled through to the is_identical_to check above.
+ DCHECK_NE(*old_map, *new_map);
- JSObject::MigrateToMap(holder_obj, new_map);
+ JSObject::MigrateToMap(isolate_, holder_obj, new_map);
ReloadPropertyInformation<false>();
}
@@ -510,53 +494,59 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
// Property details can never change for private properties.
- if (holder->IsJSProxy()) {
- DCHECK(name()->IsPrivate());
+ if (holder->IsJSProxy(isolate_)) {
+ DCHECK(name()->IsPrivate(isolate_));
return;
}
Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- DCHECK(!holder_obj->HasTypedArrayElements());
- DCHECK(attributes != NONE || !holder_obj->HasFastElements());
- Handle<FixedArrayBase> elements(holder_obj->elements(), isolate());
- holder_obj->GetElementsAccessor()->Reconfigure(holder_obj, elements,
- number_, value, attributes);
+ DCHECK(!holder_obj->HasTypedArrayElements(isolate_));
+ DCHECK(attributes != NONE || !holder_obj->HasFastElements(isolate_));
+ Handle<FixedArrayBase> elements(holder_obj->elements(isolate_), isolate());
+ holder_obj->GetElementsAccessor(isolate_)->Reconfigure(
+ holder_obj, elements, number_, value, attributes);
ReloadPropertyInformation<true>();
- } else if (holder_obj->HasFastProperties()) {
- Handle<Map> old_map(holder_obj->map(), isolate_);
- Handle<Map> new_map = Map::ReconfigureExistingProperty(
- isolate_, old_map, descriptor_number(), i::kData, attributes);
+ } else if (holder_obj->HasFastProperties(isolate_)) {
+ Handle<Map> old_map(holder_obj->map(isolate_), isolate_);
// Force mutable to avoid changing constant value by reconfiguring
// kData -> kAccessor -> kData.
- new_map =
- Map::PrepareForDataProperty(isolate(), new_map, descriptor_number(),
- PropertyConstness::kMutable, value);
- JSObject::MigrateToMap(holder_obj, new_map);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ isolate_, old_map, descriptor_number(), i::kData, attributes,
+ PropertyConstness::kMutable);
+ if (!new_map->is_dictionary_map()) {
+ // Make sure that the data property has a compatible representation.
+ // TODO(leszeks): Do this as part of ReconfigureExistingProperty.
+ new_map =
+ Map::PrepareForDataProperty(isolate(), new_map, descriptor_number(),
+ PropertyConstness::kMutable, value);
+ }
+ JSObject::MigrateToMap(isolate_, holder_obj, new_map);
ReloadPropertyInformation<false>();
}
- if (!IsElement() && !holder_obj->HasFastProperties()) {
+ if (!IsElement() && !holder_obj->HasFastProperties(isolate_)) {
PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
- if (holder_obj->map().is_prototype_map() &&
+ if (holder_obj->map(isolate_).is_prototype_map() &&
(property_details_.attributes() & READ_ONLY) == 0 &&
(attributes & READ_ONLY) != 0) {
// Invalidate prototype validity cell when a property is reconfigured
// from writable to read-only as this may invalidate transitioning store
// IC handlers.
- JSObject::InvalidatePrototypeChains(holder->map());
+ JSObject::InvalidatePrototypeChains(holder->map(isolate_));
}
- if (holder_obj->IsJSGlobalObject()) {
+ if (holder_obj->IsJSGlobalObject(isolate_)) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj).global_dictionary(), isolate());
+ JSGlobalObject::cast(*holder_obj).global_dictionary(isolate_),
+ isolate());
Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
isolate(), dictionary, dictionary_entry(), value, details);
cell->set_value(*value);
property_details_ = cell->property_details();
} else {
- Handle<NameDictionary> dictionary(holder_obj->property_dictionary(),
- isolate());
+ Handle<NameDictionary> dictionary(
+ holder_obj->property_dictionary(isolate_), isolate());
PropertyDetails original_details =
dictionary->DetailsAt(dictionary_entry());
int enumeration_index = original_details.dictionary_index();
@@ -583,21 +573,21 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
void LookupIterator::PrepareTransitionToDataProperty(
Handle<JSReceiver> receiver, Handle<Object> value,
PropertyAttributes attributes, StoreOrigin store_origin) {
- DCHECK_IMPLIES(receiver->IsJSProxy(), name()->IsPrivate());
+ DCHECK_IMPLIES(receiver->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
if (state_ == TRANSITION) return;
- if (!IsElement() && name()->IsPrivate()) {
+ if (!IsElement() && name()->IsPrivate(isolate_)) {
attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
}
DCHECK(state_ != LookupIterator::ACCESSOR ||
- (GetAccessors()->IsAccessorInfo() &&
+ (GetAccessors()->IsAccessorInfo(isolate_) &&
AccessorInfo::cast(*GetAccessors()).is_special_data_property()));
DCHECK_NE(INTEGER_INDEXED_EXOTIC, state_);
DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
- Handle<Map> map(receiver->map(), isolate_);
+ Handle<Map> map(receiver->map(isolate_), isolate_);
// Dictionary maps can always have additional data properties.
if (map->is_dictionary_map()) {
@@ -608,9 +598,9 @@ void LookupIterator::PrepareTransitionToDataProperty(
int entry;
Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
global, name(), PropertyCellType::kUninitialized, &entry);
- Handle<GlobalDictionary> dictionary(global->global_dictionary(),
+ Handle<GlobalDictionary> dictionary(global->global_dictionary(isolate_),
isolate_);
- DCHECK(cell->value().IsTheHole(isolate_));
+ DCHECK(cell->value(isolate_).IsTheHole(isolate_));
DCHECK(!value->IsTheHole(isolate_));
transition_ = cell;
// Assign an enumeration index to the property and update
@@ -645,7 +635,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
property_details_ =
PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
} else {
- property_details_ = transition->GetLastDescriptorDetails();
+ property_details_ = transition->GetLastDescriptorDetails(isolate_);
has_property_ = true;
}
}
@@ -656,13 +646,14 @@ void LookupIterator::ApplyTransitionToDataProperty(
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
holder_ = receiver;
- if (receiver->IsJSGlobalObject()) {
- JSObject::InvalidatePrototypeChains(receiver->map());
+ if (receiver->IsJSGlobalObject(isolate_)) {
+ JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
state_ = DATA;
return;
}
Handle<Map> transition = transition_map();
- bool simple_transition = transition->GetBackPointer() == receiver->map();
+ bool simple_transition =
+ transition->GetBackPointer(isolate_) == receiver->map(isolate_);
if (configuration_ == DEFAULT && !transition->is_dictionary_map() &&
!transition->IsPrototypeValidityCellValid()) {
@@ -673,21 +664,23 @@ void LookupIterator::ApplyTransitionToDataProperty(
transition->set_prototype_validity_cell(*validity_cell);
}
- if (!receiver->IsJSProxy()) {
- JSObject::MigrateToMap(Handle<JSObject>::cast(receiver), transition);
+ if (!receiver->IsJSProxy(isolate_)) {
+ JSObject::MigrateToMap(isolate_, Handle<JSObject>::cast(receiver),
+ transition);
}
if (simple_transition) {
int number = transition->LastAdded();
number_ = static_cast<uint32_t>(number);
- property_details_ = transition->GetLastDescriptorDetails();
+ property_details_ = transition->GetLastDescriptorDetails(isolate_);
state_ = DATA;
- } else if (receiver->map().is_dictionary_map()) {
- Handle<NameDictionary> dictionary(receiver->property_dictionary(),
+ } else if (receiver->map(isolate_).is_dictionary_map()) {
+ Handle<NameDictionary> dictionary(receiver->property_dictionary(isolate_),
isolate_);
int entry;
- if (receiver->map().is_prototype_map() && receiver->IsJSObject()) {
- JSObject::InvalidatePrototypeChains(receiver->map());
+ if (receiver->map(isolate_).is_prototype_map() &&
+ receiver->IsJSObject(isolate_)) {
+ JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
}
dictionary = NameDictionary::Add(isolate(), dictionary, name(),
isolate_->factory()->uninitialized_value(),
@@ -708,11 +701,11 @@ void LookupIterator::Delete() {
Handle<JSReceiver> holder = Handle<JSReceiver>::cast(holder_);
if (IsElement()) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- ElementsAccessor* accessor = object->GetElementsAccessor();
+ ElementsAccessor* accessor = object->GetElementsAccessor(isolate_);
accessor->Delete(object, number_);
} else {
- DCHECK(!name()->IsPrivateName());
- bool is_prototype_map = holder->map().is_prototype_map();
+ DCHECK(!name()->IsPrivateName(isolate_));
+ bool is_prototype_map = holder->map(isolate_).is_prototype_map();
RuntimeCallTimerScope stats_scope(
isolate_, is_prototype_map
? RuntimeCallCounterId::kPrototypeObject_DeleteProperty
@@ -721,13 +714,13 @@ void LookupIterator::Delete() {
PropertyNormalizationMode mode =
is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
- if (holder->HasFastProperties()) {
- JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
- "DeletingProperty");
+ if (holder->HasFastProperties(isolate_)) {
+ JSObject::NormalizeProperties(isolate_, Handle<JSObject>::cast(holder),
+ mode, 0, "DeletingProperty");
ReloadPropertyInformation<false>();
}
JSReceiver::DeleteNormalizedProperty(holder, number_);
- if (holder->IsJSObject()) {
+ if (holder->IsJSObject(isolate_)) {
JSObject::ReoptimizeIfPrototype(Handle<JSObject>::cast(holder));
}
}
@@ -742,12 +735,12 @@ void LookupIterator::TransitionToAccessorProperty(
// handled via a trap. Adding properties to primitive values is not
// observable.
Handle<JSObject> receiver = GetStoreTarget<JSObject>();
- if (!IsElement() && name()->IsPrivate()) {
+ if (!IsElement() && name()->IsPrivate(isolate_)) {
attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
}
- if (!IsElement() && !receiver->map().is_dictionary_map()) {
- Handle<Map> old_map(receiver->map(), isolate_);
+ if (!IsElement() && !receiver->map(isolate_).is_dictionary_map()) {
+ Handle<Map> old_map(receiver->map(isolate_), isolate_);
if (!holder_.is_identical_to(receiver)) {
holder_ = receiver;
@@ -760,13 +753,14 @@ void LookupIterator::TransitionToAccessorProperty(
Handle<Map> new_map = Map::TransitionToAccessorProperty(
isolate_, old_map, name_, descriptor, getter, setter, attributes);
- bool simple_transition = new_map->GetBackPointer() == receiver->map();
- JSObject::MigrateToMap(receiver, new_map);
+ bool simple_transition =
+ new_map->GetBackPointer(isolate_) == receiver->map(isolate_);
+ JSObject::MigrateToMap(isolate_, receiver, new_map);
if (simple_transition) {
int number = new_map->LastAdded();
number_ = static_cast<uint32_t>(number);
- property_details_ = new_map->GetLastDescriptorDetails();
+ property_details_ = new_map->GetLastDescriptorDetails(isolate_);
state_ = ACCESSOR;
return;
}
@@ -776,7 +770,7 @@ void LookupIterator::TransitionToAccessorProperty(
}
Handle<AccessorPair> pair;
- if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
+ if (state() == ACCESSOR && GetAccessors()->IsAccessorPair(isolate_)) {
pair = Handle<AccessorPair>::cast(GetAccessors());
// If the component and attributes are identical, nothing has to be done.
if (pair->Equals(*getter, *setter)) {
@@ -818,13 +812,14 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
receiver, details);
receiver->RequireSlowElements(*dictionary);
- if (receiver->HasSlowArgumentsElements()) {
- FixedArray parameter_map = FixedArray::cast(receiver->elements());
+ if (receiver->HasSlowArgumentsElements(isolate_)) {
+ FixedArray parameter_map = FixedArray::cast(receiver->elements(isolate_));
uint32_t length = parameter_map.length() - 2;
if (number_ < length) {
- parameter_map.set(number_ + 2, ReadOnlyRoots(heap()).the_hole_value());
+ parameter_map.set(number_ + 2,
+ ReadOnlyRoots(isolate_).the_hole_value());
}
- FixedArray::cast(receiver->elements()).set(1, *dictionary);
+ FixedArray::cast(receiver->elements(isolate_)).set(1, *dictionary);
} else {
receiver->set_elements(*dictionary);
}
@@ -832,13 +827,13 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
ReloadPropertyInformation<true>();
} else {
PropertyNormalizationMode mode = CLEAR_INOBJECT_PROPERTIES;
- if (receiver->map().is_prototype_map()) {
- JSObject::InvalidatePrototypeChains(receiver->map());
+ if (receiver->map(isolate_).is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(receiver->map(isolate_));
mode = KEEP_INOBJECT_PROPERTIES;
}
// Normalize object to make this operation simple.
- JSObject::NormalizeProperties(receiver, mode, 0,
+ JSObject::NormalizeProperties(isolate_, receiver, mode, 0,
"TransitionToAccessorPair");
JSObject::SetNormalizedProperty(receiver, name_, pair, details);
@@ -859,61 +854,54 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
// Optimization that only works if configuration_ is not mutable.
if (!check_prototype_chain()) return true;
- DisallowHeapAllocation no_gc;
if (*receiver_ == *holder_) return true;
- if (!receiver_->IsJSReceiver()) return false;
- JSReceiver current = JSReceiver::cast(*receiver_);
- JSReceiver object = *holder_;
- if (!current.map().has_hidden_prototype()) return false;
- // JSProxy do not occur as hidden prototypes.
- if (object.IsJSProxy()) return false;
- PrototypeIterator iter(isolate(), current, kStartAtPrototype,
- PrototypeIterator::END_AT_NON_HIDDEN);
- while (!iter.IsAtEnd()) {
- if (iter.GetCurrent<JSReceiver>() == object) return true;
- iter.Advance();
- }
- return false;
+ if (!receiver_->IsJSGlobalProxy(isolate_)) return false;
+ return Handle<JSGlobalProxy>::cast(receiver_)->map(isolate_).prototype(
+ isolate_) == *holder_;
}
Handle<Object> LookupIterator::FetchValue() const {
Object result;
if (IsElement()) {
Handle<JSObject> holder = GetHolder<JSObject>();
- ElementsAccessor* accessor = holder->GetElementsAccessor();
+ ElementsAccessor* accessor = holder->GetElementsAccessor(isolate_);
return accessor->Get(holder, number_);
- } else if (holder_->IsJSGlobalObject()) {
+ } else if (holder_->IsJSGlobalObject(isolate_)) {
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
- result = holder->global_dictionary().ValueAt(number_);
- } else if (!holder_->HasFastProperties()) {
- result = holder_->property_dictionary().ValueAt(number_);
+ result = holder->global_dictionary(isolate_).ValueAt(isolate_, number_);
+ } else if (!holder_->HasFastProperties(isolate_)) {
+ result = holder_->property_dictionary(isolate_).ValueAt(isolate_, number_);
} else if (property_details_.location() == kField) {
DCHECK_EQ(kData, property_details_.kind());
Handle<JSObject> holder = GetHolder<JSObject>();
- FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(holder->map(isolate_), number_);
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
- result = holder_->map().instance_descriptors().GetStrongValue(number_);
+ result =
+ holder_->map(isolate_).instance_descriptors(isolate_).GetStrongValue(
+ isolate_, number_);
}
return handle(result, isolate_);
}
bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
DCHECK(!IsElement());
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
Handle<JSObject> holder = GetHolder<JSObject>();
- FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
+ FieldIndex field_index =
+ FieldIndex::ForDescriptor(holder->map(isolate_), number_);
if (property_details_.representation().IsDouble()) {
- if (!value.IsNumber()) return false;
+ if (!value.IsNumber(isolate_)) return false;
uint64_t bits;
- if (holder->IsUnboxedDoubleField(field_index)) {
+ if (holder->IsUnboxedDoubleField(isolate_, field_index)) {
bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
} else {
- Object current_value = holder->RawFastPropertyAt(field_index);
- DCHECK(current_value.IsMutableHeapNumber());
+ Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
+ DCHECK(current_value.IsMutableHeapNumber(isolate_));
bits = MutableHeapNumber::cast(current_value).value_as_bits();
}
// Use bit representation of double to to check for hole double, since
@@ -927,11 +915,11 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
}
return Object::SameNumberValue(bit_cast<double>(bits), value.Number());
} else {
- Object current_value = holder->RawFastPropertyAt(field_index);
+ Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
if (current_value.IsUninitialized(isolate()) || current_value == value) {
return true;
}
- return current_value.IsNumber() && value.IsNumber() &&
+ return current_value.IsNumber(isolate_) && value.IsNumber(isolate_) &&
Object::SameNumberValue(current_value.Number(), value.Number());
}
}
@@ -946,7 +934,7 @@ int LookupIterator::GetFieldDescriptorIndex() const {
int LookupIterator::GetAccessorIndex() const {
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kDescriptor, property_details_.location());
DCHECK_EQ(kAccessor, property_details_.kind());
return descriptor_number();
@@ -954,36 +942,38 @@ int LookupIterator::GetAccessorIndex() const {
Handle<Map> LookupIterator::GetFieldOwnerMap() const {
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
- Map holder_map = holder_->map();
+ Map holder_map = holder_->map(isolate_);
return handle(holder_map.FindFieldOwner(isolate(), descriptor_number()),
isolate_);
}
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
- return FieldIndex::ForDescriptor(holder_->map(), descriptor_number());
+ return FieldIndex::ForDescriptor(holder_->map(isolate_), descriptor_number());
}
Handle<FieldType> LookupIterator::GetFieldType() const {
DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
+ DCHECK(holder_->HasFastProperties(isolate_));
DCHECK_EQ(kField, property_details_.location());
return handle(
- holder_->map().instance_descriptors().GetFieldType(descriptor_number()),
+ holder_->map(isolate_).instance_descriptors(isolate_).GetFieldType(
+ isolate_, descriptor_number()),
isolate_);
}
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
DCHECK(!IsElement());
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
- return handle(holder->global_dictionary().CellAt(dictionary_entry()),
- isolate_);
+ return handle(
+ holder->global_dictionary(isolate_).CellAt(isolate_, dictionary_entry()),
+ isolate_);
}
Handle<Object> LookupIterator::GetAccessors() const {
@@ -1003,9 +993,9 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
Handle<JSReceiver> holder = GetHolder<JSReceiver>();
if (IsElement()) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
- ElementsAccessor* accessor = object->GetElementsAccessor();
+ ElementsAccessor* accessor = object->GetElementsAccessor(isolate_);
accessor->Set(object, number_, *value);
- } else if (holder->HasFastProperties()) {
+ } else if (holder->HasFastProperties(isolate_)) {
if (property_details_.location() == kField) {
// Check that in case of VariableMode::kConst field the existing value is
// equal to |value|.
@@ -1018,21 +1008,22 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
DCHECK_EQ(kDescriptor, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
}
- } else if (holder->IsJSGlobalObject()) {
+ } else if (holder->IsJSGlobalObject(isolate_)) {
GlobalDictionary dictionary =
- JSGlobalObject::cast(*holder).global_dictionary();
- dictionary.CellAt(dictionary_entry()).set_value(*value);
+ JSGlobalObject::cast(*holder).global_dictionary(isolate_);
+ dictionary.CellAt(isolate_, dictionary_entry()).set_value(*value);
} else {
- DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
- NameDictionary dictionary = holder->property_dictionary();
+ DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
+ NameDictionary dictionary = holder->property_dictionary(isolate_);
dictionary.ValueAtPut(dictionary_entry(), *value);
}
}
template <bool is_element>
bool LookupIterator::SkipInterceptor(JSObject holder) {
- auto info = GetInterceptor<is_element>(holder);
- if (!is_element && name_->IsSymbol() && !info.can_intercept_symbols()) {
+ InterceptorInfo info = GetInterceptor<is_element>(isolate_, holder);
+ if (!is_element && name_->IsSymbol(isolate_) &&
+ !info.can_intercept_symbols()) {
return true;
}
if (info.non_masking()) {
@@ -1051,18 +1042,19 @@ bool LookupIterator::SkipInterceptor(JSObject holder) {
JSReceiver LookupIterator::NextHolder(Map map) {
DisallowHeapAllocation no_gc;
- if (map.prototype() == ReadOnlyRoots(heap()).null_value()) {
+ if (map.prototype(isolate_) == ReadOnlyRoots(isolate_).null_value()) {
return JSReceiver();
}
- if (!check_prototype_chain() && !map.has_hidden_prototype()) {
+ if (!check_prototype_chain() && !map.IsJSGlobalProxyMap()) {
return JSReceiver();
}
- return JSReceiver::cast(map.prototype());
+ return JSReceiver::cast(map.prototype(isolate_));
}
LookupIterator::State LookupIterator::NotFound(JSReceiver const holder) const {
DCHECK(!IsElement());
- if (!holder.IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
+ if (!holder.IsJSTypedArray(isolate_) || !name_->IsString(isolate_))
+ return NOT_FOUND;
return IsSpecialIndex(String::cast(*name_)) ? INTEGER_INDEXED_EXOTIC
: NOT_FOUND;
}
@@ -1084,27 +1076,27 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
switch (state_) {
case NOT_FOUND:
if (map.IsJSProxyMap()) {
- if (is_element || !name_->IsPrivate()) return JSPROXY;
+ if (is_element || !name_->IsPrivate(isolate_)) return JSPROXY;
}
if (map.is_access_check_needed()) {
- if (is_element || !name_->IsPrivate()) return ACCESS_CHECK;
+ if (is_element || !name_->IsPrivate(isolate_)) return ACCESS_CHECK;
}
V8_FALLTHROUGH;
case ACCESS_CHECK:
if (check_interceptor() && HasInterceptor<is_element>(map) &&
!SkipInterceptor<is_element>(JSObject::cast(holder))) {
- if (is_element || !name_->IsPrivate()) return INTERCEPTOR;
+ if (is_element || !name_->IsPrivate(isolate_)) return INTERCEPTOR;
}
V8_FALLTHROUGH;
case INTERCEPTOR:
if (!is_element && map.IsJSGlobalObjectMap()) {
GlobalDictionary dict =
- JSGlobalObject::cast(holder).global_dictionary();
+ JSGlobalObject::cast(holder).global_dictionary(isolate_);
int number = dict.FindEntry(isolate(), name_);
if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
number_ = static_cast<uint32_t>(number);
- PropertyCell cell = dict.CellAt(number_);
- if (cell.value().IsTheHole(isolate_)) return NOT_FOUND;
+ PropertyCell cell = dict.CellAt(isolate_, number_);
+ if (cell.value(isolate_).IsTheHole(isolate_)) return NOT_FOUND;
property_details_ = cell.property_details();
has_property_ = true;
switch (property_details_.kind()) {
@@ -1136,12 +1128,13 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
if (is_element) {
JSObject js_object = JSObject::cast(holder);
- ElementsAccessor* accessor = js_object.GetElementsAccessor();
- FixedArrayBase backing_store = js_object.elements();
+ ElementsAccessor* accessor = js_object.GetElementsAccessor(isolate_);
+ FixedArrayBase backing_store = js_object.elements(isolate_);
number_ =
accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_);
if (number_ == kMaxUInt32) {
- return holder.IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
+ return holder.IsJSTypedArray(isolate_) ? INTEGER_INDEXED_EXOTIC
+ : NOT_FOUND;
}
property_details_ = accessor->GetDetails(js_object, number_);
if (map.has_frozen_or_sealed_elements()) {
@@ -1149,14 +1142,14 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
property_details_ = property_details_.CopyAddAttributes(attrs);
}
} else if (!map.is_dictionary_map()) {
- DescriptorArray descriptors = map.instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors(isolate_);
int number = descriptors.SearchWithCache(isolate_, *name_, map);
if (number == DescriptorArray::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
property_details_ = descriptors.GetDetails(number_);
} else {
- DCHECK_IMPLIES(holder.IsJSProxy(), name()->IsPrivate());
- NameDictionary dict = holder.property_dictionary();
+ DCHECK_IMPLIES(holder.IsJSProxy(isolate_), name()->IsPrivate(isolate_));
+ NameDictionary dict = holder.property_dictionary(isolate_);
int number = dict.FindEntry(isolate(), name_);
if (number == NameDictionary::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
@@ -1191,15 +1184,15 @@ Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
bool LookupIterator::TryLookupCachedProperty() {
return state() == LookupIterator::ACCESSOR &&
- GetAccessors()->IsAccessorPair() && LookupCachedProperty();
+ GetAccessors()->IsAccessorPair(isolate_) && LookupCachedProperty();
}
bool LookupIterator::LookupCachedProperty() {
DCHECK_EQ(state(), LookupIterator::ACCESSOR);
- DCHECK(GetAccessors()->IsAccessorPair());
+ DCHECK(GetAccessors()->IsAccessorPair(isolate_));
AccessorPair accessor_pair = AccessorPair::cast(*GetAccessors());
- Handle<Object> getter(accessor_pair.getter(), isolate());
+ Handle<Object> getter(accessor_pair.getter(isolate_), isolate());
MaybeHandle<Name> maybe_name =
FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
if (maybe_name.is_null()) return false;
diff --git a/deps/v8/src/objects/lookup.h b/deps/v8/src/objects/lookup.h
index 820b8ef9b0..565ea4bb75 100644
--- a/deps/v8/src/objects/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -93,10 +93,6 @@ class V8_EXPORT_PRIVATE LookupIterator final {
Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
bool* success, Configuration configuration = DEFAULT);
- static LookupIterator ForTransitionHandler(
- Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, MaybeHandle<Map> maybe_transition_map);
-
void Restart() {
InterceptorState state = InterceptorState::kUninitialized;
IsElement() ? RestartInternal<true>(state) : RestartInternal<false>(state);
@@ -239,7 +235,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
template <bool is_element>
bool SkipInterceptor(JSObject holder);
template <bool is_element>
- static inline InterceptorInfo GetInterceptor(JSObject holder);
+ static inline InterceptorInfo GetInterceptor(Isolate* isolate,
+ JSObject holder);
bool check_interceptor() const {
return (configuration_ & kInterceptor) != 0;
@@ -247,7 +244,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
inline int descriptor_number() const;
inline int dictionary_entry() const;
- static inline Configuration ComputeConfiguration(Configuration configuration,
+ static inline Configuration ComputeConfiguration(Isolate* isolate,
+ Configuration configuration,
Handle<Name> name);
static Handle<JSReceiver> GetRootForNonJSReceiver(
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 8c26196fb5..6a9359e3a0 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -30,20 +30,13 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR(Map)
-DescriptorArray Map::instance_descriptors() const {
- return DescriptorArray::cast(READ_FIELD(*this, kInstanceDescriptorsOffset));
+DEF_GETTER(Map, instance_descriptors, DescriptorArray) {
+ return TaggedField<DescriptorArray, kInstanceDescriptorsOffset>::load(isolate,
+ *this);
}
-DescriptorArray Map::synchronized_instance_descriptors() const {
- return DescriptorArray::cast(
- ACQUIRE_READ_FIELD(*this, kInstanceDescriptorsOffset));
-}
-
-void Map::set_synchronized_instance_descriptors(DescriptorArray value,
- WriteBarrierMode mode) {
- RELEASE_WRITE_FIELD(*this, kInstanceDescriptorsOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kInstanceDescriptorsOffset, value, mode);
-}
+SYNCHRONIZED_ACCESSORS(Map, synchronized_instance_descriptors, DescriptorArray,
+ kInstanceDescriptorsOffset)
// A freshly allocated layout descriptor can be set on an existing map.
// We need to use release-store and acquire-load accessor pairs to ensure
@@ -54,6 +47,12 @@ SYNCHRONIZED_ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
FLAG_unbox_double_fields)
WEAK_ACCESSORS(Map, raw_transitions, kTransitionsOrPrototypeInfoOffset)
+ACCESSORS_CHECKED2(Map, prototype, HeapObject, kPrototypeOffset, true,
+ value.IsNull() || value.IsJSReceiver())
+
+ACCESSORS_CHECKED(Map, prototype_info, Object,
+ kTransitionsOrPrototypeInfoOffset, this->is_prototype_map())
+
// |bit_field| fields.
// Concurrent access to |has_prototype_slot| and |has_non_instance_prototype|
// is explicitly whitelisted here. The former is never modified after the map
@@ -74,37 +73,35 @@ BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_prototype_slot,
Map::HasPrototypeSlotBit)
// |bit_field2| fields.
-BIT_FIELD_ACCESSORS(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
-BIT_FIELD_ACCESSORS(Map, bit_field2, is_prototype_map, Map::IsPrototypeMapBit)
-BIT_FIELD_ACCESSORS(Map, bit_field2, has_hidden_prototype,
- Map::HasHiddenPrototypeBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, new_target_is_base,
+ Map::NewTargetIsBaseBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, is_immutable_proto,
+ Map::IsImmutablePrototypeBit)
// |bit_field3| fields.
BIT_FIELD_ACCESSORS(Map, bit_field3, owns_descriptors, Map::OwnsDescriptorsBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_in_retained_map_list,
Map::IsInRetainedMapListBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_prototype_map, Map::IsPrototypeMapBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_migration_target,
Map::IsMigrationTargetBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, is_immutable_proto,
- Map::IsImmutablePrototypeBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, new_target_is_base,
- Map::NewTargetIsBaseBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_extensible, Map::IsExtensibleBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
Map::MayHaveInterestingSymbolsBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
Map::ConstructionCounterBits)
-InterceptorInfo Map::GetNamedInterceptor() {
+DEF_GETTER(Map, GetNamedInterceptor, InterceptorInfo) {
DCHECK(has_named_interceptor());
- FunctionTemplateInfo info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info.GetNamedPropertyHandler());
+ FunctionTemplateInfo info = GetFunctionTemplateInfo(isolate);
+ return InterceptorInfo::cast(info.GetNamedPropertyHandler(isolate));
}
-InterceptorInfo Map::GetIndexedInterceptor() {
+DEF_GETTER(Map, GetIndexedInterceptor, InterceptorInfo) {
DCHECK(has_indexed_interceptor());
- FunctionTemplateInfo info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info.GetIndexedPropertyHandler());
+ FunctionTemplateInfo info = GetFunctionTemplateInfo(isolate);
+ return InterceptorInfo::cast(info.GetIndexedPropertyHandler(isolate));
}
bool Map::IsMostGeneralFieldType(Representation representation,
@@ -113,7 +110,8 @@ bool Map::IsMostGeneralFieldType(Representation representation,
}
bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) {
- return instance_type == JS_ARRAY_TYPE || instance_type == JS_VALUE_TYPE ||
+ return instance_type == JS_ARRAY_TYPE ||
+ instance_type == JS_PRIMITIVE_WRAPPER_TYPE ||
instance_type == JS_ARGUMENTS_TYPE;
}
@@ -136,10 +134,25 @@ void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
}
}
+Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
+ PropertyNormalizationMode mode, const char* reason) {
+ return Normalize(isolate, fast_map, fast_map->elements_kind(), mode, reason);
+}
+
+bool Map::EquivalentToForNormalization(const Map other,
+ PropertyNormalizationMode mode) const {
+ return EquivalentToForNormalization(other, elements_kind(), mode);
+}
+
bool Map::IsUnboxedDoubleField(FieldIndex index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return IsUnboxedDoubleField(isolate, index);
+}
+
+bool Map::IsUnboxedDoubleField(Isolate* isolate, FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
if (!index.is_inobject()) return false;
- return !layout_descriptor().IsTagged(index.property_index());
+ return !layout_descriptor(isolate).IsTagged(index.property_index());
}
bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
@@ -160,8 +173,8 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
}
}
-PropertyDetails Map::GetLastDescriptorDetails() const {
- return instance_descriptors().GetDetails(LastAdded());
+PropertyDetails Map::GetLastDescriptorDetails(Isolate* isolate) const {
+ return instance_descriptors(isolate).GetDetails(LastAdded());
}
int Map::LastAdded() const {
@@ -375,7 +388,7 @@ void Map::CopyUnusedPropertyFields(Map map) {
void Map::CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map map) {
int value = map.used_or_unused_instance_size_in_words();
- if (value >= JSValue::kFieldsAdded) {
+ if (value >= JSPrimitiveWrapper::kFieldsAdded) {
// Unused in-object fields. Adjust the offset from the object’s start
// so it matches the distance to the object’s end.
value += instance_size_in_words() - map.instance_size_in_words();
@@ -570,22 +583,13 @@ bool Map::IsPrimitiveMap() const {
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
-HeapObject Map::prototype() const {
- return HeapObject::cast(READ_FIELD(*this, kPrototypeOffset));
-}
-
-void Map::set_prototype(HeapObject value, WriteBarrierMode mode) {
- DCHECK(value.IsNull() || value.IsJSReceiver());
- WRITE_FIELD(*this, kPrototypeOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kPrototypeOffset, value, mode);
-}
-
LayoutDescriptor Map::layout_descriptor_gc_safe() const {
DCHECK(FLAG_unbox_double_fields);
// The loaded value can be dereferenced on background thread to load the
// bitmap. We need acquire load in order to ensure that the bitmap
// initializing stores are also visible to the background thread.
- Object layout_desc = ACQUIRE_READ_FIELD(*this, kLayoutDescriptorOffset);
+ Object layout_desc =
+ TaggedField<Object, kLayoutDescriptorOffset>::Acquire_Load(*this);
return LayoutDescriptor::cast_gc_safe(layout_desc);
}
@@ -593,7 +597,8 @@ bool Map::HasFastPointerLayout() const {
DCHECK(FLAG_unbox_double_fields);
// The loaded value is used for SMI check only and is not dereferenced,
// so relaxed load is safe.
- Object layout_desc = RELAXED_READ_FIELD(*this, kLayoutDescriptorOffset);
+ Object layout_desc =
+ TaggedField<Object, kLayoutDescriptorOffset>::Relaxed_Load(*this);
return LayoutDescriptor::IsFastPointerLayout(layout_desc);
}
@@ -686,36 +691,17 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
#endif
}
-HeapObject Map::GetBackPointer() const {
- Object object = constructor_or_backpointer();
- if (object.IsMap()) {
+DEF_GETTER(Map, GetBackPointer, HeapObject) {
+ Object object = constructor_or_backpointer(isolate);
+ if (object.IsMap(isolate)) {
return Map::cast(object);
}
- return GetReadOnlyRoots().undefined_value();
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ return GetReadOnlyRoots(isolate).undefined_value();
}
-Map Map::ElementsTransitionMap() {
- DisallowHeapAllocation no_gc;
- // TODO(delphick): While it's safe to pass nullptr for Isolate* here as
- // SearchSpecial doesn't need it, this is really ugly. Perhaps factor out a
- // base class for methods not requiring an Isolate?
- return TransitionsAccessor(nullptr, *this, &no_gc)
- .SearchSpecial(GetReadOnlyRoots().elements_transition_symbol());
-}
-
-Object Map::prototype_info() const {
- DCHECK(is_prototype_map());
- return READ_FIELD(*this, Map::kTransitionsOrPrototypeInfoOffset);
-}
-
-void Map::set_prototype_info(Object value, WriteBarrierMode mode) {
- CHECK(is_prototype_map());
- WRITE_FIELD(*this, Map::kTransitionsOrPrototypeInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, Map::kTransitionsOrPrototypeInfoOffset,
- value, mode);
-}
-
-void Map::SetBackPointer(Object value, WriteBarrierMode mode) {
+void Map::SetBackPointer(HeapObject value, WriteBarrierMode mode) {
CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
CHECK(value.IsMap());
CHECK(GetBackPointer().IsUndefined());
@@ -724,6 +710,13 @@ void Map::SetBackPointer(Object value, WriteBarrierMode mode) {
set_constructor_or_backpointer(value, mode);
}
+// static
+Map Map::ElementsTransitionMap(Isolate* isolate) {
+ DisallowHeapAllocation no_gc;
+ return TransitionsAccessor(isolate, *this, &no_gc)
+ .SearchSpecial(ReadOnlyRoots(isolate).elements_transition_symbol());
+}
+
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, prototype_validity_cell, Object, kPrototypeValidityCellOffset)
ACCESSORS(Map, constructor_or_backpointer, Object,
@@ -736,23 +729,24 @@ bool Map::IsPrototypeValidityCellValid() const {
return value == Smi::FromInt(Map::kPrototypeChainValid);
}
-Object Map::GetConstructor() const {
- Object maybe_constructor = constructor_or_backpointer();
+DEF_GETTER(Map, GetConstructor, Object) {
+ Object maybe_constructor = constructor_or_backpointer(isolate);
// Follow any back pointers.
- while (maybe_constructor.IsMap()) {
+ while (maybe_constructor.IsMap(isolate)) {
maybe_constructor =
- Map::cast(maybe_constructor).constructor_or_backpointer();
+ Map::cast(maybe_constructor).constructor_or_backpointer(isolate);
}
return maybe_constructor;
}
-FunctionTemplateInfo Map::GetFunctionTemplateInfo() const {
- Object constructor = GetConstructor();
- if (constructor.IsJSFunction()) {
- DCHECK(JSFunction::cast(constructor).shared().IsApiFunction());
- return JSFunction::cast(constructor).shared().get_api_func_data();
+DEF_GETTER(Map, GetFunctionTemplateInfo, FunctionTemplateInfo) {
+ Object constructor = GetConstructor(isolate);
+ if (constructor.IsJSFunction(isolate)) {
+ // TODO(ishell): IsApiFunction(isolate) and get_api_func_data(isolate)
+ DCHECK(JSFunction::cast(constructor).shared(isolate).IsApiFunction());
+ return JSFunction::cast(constructor).shared(isolate).get_api_func_data();
}
- DCHECK(constructor.IsFunctionTemplateInfo());
+ DCHECK(constructor.IsFunctionTemplateInfo(isolate));
return FunctionTemplateInfo::cast(constructor);
}
@@ -805,8 +799,8 @@ int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
-bool HeapObject::IsNormalizedMapCache() const {
- if (!IsWeakFixedArray()) return false;
+DEF_GETTER(HeapObject, IsNormalizedMapCache, bool) {
+ if (!IsWeakFixedArray(isolate)) return false;
if (WeakFixedArray::cast(*this).length() != NormalizedMapCache::kEntries) {
return false;
}
diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index 855fdabdf3..d21f0e1a12 100644
--- a/deps/v8/src/objects/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -201,10 +201,9 @@ void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
*old_descriptors_ == integrity_source_map_->instance_descriptors());
}
-MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
- result_map_ = Map::CopyGeneralizeAllFields(
- isolate_, old_map_, new_elements_kind_, modified_descriptor_, new_kind_,
- new_attributes_, reason);
+MapUpdater::State MapUpdater::Normalize(const char* reason) {
+ result_map_ = Map::Normalize(isolate_, old_map_, new_elements_kind_,
+ CLEAR_INOBJECT_PROPERTIES, reason);
state_ = kEnd;
return state_; // Done.
}
@@ -310,14 +309,14 @@ MapUpdater::State MapUpdater::FindRootMap() {
}
if (!old_map_->EquivalentToForTransition(*root_map_)) {
- return CopyGeneralizeAllFields("GenAll_NotEquivalent");
+ return Normalize("Normalize_NotEquivalent");
} else if (old_map_->is_extensible() != root_map_->is_extensible()) {
DCHECK(!old_map_->is_extensible());
DCHECK(root_map_->is_extensible());
// We have an integrity level transition in the tree, let us make a note
// of that transition to be able to replay it later.
if (!TrySaveIntegrityLevelTransitions()) {
- return CopyGeneralizeAllFields("GenAll_PrivateSymbolsOnNonExtensible");
+ return Normalize("Normalize_PrivateSymbolsOnNonExtensible");
}
// We want to build transitions to the original element kind (before
@@ -335,7 +334,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
!(IsTransitionableFastElementsKind(from_kind) &&
IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
- return CopyGeneralizeAllFields("GenAll_InvalidElementsTransition");
+ return Normalize("Normalize_InvalidElementsTransition");
}
int root_nof = root_map_->NumberOfOwnDescriptors();
@@ -344,13 +343,13 @@ MapUpdater::State MapUpdater::FindRootMap() {
old_descriptors_->GetDetails(modified_descriptor_);
if (old_details.kind() != new_kind_ ||
old_details.attributes() != new_attributes_) {
- return CopyGeneralizeAllFields("GenAll_RootModification1");
+ return Normalize("Normalize_RootModification1");
}
if (old_details.location() != kField) {
- return CopyGeneralizeAllFields("GenAll_RootModification2");
+ return Normalize("Normalize_RootModification2");
}
if (!new_representation_.fits_into(old_details.representation())) {
- return CopyGeneralizeAllFields("GenAll_RootModification4");
+ return Normalize("Normalize_RootModification4");
}
DCHECK_EQ(kData, old_details.kind());
@@ -394,7 +393,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
!EqualImmutableValues(GetValue(i),
tmp_descriptors->GetStrongValue(i))) {
// TODO(ishell): mutable accessors are not implemented yet.
- return CopyGeneralizeAllFields("GenAll_Incompatible");
+ return Normalize("Normalize_Incompatible");
}
if (!IsGeneralizableTo(old_details.location(), tmp_details.location())) {
break;
@@ -484,7 +483,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (old_details.kind() == kAccessor &&
!EqualImmutableValues(GetValue(i),
tmp_descriptors->GetStrongValue(i))) {
- return CopyGeneralizeAllFields("GenAll_Incompatible");
+ return Normalize("Normalize_Incompatible");
}
DCHECK(!tmp_map->is_deprecated());
target_map_ = tmp_map;
@@ -723,7 +722,7 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
// contains entry for given descriptor. This means that the transition
// could be inserted regardless of whether transitions array is full or not.
if (maybe_transition.is_null() && !transitions.CanHaveMoreTransitions()) {
- return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+ return Normalize("Normalize_CantHaveMoreTransitions");
}
old_map_->NotifyLeafMapLayoutChange(isolate_);
@@ -787,7 +786,7 @@ MapUpdater::State MapUpdater::ConstructNewMapWithIntegrityLevelTransition() {
TransitionsAccessor transitions(isolate_, target_map_);
if (!transitions.CanHaveMoreTransitions()) {
- return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+ return Normalize("Normalize_CantHaveMoreTransitions");
}
result_map_ = Map::CopyForPreventExtensions(
diff --git a/deps/v8/src/objects/map-updater.h b/deps/v8/src/objects/map-updater.h
index 3ba86eacbc..6ee373cbdf 100644
--- a/deps/v8/src/objects/map-updater.h
+++ b/deps/v8/src/objects/map-updater.h
@@ -123,9 +123,8 @@ class MapUpdater {
State ConstructNewMapWithIntegrityLevelTransition();
// When a requested reconfiguration can not be done the result is a copy
- // of |old_map_| where every field has |Tagged| representation and |Any|
- // field type. This map is disconnected from the transition tree.
- State CopyGeneralizeAllFields(const char* reason);
+ // of |old_map_| in dictionary mode.
+ State Normalize(const char* reason);
// Returns name of a |descriptor| property.
inline Name GetKey(int descriptor) const;
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 43d8c305c5..7b4f1abd05 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -85,6 +85,21 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
os << "]\n";
}
+Map Map::GetStructMap(Isolate* isolate, InstanceType type) {
+ Map map;
+ switch (type) {
+#define MAKE_CASE(TYPE, Name, name) \
+ case TYPE: \
+ map = ReadOnlyRoots(isolate).name##_map(); \
+ break;
+ STRUCT_LIST(MAKE_CASE)
+#undef MAKE_CASE
+ default:
+ UNREACHABLE();
+ }
+ return map;
+}
+
VisitorId Map::GetVisitorId(Map map) {
STATIC_ASSERT(kVisitorIdCount <= 256);
@@ -262,7 +277,7 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_TYPE:
@@ -337,12 +352,20 @@ VisitorId Map::GetVisitorId(Map map) {
if (instance_type == WASM_CAPI_FUNCTION_DATA_TYPE) {
return kVisitWasmCapiFunctionData;
}
+ if (instance_type == WASM_INDIRECT_FUNCTION_TABLE_TYPE) {
+ return kVisitWasmIndirectFunctionTable;
+ }
return kVisitStruct;
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
return kVisitDataHandler;
+ case SOURCE_TEXT_MODULE_TYPE:
+ return kVisitSourceTextModule;
+ case SYNTHETIC_MODULE_TYPE:
+ return kVisitSyntheticModule;
+
default:
UNREACHABLE();
}
@@ -458,7 +481,7 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
return MaybeHandle<Map>();
}
- Representation representation = constant->OptimalRepresentation();
+ Representation representation = constant->OptimalRepresentation(isolate);
Handle<FieldType> type = constant->OptimalType(isolate, representation);
return CopyWithField(isolate, map, name, type, attributes,
PropertyConstness::kConst, representation, flag);
@@ -570,61 +593,6 @@ bool Map::HasOutOfObjectProperties() const {
return GetInObjectProperties() < NumberOfFields();
}
-Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
- ElementsKind elements_kind,
- int modify_index, PropertyKind kind,
- PropertyAttributes attributes,
- const char* reason) {
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- Handle<DescriptorArray> descriptors = DescriptorArray::CopyUpTo(
- isolate, old_descriptors, number_of_own_descriptors);
- descriptors->GeneralizeAllFields();
-
- Handle<LayoutDescriptor> new_layout_descriptor(
- LayoutDescriptor::FastPointerLayout(), isolate);
- Handle<Map> new_map = CopyReplaceDescriptors(
- isolate, map, descriptors, new_layout_descriptor, OMIT_TRANSITION,
- MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
-
- // Unless the instance is being migrated, ensure that modify_index is a field.
- if (modify_index >= 0) {
- PropertyDetails details = descriptors->GetDetails(modify_index);
- if (details.constness() != PropertyConstness::kMutable ||
- details.location() != kField || details.attributes() != attributes) {
- int field_index = details.location() == kField
- ? details.field_index()
- : new_map->NumberOfFields();
- Descriptor d = Descriptor::DataField(
- isolate, handle(descriptors->GetKey(modify_index), isolate),
- field_index, attributes, Representation::Tagged());
- descriptors->Replace(modify_index, &d);
- if (details.location() != kField) {
- new_map->AccountAddedPropertyField();
- }
- } else {
- DCHECK(details.attributes() == attributes);
- }
-
- if (FLAG_trace_generalization) {
- MaybeHandle<FieldType> field_type = FieldType::None(isolate);
- if (details.location() == kField) {
- field_type = handle(
- map->instance_descriptors().GetFieldType(modify_index), isolate);
- }
- map->PrintGeneralization(
- isolate, stdout, reason, modify_index,
- new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(),
- details.location() == kDescriptor, details.representation(),
- Representation::Tagged(), details.constness(), details.constness(),
- field_type, MaybeHandle<Object>(), FieldType::Any(isolate),
- MaybeHandle<Object>());
- }
- }
- new_map->set_elements_kind(elements_kind);
- return new_map;
-}
-
void Map::DeprecateTransitionTree(Isolate* isolate) {
if (is_deprecated()) return;
DisallowHeapAllocation no_gc;
@@ -648,7 +616,8 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
LayoutDescriptor new_layout_descriptor) {
// Don't overwrite the empty descriptor array or initial map's descriptors.
- if (NumberOfOwnDescriptors() == 0 || GetBackPointer().IsUndefined(isolate)) {
+ if (NumberOfOwnDescriptors() == 0 ||
+ GetBackPointer(isolate).IsUndefined(isolate)) {
return;
}
@@ -659,8 +628,8 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
Map current = *this;
MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
to_replace.number_of_descriptors());
- while (current.instance_descriptors() == to_replace) {
- Object next = current.GetBackPointer();
+ while (current.instance_descriptors(isolate) == to_replace) {
+ Object next = current.GetBackPointer(isolate);
if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
current.SetEnumLength(kInvalidEnumCacheSentinel);
current.UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
@@ -673,7 +642,7 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
Map Map::FindRootMap(Isolate* isolate) const {
Map result = *this;
while (true) {
- Object back = result.GetBackPointer();
+ Object back = result.GetBackPointer(isolate);
if (back.IsUndefined(isolate)) {
// Initial map always owns descriptors and doesn't have unused entries
// in the descriptor array.
@@ -688,10 +657,11 @@ Map Map::FindRootMap(Isolate* isolate) const {
Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
DisallowHeapAllocation no_allocation;
- DCHECK_EQ(kField, instance_descriptors().GetDetails(descriptor).location());
+ DCHECK_EQ(kField,
+ instance_descriptors(isolate).GetDetails(descriptor).location());
Map result = *this;
while (true) {
- Object back = result.GetBackPointer();
+ Object back = result.GetBackPointer(isolate);
if (back.IsUndefined(isolate)) break;
const Map parent = Map::cast(back);
if (parent.NumberOfOwnDescriptors() <= descriptor) break;
@@ -927,7 +897,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// Figure out the most restrictive integrity level transition (it should
// be the last one in the transition tree).
DCHECK(!map.is_extensible());
- Map previous = Map::cast(map.GetBackPointer());
+ Map previous = Map::cast(map.GetBackPointer(isolate));
TransitionsAccessor last_transitions(isolate, previous, no_allocation);
if (!last_transitions.HasIntegrityLevelTransitionTo(
map, &(info.integrity_level_symbol), &(info.integrity_level))) {
@@ -945,7 +915,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// transitions. If we encounter any non-integrity level transition interleaved
// with integrity level transitions, just bail out.
while (!source_map.is_extensible()) {
- previous = Map::cast(source_map.GetBackPointer());
+ previous = Map::cast(source_map.GetBackPointer(isolate));
TransitionsAccessor transitions(isolate, previous, no_allocation);
if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
return info;
@@ -1234,9 +1204,9 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
// Starting from the next existing elements kind transition try to
// replay the property transitions that does not involve instance rewriting
// (ElementsTransitionAndStoreStub does not support that).
- for (root_map = root_map.ElementsTransitionMap();
+ for (root_map = root_map.ElementsTransitionMap(isolate);
!root_map.is_null() && root_map.has_fast_elements();
- root_map = root_map.ElementsTransitionMap()) {
+ root_map = root_map.ElementsTransitionMap(isolate)) {
// If root_map's elements kind doesn't match any of the elements kind in
// the candidates there is no need to do any additional work.
if (!HasElementsKind(candidates, root_map.elements_kind())) continue;
@@ -1263,7 +1233,7 @@ static Map FindClosestElementsTransition(Isolate* isolate, Map map,
ElementsKind kind = map.elements_kind();
while (kind != to_kind) {
- Map next_map = current_map.ElementsTransitionMap();
+ Map next_map = current_map.ElementsTransitionMap(isolate);
if (next_map.is_null()) return current_map;
kind = next_map.elements_kind();
current_map = next_map;
@@ -1401,25 +1371,23 @@ int Map::NumberOfEnumerableProperties() const {
}
int Map::NextFreePropertyIndex() const {
- int free_index = 0;
int number_of_own_descriptors = NumberOfOwnDescriptors();
DescriptorArray descs = instance_descriptors();
- for (int i = 0; i < number_of_own_descriptors; i++) {
+ // Search properties backwards to find the last field.
+ for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
- int candidate = details.field_index() + details.field_width_in_words();
- if (candidate > free_index) free_index = candidate;
+ return details.field_index() + details.field_width_in_words();
}
}
- return free_index;
+ return 0;
}
bool Map::OnlyHasSimpleProperties() const {
// Wrapped string elements aren't explicitly stored in the elements backing
// store, but are loaded indirectly from the underlying string.
return !IsStringWrapperElementsKind(elements_kind()) &&
- !IsSpecialReceiverMap() && !has_hidden_prototype() &&
- !is_dictionary_map();
+ !IsSpecialReceiverMap() && !is_dictionary_map();
}
bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
@@ -1478,6 +1446,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
}
Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
+ ElementsKind new_elements_kind,
PropertyNormalizationMode mode, const char* reason) {
DCHECK(!fast_map->is_dictionary_map());
@@ -1489,7 +1458,8 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
if (use_cache) cache = Handle<NormalizedMapCache>::cast(maybe_cache);
Handle<Map> new_map;
- if (use_cache && cache->Get(fast_map, mode).ToHandle(&new_map)) {
+ if (use_cache &&
+ cache->Get(fast_map, new_elements_kind, mode).ToHandle(&new_map)) {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) new_map->DictionaryMapVerify(isolate);
#endif
@@ -1499,6 +1469,7 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
// except for the code cache, which can contain some ICs which can be
// applied to the shared map, dependent code and weak cell cache.
Handle<Map> fresh = Map::CopyNormalized(isolate, fast_map, mode);
+ fresh->set_elements_kind(new_elements_kind);
STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
Map::kDependentCodeOffset + kTaggedSize);
@@ -1508,8 +1479,12 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
// The IsInRetainedMapListBit might be different if the {new_map}
// that we got from the {cache} was already embedded into optimized
// code somewhere.
- DCHECK_EQ(fresh->bit_field3() & ~IsInRetainedMapListBit::kMask,
- new_map->bit_field3() & ~IsInRetainedMapListBit::kMask);
+ // The IsMigrationTargetBit might be different if the {new_map} from
+ // {cache} has already been marked as a migration target.
+ constexpr int ignored_bit_field3_bits =
+ IsInRetainedMapListBit::kMask | IsMigrationTargetBit::kMask;
+ DCHECK_EQ(fresh->bit_field3() & ~ignored_bit_field3_bits,
+ new_map->bit_field3() & ~ignored_bit_field3_bits);
int offset = Map::kBitField3Offset + kInt32Size;
DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
reinterpret_cast<void*>(new_map->address() + offset),
@@ -1530,13 +1505,14 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
#endif
} else {
new_map = Map::CopyNormalized(isolate, fast_map, mode);
+ new_map->set_elements_kind(new_elements_kind);
if (use_cache) {
cache->Set(fast_map, new_map);
isolate->counters()->maps_normalized()->Increment();
}
- if (FLAG_trace_maps) {
- LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
- }
+ }
+ if (FLAG_trace_maps) {
+ LOG(isolate, MapEvent("Normalize", *fast_map, *new_map, reason));
}
fast_map->NotifyLeafMapLayoutChange(isolate);
return new_map;
@@ -1870,7 +1846,7 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
DCHECK_EQ(map->FindRootMap(isolate).NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
- maybe_elements_transition_map = map->ElementsTransitionMap();
+ maybe_elements_transition_map = map->ElementsTransitionMap(isolate);
DCHECK(
maybe_elements_transition_map.is_null() ||
(maybe_elements_transition_map.elements_kind() == DICTIONARY_ELEMENTS &&
@@ -2093,7 +2069,7 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
PropertyAttributes attributes =
map->instance_descriptors().GetDetails(descriptor).attributes();
- Representation representation = value->OptimalRepresentation();
+ Representation representation = value->OptimalRepresentation(isolate);
Handle<FieldType> type = value->OptimalType(isolate, representation);
MapUpdater mu(isolate, map);
@@ -2108,11 +2084,11 @@ Handle<Map> Map::PrepareForDataProperty(Isolate* isolate, Handle<Map> map,
int descriptor,
PropertyConstness constness,
Handle<Object> value) {
+ // Update to the newest map before storing the property.
+ map = Update(isolate, map);
// Dictionaries can store any property value.
DCHECK(!map->is_dictionary_map());
- // Update to the newest map before storing the property.
- return UpdateDescriptorForValue(isolate, Update(isolate, map), descriptor,
- constness, value);
+ return UpdateDescriptorForValue(isolate, map, descriptor, constness, value);
}
Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
@@ -2152,7 +2128,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
MaybeHandle<Map> maybe_map;
if (!map->TooManyFastProperties(store_origin)) {
- Representation representation = value->OptimalRepresentation();
+ Representation representation = value->OptimalRepresentation(isolate);
Handle<FieldType> type = value->OptimalType(isolate, representation);
maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
constness, representation, flag);
@@ -2204,16 +2180,16 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
int descriptor, PropertyKind kind,
- PropertyAttributes attributes) {
+ PropertyAttributes attributes,
+ PropertyConstness constness) {
// Dictionaries have to be reconfigured in-place.
DCHECK(!map->is_dictionary_map());
if (!map->GetBackPointer().IsMap()) {
// There is no benefit from reconstructing transition tree for maps without
- // back pointers.
- return CopyGeneralizeAllFields(isolate, map, map->elements_kind(),
- descriptor, kind, attributes,
- "GenAll_AttributesMismatchProtoMap");
+ // back pointers, normalize and try to hit the map cache instead.
+ return Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES,
+ "Normalize_AttributesMismatchProtoMap");
}
if (FLAG_trace_generalization) {
@@ -2223,7 +2199,7 @@ Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
MapUpdater mu(isolate, map);
DCHECK_EQ(kData, kind); // Only kData case is supported so far.
Handle<Map> new_map = mu.ReconfigureToDataField(
- descriptor, attributes, PropertyConstness::kConst, Representation::None(),
+ descriptor, attributes, constness, Representation::None(),
FieldType::None(isolate));
return new_map;
}
@@ -2243,12 +2219,12 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
DCHECK(name->IsUniqueName());
- // Dictionary maps can always have additional data properties.
- if (map->is_dictionary_map()) return map;
-
// Migrate to the newest map before transitioning to the new property.
map = Update(isolate, map);
+ // Dictionary maps can always have additional data properties.
+ if (map->is_dictionary_map()) return map;
+
PropertyNormalizationMode mode = map->is_prototype_map()
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
@@ -2433,8 +2409,7 @@ bool CheckEquivalent(const Map first, const Map second) {
first.instance_type() == second.instance_type() &&
first.bit_field() == second.bit_field() &&
first.is_extensible() == second.is_extensible() &&
- first.new_target_is_base() == second.new_target_is_base() &&
- first.has_hidden_prototype() == second.has_hidden_prototype();
+ first.new_target_is_base() == second.new_target_is_base();
}
} // namespace
@@ -2442,7 +2417,6 @@ bool CheckEquivalent(const Map first, const Map second) {
bool Map::EquivalentToForTransition(const Map other) const {
CHECK_EQ(GetConstructor(), other.GetConstructor());
CHECK_EQ(instance_type(), other.instance_type());
- CHECK_EQ(has_hidden_prototype(), other.has_hidden_prototype());
if (bit_field() != other.bit_field()) return false;
if (new_target_is_base() != other.new_target_is_base()) return false;
@@ -2477,10 +2451,16 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
}
bool Map::EquivalentToForNormalization(const Map other,
+ ElementsKind elements_kind,
PropertyNormalizationMode mode) const {
int properties =
mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other.GetInObjectProperties();
- return CheckEquivalent(*this, other) && bit_field2() == other.bit_field2() &&
+ // Make sure the elements_kind bits are in bit_field2.
+ DCHECK_EQ(this->elements_kind(), Map::ElementsKindBits::decode(bit_field2()));
+ int adjusted_other_bit_field2 =
+ Map::ElementsKindBits::update(other.bit_field2(), elements_kind);
+ return CheckEquivalent(*this, other) &&
+ bit_field2() == adjusted_other_bit_field2 &&
GetInObjectProperties() == properties &&
JSObject::GetEmbedderFieldCount(*this) ==
JSObject::GetEmbedderFieldCount(other);
@@ -2639,7 +2619,6 @@ void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
} else {
DCHECK(prototype->IsNull(isolate) || prototype->IsJSProxy());
}
- map->set_has_hidden_prototype(prototype->IsJSGlobalObject());
WriteBarrierMode wb_mode =
prototype->IsNull(isolate) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
@@ -2672,6 +2651,7 @@ Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
}
MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
+ ElementsKind elements_kind,
PropertyNormalizationMode mode) {
DisallowHeapAllocation no_gc;
MaybeObject value = WeakFixedArray::Get(GetIndex(fast_map));
@@ -2681,7 +2661,8 @@ MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
}
Map normalized_map = Map::cast(heap_object);
- if (!normalized_map.EquivalentToForNormalization(*fast_map, mode)) {
+ if (!normalized_map.EquivalentToForNormalization(*fast_map, elements_kind,
+ mode)) {
return MaybeHandle<Map>();
}
return handle(normalized_map, GetIsolate());
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 814f8ed3be..c9da19b3e3 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -65,13 +65,16 @@ enum InstanceType : uint16_t;
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
V(Struct) \
V(Symbol) \
+ V(SyntheticModule) \
V(ThinString) \
V(TransitionArray) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
V(WasmCapiFunctionData) \
+ V(WasmIndirectFunctionTable) \
V(WasmInstanceObject) \
V(WeakArray) \
V(WeakCell)
@@ -138,22 +141,22 @@ using MapHandles = std::vector<Handle<Map>>;
// | | - has_prototype_slot (bit 7) |
// +----------+---------------------------------------------+
// | Byte | [bit_field2] |
-// | | - is_extensible (bit 0) |
-// | | - is_prototype_map (bit 1) |
-// | | - has_hidden_prototype (bit 2) |
+// | | - new_target_is_base (bit 0) |
+// | | - is_immutable_proto (bit 1) |
+// | | - unused bit (bit 2) |
// | | - elements_kind (bits 3..7) |
// +----+----------+---------------------------------------------+
// | Int | [bit_field3] |
// | | - enum_length (bit 0..9) |
// | | - number_of_own_descriptors (bit 10..19) |
-// | | - is_dictionary_map (bit 20) |
-// | | - owns_descriptors (bit 21) |
-// | | - is_in_retained_map_list (bit 22) |
-// | | - is_deprecated (bit 23) |
-// | | - is_unstable (bit 24) |
-// | | - is_migration_target (bit 25) |
-// | | - is_immutable_proto (bit 26) |
-// | | - new_target_is_base (bit 27) |
+// | | - is_prototype_map (bit 20) |
+// | | - is_dictionary_map (bit 21) |
+// | | - owns_descriptors (bit 22) |
+// | | - is_in_retained_map_list (bit 23) |
+// | | - is_deprecated (bit 24) |
+// | | - is_unstable (bit 25) |
+// | | - is_migration_target (bit 26) |
+// | | - is_extensible (bit 28) |
// | | - may_have_interesting_symbols (bit 28) |
// | | - construction_counter (bit 29..31) |
// | | |
@@ -212,8 +215,8 @@ class Map : public HeapObject {
Handle<Map> map, Handle<Context> native_context);
// Retrieve interceptors.
- inline InterceptorInfo GetNamedInterceptor();
- inline InterceptorInfo GetIndexedInterceptor();
+ DECL_GETTER(GetNamedInterceptor, InterceptorInfo)
+ DECL_GETTER(GetIndexedInterceptor, InterceptorInfo)
// Instance type.
DECL_PRIMITIVE_ACCESSORS(instance_type, InstanceType)
@@ -265,10 +268,10 @@ class Map : public HeapObject {
DECL_PRIMITIVE_ACCESSORS(bit_field2, byte)
// Bit positions for |bit_field2|.
-#define MAP_BIT_FIELD2_FIELDS(V, _) \
- V(IsExtensibleBit, bool, 1, _) \
- V(IsPrototypeMapBit, bool, 1, _) \
- V(HasHiddenPrototypeBit, bool, 1, _) \
+#define MAP_BIT_FIELD2_FIELDS(V, _) \
+ V(NewTargetIsBaseBit, bool, 1, _) \
+ V(IsImmutablePrototypeBit, bool, 1, _) \
+ V(UnusedBit, bool, 1, _) \
V(ElementsKindBits, ElementsKind, 5, _)
DEFINE_BIT_FIELDS(MAP_BIT_FIELD2_FIELDS)
@@ -287,14 +290,14 @@ class Map : public HeapObject {
#define MAP_BIT_FIELD3_FIELDS(V, _) \
V(EnumLengthBits, int, kDescriptorIndexBitCount, _) \
V(NumberOfOwnDescriptorsBits, int, kDescriptorIndexBitCount, _) \
+ V(IsPrototypeMapBit, bool, 1, _) \
V(IsDictionaryMapBit, bool, 1, _) \
V(OwnsDescriptorsBit, bool, 1, _) \
V(IsInRetainedMapListBit, bool, 1, _) \
V(IsDeprecatedBit, bool, 1, _) \
V(IsUnstableBit, bool, 1, _) \
V(IsMigrationTargetBit, bool, 1, _) \
- V(IsImmutablePrototypeBit, bool, 1, _) \
- V(NewTargetIsBaseBit, bool, 1, _) \
+ V(IsExtensibleBit, bool, 1, _) \
V(MayHaveInterestingSymbolsBit, bool, 1, _) \
V(ConstructionCounterBits, int, 3, _)
@@ -378,9 +381,6 @@ class Map : public HeapObject {
DECL_BOOLEAN_ACCESSORS(has_prototype_slot)
- // Tells whether the instance with this map has a hidden prototype.
- DECL_BOOLEAN_ACCESSORS(has_hidden_prototype)
-
// Records and queries whether the instance has a named interceptor.
DECL_BOOLEAN_ACCESSORS(has_named_interceptor)
@@ -431,7 +431,7 @@ class Map : public HeapObject {
// map with DICTIONARY_ELEMENTS was found in the prototype chain.
bool DictionaryElementsInPrototypeChainOnly(Isolate* isolate);
- inline Map ElementsTransitionMap();
+ inline Map ElementsTransitionMap(Isolate* isolate);
inline FixedArrayBase GetInitialElements() const;
@@ -545,9 +545,14 @@ class Map : public HeapObject {
V8_EXPORT_PRIVATE static Handle<Map> Normalize(Isolate* isolate,
Handle<Map> map,
+ ElementsKind new_elements_kind,
PropertyNormalizationMode mode,
const char* reason);
+ inline static Handle<Map> Normalize(Isolate* isolate, Handle<Map> fast_map,
+ PropertyNormalizationMode mode,
+ const char* reason);
+
// Tells whether the map is used for JSObjects in dictionary mode (ie
// normalized objects, ie objects for which HasFastProperties returns false).
// A map can never be used for both dictionary mode and fast mode JSObjects.
@@ -573,19 +578,18 @@ class Map : public HeapObject {
// Returns null_value if there's neither a constructor function nor a
// FunctionTemplateInfo available.
DECL_ACCESSORS(constructor_or_backpointer, Object)
- inline Object GetConstructor() const;
- inline FunctionTemplateInfo GetFunctionTemplateInfo() const;
+ DECL_GETTER(GetConstructor, Object)
+ DECL_GETTER(GetFunctionTemplateInfo, FunctionTemplateInfo)
inline void SetConstructor(Object constructor,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with the constructor (see above).
- inline HeapObject GetBackPointer() const;
- inline void SetBackPointer(Object value,
+ DECL_GETTER(GetBackPointer, HeapObject)
+ inline void SetBackPointer(HeapObject value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [instance descriptors]: describes the object.
- inline DescriptorArray instance_descriptors() const;
- inline DescriptorArray synchronized_instance_descriptors() const;
+ DECL_GETTER(instance_descriptors, DescriptorArray)
V8_EXPORT_PRIVATE void SetInstanceDescriptors(Isolate* isolate,
DescriptorArray descriptors,
int number_of_own_descriptors);
@@ -629,7 +633,7 @@ class Map : public HeapObject {
// chain state.
inline bool IsPrototypeValidityCellValid() const;
- inline PropertyDetails GetLastDescriptorDetails() const;
+ inline PropertyDetails GetLastDescriptorDetails(Isolate* isolate) const;
inline int LastAdded() const;
@@ -742,7 +746,7 @@ class Map : public HeapObject {
PropertyAttributes attributes);
V8_EXPORT_PRIVATE static Handle<Map> ReconfigureExistingProperty(
Isolate* isolate, Handle<Map> map, int descriptor, PropertyKind kind,
- PropertyAttributes attributes);
+ PropertyAttributes attributes, PropertyConstness constness);
inline void AppendDescriptor(Isolate* isolate, Descriptor* desc);
@@ -794,6 +798,8 @@ class Map : public HeapObject {
inline bool CanTransition() const;
+ static Map GetStructMap(Isolate* isolate, InstanceType type);
+
#define DECL_TESTER(Type, ...) inline bool Is##Type##Map() const;
INSTANCE_TYPE_CHECKERS(DECL_TESTER)
#undef DECL_TESTER
@@ -836,15 +842,19 @@ class Map : public HeapObject {
class BodyDescriptor;
- // Compares this map to another to see if they describe equivalent objects.
+ // Compares this map to another to see if they describe equivalent objects,
+ // up to the given |elements_kind|.
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
// it had exactly zero inobject properties.
// The "shared" flags of both this map and |other| are ignored.
- bool EquivalentToForNormalization(const Map other,
+ bool EquivalentToForNormalization(const Map other, ElementsKind elements_kind,
PropertyNormalizationMode mode) const;
+ inline bool EquivalentToForNormalization(
+ const Map other, PropertyNormalizationMode mode) const;
// Returns true if given field is unboxed double.
inline bool IsUnboxedDoubleField(FieldIndex index) const;
+ inline bool IsUnboxedDoubleField(Isolate* isolate, FieldIndex index) const;
void PrintMapDetails(std::ostream& os);
@@ -932,14 +942,6 @@ class Map : public HeapObject {
static Handle<Map> CopyNormalized(Isolate* isolate, Handle<Map> map,
PropertyNormalizationMode mode);
- // TODO(ishell): Move to MapUpdater.
- static Handle<Map> CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
- ElementsKind elements_kind,
- int modify_index,
- PropertyKind kind,
- PropertyAttributes attributes,
- const char* reason);
-
void DeprecateTransitionTree(Isolate* isolate);
void ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
@@ -966,13 +968,13 @@ class Map : public HeapObject {
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
// Use the high-level instance_descriptors/SetInstanceDescriptors instead.
- inline void set_synchronized_instance_descriptors(
- DescriptorArray array, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ DECL_ACCESSORS(synchronized_instance_descriptors, DescriptorArray)
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
friend class MapUpdater;
+ friend class ConcurrentMarkingVisitor;
OBJECT_CONSTRUCTORS(Map, HeapObject);
};
@@ -986,6 +988,7 @@ class NormalizedMapCache : public WeakFixedArray {
static Handle<NormalizedMapCache> New(Isolate* isolate);
V8_WARN_UNUSED_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
+ ElementsKind elements_kind,
PropertyNormalizationMode mode);
void Set(Handle<Map> fast_map, Handle<Map> normalized_map);
@@ -993,7 +996,7 @@ class NormalizedMapCache : public WeakFixedArray {
DECL_VERIFIER(NormalizedMapCache)
private:
- friend bool HeapObject::IsNormalizedMapCache() const;
+ friend bool HeapObject::IsNormalizedMapCache(Isolate* isolate) const;
static const int kEntries = 64;
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index a1645c0604..304cf90d28 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -30,6 +30,10 @@ class MaybeObject : public TaggedImpl<HeapObjectReferenceType::WEAK, Address> {
#ifdef VERIFY_HEAP
static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p);
#endif
+
+ private:
+ template <typename TFieldType, int kFieldOffset>
+ friend class TaggedField;
};
// A HeapObjectReference is either a strong reference to a HeapObject, a weak
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index a3bc31b63a..1ab9b9fb04 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -6,9 +6,12 @@
#define V8_OBJECTS_MODULE_INL_H_
#include "src/objects/module.h"
+#include "src/objects/source-text-module.h"
+#include "src/objects/synthetic-module.h"
#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/scope-info.h"
+#include "src/objects/string-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -16,74 +19,86 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Module, Struct)
-OBJECT_CONSTRUCTORS_IMPL(ModuleInfoEntry, Struct)
+OBJECT_CONSTRUCTORS_IMPL(Module, HeapObject)
+OBJECT_CONSTRUCTORS_IMPL(SourceTextModule, Module)
+OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfoEntry, Struct)
+OBJECT_CONSTRUCTORS_IMPL(SyntheticModule, Module)
OBJECT_CONSTRUCTORS_IMPL(JSModuleNamespace, JSObject)
NEVER_READ_ONLY_SPACE_IMPL(Module)
+NEVER_READ_ONLY_SPACE_IMPL(SourceTextModule)
+NEVER_READ_ONLY_SPACE_IMPL(SyntheticModule)
CAST_ACCESSOR(Module)
-ACCESSORS(Module, code, Object, kCodeOffset)
+CAST_ACCESSOR(SourceTextModule)
+CAST_ACCESSOR(SyntheticModule)
ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
-ACCESSORS(Module, regular_exports, FixedArray, kRegularExportsOffset)
-ACCESSORS(Module, regular_imports, FixedArray, kRegularImportsOffset)
ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
-ACCESSORS(Module, requested_modules, FixedArray, kRequestedModulesOffset)
-ACCESSORS(Module, script, Script, kScriptOffset)
ACCESSORS(Module, exception, Object, kExceptionOffset)
-ACCESSORS(Module, import_meta, Object, kImportMetaOffset)
SMI_ACCESSORS(Module, status, kStatusOffset)
-SMI_ACCESSORS(Module, dfs_index, kDfsIndexOffset)
-SMI_ACCESSORS(Module, dfs_ancestor_index, kDfsAncestorIndexOffset)
SMI_ACCESSORS(Module, hash, kHashOffset)
-ModuleInfo Module::info() const {
+ACCESSORS(SourceTextModule, code, Object, kCodeOffset)
+ACCESSORS(SourceTextModule, regular_exports, FixedArray, kRegularExportsOffset)
+ACCESSORS(SourceTextModule, regular_imports, FixedArray, kRegularImportsOffset)
+ACCESSORS(SourceTextModule, requested_modules, FixedArray,
+ kRequestedModulesOffset)
+ACCESSORS(SourceTextModule, script, Script, kScriptOffset)
+ACCESSORS(SourceTextModule, import_meta, Object, kImportMetaOffset)
+SMI_ACCESSORS(SourceTextModule, dfs_index, kDfsIndexOffset)
+SMI_ACCESSORS(SourceTextModule, dfs_ancestor_index, kDfsAncestorIndexOffset)
+
+ACCESSORS(SyntheticModule, name, String, kNameOffset)
+ACCESSORS(SyntheticModule, export_names, FixedArray, kExportNamesOffset)
+ACCESSORS(SyntheticModule, evaluation_steps, Foreign, kEvaluationStepsOffset)
+
+SourceTextModuleInfo SourceTextModule::info() const {
return (status() >= kEvaluating)
- ? ModuleInfo::cast(code())
+ ? SourceTextModuleInfo::cast(code())
: GetSharedFunctionInfo().scope_info().ModuleDescriptorInfo();
}
CAST_ACCESSOR(JSModuleNamespace)
ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
-CAST_ACCESSOR(ModuleInfoEntry)
-ACCESSORS(ModuleInfoEntry, export_name, Object, kExportNameOffset)
-ACCESSORS(ModuleInfoEntry, local_name, Object, kLocalNameOffset)
-ACCESSORS(ModuleInfoEntry, import_name, Object, kImportNameOffset)
-SMI_ACCESSORS(ModuleInfoEntry, module_request, kModuleRequestOffset)
-SMI_ACCESSORS(ModuleInfoEntry, cell_index, kCellIndexOffset)
-SMI_ACCESSORS(ModuleInfoEntry, beg_pos, kBegPosOffset)
-SMI_ACCESSORS(ModuleInfoEntry, end_pos, kEndPosOffset)
+CAST_ACCESSOR(SourceTextModuleInfoEntry)
+ACCESSORS(SourceTextModuleInfoEntry, export_name, Object, kExportNameOffset)
+ACCESSORS(SourceTextModuleInfoEntry, local_name, Object, kLocalNameOffset)
+ACCESSORS(SourceTextModuleInfoEntry, import_name, Object, kImportNameOffset)
+SMI_ACCESSORS(SourceTextModuleInfoEntry, module_request, kModuleRequestOffset)
+SMI_ACCESSORS(SourceTextModuleInfoEntry, cell_index, kCellIndexOffset)
+SMI_ACCESSORS(SourceTextModuleInfoEntry, beg_pos, kBegPosOffset)
+SMI_ACCESSORS(SourceTextModuleInfoEntry, end_pos, kEndPosOffset)
-OBJECT_CONSTRUCTORS_IMPL(ModuleInfo, FixedArray)
-CAST_ACCESSOR(ModuleInfo)
+OBJECT_CONSTRUCTORS_IMPL(SourceTextModuleInfo, FixedArray)
+CAST_ACCESSOR(SourceTextModuleInfo)
-FixedArray ModuleInfo::module_requests() const {
+FixedArray SourceTextModuleInfo::module_requests() const {
return FixedArray::cast(get(kModuleRequestsIndex));
}
-FixedArray ModuleInfo::special_exports() const {
+FixedArray SourceTextModuleInfo::special_exports() const {
return FixedArray::cast(get(kSpecialExportsIndex));
}
-FixedArray ModuleInfo::regular_exports() const {
+FixedArray SourceTextModuleInfo::regular_exports() const {
return FixedArray::cast(get(kRegularExportsIndex));
}
-FixedArray ModuleInfo::regular_imports() const {
+FixedArray SourceTextModuleInfo::regular_imports() const {
return FixedArray::cast(get(kRegularImportsIndex));
}
-FixedArray ModuleInfo::namespace_imports() const {
+FixedArray SourceTextModuleInfo::namespace_imports() const {
return FixedArray::cast(get(kNamespaceImportsIndex));
}
-FixedArray ModuleInfo::module_request_positions() const {
+FixedArray SourceTextModuleInfo::module_request_positions() const {
return FixedArray::cast(get(kModuleRequestPositionsIndex));
}
#ifdef DEBUG
-bool ModuleInfo::Equals(ModuleInfo other) const {
+bool SourceTextModuleInfo::Equals(SourceTextModuleInfo other) const {
return regular_exports() == other.regular_exports() &&
regular_imports() == other.regular_imports() &&
special_exports() == other.special_exports() &&
@@ -93,6 +108,30 @@ bool ModuleInfo::Equals(ModuleInfo other) const {
}
#endif
+struct ModuleHandleHash {
+ V8_INLINE size_t operator()(Handle<Module> module) const {
+ return module->hash();
+ }
+};
+
+struct ModuleHandleEqual {
+ V8_INLINE bool operator()(Handle<Module> lhs, Handle<Module> rhs) const {
+ return *lhs == *rhs;
+ }
+};
+
+class UnorderedModuleSet
+ : public std::unordered_set<Handle<Module>, ModuleHandleHash,
+ ModuleHandleEqual,
+ ZoneAllocator<Handle<Module>>> {
+ public:
+ explicit UnorderedModuleSet(Zone* zone)
+ : std::unordered_set<Handle<Module>, ModuleHandleHash, ModuleHandleEqual,
+ ZoneAllocator<Handle<Module>>>(
+ 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
+ ZoneAllocator<Handle<Module>>(zone)) {}
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index ea40989df1..4e89050360 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -14,169 +14,25 @@
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
-#include "src/utils/ostreams.h"
#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
-struct ModuleHandleHash {
- V8_INLINE size_t operator()(Handle<Module> module) const {
- return module->hash();
- }
-};
-
-struct ModuleHandleEqual {
- V8_INLINE bool operator()(Handle<Module> lhs, Handle<Module> rhs) const {
- return *lhs == *rhs;
- }
-};
-
-struct StringHandleHash {
- V8_INLINE size_t operator()(Handle<String> string) const {
- return string->Hash();
- }
-};
-
-struct StringHandleEqual {
- V8_INLINE bool operator()(Handle<String> lhs, Handle<String> rhs) const {
- return lhs->Equals(*rhs);
- }
-};
-
-class UnorderedStringSet
- : public std::unordered_set<Handle<String>, StringHandleHash,
- StringHandleEqual,
- ZoneAllocator<Handle<String>>> {
- public:
- explicit UnorderedStringSet(Zone* zone)
- : std::unordered_set<Handle<String>, StringHandleHash, StringHandleEqual,
- ZoneAllocator<Handle<String>>>(
- 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
- ZoneAllocator<Handle<String>>(zone)) {}
-};
-
-class UnorderedModuleSet
- : public std::unordered_set<Handle<Module>, ModuleHandleHash,
- ModuleHandleEqual,
- ZoneAllocator<Handle<Module>>> {
- public:
- explicit UnorderedModuleSet(Zone* zone)
- : std::unordered_set<Handle<Module>, ModuleHandleHash, ModuleHandleEqual,
- ZoneAllocator<Handle<Module>>>(
- 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
- ZoneAllocator<Handle<Module>>(zone)) {}
-};
-
-class UnorderedStringMap
- : public std::unordered_map<
- Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
- ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>> {
- public:
- explicit UnorderedStringMap(Zone* zone)
- : std::unordered_map<
- Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
- ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>>(
- 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
- ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>(
- zone)) {}
-};
-
-class Module::ResolveSet
- : public std::unordered_map<
- Handle<Module>, UnorderedStringSet*, ModuleHandleHash,
- ModuleHandleEqual,
- ZoneAllocator<std::pair<const Handle<Module>, UnorderedStringSet*>>> {
- public:
- explicit ResolveSet(Zone* zone)
- : std::unordered_map<Handle<Module>, UnorderedStringSet*,
- ModuleHandleHash, ModuleHandleEqual,
- ZoneAllocator<std::pair<const Handle<Module>,
- UnorderedStringSet*>>>(
- 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
- ZoneAllocator<std::pair<const Handle<Module>, UnorderedStringSet*>>(
- zone)),
- zone_(zone) {}
-
- Zone* zone() const { return zone_; }
-
- private:
- Zone* zone_;
-};
-
-int Module::ExportIndex(int cell_index) {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kExport);
- return cell_index - 1;
-}
-
-int Module::ImportIndex(int cell_index) {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kImport);
- return -cell_index - 1;
-}
-
-void Module::CreateIndirectExport(Isolate* isolate, Handle<Module> module,
- Handle<String> name,
- Handle<ModuleInfoEntry> entry) {
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(name).IsTheHole(isolate));
- exports = ObjectHashTable::Put(exports, name, entry);
- module->set_exports(*exports);
-}
-
-void Module::CreateExport(Isolate* isolate, Handle<Module> module,
- int cell_index, Handle<FixedArray> names) {
- DCHECK_LT(0, names->length());
- Handle<Cell> cell =
- isolate->factory()->NewCell(isolate->factory()->undefined_value());
- module->regular_exports().set(ExportIndex(cell_index), *cell);
-
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- for (int i = 0, n = names->length(); i < n; ++i) {
- Handle<String> name(String::cast(names->get(i)), isolate);
- DCHECK(exports->Lookup(name).IsTheHole(isolate));
- exports = ObjectHashTable::Put(exports, name, cell);
- }
- module->set_exports(*exports);
-}
-
-Cell Module::GetCell(int cell_index) {
- DisallowHeapAllocation no_gc;
- Object cell;
- switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
- case ModuleDescriptor::kImport:
- cell = regular_imports().get(ImportIndex(cell_index));
- break;
- case ModuleDescriptor::kExport:
- cell = regular_exports().get(ExportIndex(cell_index));
- break;
- case ModuleDescriptor::kInvalid:
- UNREACHABLE();
- }
- return Cell::cast(cell);
-}
-
-Handle<Object> Module::LoadVariable(Isolate* isolate, Handle<Module> module,
- int cell_index) {
- return handle(module->GetCell(cell_index).value(), isolate);
-}
-
-void Module::StoreVariable(Handle<Module> module, int cell_index,
- Handle<Object> value) {
- DisallowHeapAllocation no_gc;
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kExport);
- module->GetCell(cell_index).set_value(*value);
-}
-
#ifdef DEBUG
void Module::PrintStatusTransition(Status new_status) {
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Changing module status from " << status() << " to " << new_status
<< " for ";
- script().GetNameOrSourceURL().Print(os);
+ if (this->IsSourceTextModule()) {
+ Handle<Script> script(SourceTextModule::cast(*this).script(),
+ GetIsolate());
+ script->GetNameOrSourceURL().Print(os);
+ } else {
+ SyntheticModule::cast(*this).name().Print(os);
+ }
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -194,70 +50,80 @@ void Module::SetStatus(Status new_status) {
set_status(new_status);
}
+void Module::RecordError(Isolate* isolate) {
+ DisallowHeapAllocation no_alloc;
+ DCHECK(exception().IsTheHole(isolate));
+ Object the_exception = isolate->pending_exception();
+ DCHECK(!the_exception.IsTheHole(isolate));
+
+ if (this->IsSourceTextModule()) {
+ Handle<SourceTextModule> self(SourceTextModule::cast(*this), GetIsolate());
+ self->set_code(self->info());
+ }
+#ifdef DEBUG
+ PrintStatusTransition(Module::kErrored);
+#endif // DEBUG
+ set_status(Module::kErrored);
+ set_exception(the_exception);
+}
+
void Module::ResetGraph(Isolate* isolate, Handle<Module> module) {
DCHECK_NE(module->status(), kInstantiating);
DCHECK_NE(module->status(), kEvaluating);
if (module->status() != kPreInstantiating) return;
- Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+
+ Handle<FixedArray> requested_modules =
+ module->IsSourceTextModule()
+ ? Handle<FixedArray>(
+ Handle<SourceTextModule>::cast(module)->requested_modules(),
+ isolate)
+ : Handle<FixedArray>();
Reset(isolate, module);
- for (int i = 0; i < requested_modules->length(); ++i) {
- Handle<Object> descendant(requested_modules->get(i), isolate);
- if (descendant->IsModule()) {
- ResetGraph(isolate, Handle<Module>::cast(descendant));
- } else {
- DCHECK(descendant->IsUndefined(isolate));
+ if (module->IsSourceTextModule()) {
+ for (int i = 0; i < requested_modules->length(); ++i) {
+ Handle<Object> descendant(requested_modules->get(i), isolate);
+ if (descendant->IsModule()) {
+ ResetGraph(isolate, Handle<Module>::cast(descendant));
+ } else {
+ DCHECK(descendant->IsUndefined(isolate));
+ }
}
+ } else {
+ DCHECK(module->IsSyntheticModule());
+ // Nothing else to do here.
}
}
void Module::Reset(Isolate* isolate, Handle<Module> module) {
- Factory* factory = isolate->factory();
-
DCHECK(module->status() == kPreInstantiating ||
module->status() == kInstantiating);
DCHECK(module->exception().IsTheHole(isolate));
- DCHECK(module->import_meta().IsTheHole(isolate));
// The namespace object cannot exist, because it would have been created
// by RunInitializationCode, which is called only after this module's SCC
// succeeds instantiation.
DCHECK(!module->module_namespace().IsJSModuleNamespace());
- Handle<ObjectHashTable> exports =
- ObjectHashTable::New(isolate, module->info().RegularExportCount());
- Handle<FixedArray> regular_exports =
- factory->NewFixedArray(module->regular_exports().length());
- Handle<FixedArray> regular_imports =
- factory->NewFixedArray(module->regular_imports().length());
- Handle<FixedArray> requested_modules =
- factory->NewFixedArray(module->requested_modules().length());
-
- if (module->status() == kInstantiating) {
- module->set_code(JSFunction::cast(module->code()).shared());
- }
#ifdef DEBUG
module->PrintStatusTransition(kUninstantiated);
#endif // DEBUG
- module->set_status(kUninstantiated);
- module->set_exports(*exports);
- module->set_regular_exports(*regular_exports);
- module->set_regular_imports(*regular_imports);
- module->set_requested_modules(*requested_modules);
- module->set_dfs_index(-1);
- module->set_dfs_ancestor_index(-1);
-}
-void Module::RecordError(Isolate* isolate) {
- DisallowHeapAllocation no_alloc;
- DCHECK(exception().IsTheHole(isolate));
- Object the_exception = isolate->pending_exception();
- DCHECK(!the_exception.IsTheHole(isolate));
+ int export_count;
- set_code(info());
-#ifdef DEBUG
- PrintStatusTransition(Module::kErrored);
-#endif // DEBUG
- set_status(Module::kErrored);
- set_exception(the_exception);
+ if (module->IsSourceTextModule()) {
+ Handle<SourceTextModule> source_text_module =
+ Handle<SourceTextModule>::cast(module);
+ export_count = source_text_module->regular_exports().length();
+ SourceTextModule::Reset(isolate, source_text_module);
+ } else {
+ export_count =
+ Handle<SyntheticModule>::cast(module)->export_names().length();
+ // Nothing to do here.
+ }
+
+ Handle<ObjectHashTable> exports = ObjectHashTable::New(isolate, export_count);
+
+ module->set_exports(*exports);
+ module->set_status(kUninstantiated);
}
Object Module::GetException() {
@@ -267,46 +133,6 @@ Object Module::GetException() {
return exception();
}
-SharedFunctionInfo Module::GetSharedFunctionInfo() const {
- DisallowHeapAllocation no_alloc;
- DCHECK_NE(status(), Module::kEvaluating);
- DCHECK_NE(status(), Module::kEvaluated);
- switch (status()) {
- case kUninstantiated:
- case kPreInstantiating:
- DCHECK(code().IsSharedFunctionInfo());
- return SharedFunctionInfo::cast(code());
- case kInstantiating:
- DCHECK(code().IsJSFunction());
- return JSFunction::cast(code()).shared();
- case kInstantiated:
- DCHECK(code().IsJSGeneratorObject());
- return JSGeneratorObject::cast(code()).function().shared();
- case kEvaluating:
- case kEvaluated:
- case kErrored:
- UNREACHABLE();
- }
-
- UNREACHABLE();
-}
-
-MaybeHandle<Cell> Module::ResolveImport(Isolate* isolate, Handle<Module> module,
- Handle<String> name, int module_request,
- MessageLocation loc, bool must_resolve,
- Module::ResolveSet* resolve_set) {
- Handle<Module> requested_module(
- Module::cast(module->requested_modules().get(module_request)), isolate);
- Handle<String> specifier(
- String::cast(module->info().module_requests().get(module_request)),
- isolate);
- MaybeHandle<Cell> result =
- Module::ResolveExport(isolate, requested_module, specifier, name, loc,
- must_resolve, resolve_set);
- DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
- return result;
-}
-
MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
Handle<String> module_specifier,
Handle<String> export_name,
@@ -314,121 +140,16 @@ MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
Module::ResolveSet* resolve_set) {
DCHECK_GE(module->status(), kPreInstantiating);
DCHECK_NE(module->status(), kEvaluating);
- Handle<Object> object(module->exports().Lookup(export_name), isolate);
- if (object->IsCell()) {
- // Already resolved (e.g. because it's a local export).
- return Handle<Cell>::cast(object);
- }
- // Check for cycle before recursing.
- {
- // Attempt insertion with a null string set.
- auto result = resolve_set->insert({module, nullptr});
- UnorderedStringSet*& name_set = result.first->second;
- if (result.second) {
- // |module| wasn't in the map previously, so allocate a new name set.
- Zone* zone = resolve_set->zone();
- name_set =
- new (zone->New(sizeof(UnorderedStringSet))) UnorderedStringSet(zone);
- } else if (name_set->count(export_name)) {
- // Cycle detected.
- if (must_resolve) {
- return isolate->Throw<Cell>(
- isolate->factory()->NewSyntaxError(
- MessageTemplate::kCyclicModuleDependency, export_name,
- module_specifier),
- &loc);
- }
- return MaybeHandle<Cell>();
- }
- name_set->insert(export_name);
+ if (module->IsSourceTextModule()) {
+ return SourceTextModule::ResolveExport(
+ isolate, Handle<SourceTextModule>::cast(module), module_specifier,
+ export_name, loc, must_resolve, resolve_set);
+ } else {
+ return SyntheticModule::ResolveExport(
+ isolate, Handle<SyntheticModule>::cast(module), module_specifier,
+ export_name, loc, must_resolve);
}
-
- if (object->IsModuleInfoEntry()) {
- // Not yet resolved indirect export.
- Handle<ModuleInfoEntry> entry = Handle<ModuleInfoEntry>::cast(object);
- Handle<String> import_name(String::cast(entry->import_name()), isolate);
- Handle<Script> script(module->script(), isolate);
- MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
-
- Handle<Cell> cell;
- if (!ResolveImport(isolate, module, import_name, entry->module_request(),
- new_loc, true, resolve_set)
- .ToHandle(&cell)) {
- DCHECK(isolate->has_pending_exception());
- return MaybeHandle<Cell>();
- }
-
- // The export table may have changed but the entry in question should be
- // unchanged.
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(export_name).IsModuleInfoEntry());
-
- exports = ObjectHashTable::Put(exports, export_name, cell);
- module->set_exports(*exports);
- return cell;
- }
-
- DCHECK(object->IsTheHole(isolate));
- return Module::ResolveExportUsingStarExports(isolate, module,
- module_specifier, export_name,
- loc, must_resolve, resolve_set);
-}
-
-MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
- Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
- Handle<String> export_name, MessageLocation loc, bool must_resolve,
- Module::ResolveSet* resolve_set) {
- if (!export_name->Equals(ReadOnlyRoots(isolate).default_string())) {
- // Go through all star exports looking for the given name. If multiple star
- // exports provide the name, make sure they all map it to the same cell.
- Handle<Cell> unique_cell;
- Handle<FixedArray> special_exports(module->info().special_exports(),
- isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- i::Handle<i::ModuleInfoEntry> entry(
- i::ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name().IsUndefined(isolate)) {
- continue; // Indirect export.
- }
-
- Handle<Script> script(module->script(), isolate);
- MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
-
- Handle<Cell> cell;
- if (ResolveImport(isolate, module, export_name, entry->module_request(),
- new_loc, false, resolve_set)
- .ToHandle(&cell)) {
- if (unique_cell.is_null()) unique_cell = cell;
- if (*unique_cell != *cell) {
- return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
- MessageTemplate::kAmbiguousExport,
- module_specifier, export_name),
- &loc);
- }
- } else if (isolate->has_pending_exception()) {
- return MaybeHandle<Cell>();
- }
- }
-
- if (!unique_cell.is_null()) {
- // Found a unique star export for this name.
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(export_name).IsTheHole(isolate));
- exports = ObjectHashTable::Put(exports, export_name, unique_cell);
- module->set_exports(*exports);
- return unique_cell;
- }
- }
-
- // Unresolvable.
- if (must_resolve) {
- return isolate->Throw<Cell>(
- isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
- module_specifier, export_name),
- &loc);
- }
- return MaybeHandle<Cell>();
}
bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
@@ -438,7 +159,14 @@ bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Instantiating module ";
- module->script().GetNameOrSourceURL().Print(os);
+ if (module->IsSourceTextModule()) {
+ Handle<SourceTextModule>::cast(module)
+ ->script()
+ .GetNameOrSourceURL()
+ .Print(os);
+ } else {
+ Handle<SyntheticModule>::cast(module)->name().Print(os);
+ }
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -450,7 +178,7 @@ bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
return false;
}
Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneForwardList<Handle<Module>> stack(&zone);
+ ZoneForwardList<Handle<SourceTextModule>> stack(&zone);
unsigned dfs_index = 0;
if (!FinishInstantiate(isolate, module, &stack, &dfs_index, &zone)) {
for (auto& descendant : stack) {
@@ -474,188 +202,31 @@ bool Module::PrepareInstantiate(Isolate* isolate, Handle<Module> module,
module->SetStatus(kPreInstantiating);
STACK_CHECK(isolate, false);
- // Obtain requested modules.
- Handle<ModuleInfo> module_info(module->info(), isolate);
- Handle<FixedArray> module_requests(module_info->module_requests(), isolate);
- Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
- for (int i = 0, length = module_requests->length(); i < length; ++i) {
- Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
- v8::Local<v8::Module> api_requested_module;
- if (!callback(context, v8::Utils::ToLocal(specifier),
- v8::Utils::ToLocal(module))
- .ToLocal(&api_requested_module)) {
- isolate->PromoteScheduledException();
- return false;
- }
- Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
- requested_modules->set(i, *requested_module);
- }
-
- // Recurse.
- for (int i = 0, length = requested_modules->length(); i < length; ++i) {
- Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
- isolate);
- if (!PrepareInstantiate(isolate, requested_module, context, callback)) {
- return false;
- }
- }
-
- // Set up local exports.
- // TODO(neis): Create regular_exports array here instead of in factory method?
- for (int i = 0, n = module_info->RegularExportCount(); i < n; ++i) {
- int cell_index = module_info->RegularExportCellIndex(i);
- Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
- isolate);
- CreateExport(isolate, module, cell_index, export_names);
- }
-
- // Partially set up indirect exports.
- // For each indirect export, we create the appropriate slot in the export
- // table and store its ModuleInfoEntry there. When we later find the correct
- // Cell in the module that actually provides the value, we replace the
- // ModuleInfoEntry by that Cell (see ResolveExport).
- Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- Handle<Object> export_name(entry->export_name(), isolate);
- if (export_name->IsUndefined(isolate)) continue; // Star export.
- CreateIndirectExport(isolate, module, Handle<String>::cast(export_name),
- entry);
- }
-
- DCHECK_EQ(module->status(), kPreInstantiating);
- return true;
-}
-
-bool Module::RunInitializationCode(Isolate* isolate, Handle<Module> module) {
- DCHECK_EQ(module->status(), kInstantiating);
- Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
- DCHECK_EQ(MODULE_SCOPE, function->shared().scope_info().scope_type());
- Handle<Object> receiver = isolate->factory()->undefined_value();
- Handle<Object> argv[] = {module};
- MaybeHandle<Object> maybe_generator =
- Execution::Call(isolate, function, receiver, arraysize(argv), argv);
- Handle<Object> generator;
- if (!maybe_generator.ToHandle(&generator)) {
- DCHECK(isolate->has_pending_exception());
- return false;
- }
- DCHECK_EQ(*function, Handle<JSGeneratorObject>::cast(generator)->function());
- module->set_code(*generator);
- return true;
-}
-
-bool Module::MaybeTransitionComponent(Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack,
- Status new_status) {
- DCHECK(new_status == kInstantiated || new_status == kEvaluated);
- SLOW_DCHECK(
- // {module} is on the {stack}.
- std::count_if(stack->begin(), stack->end(),
- [&](Handle<Module> m) { return *m == *module; }) == 1);
- DCHECK_LE(module->dfs_ancestor_index(), module->dfs_index());
- if (module->dfs_ancestor_index() == module->dfs_index()) {
- // This is the root of its strongly connected component.
- Handle<Module> ancestor;
- do {
- ancestor = stack->front();
- stack->pop_front();
- DCHECK_EQ(ancestor->status(),
- new_status == kInstantiated ? kInstantiating : kEvaluating);
- if (new_status == kInstantiated) {
- if (!RunInitializationCode(isolate, ancestor)) return false;
- }
- ancestor->SetStatus(new_status);
- } while (*ancestor != *module);
+ if (module->IsSourceTextModule()) {
+ return SourceTextModule::PrepareInstantiate(
+ isolate, Handle<SourceTextModule>::cast(module), context, callback);
+ } else {
+ return SyntheticModule::PrepareInstantiate(
+ isolate, Handle<SyntheticModule>::cast(module), context, callback);
}
- return true;
}
bool Module::FinishInstantiate(Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack,
+ ZoneForwardList<Handle<SourceTextModule>>* stack,
unsigned* dfs_index, Zone* zone) {
DCHECK_NE(module->status(), kEvaluating);
if (module->status() >= kInstantiating) return true;
DCHECK_EQ(module->status(), kPreInstantiating);
STACK_CHECK(isolate, false);
- // Instantiate SharedFunctionInfo and mark module as instantiating for
- // the recursion.
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
- isolate);
- Handle<JSFunction> function =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- shared, isolate->native_context());
- module->set_code(*function);
- module->SetStatus(kInstantiating);
- module->set_dfs_index(*dfs_index);
- module->set_dfs_ancestor_index(*dfs_index);
- stack->push_front(module);
- (*dfs_index)++;
-
- // Recurse.
- Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
- for (int i = 0, length = requested_modules->length(); i < length; ++i) {
- Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
- isolate);
- if (!FinishInstantiate(isolate, requested_module, stack, dfs_index, zone)) {
- return false;
- }
-
- DCHECK_NE(requested_module->status(), kEvaluating);
- DCHECK_GE(requested_module->status(), kInstantiating);
- SLOW_DCHECK(
- // {requested_module} is instantiating iff it's on the {stack}.
- (requested_module->status() == kInstantiating) ==
- std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
- return *m == *requested_module;
- }));
-
- if (requested_module->status() == kInstantiating) {
- module->set_dfs_ancestor_index(
- std::min(module->dfs_ancestor_index(),
- requested_module->dfs_ancestor_index()));
- }
+ if (module->IsSourceTextModule()) {
+ return SourceTextModule::FinishInstantiate(
+ isolate, Handle<SourceTextModule>::cast(module), stack, dfs_index,
+ zone);
+ } else {
+ return SyntheticModule::FinishInstantiate(
+ isolate, Handle<SyntheticModule>::cast(module));
}
-
- Handle<Script> script(module->script(), isolate);
- Handle<ModuleInfo> module_info(module->info(), isolate);
-
- // Resolve imports.
- Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
- for (int i = 0, n = regular_imports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
- Handle<String> name(String::cast(entry->import_name()), isolate);
- MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
- ResolveSet resolve_set(zone);
- Handle<Cell> cell;
- if (!ResolveImport(isolate, module, name, entry->module_request(), loc,
- true, &resolve_set)
- .ToHandle(&cell)) {
- return false;
- }
- module->regular_imports().set(ImportIndex(entry->cell_index()), *cell);
- }
-
- // Resolve indirect exports.
- Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- Handle<Object> name(entry->export_name(), isolate);
- if (name->IsUndefined(isolate)) continue; // Star export.
- MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
- ResolveSet resolve_set(zone);
- if (ResolveExport(isolate, module, Handle<String>(),
- Handle<String>::cast(name), loc, true, &resolve_set)
- .is_null()) {
- return false;
- }
- }
-
- return MaybeTransitionComponent(isolate, module, stack, kInstantiated);
}
MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
@@ -663,7 +234,14 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Evaluating module ";
- module->script().GetNameOrSourceURL().Print(os);
+ if (module->IsSourceTextModule()) {
+ Handle<SourceTextModule>::cast(module)
+ ->script()
+ .GetNameOrSourceURL()
+ .Print(os);
+ } else {
+ Handle<SyntheticModule>::cast(module)->name().Print(os);
+ }
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -677,7 +255,7 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
DCHECK_GE(module->status(), kInstantiated);
Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneForwardList<Handle<Module>> stack(&zone);
+ ZoneForwardList<Handle<SourceTextModule>> stack(&zone);
unsigned dfs_index = 0;
Handle<Object> result;
if (!Evaluate(isolate, module, &stack, &dfs_index).ToHandle(&result)) {
@@ -693,9 +271,9 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
return result;
}
-MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack,
- unsigned* dfs_index) {
+MaybeHandle<Object> Module::Evaluate(
+ Isolate* isolate, Handle<Module> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) {
if (module->status() == kErrored) {
isolate->Throw(module->GetException());
return MaybeHandle<Object>();
@@ -706,134 +284,13 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
DCHECK_EQ(module->status(), kInstantiated);
STACK_CHECK(isolate, MaybeHandle<Object>());
- Handle<JSGeneratorObject> generator(JSGeneratorObject::cast(module->code()),
- isolate);
- module->set_code(
- generator->function().shared().scope_info().ModuleDescriptorInfo());
- module->SetStatus(kEvaluating);
- module->set_dfs_index(*dfs_index);
- module->set_dfs_ancestor_index(*dfs_index);
- stack->push_front(module);
- (*dfs_index)++;
-
- // Recursion.
- Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
- for (int i = 0, length = requested_modules->length(); i < length; ++i) {
- Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
- isolate);
- RETURN_ON_EXCEPTION(
- isolate, Evaluate(isolate, requested_module, stack, dfs_index), Object);
-
- DCHECK_GE(requested_module->status(), kEvaluating);
- DCHECK_NE(requested_module->status(), kErrored);
- SLOW_DCHECK(
- // {requested_module} is evaluating iff it's on the {stack}.
- (requested_module->status() == kEvaluating) ==
- std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
- return *m == *requested_module;
- }));
-
- if (requested_module->status() == kEvaluating) {
- module->set_dfs_ancestor_index(
- std::min(module->dfs_ancestor_index(),
- requested_module->dfs_ancestor_index()));
- }
+ if (module->IsSourceTextModule()) {
+ return SourceTextModule::Evaluate(
+ isolate, Handle<SourceTextModule>::cast(module), stack, dfs_index);
+ } else {
+ return SyntheticModule::Evaluate(isolate,
+ Handle<SyntheticModule>::cast(module));
}
-
- // Evaluation of module body.
- Handle<JSFunction> resume(
- isolate->native_context()->generator_next_internal(), isolate);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
- Object);
- DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
-
- CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
- return handle(JSIteratorResult::cast(*result).value(), isolate);
-}
-
-namespace {
-
-void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
- UnorderedModuleSet* visited) {
- DCHECK_GE(module->status(), Module::kInstantiating);
-
- if (module->module_namespace().IsJSModuleNamespace()) return; // Shortcut.
-
- bool cycle = !visited->insert(module).second;
- if (cycle) return;
- Handle<ObjectHashTable> exports(module->exports(), isolate);
- UnorderedStringMap more_exports(zone);
-
- // TODO(neis): Only allocate more_exports if there are star exports.
- // Maybe split special_exports into indirect_exports and star_exports.
-
- ReadOnlyRoots roots(isolate);
- Handle<FixedArray> special_exports(module->info().special_exports(), isolate);
- for (int i = 0, n = special_exports->length(); i < n; ++i) {
- Handle<ModuleInfoEntry> entry(
- ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name().IsUndefined(roots)) {
- continue; // Indirect export.
- }
-
- Handle<Module> requested_module(
- Module::cast(module->requested_modules().get(entry->module_request())),
- isolate);
-
- // Recurse.
- FetchStarExports(isolate, requested_module, zone, visited);
-
- // Collect all of [requested_module]'s exports that must be added to
- // [module]'s exports (i.e. to [exports]). We record these in
- // [more_exports]. Ambiguities (conflicting exports) are marked by mapping
- // the name to undefined instead of a Cell.
- Handle<ObjectHashTable> requested_exports(requested_module->exports(),
- isolate);
- for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
- Object key;
- if (!requested_exports->ToKey(roots, i, &key)) continue;
- Handle<String> name(String::cast(key), isolate);
-
- if (name->Equals(roots.default_string())) continue;
- if (!exports->Lookup(name).IsTheHole(roots)) continue;
-
- Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
- auto insert_result = more_exports.insert(std::make_pair(name, cell));
- if (!insert_result.second) {
- auto it = insert_result.first;
- if (*it->second == *cell || it->second->IsUndefined(roots)) {
- // We already recorded this mapping before, or the name is already
- // known to be ambiguous. In either case, there's nothing to do.
- } else {
- DCHECK(it->second->IsCell());
- // Different star exports provide different cells for this name, hence
- // mark the name as ambiguous.
- it->second = roots.undefined_value_handle();
- }
- }
- }
- }
-
- // Copy [more_exports] into [exports].
- for (const auto& elem : more_exports) {
- if (elem.second->IsUndefined(isolate)) continue; // Ambiguous export.
- DCHECK(!elem.first->Equals(ReadOnlyRoots(isolate).default_string()));
- DCHECK(elem.second->IsCell());
- exports = ObjectHashTable::Put(exports, elem.first, elem.second);
- }
- module->set_exports(*exports);
-}
-
-} // anonymous namespace
-
-Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
- Handle<Module> module,
- int module_request) {
- Handle<Module> requested_module(
- Module::cast(module->requested_modules().get(module_request)), isolate);
- return Module::GetModuleNamespace(isolate, requested_module);
}
Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
@@ -848,7 +305,12 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
// Collect the export names.
Zone zone(isolate->allocator(), ZONE_NAME);
UnorderedModuleSet visited(&zone);
- FetchStarExports(isolate, module, &zone, &visited);
+
+ if (module->IsSourceTextModule()) {
+ SourceTextModule::FetchStarExports(
+ isolate, Handle<SourceTextModule>::cast(module), &zone, &visited);
+ }
+
Handle<ObjectHashTable> exports(module->exports(), isolate);
ZoneVector<Handle<String>> names(&zone);
names.reserve(exports->NumberOfElements());
@@ -874,7 +336,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
// Create the properties in the namespace object. Transition the object
// to dictionary mode so that property addition is faster.
PropertyAttributes attr = DONT_DELETE;
- JSObject::NormalizeProperties(ns, CLEAR_INOBJECT_PROPERTIES,
+ JSObject::NormalizeProperties(isolate, ns, CLEAR_INOBJECT_PROPERTIES,
static_cast<int>(names.size()),
"JSModuleNamespace");
for (const auto& name : names) {
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index a1672dce7e..b776ddb0be 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -9,6 +9,7 @@
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
#include "src/objects/struct.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -20,35 +21,23 @@ template <typename T>
class Handle;
class Isolate;
class JSModuleNamespace;
-class ModuleDescriptor;
-class ModuleInfo;
-class ModuleInfoEntry;
+class SourceTextModuleDescriptor;
+class SourceTextModuleInfo;
+class SourceTextModuleInfoEntry;
class String;
class Zone;
-// The runtime representation of an ECMAScript module.
-class Module : public Struct {
+// Module is the base class for ECMAScript module types, roughly corresponding
+// to Abstract Module Record.
+// https://tc39.github.io/ecma262/#sec-abstract-module-records
+class Module : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
DECL_CAST(Module)
DECL_VERIFIER(Module)
DECL_PRINTER(Module)
- // The code representing this module, or an abstraction thereof.
- // This is either a SharedFunctionInfo, a JSFunction, a JSGeneratorObject, or
- // a ModuleInfo, depending on the state (status) the module is in. See
- // Module::ModuleVerify() for the precise invariant.
- DECL_ACCESSORS(code, Object)
-
- // Arrays of cells corresponding to regular exports and regular imports.
- // A cell's position in the array is determined by the cell index of the
- // associated module entry (which coincides with the variable index of the
- // associated variable).
- DECL_ACCESSORS(regular_exports, FixedArray)
- DECL_ACCESSORS(regular_imports, FixedArray)
-
// The complete export table, mapping an export name to its cell.
- // TODO(neis): We may want to remove the regular exports from the table.
DECL_ACCESSORS(exports, ObjectHashTable)
// Hash for this object (a random non-zero Smi).
@@ -67,31 +56,12 @@ class Module : public Struct {
kErrored
};
- // The exception in the case {status} is kErrored.
- Object GetException();
-
- // The shared function info in case {status} is not kEvaluating, kEvaluated or
- // kErrored.
- SharedFunctionInfo GetSharedFunctionInfo() const;
-
// The namespace object (or undefined).
DECL_ACCESSORS(module_namespace, HeapObject)
- // Modules imported or re-exported by this module.
- // Corresponds 1-to-1 to the module specifier strings in
- // ModuleInfo::module_requests.
- DECL_ACCESSORS(requested_modules, FixedArray)
-
- // [script]: Script from which the module originates.
- DECL_ACCESSORS(script, Script)
-
- // The value of import.meta inside of this module.
- // Lazily initialized on first access. It's the hole before first access and
- // a JSObject afterwards.
- DECL_ACCESSORS(import_meta, Object)
-
- // Get the ModuleInfo associated with the code.
- inline ModuleInfo info() const;
+ // The exception in the case {status} is kErrored.
+ Object GetException();
+ DECL_ACCESSORS(exception, Object)
// Implementation of spec operation ModuleDeclarationInstantiation.
// Returns false if an exception occurred during instantiation, true
@@ -105,63 +75,20 @@ class Module : public Struct {
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
Isolate* isolate, Handle<Module> module);
- Cell GetCell(int cell_index);
- static Handle<Object> LoadVariable(Isolate* isolate, Handle<Module> module,
- int cell_index);
- static void StoreVariable(Handle<Module> module, int cell_index,
- Handle<Object> value);
-
- static int ImportIndex(int cell_index);
- static int ExportIndex(int cell_index);
-
- // Get the namespace object for [module_request] of [module]. If it doesn't
- // exist yet, it is created.
- static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
- Handle<Module> module,
- int module_request);
-
// Get the namespace object for [module]. If it doesn't exist yet, it is
// created.
static Handle<JSModuleNamespace> GetModuleNamespace(Isolate* isolate,
Handle<Module> module);
// Layout description.
-#define MODULE_FIELDS(V) \
- V(kCodeOffset, kTaggedSize) \
- V(kExportsOffset, kTaggedSize) \
- V(kRegularExportsOffset, kTaggedSize) \
- V(kRegularImportsOffset, kTaggedSize) \
- V(kHashOffset, kTaggedSize) \
- V(kModuleNamespaceOffset, kTaggedSize) \
- V(kRequestedModulesOffset, kTaggedSize) \
- V(kStatusOffset, kTaggedSize) \
- V(kDfsIndexOffset, kTaggedSize) \
- V(kDfsAncestorIndexOffset, kTaggedSize) \
- V(kExceptionOffset, kTaggedSize) \
- V(kScriptOffset, kTaggedSize) \
- V(kImportMetaOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, MODULE_FIELDS)
-#undef MODULE_FIELDS
-
- private:
- friend class Factory;
-
- DECL_ACCESSORS(exception, Object)
-
- // TODO(neis): Don't store those in the module object?
- DECL_INT_ACCESSORS(dfs_index)
- DECL_INT_ACCESSORS(dfs_ancestor_index)
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_MODULE_FIELDS)
- // Helpers for Instantiate and Evaluate.
+ using BodyDescriptor =
+ FixedBodyDescriptor<kExportsOffset, kHeaderSize, kHeaderSize>;
- static void CreateExport(Isolate* isolate, Handle<Module> module,
- int cell_index, Handle<FixedArray> names);
- static void CreateIndirectExport(Isolate* isolate, Handle<Module> module,
- Handle<String> name,
- Handle<ModuleInfoEntry> entry);
+ protected:
+ friend class Factory;
// The [must_resolve] argument indicates whether or not an exception should be
// thrown in case the module does not provide an export named [name]
@@ -176,32 +103,18 @@ class Module : public Struct {
Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
Handle<String> export_name, MessageLocation loc, bool must_resolve,
ResolveSet* resolve_set);
- static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveImport(
- Isolate* isolate, Handle<Module> module, Handle<String> name,
- int module_request, MessageLocation loc, bool must_resolve,
- ResolveSet* resolve_set);
-
- static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
- Isolate* isolate, Handle<Module> module, Handle<String> module_specifier,
- Handle<String> export_name, MessageLocation loc, bool must_resolve,
- ResolveSet* resolve_set);
static V8_WARN_UNUSED_RESULT bool PrepareInstantiate(
Isolate* isolate, Handle<Module> module, v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback);
static V8_WARN_UNUSED_RESULT bool FinishInstantiate(
Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack, unsigned* dfs_index, Zone* zone);
- static V8_WARN_UNUSED_RESULT bool RunInitializationCode(
- Isolate* isolate, Handle<Module> module);
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
+ Zone* zone);
static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack, unsigned* dfs_index);
-
- static V8_WARN_UNUSED_RESULT bool MaybeTransitionComponent(
- Isolate* isolate, Handle<Module> module,
- ZoneForwardList<Handle<Module>>* stack, Status new_status);
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index);
// Set module's status back to kUninstantiated and reset other internal state.
// This is used when instantiation fails.
@@ -217,7 +130,7 @@ class Module : public Struct {
void PrintStatusTransition(Status new_status);
#endif // DEBUG
- OBJECT_CONSTRUCTORS(Module, Struct);
+ OBJECT_CONSTRUCTORS(Module, HeapObject);
};
// When importing a module namespace (import * as foo from "bar"), a
@@ -250,93 +163,16 @@ class JSModuleNamespace : public JSObject {
kInObjectFieldCount,
};
-// Layout description.
-#define JS_MODULE_NAMESPACE_FIELDS(V) \
- V(kModuleOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0) \
- V(kInObjectFieldsOffset, kTaggedSize* kInObjectFieldCount) \
- /* Total size. */ \
- V(kSize, 0)
-
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_MODULE_NAMESPACE_FIELDS)
-#undef JS_MODULE_NAMESPACE_FIELDS
-
- OBJECT_CONSTRUCTORS(JSModuleNamespace, JSObject);
-};
-
-// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
-class ModuleInfo : public FixedArray {
- public:
- DECL_CAST(ModuleInfo)
-
- static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
- ModuleDescriptor* descr);
-
- inline FixedArray module_requests() const;
- inline FixedArray special_exports() const;
- inline FixedArray regular_exports() const;
- inline FixedArray regular_imports() const;
- inline FixedArray namespace_imports() const;
- inline FixedArray module_request_positions() const;
-
- // Accessors for [regular_exports].
- int RegularExportCount() const;
- String RegularExportLocalName(int i) const;
- int RegularExportCellIndex(int i) const;
- FixedArray RegularExportExportNames(int i) const;
+ TORQUE_GENERATED_JSMODULE_NAMESPACE_FIELDS)
-#ifdef DEBUG
- inline bool Equals(ModuleInfo other) const;
-#endif
+ // We need to include in-object fields
+ // TODO(v8:8944): improve handling of in-object fields
+ static constexpr int kSize =
+ kHeaderSize + (kTaggedSize * kInObjectFieldCount);
- private:
- friend class Factory;
- friend class ModuleDescriptor;
- enum {
- kModuleRequestsIndex,
- kSpecialExportsIndex,
- kRegularExportsIndex,
- kNamespaceImportsIndex,
- kRegularImportsIndex,
- kModuleRequestPositionsIndex,
- kLength
- };
- enum {
- kRegularExportLocalNameOffset,
- kRegularExportCellIndexOffset,
- kRegularExportExportNamesOffset,
- kRegularExportLength
- };
- OBJECT_CONSTRUCTORS(ModuleInfo, FixedArray);
-};
-
-class ModuleInfoEntry : public Struct {
- public:
- DECL_CAST(ModuleInfoEntry)
- DECL_PRINTER(ModuleInfoEntry)
- DECL_VERIFIER(ModuleInfoEntry)
-
- DECL_ACCESSORS(export_name, Object)
- DECL_ACCESSORS(local_name, Object)
- DECL_ACCESSORS(import_name, Object)
- DECL_INT_ACCESSORS(module_request)
- DECL_INT_ACCESSORS(cell_index)
- DECL_INT_ACCESSORS(beg_pos)
- DECL_INT_ACCESSORS(end_pos)
-
- static Handle<ModuleInfoEntry> New(Isolate* isolate,
- Handle<Object> export_name,
- Handle<Object> local_name,
- Handle<Object> import_name,
- int module_request, int cell_index,
- int beg_pos, int end_pos);
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- TORQUE_GENERATED_MODULE_INFO_ENTRY_FIELDS)
-
- OBJECT_CONSTRUCTORS(ModuleInfoEntry, Struct);
+ OBJECT_CONSTRUCTORS(JSModuleNamespace, JSObject);
};
} // namespace internal
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index b3e04bbd50..8aded12fb5 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -16,14 +16,9 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Name, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(Symbol, Name)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Name)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Symbol)
-CAST_ACCESSOR(Name)
-CAST_ACCESSOR(Symbol)
-
-ACCESSORS(Symbol, name, Object, kNameOffset)
-INT_ACCESSORS(Symbol, flags, kFlagsOffset)
BIT_FIELD_ACCESSORS(Symbol, flags, is_private, Symbol::IsPrivateBit)
BIT_FIELD_ACCESSORS(Symbol, flags, is_well_known_symbol,
Symbol::IsWellKnownSymbolBit)
@@ -44,20 +39,14 @@ void Symbol::set_is_private_name() {
set_flags(Symbol::IsPrivateNameBit::update(flags(), true));
}
-bool Name::IsUniqueName() const {
- uint32_t type = map().instance_type();
+DEF_GETTER(Name, IsUniqueName, bool) {
+ uint32_t type = map(isolate).instance_type();
bool result = (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
(kStringTag | kNotInternalizedTag);
SLOW_DCHECK(result == HeapObject::IsUniqueName());
return result;
}
-uint32_t Name::hash_field() { return ReadField<uint32_t>(kHashFieldOffset); }
-
-void Name::set_hash_field(uint32_t value) {
- WriteField<uint32_t>(kHashFieldOffset, value);
-}
-
bool Name::Equals(Name other) {
if (other == *this) return true;
if ((this->IsInternalizedString() && other.IsInternalizedString()) ||
@@ -91,17 +80,17 @@ uint32_t Name::Hash() {
return String::cast(*this).ComputeAndSetHash();
}
-bool Name::IsInterestingSymbol() const {
- return IsSymbol() && Symbol::cast(*this).is_interesting_symbol();
+DEF_GETTER(Name, IsInterestingSymbol, bool) {
+ return IsSymbol(isolate) && Symbol::cast(*this).is_interesting_symbol();
}
-bool Name::IsPrivate() {
- return this->IsSymbol() && Symbol::cast(*this).is_private();
+DEF_GETTER(Name, IsPrivate, bool) {
+ return this->IsSymbol(isolate) && Symbol::cast(*this).is_private();
}
-bool Name::IsPrivateName() {
+DEF_GETTER(Name, IsPrivateName, bool) {
bool is_private_name =
- this->IsSymbol() && Symbol::cast(*this).is_private_name();
+ this->IsSymbol(isolate) && Symbol::cast(*this).is_private_name();
DCHECK_IMPLIES(is_private_name, IsPrivate());
return is_private_name;
}
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index 8b2a8f0a01..b13aa30fb0 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -7,7 +7,7 @@
#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
-#include "torque-generated/field-offsets-tq.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,12 +17,8 @@ namespace internal {
// The Name abstract class captures anything that can be used as a property
// name, i.e., strings and symbols. All names store a hash value.
-class Name : public HeapObject {
+class Name : public TorqueGeneratedName<Name, HeapObject> {
public:
- // Get and set the hash field of the name.
- inline uint32_t hash_field();
- inline void set_hash_field(uint32_t value);
-
// Tells whether the hash code has been computed.
inline bool HasHashCode();
@@ -43,15 +39,19 @@ class Name : public HeapObject {
// symbol properties are added, so we can optimize lookups on objects
// that don't have the flag.
inline bool IsInterestingSymbol() const;
+ inline bool IsInterestingSymbol(Isolate* isolate) const;
// If the name is private, it can only name own properties.
- inline bool IsPrivate();
+ inline bool IsPrivate() const;
+ inline bool IsPrivate(Isolate* isolate) const;
// If the name is a private name, it should behave like a private
// symbol but also throw on property access miss.
- inline bool IsPrivateName();
+ inline bool IsPrivateName() const;
+ inline bool IsPrivateName(Isolate* isolate) const;
inline bool IsUniqueName() const;
+ inline bool IsUniqueName(Isolate* isolate) const;
static inline bool ContainsCachedArrayIndex(uint32_t hash);
@@ -62,15 +62,10 @@ class Name : public HeapObject {
V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToFunctionName(
Isolate* isolate, Handle<Name> name, Handle<String> prefix);
- DECL_CAST(Name)
-
DECL_PRINTER(Name)
void NameShortPrint();
int NameShortPrint(Vector<char> str);
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_NAME_FIELDS)
-
// Mask constant for checking if a name has a computed hash code
// and if it is a string that is an array index. The least significant bit
// indicates whether a hash code has been computed. If the hash code has
@@ -131,17 +126,12 @@ class Name : public HeapObject {
protected:
static inline bool IsHashFieldComputed(uint32_t field);
- OBJECT_CONSTRUCTORS(Name, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Name)
};
// ES6 symbols.
-class Symbol : public Name {
+class Symbol : public TorqueGeneratedSymbol<Symbol, Name> {
public:
- // [name]: The print name of a symbol, or undefined if none.
- DECL_ACCESSORS(name, Object)
-
- DECL_INT_ACCESSORS(flags)
-
// [is_private]: Whether this is a private symbol. Private symbols can only
// be used to designate own properties of objects.
DECL_BOOLEAN_ACCESSORS(is_private)
@@ -169,15 +159,10 @@ class Symbol : public Name {
inline bool is_private_name() const;
inline void set_is_private_name();
- DECL_CAST(Symbol)
-
// Dispatched behavior.
DECL_PRINTER(Symbol)
DECL_VERIFIER(Symbol)
- DEFINE_FIELD_OFFSET_CONSTANTS(Name::kHeaderSize,
- TORQUE_GENERATED_SYMBOL_FIELDS)
-
// Flags layout.
#define FLAGS_BIT_FIELDS(V, _) \
V(IsPrivateBit, bool, 1, _) \
@@ -199,7 +184,7 @@ class Symbol : public Name {
// TODO(cbruni): remove once the new maptracer is in place.
friend class Name; // For PrivateSymbolToName.
- OBJECT_CONSTRUCTORS(Symbol, Name);
+ TQ_OBJECT_CONSTRUCTORS(Symbol)
};
} // namespace internal
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
index 78452de502..c15b212eec 100644
--- a/deps/v8/src/objects/object-list-macros.h
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -27,6 +27,7 @@ class FunctionLiteral;
class FunctionTemplateInfo;
class JSAsyncGeneratorObject;
class JSGlobalProxy;
+class SourceTextModule;
class JSPromise;
class JSProxy;
class JSProxyRevocableResult;
@@ -35,7 +36,7 @@ class LayoutDescriptor;
class LookupIterator;
class FieldType;
class Module;
-class ModuleInfoEntry;
+class SourceTextModuleInfoEntry;
class MutableHeapNumber;
class ObjectHashTable;
class ObjectTemplateInfo;
@@ -53,6 +54,7 @@ class ScriptContextTable;
class SharedFunctionInfo;
class StringStream;
class Symbol;
+class SyntheticModule;
class FeedbackCell;
class FeedbackMetadata;
class FeedbackVector;
@@ -134,10 +136,13 @@ class ZoneForwardList;
V(JSAsyncGeneratorObject) \
V(JSBoundFunction) \
V(JSCollection) \
+ V(JSCollectionIterator) \
V(JSContextExtensionObject) \
V(JSDataView) \
V(JSDate) \
V(JSError) \
+ V(JSFinalizationGroup) \
+ V(JSFinalizationGroupCleanupIterator) \
V(JSFunction) \
V(JSGeneratorObject) \
V(JSGlobalObject) \
@@ -147,6 +152,7 @@ class ZoneForwardList;
V(JSMessageObject) \
V(JSModuleNamespace) \
V(JSObject) \
+ V(JSPrimitiveWrapper) \
V(JSPromise) \
V(JSProxy) \
V(JSReceiver) \
@@ -158,18 +164,15 @@ class ZoneForwardList;
V(JSSloppyArgumentsObject) \
V(JSStringIterator) \
V(JSTypedArray) \
- V(JSValue) \
- V(JSWeakRef) \
V(JSWeakCollection) \
- V(JSFinalizationGroup) \
- V(JSFinalizationGroupCleanupIterator) \
+ V(JSWeakRef) \
V(JSWeakMap) \
V(JSWeakSet) \
V(LoadHandler) \
V(Map) \
V(MapCache) \
+ V(Module) \
V(Microtask) \
- V(ModuleInfo) \
V(MutableHeapNumber) \
V(Name) \
V(NameDictionary) \
@@ -202,6 +205,8 @@ class ZoneForwardList;
V(SmallOrderedHashMap) \
V(SmallOrderedHashSet) \
V(SmallOrderedNameDictionary) \
+ V(SourceTextModule) \
+ V(SourceTextModuleInfo) \
V(StoreHandler) \
V(String) \
V(StringSet) \
@@ -210,6 +215,7 @@ class ZoneForwardList;
V(Struct) \
V(Symbol) \
V(SymbolWrapper) \
+ V(SyntheticModule) \
V(TemplateInfo) \
V(TemplateList) \
V(ThinString) \
@@ -248,9 +254,17 @@ class ZoneForwardList;
#define HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) V(HashTable)
+// Logical sub-types of heap objects that don't correspond to a C++ class but
+// represent some specialization in terms of additional constraints.
+#define HEAP_OBJECT_SPECIALIZED_TYPE_LIST(V) \
+ V(CallableApiObject) \
+ V(CallableJSProxy) \
+ V(NonNullForeign)
+
#define HEAP_OBJECT_TYPE_LIST(V) \
HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
- HEAP_OBJECT_TEMPLATE_TYPE_LIST(V)
+ HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) \
+ HEAP_OBJECT_SPECIALIZED_TYPE_LIST(V)
#define ODDBALL_LIST(V) \
V(Undefined, undefined_value) \
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index c8ebf57ce7..b96c03c00f 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -11,12 +11,16 @@
#undef NEVER_READ_ONLY_SPACE
#undef NEVER_READ_ONLY_SPACE_IMPL
#undef DECL_PRIMITIVE_ACCESSORS
+#undef DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
+#undef DECL_SYNCHRONIZED_INT_ACCESSORS
#undef DECL_INT32_ACCESSORS
#undef DECL_UINT16_ACCESSORS
#undef DECL_INT16_ACCESSORS
#undef DECL_UINT8_ACCESSORS
+#undef DECL_GETTER
+#undef DEF_GETTER
#undef DECL_ACCESSORS
#undef DECL_CAST
#undef CAST_ACCESSOR
@@ -45,13 +49,10 @@
#undef TYPE_CHECKER
#undef RELAXED_INT16_ACCESSORS
#undef FIELD_ADDR
-#undef READ_FIELD
-#undef READ_WEAK_FIELD
#undef ACQUIRE_READ_FIELD
#undef RELAXED_READ_FIELD
#undef RELAXED_READ_WEAK_FIELD
#undef WRITE_FIELD
-#undef WRITE_WEAK_FIELD
#undef RELEASE_WRITE_FIELD
#undef RELAXED_WRITE_FIELD
#undef RELAXED_WRITE_WEAK_FIELD
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 1f499d4fba..8f9e51ca9e 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -14,15 +14,18 @@
// for fields that can be written to and read from multiple threads at the same
// time. See comments in src/base/atomicops.h for the memory ordering sematics.
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
// Since this changes visibility, it should always be last in a class
// definition.
-#define OBJECT_CONSTRUCTORS(Type, ...) \
- public: \
- constexpr Type() : __VA_ARGS__() {} \
- \
- protected: \
+#define OBJECT_CONSTRUCTORS(Type, ...) \
+ public: \
+ constexpr Type() : __VA_ARGS__() {} \
+ \
+ protected: \
+ template <typename TFieldType, int kFieldOffset> \
+ friend class TaggedField; \
+ \
explicit inline Type(Address ptr)
#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
@@ -34,22 +37,27 @@
// TODO(leszeks): Add checks in the factory that we never allocate these
// objects in RO space.
-#define NEVER_READ_ONLY_SPACE_IMPL(Type) \
- Heap* Type::GetHeap() const { \
- return NeverReadOnlySpaceObject::GetHeap(*this); \
- } \
- Isolate* Type::GetIsolate() const { \
- return NeverReadOnlySpaceObject::GetIsolate(*this); \
+#define NEVER_READ_ONLY_SPACE_IMPL(Type) \
+ Heap* Type::GetHeap() const { return GetHeapFromWritableObject(*this); } \
+ Isolate* Type::GetIsolate() const { \
+ return GetIsolateFromWritableObject(*this); \
}
#define DECL_PRIMITIVE_ACCESSORS(name, type) \
inline type name() const; \
inline void set_##name(type value);
+#define DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS(name, type) \
+ inline type synchronized_##name() const; \
+ inline void synchronized_set_##name(type value);
+
#define DECL_BOOLEAN_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, bool)
#define DECL_INT_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int)
+#define DECL_SYNCHRONIZED_INT_ACCESSORS(name) \
+ DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS(name, int)
+
#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
#define DECL_UINT16_ACCESSORS(name) \
@@ -64,8 +72,22 @@
inline uint8_t name() const; \
inline void set_##name(int value);
+// TODO(ishell): eventually isolate-less getters should not be used anymore.
+// For full pointer-mode the C++ compiler should optimize away unused isolate
+// parameter.
+#define DECL_GETTER(name, type) \
+ inline type name() const; \
+ inline type name(Isolate* isolate) const;
+
+#define DEF_GETTER(holder, name, type) \
+ type holder::name() const { \
+ Isolate* isolate = GetIsolateForPtrCompr(*this); \
+ return holder::name(isolate); \
+ } \
+ type holder::name(Isolate* isolate) const
+
#define DECL_ACCESSORS(name, type) \
- inline type name() const; \
+ DECL_GETTER(name, type) \
inline void set_##name(type value, \
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -112,14 +134,14 @@
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
set_condition) \
- type holder::name() const { \
- type value = type::cast(READ_FIELD(*this, offset)); \
+ DEF_GETTER(holder, name, type) { \
+ type value = TaggedField<type, offset>::load(isolate, *this); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(type value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
- WRITE_FIELD(*this, offset, value); \
+ TaggedField<type, offset>::store(*this, value); \
CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
@@ -129,17 +151,17 @@
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
-#define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, \
- get_condition, set_condition) \
- type holder::name() const { \
- type value = type::cast(ACQUIRE_READ_FIELD(*this, offset)); \
- DCHECK(get_condition); \
- return value; \
- } \
- void holder::set_##name(type value, WriteBarrierMode mode) { \
- DCHECK(set_condition); \
- RELEASE_WRITE_FIELD(*this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
+#define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, \
+ get_condition, set_condition) \
+ DEF_GETTER(holder, name, type) { \
+ type value = TaggedField<type, offset>::Acquire_Load(isolate, *this); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(type value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ TaggedField<type, offset>::Release_Store(*this, value); \
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode); \
}
#define SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
@@ -151,14 +173,15 @@
#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
set_condition) \
- MaybeObject holder::name() const { \
- MaybeObject value = READ_WEAK_FIELD(*this, offset); \
+ DEF_GETTER(holder, name, MaybeObject) { \
+ MaybeObject value = \
+ TaggedField<MaybeObject, offset>::load(isolate, *this); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(MaybeObject value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
- WRITE_WEAK_FIELD(*this, offset, value); \
+ TaggedField<MaybeObject, offset>::store(*this, value); \
CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
}
@@ -169,36 +192,44 @@
WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
// Getter that returns a Smi as an int and writes an int as a Smi.
-#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
- int holder::name() const { \
- DCHECK(condition); \
- Object value = READ_FIELD(*this, offset); \
- return Smi::ToInt(value); \
- } \
- void holder::set_##name(int value) { \
- DCHECK(condition); \
- WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
+#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
+ int holder::name() const { \
+ DCHECK(condition); \
+ Smi value = TaggedField<Smi, offset>::load(*this); \
+ return value.value(); \
+ } \
+ void holder::set_##name(int value) { \
+ DCHECK(condition); \
+ TaggedField<Smi, offset>::store(*this, Smi::FromInt(value)); \
}
#define SMI_ACCESSORS(holder, name, offset) \
SMI_ACCESSORS_CHECKED(holder, name, offset, true)
-#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
- int holder::synchronized_##name() const { \
- Object value = ACQUIRE_READ_FIELD(*this, offset); \
- return Smi::ToInt(value); \
- } \
- void holder::synchronized_set_##name(int value) { \
- RELEASE_WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
+#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::synchronized_##name() const { \
+ Smi value = TaggedField<Smi, offset>::Acquire_Load(*this); \
+ return value.value(); \
+ } \
+ void holder::synchronized_set_##name(int value) { \
+ TaggedField<Smi, offset>::Release_Store(*this, Smi::FromInt(value)); \
+ }
+
+#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
+ int holder::relaxed_read_##name() const { \
+ Smi value = TaggedField<Smi, offset>::Relaxed_Load(*this); \
+ return value.value(); \
+ } \
+ void holder::relaxed_write_##name(int value) { \
+ TaggedField<Smi, offset>::Relaxed_Store(*this, Smi::FromInt(value)); \
}
-#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
- int holder::relaxed_read_##name() const { \
- Object value = RELAXED_READ_FIELD(*this, offset); \
- return Smi::ToInt(value); \
- } \
- void holder::relaxed_write_##name(int value) { \
- RELAXED_WRITE_FIELD(*this, offset, Smi::FromInt(value)); \
+#define TQ_SMI_ACCESSORS(holder, name) \
+ int holder::name() const { \
+ return TorqueGenerated##holder<holder, Super>::name().value(); \
+ } \
+ void holder::set_##name(int value) { \
+ TorqueGenerated##holder<holder, Super>::set_##name(Smi::FromInt(value)); \
}
#define BOOL_GETTER(holder, field, name, offset) \
@@ -223,9 +254,9 @@
return instance_type == forinstancetype; \
}
-#define TYPE_CHECKER(type, ...) \
- bool HeapObject::Is##type() const { \
- return InstanceTypeChecker::Is##type(map().instance_type()); \
+#define TYPE_CHECKER(type, ...) \
+ DEF_GETTER(HeapObject, Is##type, bool) { \
+ return InstanceTypeChecker::Is##type(map(isolate).instance_type()); \
}
#define RELAXED_INT16_ACCESSORS(holder, name, offset) \
@@ -238,39 +269,26 @@
#define FIELD_ADDR(p, offset) ((p).ptr() + offset - kHeapObjectTag)
-#define READ_FIELD(p, offset) (*ObjectSlot(FIELD_ADDR(p, offset)))
-
-#define READ_WEAK_FIELD(p, offset) (*MaybeObjectSlot(FIELD_ADDR(p, offset)))
-
#define ACQUIRE_READ_FIELD(p, offset) \
- ObjectSlot(FIELD_ADDR(p, offset)).Acquire_Load()
+ TaggedField<Object>::Acquire_Load(p, offset)
#define RELAXED_READ_FIELD(p, offset) \
- ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
+ TaggedField<Object>::Relaxed_Load(p, offset)
#define RELAXED_READ_WEAK_FIELD(p, offset) \
- MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
+ TaggedField<MaybeObject>::Relaxed_Load(p, offset)
-#ifdef V8_CONCURRENT_MARKING
-#define WRITE_FIELD(p, offset, value) \
- ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
-#define WRITE_WEAK_FIELD(p, offset, value) \
- MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
-#else
#define WRITE_FIELD(p, offset, value) \
- ObjectSlot(FIELD_ADDR(p, offset)).store(value)
-#define WRITE_WEAK_FIELD(p, offset, value) \
- MaybeObjectSlot(FIELD_ADDR(p, offset)).store(value)
-#endif
+ TaggedField<Object>::store(p, offset, value)
#define RELEASE_WRITE_FIELD(p, offset, value) \
- ObjectSlot(FIELD_ADDR(p, offset)).Release_Store(value)
+ TaggedField<Object>::Release_Store(p, offset, value)
#define RELAXED_WRITE_FIELD(p, offset, value) \
- ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
+ TaggedField<Object>::Relaxed_Store(p, offset, value)
#define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
- MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
+ TaggedField<MaybeObject>::Relaxed_Store(p, offset, value)
#define WRITE_BARRIER(object, offset, value) \
do { \
@@ -412,12 +430,15 @@
set(IndexForEntry(i) + k##name##Offset, value); \
}
-#define TQ_OBJECT_CONSTRUCTORS(Type) \
- public: \
- constexpr Type() = default; \
- \
- protected: \
- inline explicit Type(Address ptr); \
+#define TQ_OBJECT_CONSTRUCTORS(Type) \
+ public: \
+ constexpr Type() = default; \
+ \
+ protected: \
+ template <typename TFieldType, int kFieldOffset> \
+ friend class TaggedField; \
+ \
+ inline explicit Type(Address ptr); \
friend class TorqueGenerated##Type<Type, Super>;
#define TQ_OBJECT_CONSTRUCTORS_IMPL(Type) \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 8626165647..51e380695e 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -19,6 +19,8 @@
#include "src/objects/js-weak-refs.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/source-text-module.h"
+#include "src/objects/synthetic-module.h"
#include "src/objects/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -918,7 +920,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case JS_GENERATOR_OBJECT_TYPE:
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
case JS_ARRAY_ITERATOR_TYPE:
@@ -1043,6 +1045,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
} else if (type == WASM_CAPI_FUNCTION_DATA_TYPE) {
return Op::template apply<WasmCapiFunctionData::BodyDescriptor>(p1, p2,
p3, p4);
+ } else if (type == WASM_INDIRECT_FUNCTION_TABLE_TYPE) {
+ return Op::template apply<WasmIndirectFunctionTable::BodyDescriptor>(
+ p1, p2, p3, p4);
} else {
return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
}
@@ -1051,6 +1056,12 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
return Op::template apply<DataHandler::BodyDescriptor>(p1, p2, p3, p4);
+ case SOURCE_TEXT_MODULE_TYPE:
+ return Op::template apply<SourceTextModule::BodyDescriptor>(p1, p2, p3,
+ p4);
+ case SYNTHETIC_MODULE_TYPE:
+ return Op::template apply<SyntheticModule::BodyDescriptor>(p1, p2, p3,
+ p4);
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 90824c68ef..b4c8591e5c 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -7,6 +7,8 @@
#include "src/init/heap-symbols.h"
+#include "torque-generated/instance-types-tq.h"
+
namespace v8 {
namespace internal {
@@ -31,7 +33,7 @@ namespace internal {
// HeapObject::Size, HeapObject::IterateBody, the typeof operator, and
// Object::IsString.
//
-// NOTE: Everything following JS_VALUE_TYPE is considered a
+// NOTE: Everything following JS_PRIMITIVE_WRAPPER_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
//
@@ -80,6 +82,7 @@ namespace internal {
V(ACCESSOR_PAIR_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(ALLOCATION_MEMENTO_TYPE) \
+ V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
V(ASM_WASM_DATA_TYPE) \
V(ASYNC_GENERATOR_REQUEST_TYPE) \
V(CLASS_POSITIONS_TYPE) \
@@ -89,24 +92,23 @@ namespace internal {
V(FUNCTION_TEMPLATE_RARE_DATA_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
V(INTERPRETER_DATA_TYPE) \
- V(MODULE_INFO_ENTRY_TYPE) \
- V(MODULE_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
V(PROMISE_CAPABILITY_TYPE) \
V(PROMISE_REACTION_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
V(SCRIPT_TYPE) \
V(SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE) \
+ V(SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE) \
V(STACK_FRAME_INFO_TYPE) \
V(STACK_TRACE_FRAME_TYPE) \
V(TEMPLATE_OBJECT_DESCRIPTION_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
- V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
V(WASM_CAPI_FUNCTION_DATA_TYPE) \
V(WASM_DEBUG_INFO_TYPE) \
V(WASM_EXCEPTION_TAG_TYPE) \
V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
+ V(WASM_INDIRECT_FUNCTION_TABLE_TYPE) \
V(WASM_JS_FUNCTION_DATA_TYPE) \
\
V(CALLABLE_TASK_TYPE) \
@@ -116,6 +118,11 @@ namespace internal {
V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
V(FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE) \
\
+ TORQUE_DEFINED_INSTANCE_TYPES(V) \
+ \
+ V(SOURCE_TEXT_MODULE_TYPE) \
+ V(SYNTHETIC_MODULE_TYPE) \
+ \
V(ALLOCATION_SITE_TYPE) \
V(EMBEDDER_DATA_ARRAY_TYPE) \
\
@@ -174,7 +181,7 @@ namespace internal {
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_MODULE_NAMESPACE_TYPE) \
V(JS_SPECIAL_API_OBJECT_TYPE) \
- V(JS_VALUE_TYPE) \
+ V(JS_PRIMITIVE_WRAPPER_TYPE) \
V(JS_API_OBJECT_TYPE) \
V(JS_OBJECT_TYPE) \
\
@@ -296,6 +303,8 @@ namespace internal {
V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
aliased_arguments_entry) \
V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
+ V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
+ array_boilerplate_description) \
V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
@@ -308,8 +317,6 @@ namespace internal {
function_template_rare_data) \
V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
- V(_, MODULE_INFO_ENTRY_TYPE, ModuleInfoEntry, module_info_entry) \
- V(_, MODULE_TYPE, Module, module) \
V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
@@ -317,20 +324,22 @@ namespace internal {
V(_, SCRIPT_TYPE, Script, script) \
V(_, SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE, \
SourcePositionTableWithFrameCache, source_position_table_with_frame_cache) \
+ V(_, SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, SourceTextModuleInfoEntry, \
+ module_info_entry) \
V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \
V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
template_object_description) \
V(_, TUPLE2_TYPE, Tuple2, tuple2) \
V(_, TUPLE3_TYPE, Tuple3, tuple3) \
- V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
- array_boilerplate_description) \
V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \
wasm_capi_function_data) \
V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
wasm_exported_function_data) \
+ V(_, WASM_INDIRECT_FUNCTION_TABLE_TYPE, WasmIndirectFunctionTable, \
+ wasm_indirect_function_table) \
V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data) \
V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
@@ -347,14 +356,18 @@ namespace internal {
#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
// Produces (NAME, Name, name) entries.
-#define STRUCT_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
+#define STRUCT_LIST(V) \
+ STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V) \
+ TORQUE_STRUCT_LIST_GENERATOR(STRUCT_LIST_ADAPTER, V)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_MAPS_LIST entry
#define STRUCT_MAPS_LIST_ADAPTER(V, NAME, Name, name) \
V(Map, name##_map, Name##Map)
// Produces (Map, struct_name_map, StructNameMap) entries
-#define STRUCT_MAPS_LIST(V) STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
+#define STRUCT_MAPS_LIST(V) \
+ STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V) \
+ TORQUE_STRUCT_LIST_GENERATOR(STRUCT_MAPS_LIST_ADAPTER, V)
//
// The following macros define list of allocation size objects and list of
diff --git a/deps/v8/src/objects/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index ce92d64f2f..b6748401c0 100644
--- a/deps/v8/src/objects/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -15,11 +15,12 @@
#include "src/objects/objects.h"
#include "src/base/bits.h"
+#include "src/base/memory.h"
#include "src/builtins/builtins.h"
-#include "src/common/v8memory.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/read-only-heap-inl.h"
#include "src/numbers/conversions.h"
#include "src/numbers/double.h"
#include "src/objects/bigint.h"
@@ -37,6 +38,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
+#include "src/objects/tagged-field-inl.h"
#include "src/objects/tagged-impl-inl.h"
#include "src/objects/templates.h"
#include "src/sanitizer/tsan.h"
@@ -64,30 +66,37 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kTaggedSize : 1;
}
-bool HeapObject::IsSloppyArgumentsElements() const {
- return IsFixedArrayExact();
+DEF_GETTER(HeapObject, IsSloppyArgumentsElements, bool) {
+ return IsFixedArrayExact(isolate);
}
-bool HeapObject::IsJSSloppyArgumentsObject() const {
- return IsJSArgumentsObject();
+DEF_GETTER(HeapObject, IsJSSloppyArgumentsObject, bool) {
+ return IsJSArgumentsObject(isolate);
}
-bool HeapObject::IsJSGeneratorObject() const {
- return map().instance_type() == JS_GENERATOR_OBJECT_TYPE ||
- IsJSAsyncFunctionObject() || IsJSAsyncGeneratorObject();
+DEF_GETTER(HeapObject, IsJSGeneratorObject, bool) {
+ return map(isolate).instance_type() == JS_GENERATOR_OBJECT_TYPE ||
+ IsJSAsyncFunctionObject(isolate) || IsJSAsyncGeneratorObject(isolate);
}
-bool HeapObject::IsDataHandler() const {
- return IsLoadHandler() || IsStoreHandler();
+DEF_GETTER(HeapObject, IsDataHandler, bool) {
+ return IsLoadHandler(isolate) || IsStoreHandler(isolate);
}
-bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
+DEF_GETTER(HeapObject, IsClassBoilerplate, bool) {
+ return IsFixedArrayExact(isolate);
+}
-#define IS_TYPE_FUNCTION_DEF(type_) \
- bool Object::Is##type_() const { \
- return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
+#define IS_TYPE_FUNCTION_DEF(type_) \
+ bool Object::Is##type_() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
+ } \
+ bool Object::Is##type_(Isolate* isolate) const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##type_(isolate); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
+IS_TYPE_FUNCTION_DEF(HashTableBase)
+IS_TYPE_FUNCTION_DEF(SmallOrderedHashTable)
#undef IS_TYPE_FUNCTION_DEF
#define IS_TYPE_FUNCTION_DEF(Type, Value) \
@@ -140,109 +149,166 @@ bool HeapObject::IsNullOrUndefined() const {
return IsNullOrUndefined(GetReadOnlyRoots());
}
-bool HeapObject::IsUniqueName() const {
- return IsInternalizedString() || IsSymbol();
+DEF_GETTER(HeapObject, IsUniqueName, bool) {
+ return IsInternalizedString(isolate) || IsSymbol(isolate);
}
-bool HeapObject::IsFunction() const {
+DEF_GETTER(HeapObject, IsFunction, bool) {
STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- return map().instance_type() >= FIRST_FUNCTION_TYPE;
+ return map(isolate).instance_type() >= FIRST_FUNCTION_TYPE;
}
-bool HeapObject::IsCallable() const { return map().is_callable(); }
+DEF_GETTER(HeapObject, IsCallable, bool) { return map(isolate).is_callable(); }
-bool HeapObject::IsConstructor() const { return map().is_constructor(); }
+DEF_GETTER(HeapObject, IsCallableJSProxy, bool) {
+ return IsCallable(isolate) && IsJSProxy(isolate);
+}
-bool HeapObject::IsModuleInfo() const {
- return map() == GetReadOnlyRoots().module_info_map();
+DEF_GETTER(HeapObject, IsCallableApiObject, bool) {
+ InstanceType type = map(isolate).instance_type();
+ return IsCallable(isolate) &&
+ (type == JS_API_OBJECT_TYPE || type == JS_SPECIAL_API_OBJECT_TYPE);
}
-bool HeapObject::IsTemplateInfo() const {
- return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
+DEF_GETTER(HeapObject, IsNonNullForeign, bool) {
+ return IsForeign(isolate) &&
+ Foreign::cast(*this).foreign_address() != kNullAddress;
}
-bool HeapObject::IsConsString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsCons();
+DEF_GETTER(HeapObject, IsConstructor, bool) {
+ return map(isolate).is_constructor();
}
-bool HeapObject::IsThinString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsThin();
+DEF_GETTER(HeapObject, IsSourceTextModuleInfo, bool) {
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ return map(isolate) == GetReadOnlyRoots(isolate).module_info_map();
}
-bool HeapObject::IsSlicedString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsSliced();
+DEF_GETTER(HeapObject, IsTemplateInfo, bool) {
+ return IsObjectTemplateInfo(isolate) || IsFunctionTemplateInfo(isolate);
}
-bool HeapObject::IsSeqString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsSequential();
+DEF_GETTER(HeapObject, IsConsString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsCons();
}
-bool HeapObject::IsSeqOneByteString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsSequential() &&
- String::cast(*this).IsOneByteRepresentation();
+DEF_GETTER(HeapObject, IsThinString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsThin();
}
-bool HeapObject::IsSeqTwoByteString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsSequential() &&
- String::cast(*this).IsTwoByteRepresentation();
+DEF_GETTER(HeapObject, IsSlicedString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsSliced();
}
-bool HeapObject::IsExternalString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsExternal();
+DEF_GETTER(HeapObject, IsSeqString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsSequential();
}
-bool HeapObject::IsExternalOneByteString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsExternal() &&
- String::cast(*this).IsOneByteRepresentation();
+DEF_GETTER(HeapObject, IsSeqOneByteString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsSequential() &&
+ String::cast(*this).IsOneByteRepresentation(isolate);
}
-bool HeapObject::IsExternalTwoByteString() const {
- if (!IsString()) return false;
- return StringShape(String::cast(*this)).IsExternal() &&
- String::cast(*this).IsTwoByteRepresentation();
+DEF_GETTER(HeapObject, IsSeqTwoByteString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsSequential() &&
+ String::cast(*this).IsTwoByteRepresentation(isolate);
}
-bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
+DEF_GETTER(HeapObject, IsExternalString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsExternal();
+}
-bool Object::IsNumeric() const { return IsNumber() || IsBigInt(); }
+DEF_GETTER(HeapObject, IsExternalOneByteString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsExternal() &&
+ String::cast(*this).IsOneByteRepresentation(isolate);
+}
-bool HeapObject::IsFiller() const {
- InstanceType instance_type = map().instance_type();
+DEF_GETTER(HeapObject, IsExternalTwoByteString, bool) {
+ if (!IsString(isolate)) return false;
+ return StringShape(String::cast(*this).map(isolate)).IsExternal() &&
+ String::cast(*this).IsTwoByteRepresentation(isolate);
+}
+
+bool Object::IsNumber() const {
+ if (IsSmi()) return true;
+ HeapObject this_heap_object = HeapObject::cast(*this);
+ Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ return this_heap_object.IsHeapNumber(isolate);
+}
+
+bool Object::IsNumber(Isolate* isolate) const {
+ return IsSmi() || IsHeapNumber(isolate);
+}
+
+bool Object::IsNumeric() const {
+ if (IsSmi()) return true;
+ HeapObject this_heap_object = HeapObject::cast(*this);
+ Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ return this_heap_object.IsHeapNumber(isolate) ||
+ this_heap_object.IsBigInt(isolate);
+}
+
+bool Object::IsNumeric(Isolate* isolate) const {
+ return IsNumber(isolate) || IsBigInt(isolate);
+}
+
+DEF_GETTER(HeapObject, IsFiller, bool) {
+ InstanceType instance_type = map(isolate).instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
-bool HeapObject::IsJSWeakCollection() const {
- return IsJSWeakMap() || IsJSWeakSet();
+DEF_GETTER(HeapObject, IsJSWeakCollection, bool) {
+ return IsJSWeakMap(isolate) || IsJSWeakSet(isolate);
+}
+
+DEF_GETTER(HeapObject, IsJSCollection, bool) {
+ return IsJSMap(isolate) || IsJSSet(isolate);
}
-bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
+DEF_GETTER(HeapObject, IsPromiseReactionJobTask, bool) {
+ return IsPromiseFulfillReactionJobTask(isolate) ||
+ IsPromiseRejectReactionJobTask(isolate);
+}
-bool HeapObject::IsPromiseReactionJobTask() const {
- return IsPromiseFulfillReactionJobTask() || IsPromiseRejectReactionJobTask();
+DEF_GETTER(HeapObject, IsFrameArray, bool) {
+ return IsFixedArrayExact(isolate);
}
-bool HeapObject::IsFrameArray() const { return IsFixedArrayExact(); }
+DEF_GETTER(HeapObject, IsArrayList, bool) {
+ // Can't use ReadOnlyRoots(isolate) as this isolate could be produced by
+ // i::GetIsolateForPtrCompr(HeapObject).
+ ReadOnlyRoots roots = GetReadOnlyRoots(isolate);
+ return *this == roots.empty_fixed_array() ||
+ map(isolate) == roots.array_list_map();
+}
-bool HeapObject::IsArrayList() const {
- return map() == GetReadOnlyRoots().array_list_map() ||
- *this == GetReadOnlyRoots().empty_fixed_array();
+DEF_GETTER(HeapObject, IsRegExpMatchInfo, bool) {
+ return IsFixedArrayExact(isolate);
}
-bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
+bool Object::IsLayoutDescriptor() const {
+ if (IsSmi()) return true;
+ HeapObject this_heap_object = HeapObject::cast(*this);
+ Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ return this_heap_object.IsByteArray(isolate);
+}
-bool Object::IsLayoutDescriptor() const { return IsSmi() || IsByteArray(); }
+bool Object::IsLayoutDescriptor(Isolate* isolate) const {
+ return IsSmi() || IsByteArray(isolate);
+}
-bool HeapObject::IsDeoptimizationData() const {
+DEF_GETTER(HeapObject, IsDeoptimizationData, bool) {
// Must be a fixed array.
- if (!IsFixedArrayExact()) return false;
+ if (!IsFixedArrayExact(isolate)) return false;
// There's no sure way to detect the difference between a fixed array and
// a deoptimization data array. Since this is used for asserts we can
@@ -255,79 +321,98 @@ bool HeapObject::IsDeoptimizationData() const {
return length >= 0 && length % DeoptimizationData::kDeoptEntrySize == 0;
}
-bool HeapObject::IsHandlerTable() const {
- if (!IsFixedArrayExact()) return false;
+DEF_GETTER(HeapObject, IsHandlerTable, bool) {
+ if (!IsFixedArrayExact(isolate)) return false;
// There's actually no way to see the difference between a fixed array and
// a handler table array.
return true;
}
-bool HeapObject::IsTemplateList() const {
- if (!IsFixedArrayExact()) return false;
+DEF_GETTER(HeapObject, IsTemplateList, bool) {
+ if (!IsFixedArrayExact(isolate)) return false;
// There's actually no way to see the difference between a fixed array and
// a template list.
if (FixedArray::cast(*this).length() < 1) return false;
return true;
}
-bool HeapObject::IsDependentCode() const {
- if (!IsWeakFixedArray()) return false;
+DEF_GETTER(HeapObject, IsDependentCode, bool) {
+ if (!IsWeakFixedArray(isolate)) return false;
// There's actually no way to see the difference between a weak fixed array
// and a dependent codes array.
return true;
}
-bool HeapObject::IsAbstractCode() const {
- return IsBytecodeArray() || IsCode();
+DEF_GETTER(HeapObject, IsAbstractCode, bool) {
+ return IsBytecodeArray(isolate) || IsCode(isolate);
}
-bool HeapObject::IsStringWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsString();
+DEF_GETTER(HeapObject, IsStringWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsString(isolate);
}
-bool HeapObject::IsBooleanWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsBoolean();
+DEF_GETTER(HeapObject, IsBooleanWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsBoolean(isolate);
}
-bool HeapObject::IsScriptWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsScript();
+DEF_GETTER(HeapObject, IsScriptWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsScript(isolate);
}
-bool HeapObject::IsNumberWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsNumber();
+DEF_GETTER(HeapObject, IsNumberWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsNumber(isolate);
}
-bool HeapObject::IsBigIntWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsBigInt();
+DEF_GETTER(HeapObject, IsBigIntWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsBigInt(isolate);
}
-bool HeapObject::IsSymbolWrapper() const {
- return IsJSValue() && JSValue::cast(*this).value().IsSymbol();
+DEF_GETTER(HeapObject, IsSymbolWrapper, bool) {
+ return IsJSPrimitiveWrapper(isolate) &&
+ JSPrimitiveWrapper::cast(*this).value().IsSymbol(isolate);
}
-bool HeapObject::IsJSArrayBufferView() const {
- return IsJSDataView() || IsJSTypedArray();
+DEF_GETTER(HeapObject, IsJSArrayBufferView, bool) {
+ return IsJSDataView(isolate) || IsJSTypedArray(isolate);
}
-bool HeapObject::IsStringSet() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsJSCollectionIterator, bool) {
+ return IsJSMapIterator(isolate) || IsJSSetIterator(isolate);
+}
-bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsStringSet, bool) { return IsHashTable(isolate); }
-bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsObjectHashSet, bool) { return IsHashTable(isolate); }
-bool HeapObject::IsMapCache() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsCompilationCacheTable, bool) {
+ return IsHashTable(isolate);
+}
-bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsMapCache, bool) { return IsHashTable(isolate); }
-bool Object::IsHashTableBase() const { return IsHashTable(); }
+DEF_GETTER(HeapObject, IsObjectHashTable, bool) { return IsHashTable(isolate); }
-bool Object::IsSmallOrderedHashTable() const {
- return IsSmallOrderedHashSet() || IsSmallOrderedHashMap() ||
- IsSmallOrderedNameDictionary();
+DEF_GETTER(HeapObject, IsHashTableBase, bool) { return IsHashTable(isolate); }
+
+DEF_GETTER(HeapObject, IsSmallOrderedHashTable, bool) {
+ return IsSmallOrderedHashSet(isolate) || IsSmallOrderedHashMap(isolate) ||
+ IsSmallOrderedNameDictionary(isolate);
}
bool Object::IsPrimitive() const {
- return IsSmi() || HeapObject::cast(*this).map().IsPrimitiveMap();
+ if (IsSmi()) return true;
+ HeapObject this_heap_object = HeapObject::cast(*this);
+ Isolate* isolate = GetIsolateForPtrCompr(this_heap_object);
+ return this_heap_object.map(isolate).IsPrimitiveMap();
+}
+
+bool Object::IsPrimitive(Isolate* isolate) const {
+ return IsSmi() || HeapObject::cast(*this).map(isolate).IsPrimitiveMap();
}
// static
@@ -339,19 +424,21 @@ Maybe<bool> Object::IsArray(Handle<Object> object) {
return JSProxy::IsArray(Handle<JSProxy>::cast(object));
}
-bool HeapObject::IsUndetectable() const { return map().is_undetectable(); }
+DEF_GETTER(HeapObject, IsUndetectable, bool) {
+ return map(isolate).is_undetectable();
+}
-bool HeapObject::IsAccessCheckNeeded() const {
- if (IsJSGlobalProxy()) {
+DEF_GETTER(HeapObject, IsAccessCheckNeeded, bool) {
+ if (IsJSGlobalProxy(isolate)) {
const JSGlobalProxy proxy = JSGlobalProxy::cast(*this);
JSGlobalObject global = proxy.GetIsolate()->context().global_object();
return proxy.IsDetachedFrom(global);
}
- return map().is_access_check_needed();
+ return map(isolate).is_access_check_needed();
}
-bool HeapObject::IsStruct() const {
- switch (map().instance_type()) {
+DEF_GETTER(HeapObject, IsStruct, bool) {
+ switch (map(isolate).instance_type()) {
#define MAKE_STRUCT_CASE(TYPE, Name, name) \
case TYPE: \
return true;
@@ -374,10 +461,13 @@ bool HeapObject::IsStruct() const {
}
}
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() const { \
- return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
- } \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
+ } \
+ bool Object::Is##Name(Isolate* isolate) const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Name(isolate); \
+ } \
TYPE_CHECKER(Name)
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
@@ -441,25 +531,28 @@ bool Object::FilterKey(PropertyFilter filter) {
return false;
}
-Representation Object::OptimalRepresentation() {
+Representation Object::OptimalRepresentation(Isolate* isolate) const {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
return Representation::Smi();
- } else if (FLAG_track_double_fields && IsHeapNumber()) {
+ }
+ HeapObject heap_object = HeapObject::cast(*this);
+ if (FLAG_track_double_fields && heap_object.IsHeapNumber(isolate)) {
return Representation::Double();
- } else if (FLAG_track_computed_fields && IsUninitialized()) {
+ } else if (FLAG_track_computed_fields &&
+ heap_object.IsUninitialized(
+ heap_object.GetReadOnlyRoots(isolate))) {
return Representation::None();
} else if (FLAG_track_heap_object_fields) {
- DCHECK(IsHeapObject());
return Representation::HeapObject();
} else {
return Representation::Tagged();
}
}
-ElementsKind Object::OptimalElementsKind() {
+ElementsKind Object::OptimalElementsKind(Isolate* isolate) const {
if (IsSmi()) return PACKED_SMI_ELEMENTS;
- if (IsNumber()) return PACKED_DOUBLE_ELEMENTS;
+ if (IsNumber(isolate)) return PACKED_DOUBLE_ELEMENTS;
return PACKED_ELEMENTS;
}
@@ -618,18 +711,18 @@ HeapObject MapWord::ToForwardingAddress() {
#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(Isolate* isolate, int offset) {
- VerifyPointer(isolate, READ_FIELD(*this, offset));
+ VerifyPointer(isolate, TaggedField<Object>::load(isolate, *this, offset));
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
- MaybeObject::VerifyMaybeObjectPointer(isolate,
- READ_WEAK_FIELD(*this, offset));
+ MaybeObject::VerifyMaybeObjectPointer(
+ isolate, TaggedField<MaybeObject>::load(isolate, *this, offset));
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
void HeapObject::VerifySmiField(int offset) {
- CHECK(READ_FIELD(*this, offset).IsSmi());
+ CHECK(TaggedField<Object>::load(*this, offset).IsSmi());
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
@@ -639,7 +732,15 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
return ReadOnlyHeap::GetReadOnlyRoots(*this);
}
-Map HeapObject::map() const { return map_word().ToMap(); }
+ReadOnlyRoots HeapObject::GetReadOnlyRoots(Isolate* isolate) const {
+#ifdef V8_COMPRESS_POINTERS
+ return ReadOnlyRoots(isolate);
+#else
+ return GetReadOnlyRoots();
+#endif
+}
+
+DEF_GETTER(HeapObject, map, Map) { return map_word(isolate).ToMap(); }
void HeapObject::set_map(Map value) {
if (!value.is_null()) {
@@ -655,8 +756,8 @@ void HeapObject::set_map(Map value) {
}
}
-Map HeapObject::synchronized_map() const {
- return synchronized_map_word().ToMap();
+DEF_GETTER(HeapObject, synchronized_map, Map) {
+ return synchronized_map_word(isolate).ToMap();
}
void HeapObject::synchronized_set_map(Map value) {
@@ -693,24 +794,31 @@ void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
}
}
-MapWordSlot HeapObject::map_slot() const {
- return MapWordSlot(FIELD_ADDR(*this, kMapOffset));
+ObjectSlot HeapObject::map_slot() const {
+ return ObjectSlot(MapField::address(*this));
}
-MapWord HeapObject::map_word() const {
- return MapWord(map_slot().Relaxed_Load().ptr());
+DEF_GETTER(HeapObject, map_word, MapWord) {
+ return MapField::Relaxed_Load(isolate, *this);
}
void HeapObject::set_map_word(MapWord map_word) {
- map_slot().Relaxed_Store(Object(map_word.value_));
+ MapField::Relaxed_Store(*this, map_word);
}
-MapWord HeapObject::synchronized_map_word() const {
- return MapWord(map_slot().Acquire_Load().ptr());
+DEF_GETTER(HeapObject, synchronized_map_word, MapWord) {
+ return MapField::Acquire_Load(isolate, *this);
}
void HeapObject::synchronized_set_map_word(MapWord map_word) {
- map_slot().Release_Store(Object(map_word.value_));
+ MapField::Release_Store(*this, map_word);
+}
+
+bool HeapObject::synchronized_compare_and_swap_map_word(MapWord old_map_word,
+ MapWord new_map_word) {
+ Tagged_t result =
+ MapField::Release_CompareAndSwap(*this, old_map_word, new_map_word);
+ return result == static_cast<Tagged_t>(old_map_word.ptr());
}
int HeapObject::Size() const { return SizeFromMap(map()); }
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 8cc22fa0e5..9963cba472 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -25,13 +25,13 @@
#include "src/builtins/builtins.h"
#include "src/codegen/compiler.h"
#include "src/common/globals.h"
+#include "src/common/message-template.h"
#include "src/date/date.h"
#include "src/debug/debug.h"
#include "src/execution/arguments.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/execution/microtask-queue.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
@@ -104,7 +104,7 @@
#include "src/objects/template-objects-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/parsing/preparse-data.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/strings/string-builder-inl.h"
#include "src/strings/string-search.h"
#include "src/strings/string-stream.h"
@@ -116,6 +116,9 @@
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
+#include "torque-generated/class-definitions-tq-inl.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
+
namespace v8 {
namespace internal {
@@ -209,8 +212,8 @@ Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
DCHECK(object->FitsRepresentation(representation));
return object;
}
- return isolate->factory()->NewHeapNumber(
- MutableHeapNumber::cast(*object).value());
+ return isolate->factory()->NewHeapNumberFromBits(
+ MutableHeapNumber::cast(*object).value_as_bits());
}
MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
@@ -242,7 +245,7 @@ MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
isolate);
}
Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
- Handle<JSValue>::cast(result)->set_value(*object);
+ Handle<JSPrimitiveWrapper>::cast(result)->set_value(*object);
return result;
}
@@ -2387,9 +2390,9 @@ void DescriptorArray::GeneralizeAllFields() {
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
details = details.CopyWithConstness(PropertyConstness::kMutable);
- SetValue(i, FieldType::Any());
+ SetValue(i, MaybeObject::FromObject(FieldType::Any()));
}
- set(ToDetailsIndex(i), MaybeObject::FromObject(details.AsSmi()));
+ SetDetails(i, details);
}
}
@@ -3043,27 +3046,34 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
}
// Enforce the invariant.
+ return JSProxy::CheckDeleteTrap(isolate, name, target);
+}
+
+Maybe<bool> JSProxy::CheckDeleteTrap(Isolate* isolate, Handle<Name> name,
+ Handle<JSReceiver> target) {
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
PropertyDescriptor target_desc;
- Maybe<bool> owned =
+ Maybe<bool> target_found =
JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
- MAYBE_RETURN(owned, Nothing<bool>());
- if (owned.FromJust()) {
+ MAYBE_RETURN(target_found, Nothing<bool>());
+ // 11. If targetDesc is undefined, return true.
+ if (target_found.FromJust()) {
+ // 12. If targetDesc.[[Configurable]] is false, throw a TypeError exception.
if (!target_desc.configurable()) {
- isolate->Throw(*factory->NewTypeError(
+ isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kProxyDeletePropertyNonConfigurable, name));
return Nothing<bool>();
}
// 13. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
// 14. If extensibleTarget is false, throw a TypeError exception.
- Maybe<bool> extensible = JSReceiver::IsExtensible(target);
- MAYBE_RETURN(extensible, Nothing<bool>());
- if (!extensible.FromJust()) {
- isolate->Throw(*factory->NewTypeError(
+ if (!extensible_target.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kProxyDeletePropertyNonExtensible, name));
return Nothing<bool>();
}
}
-
return Just(true);
}
@@ -3269,7 +3279,11 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
new_len_desc, should_throw);
}
// 13. If oldLenDesc.[[Writable]] is false, return false.
- if (!old_len_desc.writable()) {
+ if (!old_len_desc.writable() ||
+ // Also handle the {configurable: true} case since we later use
+ // JSArray::SetLength instead of OrdinaryDefineOwnProperty to change
+ // the length, and it doesn't have access to the descriptor anymore.
+ new_len_desc->configurable()) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
isolate->factory()->length_string()));
@@ -4294,8 +4308,10 @@ bool DescriptorArray::IsEqualTo(DescriptorArray other) {
if (number_of_all_descriptors() != other.number_of_all_descriptors()) {
return false;
}
- for (int i = 0; i < number_of_all_descriptors(); ++i) {
- if (get(i) != other.get(i)) return false;
+ for (int i = 0; i < number_of_descriptors(); ++i) {
+ if (GetKey(i) != other.GetKey(i)) return false;
+ if (GetDetails(i).AsSmi() != other.GetDetails(i).AsSmi()) return false;
+ if (GetValue(i) != other.GetValue(i)) return false;
}
return true;
}
@@ -4500,7 +4516,8 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
return value;
}
-Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
+Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
+ Handle<Context> native_context,
Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
@@ -4512,13 +4529,12 @@ Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- Map maybe_elements_transition = current_map->ElementsTransitionMap();
+ Map maybe_elements_transition = current_map->ElementsTransitionMap(isolate);
if (!maybe_elements_transition.is_null()) {
- new_map = handle(maybe_elements_transition, native_context->GetIsolate());
+ new_map = handle(maybe_elements_transition, isolate);
} else {
- new_map =
- Map::CopyAsElementsKind(native_context->GetIsolate(), current_map,
- next_kind, INSERT_TRANSITION);
+ new_map = Map::CopyAsElementsKind(isolate, current_map, next_kind,
+ INSERT_TRANSITION);
}
DCHECK_EQ(next_kind, new_map->elements_kind());
native_context->set(Context::ArrayMapIndex(next_kind), *new_map);
@@ -4855,22 +4871,12 @@ std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::ToTracedValue(
const char* SharedFunctionInfo::kTraceScope =
"v8::internal::SharedFunctionInfo";
-uint64_t SharedFunctionInfo::TraceID() const {
- // TODO(bmeurer): We use a combination of Script ID and function literal
- // ID (within the Script) to uniquely identify SharedFunctionInfos. This
- // can add significant overhead, and we should probably find a better way
- // to uniquely identify SharedFunctionInfos over time.
+uint64_t SharedFunctionInfo::TraceID(FunctionLiteral* literal) const {
+ int literal_id =
+ literal ? literal->function_literal_id() : function_literal_id();
Script script = Script::cast(this->script());
- WeakFixedArray script_functions = script.shared_function_infos();
- for (int i = 0; i < script_functions.length(); ++i) {
- HeapObject script_function;
- if (script_functions.Get(i).GetHeapObjectIfWeak(&script_function) &&
- script_function.address() == address()) {
- return (static_cast<uint64_t>(script.id() + 1) << 32) |
- (static_cast<uint64_t>(i));
- }
- }
- UNREACHABLE();
+ return (static_cast<uint64_t>(script.id() + 1) << 32) |
+ (static_cast<uint64_t>(literal_id));
}
std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::TraceIDRef()
@@ -4946,21 +4952,17 @@ WasmCapiFunctionData SharedFunctionInfo::wasm_capi_function_data() const {
SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
Script script)
- : ScriptIterator(isolate, handle(script.shared_function_infos(), isolate)) {
-}
+ : ScriptIterator(handle(script.shared_function_infos(), isolate)) {}
SharedFunctionInfo::ScriptIterator::ScriptIterator(
- Isolate* isolate, Handle<WeakFixedArray> shared_function_infos)
- : isolate_(isolate),
- shared_function_infos_(shared_function_infos),
- index_(0) {}
+ Handle<WeakFixedArray> shared_function_infos)
+ : shared_function_infos_(shared_function_infos), index_(0) {}
SharedFunctionInfo SharedFunctionInfo::ScriptIterator::Next() {
while (index_ < shared_function_infos_->length()) {
MaybeObject raw = shared_function_infos_->Get(index_++);
HeapObject heap_object;
- if (!raw->GetHeapObject(&heap_object) ||
- heap_object.IsUndefined(isolate_)) {
+ if (!raw->GetHeapObject(&heap_object) || heap_object.IsUndefined()) {
continue;
}
return SharedFunctionInfo::cast(heap_object);
@@ -4968,13 +4970,15 @@ SharedFunctionInfo SharedFunctionInfo::ScriptIterator::Next() {
return SharedFunctionInfo();
}
-void SharedFunctionInfo::ScriptIterator::Reset(Script script) {
- shared_function_infos_ = handle(script.shared_function_infos(), isolate_);
+void SharedFunctionInfo::ScriptIterator::Reset(Isolate* isolate,
+ Script script) {
+ shared_function_infos_ = handle(script.shared_function_infos(), isolate);
index_ = 0;
}
SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
- : script_iterator_(isolate),
+ : isolate_(isolate),
+ script_iterator_(isolate),
noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()),
sfi_iterator_(isolate, script_iterator_.Next()) {}
@@ -4986,7 +4990,7 @@ SharedFunctionInfo SharedFunctionInfo::GlobalIterator::Next() {
if (!next.is_null()) return SharedFunctionInfo::cast(next);
Script next_script = script_iterator_.Next();
if (next_script.is_null()) return SharedFunctionInfo();
- sfi_iterator_.Reset(next_script);
+ sfi_iterator_.Reset(isolate_, next_script);
}
}
@@ -5148,7 +5152,6 @@ void SharedFunctionInfo::DiscardCompiled(
handle(shared_info->inferred_name(), isolate);
int start_position = shared_info->StartPosition();
int end_position = shared_info->EndPosition();
- int function_literal_id = shared_info->FunctionLiteralId(isolate);
shared_info->DiscardCompiledMetadata(isolate);
@@ -5163,8 +5166,7 @@ void SharedFunctionInfo::DiscardCompiled(
// validity checks, since we're performing the unusual task of decompiling.
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreparseData(
- inferred_name_val, start_position, end_position,
- function_literal_id);
+ inferred_name_val, start_position, end_position);
shared_info->set_function_data(*data);
}
}
@@ -5273,28 +5275,6 @@ bool SharedFunctionInfo::IsInlineable() {
int SharedFunctionInfo::SourceSize() { return EndPosition() - StartPosition(); }
-int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
- DisallowHeapAllocation no_gc;
-
- Object script_obj = script();
- if (!script_obj.IsScript()) return kFunctionLiteralIdInvalid;
-
- WeakFixedArray shared_info_list =
- Script::cast(script_obj).shared_function_infos();
- SharedFunctionInfo::ScriptIterator iterator(
- isolate,
- Handle<WeakFixedArray>(reinterpret_cast<Address*>(&shared_info_list)));
-
- for (SharedFunctionInfo shared = iterator.Next(); !shared.is_null();
- shared = iterator.Next()) {
- if (shared == *this) {
- return iterator.CurrentIndex();
- }
- }
-
- return kFunctionLiteralIdInvalid;
-}
-
// Output the source code without any allocation in the heap.
std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
const SharedFunctionInfo s = v.value;
@@ -5365,6 +5345,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
shared_info->set_language_mode(lit->language_mode());
shared_info->set_is_wrapped(lit->is_wrapped());
+ shared_info->set_function_literal_id(lit->function_literal_id());
// shared_info->set_kind(lit->kind());
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
@@ -5409,7 +5390,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithPreparseData(
lit->inferred_name(), lit->start_position(), lit->end_position(),
- lit->function_literal_id(), preparse_data);
+ preparse_data);
shared_info->set_uncompiled_data(*data);
needs_position_info = false;
}
@@ -5418,8 +5399,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
if (needs_position_info) {
Handle<UncompiledData> data =
isolate->factory()->NewUncompiledDataWithoutPreparseData(
- lit->inferred_name(), lit->start_position(), lit->end_position(),
- lit->function_literal_id());
+ lit->inferred_name(), lit->start_position(), lit->end_position());
shared_info->set_uncompiled_data(*data);
}
}
@@ -5510,21 +5490,6 @@ int SharedFunctionInfo::EndPosition() const {
return kNoSourcePosition;
}
-int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
- // Fast path for the common case when the SFI is uncompiled and so the
- // function literal id is already in the uncompiled data.
- if (HasUncompiledData() && uncompiled_data().has_function_literal_id()) {
- int id = uncompiled_data().function_literal_id();
- // Make sure the id is what we should have found with the slow path.
- DCHECK_EQ(id, FindIndexInScript(isolate));
- return id;
- }
-
- // Otherwise, search for the function in the SFI's script's function list,
- // and return its index in that list.
- return FindIndexInScript(isolate);
-}
-
void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
Object maybe_scope_info = name_or_scope_info();
if (maybe_scope_info.IsScopeInfo()) {
@@ -5561,16 +5526,6 @@ void SharedFunctionInfo::EnsureSourcePositionsAvailable(
}
}
-bool BytecodeArray::IsBytecodeEqual(const BytecodeArray other) const {
- if (length() != other.length()) return false;
-
- for (int i = 0; i < length(); ++i) {
- if (get(i) != other.get(i)) return false;
- }
-
- return true;
-}
-
// static
void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
DCHECK_GE(capacity, 0);
@@ -6128,42 +6083,14 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
namespace {
-constexpr JSRegExp::Flag kCharFlagValues[] = {
- JSRegExp::kGlobal, // g
- JSRegExp::kInvalid, // h
- JSRegExp::kIgnoreCase, // i
- JSRegExp::kInvalid, // j
- JSRegExp::kInvalid, // k
- JSRegExp::kInvalid, // l
- JSRegExp::kMultiline, // m
- JSRegExp::kInvalid, // n
- JSRegExp::kInvalid, // o
- JSRegExp::kInvalid, // p
- JSRegExp::kInvalid, // q
- JSRegExp::kInvalid, // r
- JSRegExp::kDotAll, // s
- JSRegExp::kInvalid, // t
- JSRegExp::kUnicode, // u
- JSRegExp::kInvalid, // v
- JSRegExp::kInvalid, // w
- JSRegExp::kInvalid, // x
- JSRegExp::kSticky, // y
-};
-
-constexpr JSRegExp::Flag CharToFlag(uc16 flag_char) {
- return (flag_char < 'g' || flag_char > 'y')
- ? JSRegExp::kInvalid
- : kCharFlagValues[flag_char - 'g'];
-}
-
JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
bool* success) {
- STATIC_ASSERT(CharToFlag('g') == JSRegExp::kGlobal);
- STATIC_ASSERT(CharToFlag('i') == JSRegExp::kIgnoreCase);
- STATIC_ASSERT(CharToFlag('m') == JSRegExp::kMultiline);
- STATIC_ASSERT(CharToFlag('s') == JSRegExp::kDotAll);
- STATIC_ASSERT(CharToFlag('u') == JSRegExp::kUnicode);
- STATIC_ASSERT(CharToFlag('y') == JSRegExp::kSticky);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('g') == JSRegExp::kGlobal);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('i') == JSRegExp::kIgnoreCase);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('m') == JSRegExp::kMultiline);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('s') == JSRegExp::kDotAll);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('u') == JSRegExp::kUnicode);
+ STATIC_ASSERT(JSRegExp::FlagFromChar('y') == JSRegExp::kSticky);
int length = flags->length();
if (length == 0) {
@@ -6171,14 +6098,14 @@ JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
return JSRegExp::kNone;
}
// A longer flags string cannot be valid.
- if (length > JSRegExp::FlagCount()) return JSRegExp::Flags(0);
+ if (length > JSRegExp::kFlagCount) return JSRegExp::Flags(0);
// Initialize {value} to {kInvalid} to allow 2-in-1 duplicate/invalid check.
JSRegExp::Flags value = JSRegExp::kInvalid;
if (flags->IsSeqOneByteString()) {
DisallowHeapAllocation no_gc;
SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
for (int i = 0; i < length; i++) {
- JSRegExp::Flag flag = CharToFlag(seq_flags.Get(i));
+ JSRegExp::Flag flag = JSRegExp::FlagFromChar(seq_flags.Get(i));
// Duplicate or invalid flag.
if (value & flag) return JSRegExp::Flags(0);
value |= flag;
@@ -6188,7 +6115,7 @@ JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
DisallowHeapAllocation no_gc;
String::FlatContent flags_content = flags->GetFlatContent(no_gc);
for (int i = 0; i < length; i++) {
- JSRegExp::Flag flag = CharToFlag(flags_content.Get(i));
+ JSRegExp::Flag flag = JSRegExp::FlagFromChar(flags_content.Get(i));
// Duplicate or invalid flag.
if (value & flag) return JSRegExp::Flags(0);
value |= flag;
@@ -6224,15 +6151,20 @@ template <typename Char>
int CountRequiredEscapes(Handle<String> source) {
DisallowHeapAllocation no_gc;
int escapes = 0;
+ bool in_char_class = false;
Vector<const Char> src = source->GetCharVector<Char>(no_gc);
for (int i = 0; i < src.length(); i++) {
const Char c = src[i];
if (c == '\\') {
// Escape. Skip next character;
i++;
- } else if (c == '/') {
+ } else if (c == '/' && !in_char_class) {
// Not escaped forward-slash needs escape.
escapes++;
+ } else if (c == '[') {
+ in_char_class = true;
+ } else if (c == ']') {
+ in_char_class = false;
} else if (c == '\n') {
escapes++;
} else if (c == '\r') {
@@ -6245,6 +6177,7 @@ int CountRequiredEscapes(Handle<String> source) {
DCHECK(!unibrow::IsLineTerminator(static_cast<unibrow::uchar>(c)));
}
}
+ DCHECK(!in_char_class);
return escapes;
}
@@ -6262,16 +6195,19 @@ Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
Vector<Char> dst(result->GetChars(no_gc), result->length());
int s = 0;
int d = 0;
- // TODO(v8:1982): Fully implement
- // https://tc39.github.io/ecma262/#sec-escaperegexppattern
+ bool in_char_class = false;
while (s < src.length()) {
if (src[s] == '\\') {
// Escape. Copy this and next character.
dst[d++] = src[s++];
if (s == src.length()) break;
- } else if (src[s] == '/') {
+ } else if (src[s] == '/' && !in_char_class) {
// Not escaped forward-slash needs escape.
dst[d++] = '\\';
+ } else if (src[s] == '[') {
+ in_char_class = true;
+ } else if (src[s] == ']') {
+ in_char_class = false;
} else if (src[s] == '\n') {
WriteStringToCharVector(dst, &d, "\\n");
s++;
@@ -6292,6 +6228,7 @@ Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
dst[d++] = src[s++];
}
DCHECK_EQ(result->length(), d);
+ DCHECK(!in_char_class);
return result;
}
@@ -6348,13 +6285,13 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
source = String::Flatten(isolate, source);
+ RETURN_ON_EXCEPTION(isolate, RegExp::Compile(isolate, regexp, source, flags),
+ JSRegExp);
+
Handle<String> escaped_source;
ASSIGN_RETURN_ON_EXCEPTION(isolate, escaped_source,
EscapeRegExpSource(isolate, source), JSRegExp);
- RETURN_ON_EXCEPTION(
- isolate, RegExpImpl::Compile(isolate, regexp, source, flags), JSRegExp);
-
regexp->set_source(*escaped_source);
regexp->set_flags(Smi::FromInt(flags));
@@ -6701,8 +6638,8 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
} else { // !FLAG_thin_strings
if (string->IsConsString()) {
Handle<ConsString> cons = Handle<ConsString>::cast(string);
- cons->set_first(isolate, *result);
- cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ cons->set_first(*result);
+ cons->set_second(ReadOnlyRoots(isolate).empty_string());
} else if (string->IsSlicedString()) {
STATIC_ASSERT(static_cast<int>(ConsString::kSize) ==
static_cast<int>(SlicedString::kSize));
@@ -6713,8 +6650,8 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
: isolate->factory()->cons_string_map();
string->set_map(*map);
Handle<ConsString> cons = Handle<ConsString>::cast(string);
- cons->set_first(isolate, *result);
- cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ cons->set_first(*result);
+ cons->set_second(ReadOnlyRoots(isolate).empty_string());
}
}
return result;
@@ -7925,9 +7862,13 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
// static
void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
+ const char* cell_name,
Handle<PropertyCell> cell,
Handle<Object> new_value) {
if (cell->value() != *new_value) {
+ if (FLAG_trace_protector_invalidation) {
+ isolate->TraceProtectorInvalidation(cell_name);
+ }
cell->set_value(*new_value);
cell->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
@@ -8127,7 +8068,9 @@ HashTable<NameDictionary, NameDictionaryShape>::Shrink(Isolate* isolate,
int additionalCapacity);
void JSFinalizationGroup::Cleanup(
- Handle<JSFinalizationGroup> finalization_group, Isolate* isolate) {
+ Isolate* isolate, Handle<JSFinalizationGroup> finalization_group,
+ Handle<Object> cleanup) {
+ DCHECK(cleanup->IsCallable());
// It's possible that the cleared_cells list is empty, since
// FinalizationGroup.unregister() removed all its elements before this task
// ran. In that case, don't call the cleanup function.
@@ -8145,7 +8088,6 @@ void JSFinalizationGroup::Cleanup(
Handle<AllocationSite>::null()));
iterator->set_finalization_group(*finalization_group);
}
- Handle<Object> cleanup(finalization_group->cleanup(), isolate);
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
v8::Local<v8::Value> result;
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index 857f3ed0f6..d706b2dfb7 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -15,10 +15,11 @@
#include "src/base/build_config.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
+#include "src/base/memory.h"
#include "src/codegen/constants-arch.h"
#include "src/common/assert-scope.h"
#include "src/common/checks.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/flags/flags.h"
#include "src/objects/elements-kind.h"
#include "src/objects/field-index.h"
@@ -49,22 +50,21 @@
// - JSCollection
// - JSSet
// - JSMap
-// - JSStringIterator
-// - JSSetIterator
-// - JSMapIterator
-// - JSWeakCollection
-// - JSWeakMap
-// - JSWeakSet
-// - JSRegExp
+// - JSDate
// - JSFunction
// - JSGeneratorObject
// - JSGlobalObject
// - JSGlobalProxy
-// - JSValue
-// - JSDate
+// - JSMapIterator
// - JSMessageObject
// - JSModuleNamespace
-// - JSV8BreakIterator // If V8_INTL_SUPPORT enabled.
+// - JSPrimitiveWrapper
+// - JSRegExp
+// - JSSetIterator
+// - JSStringIterator
+// - JSWeakCollection
+// - JSWeakMap
+// - JSWeakSet
// - JSCollator // If V8_INTL_SUPPORT enabled.
// - JSDateTimeFormat // If V8_INTL_SUPPORT enabled.
// - JSListFormat // If V8_INTL_SUPPORT enabled.
@@ -72,8 +72,9 @@
// - JSNumberFormat // If V8_INTL_SUPPORT enabled.
// - JSPluralRules // If V8_INTL_SUPPORT enabled.
// - JSRelativeTimeFormat // If V8_INTL_SUPPORT enabled.
-// - JSSegmentIterator // If V8_INTL_SUPPORT enabled.
// - JSSegmenter // If V8_INTL_SUPPORT enabled.
+// - JSSegmentIterator // If V8_INTL_SUPPORT enabled.
+// - JSV8BreakIterator // If V8_INTL_SUPPORT enabled.
// - WasmExceptionObject
// - WasmGlobalObject
// - WasmInstanceObject
@@ -99,7 +100,7 @@
// - TemplateList
// - TransitionArray
// - ScopeInfo
-// - ModuleInfo
+// - SourceTextModuleInfo
// - ScriptContextTable
// - ClosureFeedbackCellArray
// - FixedDoubleArray
@@ -170,7 +171,9 @@
// - PromiseRejectReactionJobTask
// - PromiseResolveThenableJobTask
// - Module
-// - ModuleInfoEntry
+// - SourceTextModule
+// - SyntheticModule
+// - SourceTextModuleInfoEntry
// - FeedbackCell
// - FeedbackVector
// - PreparseData
@@ -265,9 +268,13 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
constexpr Object() : TaggedImpl(kNullAddress) {}
explicit constexpr Object(Address ptr) : TaggedImpl(ptr) {}
-#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
+#define IS_TYPE_FUNCTION_DECL(Type) \
+ V8_INLINE bool Is##Type() const; \
+ V8_INLINE bool Is##Type(Isolate* isolate) const;
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+ IS_TYPE_FUNCTION_DECL(HashTableBase)
+ IS_TYPE_FUNCTION_DECL(SmallOrderedHashTable)
#undef IS_TYPE_FUNCTION_DECL
// Oddball checks are faster when they are raw pointer comparisons, so the
@@ -277,18 +284,17 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_INLINE bool Is##Type(ReadOnlyRoots roots) const; \
V8_INLINE bool Is##Type() const;
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
+ IS_TYPE_FUNCTION_DECL(NullOrUndefined, /* unused */)
#undef IS_TYPE_FUNCTION_DECL
- V8_INLINE bool IsNullOrUndefined(Isolate* isolate) const;
- V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
- V8_INLINE bool IsNullOrUndefined() const;
-
V8_INLINE bool IsZero() const;
V8_INLINE bool IsNoSharedNameSentinel() const;
enum class Conversion { kToNumber, kToNumeric };
-#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
+#define DECL_STRUCT_PREDICATE(NAME, Name, name) \
+ V8_INLINE bool Is##Name() const; \
+ V8_INLINE bool Is##Name(Isolate* isolate) const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
#undef DECL_STRUCT_PREDICATE
@@ -296,9 +302,6 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_INLINE
V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<Object> object);
- V8_INLINE bool IsHashTableBase() const;
- V8_INLINE bool IsSmallOrderedHashTable() const;
-
// Extract the number.
inline double Number() const;
V8_INLINE bool IsNaN() const;
@@ -306,9 +309,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
inline bool ToUint32(uint32_t* value) const;
- inline Representation OptimalRepresentation();
+ inline Representation OptimalRepresentation(Isolate* isolate) const;
- inline ElementsKind OptimalElementsKind();
+ inline ElementsKind OptimalElementsKind(Isolate* isolate) const;
inline bool FitsRepresentation(Representation representation);
@@ -624,9 +627,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#endif
if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
// Bug(v8:8875) Double fields may be unaligned.
- return ReadUnalignedValue<T>(field_address(offset));
+ return base::ReadUnalignedValue<T>(field_address(offset));
} else {
- return Memory<T>(field_address(offset));
+ return base::Memory<T>(field_address(offset));
}
}
@@ -641,9 +644,9 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
#endif
if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
// Bug(v8:8875) Double fields may be unaligned.
- WriteUnalignedValue<T>(field_address(offset), value);
+ base::WriteUnalignedValue<T>(field_address(offset), value);
} else {
- Memory<T>(field_address(offset)) = value;
+ base::Memory<T>(field_address(offset)) = value;
}
}
@@ -743,13 +746,13 @@ class MapWord {
// View this map word as a forwarding address.
inline HeapObject ToForwardingAddress();
- static inline MapWord FromRawValue(uintptr_t value) { return MapWord(value); }
-
- inline uintptr_t ToRawValue() { return value_; }
+ inline Address ptr() { return value_; }
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
+ template <typename TFieldType, int kFieldOffset>
+ friend class TaggedField;
explicit MapWord(Address value) : value_(value) {}
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index e0d77b9043..bcca03ddca 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -22,7 +22,7 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(Oddball)
void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
// Bug(v8:8875): HeapNumber's double may be unaligned.
- WriteUnalignedValue<uint64_t>(field_address(kToNumberRawOffset), bits);
+ base::WriteUnalignedValue<uint64_t>(field_address(kToNumberRawOffset), bits);
}
byte Oddball::kind() const {
@@ -38,8 +38,8 @@ Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
return Handle<Object>(input->to_number(), isolate);
}
-bool HeapObject::IsBoolean() const {
- return IsOddball() &&
+DEF_GETTER(HeapObject, IsBoolean, bool) {
+ return IsOddball(isolate) &&
((Oddball::cast(*this).kind() & Oddball::kNotBooleanMask) == 0);
}
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 0eaa7567e2..a2270b0a4a 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -54,7 +54,7 @@ template <class Derived>
Object SmallOrderedHashTable<Derived>::KeyAt(int entry) const {
DCHECK_LT(entry, Capacity());
Offset entry_offset = GetDataEntryOffset(entry, Derived::kKeyIndex);
- return READ_FIELD(*this, entry_offset);
+ return TaggedField<Object>::load(*this, entry_offset);
}
template <class Derived>
@@ -63,7 +63,7 @@ Object SmallOrderedHashTable<Derived>::GetDataEntry(int entry,
DCHECK_LT(entry, Capacity());
DCHECK_LE(static_cast<unsigned>(relative_index), Derived::kEntrySize);
Offset entry_offset = GetDataEntryOffset(entry, relative_index);
- return READ_FIELD(*this, entry_offset);
+ return TaggedField<Object>::load(*this, entry_offset);
}
OBJECT_CONSTRUCTORS_IMPL(SmallOrderedHashSet,
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index c4e64d2d6a..463d0e0384 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -508,6 +508,8 @@ void SmallOrderedHashTable<Derived>::Initialize(Isolate* isolate,
SetNumberOfBuckets(num_buckets);
SetNumberOfElements(0);
SetNumberOfDeletedElements(0);
+ memset(reinterpret_cast<void*>(field_address(PaddingOffset())), 0,
+ PaddingSize());
Address hashtable_start = GetHashTableStartAddress(capacity);
memset(reinterpret_cast<byte*>(hashtable_start), kNotFound,
@@ -930,7 +932,6 @@ OrderedHashTableHandler<SmallOrderedNameDictionary,
OrderedNameDictionary>::Allocate(Isolate* isolate,
int capacity);
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
Handle<HeapObject> table, Handle<Object> key) {
@@ -943,9 +944,7 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
// down to a smaller hash table.
return LargeTable::Delete(Handle<LargeTable>::cast(table), key);
}
-#endif
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) {
@@ -956,7 +955,6 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
DCHECK(LargeTable::Is(table));
return LargeTable::HasKey(isolate, LargeTable::cast(*table), *key);
}
-#endif
template bool
OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::HasKey(
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index a83109ed90..66dc36e81f 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -527,8 +527,16 @@ class SmallOrderedHashTable : public HeapObject {
return NumberOfDeletedElementsOffset() + kOneByteSize;
}
+ static constexpr Offset PaddingOffset() {
+ return NumberOfBucketsOffset() + kOneByteSize;
+ }
+
+ static constexpr size_t PaddingSize() {
+ return RoundUp<kTaggedSize>(PaddingOffset()) - PaddingOffset();
+ }
+
static constexpr Offset DataTableStartOffset() {
- return RoundUp<kTaggedSize>(NumberOfBucketsOffset());
+ return PaddingOffset() + PaddingSize();
}
static constexpr int DataTableSizeFor(int capacity) {
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index f23e63e50d..e9365c03a4 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -21,10 +21,19 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(PropertyArray, HeapObject)
CAST_ACCESSOR(PropertyArray)
+SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
+SYNCHRONIZED_SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
+
Object PropertyArray::get(int index) const {
+ Isolate* isolate = GetIsolateForPtrCompr(*this);
+ return get(isolate, index);
+}
+
+Object PropertyArray::get(Isolate* isolate, int index) const {
DCHECK_LT(static_cast<unsigned>(index),
static_cast<unsigned>(this->length()));
- return RELAXED_READ_FIELD(*this, OffsetOfElementAt(index));
+ return TaggedField<Object>::Relaxed_Load(isolate, *this,
+ OffsetOfElementAt(index));
}
void PropertyArray::set(int index, Object value) {
@@ -47,34 +56,24 @@ void PropertyArray::set(int index, Object value, WriteBarrierMode mode) {
ObjectSlot PropertyArray::data_start() { return RawField(kHeaderSize); }
int PropertyArray::length() const {
- Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return LengthField::decode(value);
+ return LengthField::decode(length_and_hash());
}
void PropertyArray::initialize_length(int len) {
- DCHECK_LT(static_cast<unsigned>(len),
- static_cast<unsigned>(LengthField::kMax));
- WRITE_FIELD(*this, kLengthAndHashOffset, Smi::FromInt(len));
+ DCHECK(LengthField::is_valid(len));
+ set_length_and_hash(len);
}
int PropertyArray::synchronized_length() const {
- Object value_obj = ACQUIRE_READ_FIELD(*this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return LengthField::decode(value);
+ return LengthField::decode(synchronized_length_and_hash());
}
-int PropertyArray::Hash() const {
- Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
- return HashField::decode(value);
-}
+int PropertyArray::Hash() const { return HashField::decode(length_and_hash()); }
void PropertyArray::SetHash(int hash) {
- Object value_obj = READ_FIELD(*this, kLengthAndHashOffset);
- int value = Smi::ToInt(value_obj);
+ int value = length_and_hash();
value = HashField::update(value, hash);
- WRITE_FIELD(*this, kLengthAndHashOffset, Smi::FromInt(value));
+ set_length_and_hash(value);
}
void PropertyArray::CopyElements(Isolate* isolate, int dst_index,
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 0c8b40ece2..5c71330280 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -30,6 +30,7 @@ class PropertyArray : public HeapObject {
inline int Hash() const;
inline Object get(int index) const;
+ inline Object get(Isolate* isolate, int index) const;
inline void set(int index, Object value);
// Setter with explicit barrier mode.
@@ -67,6 +68,11 @@ class PropertyArray : public HeapObject {
static const int kNoHashSentinel = 0;
+ private:
+ DECL_INT_ACCESSORS(length_and_hash)
+
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length_and_hash)
+
OBJECT_CONSTRUCTORS(PropertyArray, HeapObject);
};
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
index 75a5132728..b336986f62 100644
--- a/deps/v8/src/objects/property-cell.h
+++ b/deps/v8/src/objects/property-cell.h
@@ -47,7 +47,7 @@ class PropertyCell : public HeapObject {
static Handle<PropertyCell> InvalidateEntry(
Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry);
- static void SetValueWithInvalidation(Isolate* isolate,
+ static void SetValueWithInvalidation(Isolate* isolate, const char* cell_name,
Handle<PropertyCell> cell,
Handle<Object> new_value);
diff --git a/deps/v8/src/objects/property.cc b/deps/v8/src/objects/property.cc
index c226c28a76..fba6fe3405 100644
--- a/deps/v8/src/objects/property.cc
+++ b/deps/v8/src/objects/property.cc
@@ -75,9 +75,10 @@ Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
PropertyAttributes attributes) {
+ Isolate* isolate = GetIsolateForPtrCompr(*key);
return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
kDescriptor, PropertyConstness::kConst,
- value->OptimalRepresentation(), 0);
+ value->OptimalRepresentation(isolate), 0);
}
Descriptor Descriptor::DataConstant(Isolate* isolate, Handle<Name> key,
diff --git a/deps/v8/src/objects/prototype-inl.h b/deps/v8/src/objects/prototype-inl.h
index 5f7c3e23c5..2836186b12 100644
--- a/deps/v8/src/objects/prototype-inl.h
+++ b/deps/v8/src/objects/prototype-inl.h
@@ -48,7 +48,7 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, Map receiver_map,
if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
DCHECK(object_.IsJSReceiver());
Map map = JSReceiver::cast(object_).map();
- is_at_end_ = !map.has_hidden_prototype();
+ is_at_end_ = !map.IsJSGlobalProxyMap();
}
}
@@ -63,7 +63,7 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
DCHECK(handle_->IsJSReceiver());
Map map = JSReceiver::cast(*handle_).map();
- is_at_end_ = !map.has_hidden_prototype();
+ is_at_end_ = !map.IsJSGlobalProxyMap();
}
}
@@ -96,8 +96,9 @@ void PrototypeIterator::AdvanceIgnoringProxies() {
Map map = HeapObject::cast(object).map();
HeapObject prototype = map.prototype();
- is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN ? !map.has_hidden_prototype()
- : prototype.IsNull(isolate_);
+ is_at_end_ =
+ prototype.IsNull(isolate_) ||
+ (where_to_end_ == END_AT_NON_HIDDEN && !map.IsJSGlobalProxyMap());
if (handle_.is_null()) {
object_ = prototype;
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index af45e86af3..eca8bc1ecd 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -45,8 +45,9 @@ bool ScopeInfo::Equals(ScopeInfo other) const {
if (!ScopeInfo::cast(entry).Equals(ScopeInfo::cast(other_entry))) {
return false;
}
- } else if (entry.IsModuleInfo()) {
- if (!ModuleInfo::cast(entry).Equals(ModuleInfo::cast(other_entry))) {
+ } else if (entry.IsSourceTextModuleInfo()) {
+ if (!SourceTextModuleInfo::cast(entry).Equals(
+ SourceTextModuleInfo::cast(other_entry))) {
return false;
}
} else {
@@ -217,6 +218,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
uint32_t info =
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
+ RequiresBrandCheckField::encode(
+ var->get_requires_brand_check_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info.set(context_local_base + local_index, *var->name(), mode);
@@ -233,6 +236,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ RequiresBrandCheckField::encode(
+ var->get_requires_brand_check_flag()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info.set(module_var_entry + kModuleVariablePropertiesOffset,
Smi::FromInt(properties));
@@ -271,6 +276,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
+ RequiresBrandCheckField::encode(
+ var->get_requires_brand_check_flag()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info.set(context_local_base + local_index, *var->name(), mode);
scope_info.set(context_local_info_base + local_index,
@@ -327,8 +334,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
// Module-specific information (only for module scopes).
if (scope->is_module_scope()) {
- Handle<ModuleInfo> module_info =
- ModuleInfo::New(isolate, zone, scope->AsModuleScope()->module());
+ Handle<SourceTextModuleInfo> module_info = SourceTextModuleInfo::New(
+ isolate, zone, scope->AsModuleScope()->module());
DCHECK_EQ(index, scope_info_handle->ModuleInfoIndex());
scope_info_handle->set(index++, *module_info);
DCHECK_EQ(index, scope_info_handle->ModuleVariableCountIndex());
@@ -444,6 +451,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
VariableModeField::encode(VariableMode::kConst) |
InitFlagField::encode(kCreatedInitialized) |
MaybeAssignedFlagField::encode(kNotAssigned) |
+ RequiresBrandCheckField::encode(kNoBrandCheck) |
ParameterNumberField::encode(ParameterNumberField::kMax);
scope_info->set(index++, Smi::FromInt(value));
}
@@ -649,9 +657,9 @@ ScopeInfo ScopeInfo::OuterScopeInfo() const {
return ScopeInfo::cast(get(OuterScopeInfoIndex()));
}
-ModuleInfo ScopeInfo::ModuleDescriptorInfo() const {
+SourceTextModuleInfo ScopeInfo::ModuleDescriptorInfo() const {
DCHECK(scope_type() == MODULE_SCOPE);
- return ModuleInfo::cast(get(ModuleInfoIndex()));
+ return SourceTextModuleInfo::cast(get(ModuleInfoIndex()));
}
String ScopeInfo::ContextLocalName(int var) const {
@@ -700,6 +708,14 @@ MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) const {
return MaybeAssignedFlagField::decode(value);
}
+RequiresBrandCheckFlag ScopeInfo::RequiresBrandCheck(int var) const {
+ DCHECK_LE(0, var);
+ DCHECK_LT(var, ContextLocalCount());
+ int info_index = ContextLocalInfosIndex() + var;
+ int value = Smi::ToInt(get(info_index));
+ return RequiresBrandCheckField::decode(value);
+}
+
// static
bool ScopeInfo::VariableIsSynthetic(String name) {
// There's currently no flag stored on the ScopeInfo to indicate that a
@@ -739,7 +755,8 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
VariableMode* mode,
InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag) {
+ MaybeAssignedFlag* maybe_assigned_flag,
+ RequiresBrandCheckFlag* requires_brand_check) {
DisallowHeapAllocation no_gc;
DCHECK(name.IsInternalizedString());
DCHECK_NOT_NULL(mode);
@@ -756,6 +773,7 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
*mode = scope_info.ContextLocalMode(var);
*init_flag = scope_info.ContextLocalInitFlag(var);
*maybe_assigned_flag = scope_info.ContextLocalMaybeAssignedFlag(var);
+ *requires_brand_check = scope_info.RequiresBrandCheck(var);
int result = Context::MIN_CONTEXT_SLOTS + var;
DCHECK_LT(result, scope_info.ContextLength());
@@ -873,15 +891,13 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
- Handle<Object> export_name,
- Handle<Object> local_name,
- Handle<Object> import_name,
- int module_request, int cell_index,
- int beg_pos, int end_pos) {
- Handle<ModuleInfoEntry> result =
- Handle<ModuleInfoEntry>::cast(isolate->factory()->NewStruct(
- MODULE_INFO_ENTRY_TYPE, AllocationType::kOld));
+Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
+ Isolate* isolate, Handle<Object> export_name, Handle<Object> local_name,
+ Handle<Object> import_name, int module_request, int cell_index, int beg_pos,
+ int end_pos) {
+ Handle<SourceTextModuleInfoEntry> result =
+ Handle<SourceTextModuleInfoEntry>::cast(isolate->factory()->NewStruct(
+ SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE, AllocationType::kOld));
result->set_export_name(*export_name);
result->set_local_name(*local_name);
result->set_import_name(*import_name);
@@ -892,8 +908,8 @@ Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
return result;
}
-Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
- ModuleDescriptor* descr) {
+Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
+ Isolate* isolate, Zone* zone, SourceTextModuleDescriptor* descr) {
// Serialize module requests.
int size = static_cast<int>(descr->module_requests().size());
Handle<FixedArray> module_requests = isolate->factory()->NewFixedArray(size);
@@ -911,7 +927,8 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (auto entry : descr->special_exports()) {
- Handle<ModuleInfoEntry> serialized_entry = entry->Serialize(isolate);
+ Handle<SourceTextModuleInfoEntry> serialized_entry =
+ entry->Serialize(isolate);
special_exports->set(i++, *serialized_entry);
}
}
@@ -922,7 +939,8 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (auto entry : descr->namespace_imports()) {
- Handle<ModuleInfoEntry> serialized_entry = entry->Serialize(isolate);
+ Handle<SourceTextModuleInfoEntry> serialized_entry =
+ entry->Serialize(isolate);
namespace_imports->set(i++, *serialized_entry);
}
}
@@ -937,13 +955,14 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (const auto& elem : descr->regular_imports()) {
- Handle<ModuleInfoEntry> serialized_entry =
+ Handle<SourceTextModuleInfoEntry> serialized_entry =
elem.second->Serialize(isolate);
regular_imports->set(i++, *serialized_entry);
}
}
- Handle<ModuleInfo> result = isolate->factory()->NewModuleInfo();
+ Handle<SourceTextModuleInfo> result =
+ isolate->factory()->NewSourceTextModuleInfo();
result->set(kModuleRequestsIndex, *module_requests);
result->set(kSpecialExportsIndex, *special_exports);
result->set(kRegularExportsIndex, *regular_exports);
@@ -953,22 +972,22 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
return result;
}
-int ModuleInfo::RegularExportCount() const {
+int SourceTextModuleInfo::RegularExportCount() const {
DCHECK_EQ(regular_exports().length() % kRegularExportLength, 0);
return regular_exports().length() / kRegularExportLength;
}
-String ModuleInfo::RegularExportLocalName(int i) const {
+String SourceTextModuleInfo::RegularExportLocalName(int i) const {
return String::cast(regular_exports().get(i * kRegularExportLength +
kRegularExportLocalNameOffset));
}
-int ModuleInfo::RegularExportCellIndex(int i) const {
+int SourceTextModuleInfo::RegularExportCellIndex(int i) const {
return Smi::ToInt(regular_exports().get(i * kRegularExportLength +
kRegularExportCellIndexOffset));
}
-FixedArray ModuleInfo::RegularExportExportNames(int i) const {
+FixedArray SourceTextModuleInfo::RegularExportExportNames(int i) const {
return FixedArray::cast(regular_exports().get(
i * kRegularExportLength + kRegularExportExportNamesOffset));
}
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 8d43357631..0b8eb61b00 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -22,7 +22,7 @@ class Handle;
class Isolate;
template <typename T>
class MaybeHandle;
-class ModuleInfo;
+class SourceTextModuleInfo;
class Scope;
class Zone;
@@ -113,7 +113,7 @@ class ScopeInfo : public FixedArray {
int EndPosition() const;
void SetPositionInfo(int start, int end);
- ModuleInfo ModuleDescriptorInfo() const;
+ SourceTextModuleInfo ModuleDescriptorInfo() const;
// Return the name of the given context local.
String ContextLocalName(int var) const;
@@ -130,6 +130,9 @@ class ScopeInfo : public FixedArray {
// Return the initialization flag of the given context local.
MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var) const;
+ // Return whether access to the variable requries a brand check.
+ RequiresBrandCheckFlag RequiresBrandCheck(int var) const;
+
// Return true if this local was introduced by the compiler, and should not be
// exposed to the user in a debugger.
static bool VariableIsSynthetic(String name);
@@ -141,7 +144,8 @@ class ScopeInfo : public FixedArray {
// mode for that variable.
static int ContextSlotIndex(ScopeInfo scope_info, String name,
VariableMode* mode, InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
+ MaybeAssignedFlag* maybe_assigned_flag,
+ RequiresBrandCheckFlag* requires_brand_check);
// Lookup metadata of a MODULE-allocated variable. Return 0 if there is no
// module variable with the given name (the index value of a MODULE variable
@@ -284,10 +288,10 @@ class ScopeInfo : public FixedArray {
// the scope belongs to a function or script.
// 7. OuterScopeInfoIndex:
// The outer scope's ScopeInfo or the hole if there's none.
- // 8. ModuleInfo, ModuleVariableCount, and ModuleVariables:
- // For a module scope, this part contains the ModuleInfo, the number of
- // MODULE-allocated variables, and the metadata of those variables. For
- // non-module scopes it is empty.
+ // 8. SourceTextModuleInfo, ModuleVariableCount, and ModuleVariables:
+ // For a module scope, this part contains the SourceTextModuleInfo, the
+ // number of MODULE-allocated variables, and the metadata of those
+ // variables. For non-module scopes it is empty.
int ContextLocalNamesIndex() const;
int ContextLocalInfosIndex() const;
int ReceiverInfoIndex() const;
@@ -322,8 +326,11 @@ class ScopeInfo : public FixedArray {
class VariableModeField : public BitField<VariableMode, 0, 3> {};
class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
+ class RequiresBrandCheckField
+ : public BitField<RequiresBrandCheckFlag, MaybeAssignedFlagField::kNext,
+ 1> {};
class ParameterNumberField
- : public BitField<uint32_t, MaybeAssignedFlagField::kNext, 16> {};
+ : public BitField<uint32_t, RequiresBrandCheckField::kNext, 16> {};
friend class ScopeIterator;
friend std::ostream& operator<<(std::ostream& os,
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index f5413ce1de..9778db5d90 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -91,7 +91,6 @@ CAST_ACCESSOR(UncompiledData)
ACCESSORS(UncompiledData, inferred_name, String, kInferredNameOffset)
INT32_ACCESSORS(UncompiledData, start_position, kStartPositionOffset)
INT32_ACCESSORS(UncompiledData, end_position, kEndPositionOffset)
-INT32_ACCESSORS(UncompiledData, function_literal_id, kFunctionLiteralIdOffset)
void UncompiledData::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) == 0) return;
@@ -106,9 +105,9 @@ CAST_ACCESSOR(UncompiledDataWithPreparseData)
ACCESSORS(UncompiledDataWithPreparseData, preparse_data, PreparseData,
kPreparseDataOffset)
-bool HeapObject::IsUncompiledData() const {
- return IsUncompiledDataWithoutPreparseData() ||
- IsUncompiledDataWithPreparseData();
+DEF_GETTER(HeapObject, IsUncompiledData, bool) {
+ return IsUncompiledDataWithoutPreparseData(isolate) ||
+ IsUncompiledDataWithPreparseData(isolate);
}
OBJECT_CONSTRUCTORS_IMPL(InterpreterData, Struct)
@@ -128,6 +127,9 @@ ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
kScriptOrDebugInfoOffset)
+INT32_ACCESSORS(SharedFunctionInfo, function_literal_id,
+ kFunctionLiteralIdOffset)
+
#if V8_SFI_HAS_UNIQUE_ID
INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
#endif
@@ -629,7 +631,7 @@ void SharedFunctionInfo::ClearPreparseData() {
// static
void UncompiledData::Initialize(
UncompiledData data, String inferred_name, int start_position,
- int end_position, int function_literal_id,
+ int end_position,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot) {
data.set_inferred_name(inferred_name);
@@ -637,28 +639,22 @@ void UncompiledData::Initialize(
data, data.RawField(UncompiledData::kInferredNameOffset), inferred_name);
data.set_start_position(start_position);
data.set_end_position(end_position);
- data.set_function_literal_id(function_literal_id);
data.clear_padding();
}
void UncompiledDataWithPreparseData::Initialize(
UncompiledDataWithPreparseData data, String inferred_name,
- int start_position, int end_position, int function_literal_id,
- PreparseData scope_data,
+ int start_position, int end_position, PreparseData scope_data,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot) {
UncompiledData::Initialize(data, inferred_name, start_position, end_position,
- function_literal_id, gc_notify_updated_slot);
+ gc_notify_updated_slot);
data.set_preparse_data(scope_data);
gc_notify_updated_slot(
data, data.RawField(UncompiledDataWithPreparseData::kPreparseDataOffset),
scope_data);
}
-bool UncompiledData::has_function_literal_id() {
- return function_literal_id() != kFunctionLiteralIdInvalid;
-}
-
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
return function_data().IsWasmExportedFunctionData();
}
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index a3b84ee46e..f7a82964b1 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -104,16 +104,12 @@ class UncompiledData : public HeapObject {
DECL_ACCESSORS(inferred_name, String)
DECL_INT32_ACCESSORS(start_position)
DECL_INT32_ACCESSORS(end_position)
- DECL_INT32_ACCESSORS(function_literal_id)
-
- // Returns true if the UncompiledData contains a valid function_literal_id.
- inline bool has_function_literal_id();
DECL_CAST(UncompiledData)
inline static void Initialize(
UncompiledData data, String inferred_name, int start_position,
- int end_position, int function_literal_id,
+ int end_position,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot =
[](HeapObject object, ObjectSlot slot, HeapObject target) {});
@@ -126,7 +122,6 @@ class UncompiledData : public HeapObject {
/* Raw data fields. */ \
V(kStartPositionOffset, kInt32Size) \
V(kEndPositionOffset, kInt32Size) \
- V(kFunctionLiteralIdOffset, kInt32Size) \
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
/* Header size. */ \
V(kSize, 0)
@@ -172,8 +167,7 @@ class UncompiledDataWithPreparseData : public UncompiledData {
inline static void Initialize(
UncompiledDataWithPreparseData data, String inferred_name,
- int start_position, int end_position, int function_literal_id,
- PreparseData scope_data,
+ int start_position, int end_position, PreparseData scope_data,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot =
[](HeapObject object, ObjectSlot slot, HeapObject target) {});
@@ -316,6 +310,11 @@ class SharedFunctionInfo : public HeapObject {
// function. The value is only reliable when the function has been compiled.
DECL_UINT16_ACCESSORS(expected_nof_properties)
+ // [function_literal_id] - uniquely identifies the FunctionLiteral this
+ // SharedFunctionInfo represents within its script, or -1 if this
+ // SharedFunctionInfo object doesn't correspond to a parsed FunctionLiteral.
+ DECL_INT32_ACCESSORS(function_literal_id)
+
#if V8_SFI_HAS_UNIQUE_ID
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
// even if the GC moves this SharedFunctionInfo.
@@ -385,9 +384,6 @@ class SharedFunctionInfo : public HeapObject {
inline bool HasInferredName();
inline String inferred_name();
- // Get the function literal id associated with this function, for parsing.
- V8_EXPORT_PRIVATE int FunctionLiteralId(Isolate* isolate) const;
-
// Break infos are contained in DebugInfo, this is a convenience method
// to simplify access.
V8_EXPORT_PRIVATE bool HasBreakInfo() const;
@@ -624,7 +620,7 @@ class SharedFunctionInfo : public HeapObject {
// Returns the unique TraceID for this SharedFunctionInfo (within the
// kTraceScope, works only for functions that have a Script and start/end
// position).
- uint64_t TraceID() const;
+ uint64_t TraceID(FunctionLiteral* literal = nullptr) const;
// Returns the unique trace ID reference for this SharedFunctionInfo
// (based on the |TraceID()| above).
@@ -634,16 +630,14 @@ class SharedFunctionInfo : public HeapObject {
class ScriptIterator {
public:
V8_EXPORT_PRIVATE ScriptIterator(Isolate* isolate, Script script);
- ScriptIterator(Isolate* isolate,
- Handle<WeakFixedArray> shared_function_infos);
+ explicit ScriptIterator(Handle<WeakFixedArray> shared_function_infos);
V8_EXPORT_PRIVATE SharedFunctionInfo Next();
int CurrentIndex() const { return index_ - 1; }
// Reset the iterator to run on |script|.
- void Reset(Script script);
+ void Reset(Isolate* isolate, Script script);
private:
- Isolate* isolate_;
Handle<WeakFixedArray> shared_function_infos_;
int index_;
DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
@@ -656,6 +650,7 @@ class SharedFunctionInfo : public HeapObject {
V8_EXPORT_PRIVATE SharedFunctionInfo Next();
private:
+ Isolate* isolate_;
Script::Iterator script_iterator_;
WeakArrayList::Iterator noscript_sfi_iterator_;
SharedFunctionInfo::ScriptIterator sfi_iterator_;
@@ -744,10 +739,6 @@ class SharedFunctionInfo : public HeapObject {
friend class V8HeapExplorer;
FRIEND_TEST(PreParserTest, LazyFunctionLength);
- // Find the index of this function in the parent script. Slow path of
- // FunctionLiteralId.
- int FindIndexInScript(Isolate* isolate) const;
-
OBJECT_CONSTRUCTORS(SharedFunctionInfo, HeapObject);
};
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index fa8b558939..85f6525399 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_SLOTS_H_
#define V8_OBJECTS_SLOTS_H_
+#include "src/base/memory.h"
#include "src/common/globals.h"
-#include "src/common/v8memory.h"
namespace v8 {
namespace internal {
@@ -192,11 +192,11 @@ class UnalignedSlot : public SlotBase<UnalignedSlot<T>, T, 1> {
Reference(const Reference&) V8_NOEXCEPT = default;
Reference& operator=(const Reference& other) V8_NOEXCEPT {
- WriteUnalignedValue<T>(address_, other.value());
+ base::WriteUnalignedValue<T>(address_, other.value());
return *this;
}
Reference& operator=(T value) {
- WriteUnalignedValue<T>(address_, value);
+ base::WriteUnalignedValue<T>(address_, value);
return *this;
}
@@ -206,8 +206,8 @@ class UnalignedSlot : public SlotBase<UnalignedSlot<T>, T, 1> {
void swap(Reference& other) {
T tmp = value();
- WriteUnalignedValue<T>(address_, other.value());
- WriteUnalignedValue<T>(other.address_, tmp);
+ base::WriteUnalignedValue<T>(address_, other.value());
+ base::WriteUnalignedValue<T>(other.address_, tmp);
}
bool operator<(const Reference& other) const {
@@ -219,7 +219,7 @@ class UnalignedSlot : public SlotBase<UnalignedSlot<T>, T, 1> {
}
private:
- T value() const { return ReadUnalignedValue<T>(address_); }
+ T value() const { return base::ReadUnalignedValue<T>(address_); }
Address address_;
};
diff --git a/deps/v8/src/objects/source-text-module.cc b/deps/v8/src/objects/source-text-module.cc
new file mode 100644
index 0000000000..e6637415c1
--- /dev/null
+++ b/deps/v8/src/objects/source-text-module.cc
@@ -0,0 +1,661 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/source-text-module.h"
+
+#include "src/api/api-inl.h"
+#include "src/ast/modules.h"
+#include "src/builtins/accessors.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+struct StringHandleHash {
+ V8_INLINE size_t operator()(Handle<String> string) const {
+ return string->Hash();
+ }
+};
+
+struct StringHandleEqual {
+ V8_INLINE bool operator()(Handle<String> lhs, Handle<String> rhs) const {
+ return lhs->Equals(*rhs);
+ }
+};
+
+class UnorderedStringSet
+ : public std::unordered_set<Handle<String>, StringHandleHash,
+ StringHandleEqual,
+ ZoneAllocator<Handle<String>>> {
+ public:
+ explicit UnorderedStringSet(Zone* zone)
+ : std::unordered_set<Handle<String>, StringHandleHash, StringHandleEqual,
+ ZoneAllocator<Handle<String>>>(
+ 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
+ ZoneAllocator<Handle<String>>(zone)) {}
+};
+
+class UnorderedStringMap
+ : public std::unordered_map<
+ Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
+ ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>> {
+ public:
+ explicit UnorderedStringMap(Zone* zone)
+ : std::unordered_map<
+ Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
+ ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>>(
+ 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
+ ZoneAllocator<std::pair<const Handle<String>, Handle<Object>>>(
+ zone)) {}
+};
+
+class Module::ResolveSet
+ : public std::unordered_map<
+ Handle<Module>, UnorderedStringSet*, ModuleHandleHash,
+ ModuleHandleEqual,
+ ZoneAllocator<std::pair<const Handle<Module>, UnorderedStringSet*>>> {
+ public:
+ explicit ResolveSet(Zone* zone)
+ : std::unordered_map<Handle<Module>, UnorderedStringSet*,
+ ModuleHandleHash, ModuleHandleEqual,
+ ZoneAllocator<std::pair<const Handle<Module>,
+ UnorderedStringSet*>>>(
+ 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
+ ZoneAllocator<std::pair<const Handle<Module>, UnorderedStringSet*>>(
+ zone)),
+ zone_(zone) {}
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ Zone* zone_;
+};
+
+SharedFunctionInfo SourceTextModule::GetSharedFunctionInfo() const {
+ DisallowHeapAllocation no_alloc;
+ DCHECK_NE(status(), Module::kEvaluating);
+ DCHECK_NE(status(), Module::kEvaluated);
+ switch (status()) {
+ case kUninstantiated:
+ case kPreInstantiating:
+ DCHECK(code().IsSharedFunctionInfo());
+ return SharedFunctionInfo::cast(code());
+ case kInstantiating:
+ DCHECK(code().IsJSFunction());
+ return JSFunction::cast(code()).shared();
+ case kInstantiated:
+ DCHECK(code().IsJSGeneratorObject());
+ return JSGeneratorObject::cast(code()).function().shared();
+ case kEvaluating:
+ case kEvaluated:
+ case kErrored:
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+}
+
+int SourceTextModule::ExportIndex(int cell_index) {
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kExport);
+ return cell_index - 1;
+}
+
+int SourceTextModule::ImportIndex(int cell_index) {
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kImport);
+ return -cell_index - 1;
+}
+
+void SourceTextModule::CreateIndirectExport(
+ Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
+ Handle<SourceTextModuleInfoEntry> entry) {
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(name).IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, entry);
+ module->set_exports(*exports);
+}
+
+void SourceTextModule::CreateExport(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ int cell_index, Handle<FixedArray> names) {
+ DCHECK_LT(0, names->length());
+ Handle<Cell> cell =
+ isolate->factory()->NewCell(isolate->factory()->undefined_value());
+ module->regular_exports().set(ExportIndex(cell_index), *cell);
+
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ for (int i = 0, n = names->length(); i < n; ++i) {
+ Handle<String> name(String::cast(names->get(i)), isolate);
+ DCHECK(exports->Lookup(name).IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, cell);
+ }
+ module->set_exports(*exports);
+}
+
+Cell SourceTextModule::GetCell(int cell_index) {
+ DisallowHeapAllocation no_gc;
+ Object cell;
+ switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
+ case SourceTextModuleDescriptor::kImport:
+ cell = regular_imports().get(ImportIndex(cell_index));
+ break;
+ case SourceTextModuleDescriptor::kExport:
+ cell = regular_exports().get(ExportIndex(cell_index));
+ break;
+ case SourceTextModuleDescriptor::kInvalid:
+ UNREACHABLE();
+ break;
+ }
+ return Cell::cast(cell);
+}
+
+Handle<Object> SourceTextModule::LoadVariable(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ int cell_index) {
+ return handle(module->GetCell(cell_index).value(), isolate);
+}
+
+void SourceTextModule::StoreVariable(Handle<SourceTextModule> module,
+ int cell_index, Handle<Object> value) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kExport);
+ module->GetCell(cell_index).set_value(*value);
+}
+
+MaybeHandle<Cell> SourceTextModule::ResolveExport(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve, Module::ResolveSet* resolve_set) {
+ Handle<Object> object(module->exports().Lookup(export_name), isolate);
+ if (object->IsCell()) {
+ // Already resolved (e.g. because it's a local export).
+ return Handle<Cell>::cast(object);
+ }
+
+ // Check for cycle before recursing.
+ {
+ // Attempt insertion with a null string set.
+ auto result = resolve_set->insert({module, nullptr});
+ UnorderedStringSet*& name_set = result.first->second;
+ if (result.second) {
+ // |module| wasn't in the map previously, so allocate a new name set.
+ Zone* zone = resolve_set->zone();
+ name_set =
+ new (zone->New(sizeof(UnorderedStringSet))) UnorderedStringSet(zone);
+ } else if (name_set->count(export_name)) {
+ // Cycle detected.
+ if (must_resolve) {
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(
+ MessageTemplate::kCyclicModuleDependency, export_name,
+ module_specifier),
+ &loc);
+ }
+ return MaybeHandle<Cell>();
+ }
+ name_set->insert(export_name);
+ }
+
+ if (object->IsSourceTextModuleInfoEntry()) {
+ // Not yet resolved indirect export.
+ Handle<SourceTextModuleInfoEntry> entry =
+ Handle<SourceTextModuleInfoEntry>::cast(object);
+ Handle<String> import_name(String::cast(entry->import_name()), isolate);
+ Handle<Script> script(module->script(), isolate);
+ MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
+
+ Handle<Cell> cell;
+ if (!ResolveImport(isolate, module, import_name, entry->module_request(),
+ new_loc, true, resolve_set)
+ .ToHandle(&cell)) {
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Cell>();
+ }
+
+ // The export table may have changed but the entry in question should be
+ // unchanged.
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(export_name).IsSourceTextModuleInfoEntry());
+
+ exports = ObjectHashTable::Put(exports, export_name, cell);
+ module->set_exports(*exports);
+ return cell;
+ }
+
+ DCHECK(object->IsTheHole(isolate));
+ return SourceTextModule::ResolveExportUsingStarExports(
+ isolate, module, module_specifier, export_name, loc, must_resolve,
+ resolve_set);
+}
+
+MaybeHandle<Cell> SourceTextModule::ResolveImport(
+ Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
+ int module_request, MessageLocation loc, bool must_resolve,
+ Module::ResolveSet* resolve_set) {
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules().get(module_request)), isolate);
+ Handle<String> specifier(
+ String::cast(module->info().module_requests().get(module_request)),
+ isolate);
+ MaybeHandle<Cell> result =
+ Module::ResolveExport(isolate, requested_module, specifier, name, loc,
+ must_resolve, resolve_set);
+ DCHECK_IMPLIES(isolate->has_pending_exception(), result.is_null());
+ return result;
+}
+
+MaybeHandle<Cell> SourceTextModule::ResolveExportUsingStarExports(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve, Module::ResolveSet* resolve_set) {
+ if (!export_name->Equals(ReadOnlyRoots(isolate).default_string())) {
+ // Go through all star exports looking for the given name. If multiple star
+ // exports provide the name, make sure they all map it to the same cell.
+ Handle<Cell> unique_cell;
+ Handle<FixedArray> special_exports(module->info().special_exports(),
+ isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ i::Handle<i::SourceTextModuleInfoEntry> entry(
+ i::SourceTextModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ if (!entry->export_name().IsUndefined(isolate)) {
+ continue; // Indirect export.
+ }
+
+ Handle<Script> script(module->script(), isolate);
+ MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
+
+ Handle<Cell> cell;
+ if (ResolveImport(isolate, module, export_name, entry->module_request(),
+ new_loc, false, resolve_set)
+ .ToHandle(&cell)) {
+ if (unique_cell.is_null()) unique_cell = cell;
+ if (*unique_cell != *cell) {
+ return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
+ MessageTemplate::kAmbiguousExport,
+ module_specifier, export_name),
+ &loc);
+ }
+ } else if (isolate->has_pending_exception()) {
+ return MaybeHandle<Cell>();
+ }
+ }
+
+ if (!unique_cell.is_null()) {
+ // Found a unique star export for this name.
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ DCHECK(exports->Lookup(export_name).IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, export_name, unique_cell);
+ module->set_exports(*exports);
+ return unique_cell;
+ }
+ }
+
+ // Unresolvable.
+ if (must_resolve) {
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
+ module_specifier, export_name),
+ &loc);
+ }
+ return MaybeHandle<Cell>();
+}
+
+bool SourceTextModule::PrepareInstantiate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ v8::Local<v8::Context> context, v8::Module::ResolveCallback callback) {
+ // Obtain requested modules.
+ Handle<SourceTextModuleInfo> module_info(module->info(), isolate);
+ Handle<FixedArray> module_requests(module_info->module_requests(), isolate);
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ for (int i = 0, length = module_requests->length(); i < length; ++i) {
+ Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
+ v8::Local<v8::Module> api_requested_module;
+ if (!callback(context, v8::Utils::ToLocal(specifier),
+ v8::Utils::ToLocal(Handle<Module>::cast(module)))
+ .ToLocal(&api_requested_module)) {
+ isolate->PromoteScheduledException();
+ return false;
+ }
+ Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
+ requested_modules->set(i, *requested_module);
+ }
+
+ // Recurse.
+ for (int i = 0, length = requested_modules->length(); i < length; ++i) {
+ Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
+ isolate);
+ if (!Module::PrepareInstantiate(isolate, requested_module, context,
+ callback)) {
+ return false;
+ }
+ }
+
+ // Set up local exports.
+ // TODO(neis): Create regular_exports array here instead of in factory method?
+ for (int i = 0, n = module_info->RegularExportCount(); i < n; ++i) {
+ int cell_index = module_info->RegularExportCellIndex(i);
+ Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
+ isolate);
+ CreateExport(isolate, module, cell_index, export_names);
+ }
+
+ // Partially set up indirect exports.
+ // For each indirect export, we create the appropriate slot in the export
+ // table and store its SourceTextModuleInfoEntry there. When we later find
+ // the correct Cell in the module that actually provides the value, we replace
+ // the SourceTextModuleInfoEntry by that Cell (see ResolveExport).
+ Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<SourceTextModuleInfoEntry> entry(
+ SourceTextModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ Handle<Object> export_name(entry->export_name(), isolate);
+ if (export_name->IsUndefined(isolate)) continue; // Star export.
+ CreateIndirectExport(isolate, module, Handle<String>::cast(export_name),
+ entry);
+ }
+
+ DCHECK_EQ(module->status(), kPreInstantiating);
+ return true;
+}
+
+bool SourceTextModule::RunInitializationCode(Isolate* isolate,
+ Handle<SourceTextModule> module) {
+ DCHECK_EQ(module->status(), kInstantiating);
+ Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
+ DCHECK_EQ(MODULE_SCOPE, function->shared().scope_info().scope_type());
+ Handle<Object> receiver = isolate->factory()->undefined_value();
+ Handle<Object> argv[] = {module};
+ MaybeHandle<Object> maybe_generator =
+ Execution::Call(isolate, function, receiver, arraysize(argv), argv);
+ Handle<Object> generator;
+ if (!maybe_generator.ToHandle(&generator)) {
+ DCHECK(isolate->has_pending_exception());
+ return false;
+ }
+ DCHECK_EQ(*function, Handle<JSGeneratorObject>::cast(generator)->function());
+ module->set_code(*generator);
+ return true;
+}
+
+bool SourceTextModule::MaybeTransitionComponent(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, Status new_status) {
+ DCHECK(new_status == kInstantiated || new_status == kEvaluated);
+ SLOW_DCHECK(
+ // {module} is on the {stack}.
+ std::count_if(stack->begin(), stack->end(),
+ [&](Handle<Module> m) { return *m == *module; }) == 1);
+ DCHECK_LE(module->dfs_ancestor_index(), module->dfs_index());
+ if (module->dfs_ancestor_index() == module->dfs_index()) {
+ // This is the root of its strongly connected component.
+ Handle<SourceTextModule> ancestor;
+ do {
+ ancestor = stack->front();
+ stack->pop_front();
+ DCHECK_EQ(ancestor->status(),
+ new_status == kInstantiated ? kInstantiating : kEvaluating);
+ if (new_status == kInstantiated) {
+ if (!SourceTextModule::RunInitializationCode(isolate, ancestor))
+ return false;
+ }
+ ancestor->SetStatus(new_status);
+ } while (*ancestor != *module);
+ }
+ return true;
+}
+
+bool SourceTextModule::FinishInstantiate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
+ Zone* zone) {
+ // Instantiate SharedFunctionInfo and mark module as instantiating for
+ // the recursion.
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
+ isolate);
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, isolate->native_context());
+ module->set_code(*function);
+ module->SetStatus(kInstantiating);
+ module->set_dfs_index(*dfs_index);
+ module->set_dfs_ancestor_index(*dfs_index);
+ stack->push_front(module);
+ (*dfs_index)++;
+
+ // Recurse.
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ for (int i = 0, length = requested_modules->length(); i < length; ++i) {
+ Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
+ isolate);
+ if (!Module::FinishInstantiate(isolate, requested_module, stack, dfs_index,
+ zone)) {
+ return false;
+ }
+
+ DCHECK_NE(requested_module->status(), kEvaluating);
+ DCHECK_GE(requested_module->status(), kInstantiating);
+ SLOW_DCHECK(
+ // {requested_module} is instantiating iff it's on the {stack}.
+ (requested_module->status() == kInstantiating) ==
+ std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
+ return *m == *requested_module;
+ }));
+
+ if (requested_module->status() == kInstantiating) {
+ // SyntheticModules go straight to kInstantiated so this must be a
+ // SourceTextModule
+ module->set_dfs_ancestor_index(
+ std::min(module->dfs_ancestor_index(),
+ Handle<SourceTextModule>::cast(requested_module)
+ ->dfs_ancestor_index()));
+ }
+ }
+
+ Handle<Script> script(module->script(), isolate);
+ Handle<SourceTextModuleInfo> module_info(module->info(), isolate);
+
+ // Resolve imports.
+ Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
+ for (int i = 0, n = regular_imports->length(); i < n; ++i) {
+ Handle<SourceTextModuleInfoEntry> entry(
+ SourceTextModuleInfoEntry::cast(regular_imports->get(i)), isolate);
+ Handle<String> name(String::cast(entry->import_name()), isolate);
+ MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
+ ResolveSet resolve_set(zone);
+ Handle<Cell> cell;
+ if (!ResolveImport(isolate, module, name, entry->module_request(), loc,
+ true, &resolve_set)
+ .ToHandle(&cell)) {
+ return false;
+ }
+ module->regular_imports().set(ImportIndex(entry->cell_index()), *cell);
+ }
+
+ // Resolve indirect exports.
+ Handle<FixedArray> special_exports(module_info->special_exports(), isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<SourceTextModuleInfoEntry> entry(
+ SourceTextModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ Handle<Object> name(entry->export_name(), isolate);
+ if (name->IsUndefined(isolate)) continue; // Star export.
+ MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
+ ResolveSet resolve_set(zone);
+ if (ResolveExport(isolate, module, Handle<String>(),
+ Handle<String>::cast(name), loc, true, &resolve_set)
+ .is_null()) {
+ return false;
+ }
+ }
+
+ return MaybeTransitionComponent(isolate, module, stack, kInstantiated);
+}
+
+void SourceTextModule::FetchStarExports(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ Zone* zone,
+ UnorderedModuleSet* visited) {
+ DCHECK_GE(module->status(), Module::kInstantiating);
+
+ if (module->module_namespace().IsJSModuleNamespace()) return; // Shortcut.
+
+ bool cycle = !visited->insert(module).second;
+ if (cycle) return;
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ UnorderedStringMap more_exports(zone);
+
+ // TODO(neis): Only allocate more_exports if there are star exports.
+ // Maybe split special_exports into indirect_exports and star_exports.
+
+ ReadOnlyRoots roots(isolate);
+ Handle<FixedArray> special_exports(module->info().special_exports(), isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<SourceTextModuleInfoEntry> entry(
+ SourceTextModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ if (!entry->export_name().IsUndefined(roots)) {
+ continue; // Indirect export.
+ }
+
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules().get(entry->module_request())),
+ isolate);
+
+ // Recurse.
+ if (requested_module->IsSourceTextModule())
+ FetchStarExports(isolate,
+ Handle<SourceTextModule>::cast(requested_module), zone,
+ visited);
+
+ // Collect all of [requested_module]'s exports that must be added to
+ // [module]'s exports (i.e. to [exports]). We record these in
+ // [more_exports]. Ambiguities (conflicting exports) are marked by mapping
+ // the name to undefined instead of a Cell.
+ Handle<ObjectHashTable> requested_exports(requested_module->exports(),
+ isolate);
+ for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
+ Object key;
+ if (!requested_exports->ToKey(roots, i, &key)) continue;
+ Handle<String> name(String::cast(key), isolate);
+
+ if (name->Equals(roots.default_string())) continue;
+ if (!exports->Lookup(name).IsTheHole(roots)) continue;
+
+ Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
+ auto insert_result = more_exports.insert(std::make_pair(name, cell));
+ if (!insert_result.second) {
+ auto it = insert_result.first;
+ if (*it->second == *cell || it->second->IsUndefined(roots)) {
+ // We already recorded this mapping before, or the name is already
+ // known to be ambiguous. In either case, there's nothing to do.
+ } else {
+ DCHECK(it->second->IsCell());
+ // Different star exports provide different cells for this name, hence
+ // mark the name as ambiguous.
+ it->second = roots.undefined_value_handle();
+ }
+ }
+ }
+ }
+
+ // Copy [more_exports] into [exports].
+ for (const auto& elem : more_exports) {
+ if (elem.second->IsUndefined(isolate)) continue; // Ambiguous export.
+ DCHECK(!elem.first->Equals(ReadOnlyRoots(isolate).default_string()));
+ DCHECK(elem.second->IsCell());
+ exports = ObjectHashTable::Put(exports, elem.first, elem.second);
+ }
+ module->set_exports(*exports);
+}
+
+Handle<JSModuleNamespace> SourceTextModule::GetModuleNamespace(
+ Isolate* isolate, Handle<SourceTextModule> module, int module_request) {
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules().get(module_request)), isolate);
+ return Module::GetModuleNamespace(isolate, requested_module);
+}
+
+MaybeHandle<Object> SourceTextModule::Evaluate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index) {
+ Handle<JSGeneratorObject> generator(JSGeneratorObject::cast(module->code()),
+ isolate);
+ module->set_code(
+ generator->function().shared().scope_info().ModuleDescriptorInfo());
+ module->SetStatus(kEvaluating);
+ module->set_dfs_index(*dfs_index);
+ module->set_dfs_ancestor_index(*dfs_index);
+ stack->push_front(module);
+ (*dfs_index)++;
+
+ // Recursion.
+ Handle<FixedArray> requested_modules(module->requested_modules(), isolate);
+ for (int i = 0, length = requested_modules->length(); i < length; ++i) {
+ Handle<Module> requested_module(Module::cast(requested_modules->get(i)),
+ isolate);
+ RETURN_ON_EXCEPTION(
+ isolate, Module::Evaluate(isolate, requested_module, stack, dfs_index),
+ Object);
+
+ DCHECK_GE(requested_module->status(), kEvaluating);
+ DCHECK_NE(requested_module->status(), kErrored);
+ SLOW_DCHECK(
+ // {requested_module} is evaluating iff it's on the {stack}.
+ (requested_module->status() == kEvaluating) ==
+ std::count_if(stack->begin(), stack->end(), [&](Handle<Module> m) {
+ return *m == *requested_module;
+ }));
+
+ if (requested_module->status() == kEvaluating) {
+ // SyntheticModules go straight to kEvaluated so this must be a
+ // SourceTextModule
+ module->set_dfs_ancestor_index(
+ std::min(module->dfs_ancestor_index(),
+ Handle<SourceTextModule>::cast(requested_module)
+ ->dfs_ancestor_index()));
+ }
+ }
+
+ // Evaluation of module body.
+ Handle<JSFunction> resume(
+ isolate->native_context()->generator_next_internal(), isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
+ Object);
+ DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
+
+ CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
+ return handle(JSIteratorResult::cast(*result).value(), isolate);
+}
+
+void SourceTextModule::Reset(Isolate* isolate,
+ Handle<SourceTextModule> module) {
+ Factory* factory = isolate->factory();
+
+ DCHECK(module->import_meta().IsTheHole(isolate));
+
+ Handle<FixedArray> regular_exports =
+ factory->NewFixedArray(module->regular_exports().length());
+ Handle<FixedArray> regular_imports =
+ factory->NewFixedArray(module->regular_imports().length());
+ Handle<FixedArray> requested_modules =
+ factory->NewFixedArray(module->requested_modules().length());
+
+ if (module->status() == kInstantiating) {
+ module->set_code(JSFunction::cast(module->code()).shared());
+ }
+ module->set_regular_exports(*regular_exports);
+ module->set_regular_imports(*regular_imports);
+ module->set_requested_modules(*requested_modules);
+ module->set_dfs_index(-1);
+ module->set_dfs_ancestor_index(-1);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/source-text-module.h b/deps/v8/src/objects/source-text-module.h
new file mode 100644
index 0000000000..5c20b7018b
--- /dev/null
+++ b/deps/v8/src/objects/source-text-module.h
@@ -0,0 +1,220 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SOURCE_TEXT_MODULE_H_
+#define V8_OBJECTS_SOURCE_TEXT_MODULE_H_
+
+#include "src/objects/module.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class UnorderedModuleSet;
+
+// The runtime representation of an ECMAScript Source Text Module Record.
+// https://tc39.github.io/ecma262/#sec-source-text-module-records
+class SourceTextModule : public Module {
+ public:
+ NEVER_READ_ONLY_SPACE
+ DECL_CAST(SourceTextModule)
+ DECL_VERIFIER(SourceTextModule)
+ DECL_PRINTER(SourceTextModule)
+
+ // The code representing this module, or an abstraction thereof.
+ // This is either a SharedFunctionInfo, a JSFunction, a JSGeneratorObject, or
+ // a SourceTextModuleInfo, depending on the state (status) the module is in.
+ // See SourceTextModule::SourceTextModuleVerify() for the precise invariant.
+ DECL_ACCESSORS(code, Object)
+
+ // Arrays of cells corresponding to regular exports and regular imports.
+ // A cell's position in the array is determined by the cell index of the
+ // associated module entry (which coincides with the variable index of the
+ // associated variable).
+ DECL_ACCESSORS(regular_exports, FixedArray)
+ DECL_ACCESSORS(regular_imports, FixedArray)
+
+ // The shared function info in case {status} is not kEvaluating, kEvaluated or
+ // kErrored.
+ SharedFunctionInfo GetSharedFunctionInfo() const;
+
+ // Modules imported or re-exported by this module.
+ // Corresponds 1-to-1 to the module specifier strings in
+ // SourceTextModuleInfo::module_requests.
+ DECL_ACCESSORS(requested_modules, FixedArray)
+
+ // [script]: Script from which the module originates.
+ DECL_ACCESSORS(script, Script)
+
+ // The value of import.meta inside of this module.
+ // Lazily initialized on first access. It's the hole before first access and
+ // a JSObject afterwards.
+ DECL_ACCESSORS(import_meta, Object)
+
+ // Get the SourceTextModuleInfo associated with the code.
+ inline SourceTextModuleInfo info() const;
+
+ Cell GetCell(int cell_index);
+ static Handle<Object> LoadVariable(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ int cell_index);
+ static void StoreVariable(Handle<SourceTextModule> module, int cell_index,
+ Handle<Object> value);
+
+ static int ImportIndex(int cell_index);
+ static int ExportIndex(int cell_index);
+
+ // Get the namespace object for [module_request] of [module]. If it doesn't
+ // exist yet, it is created.
+ static Handle<JSModuleNamespace> GetModuleNamespace(
+ Isolate* isolate, Handle<SourceTextModule> module, int module_request);
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(Module::kHeaderSize,
+ TORQUE_GENERATED_SOURCE_TEXT_MODULE_FIELDS)
+
+ using BodyDescriptor =
+ SubclassBodyDescriptor<Module::BodyDescriptor,
+ FixedBodyDescriptor<kCodeOffset, kSize, kSize>>;
+
+ private:
+ friend class Factory;
+ friend class Module;
+
+ // TODO(neis): Don't store those in the module object?
+ DECL_INT_ACCESSORS(dfs_index)
+ DECL_INT_ACCESSORS(dfs_ancestor_index)
+
+ // Helpers for Instantiate and Evaluate.
+
+ static void CreateExport(Isolate* isolate, Handle<SourceTextModule> module,
+ int cell_index, Handle<FixedArray> names);
+ static void CreateIndirectExport(Isolate* isolate,
+ Handle<SourceTextModule> module,
+ Handle<String> name,
+ Handle<SourceTextModuleInfoEntry> entry);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExport(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveImport(
+ Isolate* isolate, Handle<SourceTextModule> module, Handle<String> name,
+ int module_request, MessageLocation loc, bool must_resolve,
+ ResolveSet* resolve_set);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
+
+ static V8_WARN_UNUSED_RESULT bool PrepareInstantiate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ v8::Local<v8::Context> context, v8::Module::ResolveCallback callback);
+ static V8_WARN_UNUSED_RESULT bool FinishInstantiate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index,
+ Zone* zone);
+ static V8_WARN_UNUSED_RESULT bool RunInitializationCode(
+ Isolate* isolate, Handle<SourceTextModule> module);
+
+ static void FetchStarExports(Isolate* isolate,
+ Handle<SourceTextModule> module, Zone* zone,
+ UnorderedModuleSet* visited);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, unsigned* dfs_index);
+
+ static V8_WARN_UNUSED_RESULT bool MaybeTransitionComponent(
+ Isolate* isolate, Handle<SourceTextModule> module,
+ ZoneForwardList<Handle<SourceTextModule>>* stack, Status new_status);
+
+ static void Reset(Isolate* isolate, Handle<SourceTextModule> module);
+
+ OBJECT_CONSTRUCTORS(SourceTextModule, Module);
+};
+
+// SourceTextModuleInfo is to SourceTextModuleDescriptor what ScopeInfo is to
+// Scope.
+class SourceTextModuleInfo : public FixedArray {
+ public:
+ DECL_CAST(SourceTextModuleInfo)
+
+ static Handle<SourceTextModuleInfo> New(Isolate* isolate, Zone* zone,
+ SourceTextModuleDescriptor* descr);
+
+ inline FixedArray module_requests() const;
+ inline FixedArray special_exports() const;
+ inline FixedArray regular_exports() const;
+ inline FixedArray regular_imports() const;
+ inline FixedArray namespace_imports() const;
+ inline FixedArray module_request_positions() const;
+
+ // Accessors for [regular_exports].
+ int RegularExportCount() const;
+ String RegularExportLocalName(int i) const;
+ int RegularExportCellIndex(int i) const;
+ FixedArray RegularExportExportNames(int i) const;
+
+#ifdef DEBUG
+ inline bool Equals(SourceTextModuleInfo other) const;
+#endif
+
+ private:
+ friend class Factory;
+ friend class SourceTextModuleDescriptor;
+ enum {
+ kModuleRequestsIndex,
+ kSpecialExportsIndex,
+ kRegularExportsIndex,
+ kNamespaceImportsIndex,
+ kRegularImportsIndex,
+ kModuleRequestPositionsIndex,
+ kLength
+ };
+ enum {
+ kRegularExportLocalNameOffset,
+ kRegularExportCellIndexOffset,
+ kRegularExportExportNamesOffset,
+ kRegularExportLength
+ };
+
+ OBJECT_CONSTRUCTORS(SourceTextModuleInfo, FixedArray);
+};
+
+class SourceTextModuleInfoEntry : public Struct {
+ public:
+ DECL_CAST(SourceTextModuleInfoEntry)
+ DECL_PRINTER(SourceTextModuleInfoEntry)
+ DECL_VERIFIER(SourceTextModuleInfoEntry)
+
+ DECL_ACCESSORS(export_name, Object)
+ DECL_ACCESSORS(local_name, Object)
+ DECL_ACCESSORS(import_name, Object)
+ DECL_INT_ACCESSORS(module_request)
+ DECL_INT_ACCESSORS(cell_index)
+ DECL_INT_ACCESSORS(beg_pos)
+ DECL_INT_ACCESSORS(end_pos)
+
+ static Handle<SourceTextModuleInfoEntry> New(
+ Isolate* isolate, Handle<Object> export_name, Handle<Object> local_name,
+ Handle<Object> import_name, int module_request, int cell_index,
+ int beg_pos, int end_pos);
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ Struct::kHeaderSize,
+ TORQUE_GENERATED_SOURCE_TEXT_MODULE_INFO_ENTRY_FIELDS)
+
+ OBJECT_CONSTRUCTORS(SourceTextModuleInfoEntry, Struct);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SOURCE_TEXT_MODULE_H_
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 8069e6e5c9..e72af4df94 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -32,11 +32,15 @@ ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameOffset)
ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
kScriptNameOrSourceUrlOffset)
ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameOffset)
+ACCESSORS(StackFrameInfo, method_name, Object, kMethodNameOffset)
+ACCESSORS(StackFrameInfo, type_name, Object, kTypeNameOffset)
+ACCESSORS(StackFrameInfo, eval_origin, Object, kEvalOriginOffset)
ACCESSORS(StackFrameInfo, wasm_module_name, Object, kWasmModuleNameOffset)
SMI_ACCESSORS(StackFrameInfo, flag, kFlagOffset)
BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_asmjs_wasm, kIsAsmJsWasmBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_user_java_script, kIsUserJavaScriptBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_toplevel, kIsToplevelBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_async, kIsAsyncBit)
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index f427d7eae2..558449d85a 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -5,85 +5,144 @@
#include "src/objects/stack-frame-info.h"
#include "src/objects/stack-frame-info-inl.h"
+#include "src/strings/string-builder-inl.h"
namespace v8 {
namespace internal {
+// static
int StackTraceFrame::GetLineNumber(Handle<StackTraceFrame> frame) {
int line = GetFrameInfo(frame)->line_number();
return line != StackFrameBase::kNone ? line : Message::kNoLineNumberInfo;
}
+// static
+int StackTraceFrame::GetOneBasedLineNumber(Handle<StackTraceFrame> frame) {
+ // JavaScript line numbers are already 1-based. Wasm line numbers need
+ // to be adjusted.
+ int line = StackTraceFrame::GetLineNumber(frame);
+ if (StackTraceFrame::IsWasm(frame) && line >= 0) line++;
+ return line;
+}
+
+// static
int StackTraceFrame::GetColumnNumber(Handle<StackTraceFrame> frame) {
int column = GetFrameInfo(frame)->column_number();
return column != StackFrameBase::kNone ? column : Message::kNoColumnInfo;
}
+// static
+int StackTraceFrame::GetOneBasedColumnNumber(Handle<StackTraceFrame> frame) {
+  // JavaScript column numbers are already 1-based. Wasm column numbers need
+ // to be adjusted.
+ int column = StackTraceFrame::GetColumnNumber(frame);
+ if (StackTraceFrame::IsWasm(frame) && column >= 0) column++;
+ return column;
+}
+
+// static
int StackTraceFrame::GetScriptId(Handle<StackTraceFrame> frame) {
int id = GetFrameInfo(frame)->script_id();
return id != StackFrameBase::kNone ? id : Message::kNoScriptIdInfo;
}
+// static
int StackTraceFrame::GetPromiseAllIndex(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->promise_all_index();
}
+// static
Handle<Object> StackTraceFrame::GetFileName(Handle<StackTraceFrame> frame) {
auto name = GetFrameInfo(frame)->script_name();
return handle(name, frame->GetIsolate());
}
+// static
Handle<Object> StackTraceFrame::GetScriptNameOrSourceUrl(
Handle<StackTraceFrame> frame) {
auto name = GetFrameInfo(frame)->script_name_or_source_url();
return handle(name, frame->GetIsolate());
}
+// static
Handle<Object> StackTraceFrame::GetFunctionName(Handle<StackTraceFrame> frame) {
auto name = GetFrameInfo(frame)->function_name();
return handle(name, frame->GetIsolate());
}
+// static
+Handle<Object> StackTraceFrame::GetMethodName(Handle<StackTraceFrame> frame) {
+ auto name = GetFrameInfo(frame)->method_name();
+ return handle(name, frame->GetIsolate());
+}
+
+// static
+Handle<Object> StackTraceFrame::GetTypeName(Handle<StackTraceFrame> frame) {
+ auto name = GetFrameInfo(frame)->type_name();
+ return handle(name, frame->GetIsolate());
+}
+
+// static
+Handle<Object> StackTraceFrame::GetEvalOrigin(Handle<StackTraceFrame> frame) {
+ auto origin = GetFrameInfo(frame)->eval_origin();
+ return handle(origin, frame->GetIsolate());
+}
+
+// static
Handle<Object> StackTraceFrame::GetWasmModuleName(
Handle<StackTraceFrame> frame) {
auto module = GetFrameInfo(frame)->wasm_module_name();
return handle(module, frame->GetIsolate());
}
+// static
bool StackTraceFrame::IsEval(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_eval();
}
+// static
bool StackTraceFrame::IsConstructor(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_constructor();
}
+// static
bool StackTraceFrame::IsWasm(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_wasm();
}
+// static
+bool StackTraceFrame::IsAsmJsWasm(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_asmjs_wasm();
+}
+
+// static
bool StackTraceFrame::IsUserJavaScript(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_user_java_script();
}
+// static
bool StackTraceFrame::IsToplevel(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_toplevel();
}
+// static
bool StackTraceFrame::IsAsync(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_async();
}
+// static
bool StackTraceFrame::IsPromiseAll(Handle<StackTraceFrame> frame) {
return GetFrameInfo(frame)->is_promise_all();
}
+// static
Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
Handle<StackTraceFrame> frame) {
if (frame->frame_info().IsUndefined()) InitializeFrameInfo(frame);
return handle(StackFrameInfo::cast(frame->frame_info()), frame->GetIsolate());
}
+// static
void StackTraceFrame::InitializeFrameInfo(Handle<StackTraceFrame> frame) {
Isolate* isolate = frame->GetIsolate();
Handle<StackFrameInfo> frame_info = isolate->factory()->NewStackFrameInfo(
@@ -97,5 +156,259 @@ void StackTraceFrame::InitializeFrameInfo(Handle<StackTraceFrame> frame) {
frame->set_frame_index(-1);
}
+Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate,
+ Handle<FixedArray> stack_trace) {
+  // For the empty case, an empty FrameArray needs to be allocated so the rest
+  // of the code doesn't have to be special cased everywhere.
+ if (stack_trace->length() == 0) {
+ return isolate->factory()->NewFrameArray(0);
+ }
+
+ // Retrieve the FrameArray from the first StackTraceFrame.
+ DCHECK_GT(stack_trace->length(), 0);
+ Handle<StackTraceFrame> frame(StackTraceFrame::cast(stack_trace->get(0)),
+ isolate);
+ return handle(FrameArray::cast(frame->frame_array()), isolate);
+}
+
+namespace {
+
+bool IsNonEmptyString(Handle<Object> object) {
+ return (object->IsString() && String::cast(*object).length() > 0);
+}
+
+void AppendFileLocation(Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder) {
+ Handle<Object> file_name = StackTraceFrame::GetScriptNameOrSourceUrl(frame);
+ if (!file_name->IsString() && StackTraceFrame::IsEval(frame)) {
+ Handle<Object> eval_origin = StackTraceFrame::GetEvalOrigin(frame);
+ DCHECK(eval_origin->IsString());
+ builder->AppendString(Handle<String>::cast(eval_origin));
+ builder->AppendCString(", "); // Expecting source position to follow.
+ }
+
+ if (IsNonEmptyString(file_name)) {
+ builder->AppendString(Handle<String>::cast(file_name));
+ } else {
+ // Source code does not originate from a file and is not native, but we
+ // can still get the source position inside the source string, e.g. in
+ // an eval string.
+ builder->AppendCString("<anonymous>");
+ }
+
+ int line_number = StackTraceFrame::GetLineNumber(frame);
+ if (line_number != Message::kNoLineNumberInfo) {
+ builder->AppendCharacter(':');
+ builder->AppendInt(line_number);
+
+ int column_number = StackTraceFrame::GetColumnNumber(frame);
+ if (column_number != Message::kNoColumnInfo) {
+ builder->AppendCharacter(':');
+ builder->AppendInt(column_number);
+ }
+ }
+}
+
+int StringIndexOf(Isolate* isolate, Handle<String> subject,
+ Handle<String> pattern) {
+ if (pattern->length() > subject->length()) return -1;
+ return String::IndexOf(isolate, subject, pattern, 0);
+}
+
+// Returns true iff
+// 1. the subject ends with '.' + pattern, or
+// 2. subject == pattern.
+bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
+ Handle<String> pattern) {
+ if (String::Equals(isolate, subject, pattern)) return true;
+
+ FlatStringReader subject_reader(isolate, String::Flatten(isolate, subject));
+ FlatStringReader pattern_reader(isolate, String::Flatten(isolate, pattern));
+
+ int pattern_index = pattern_reader.length() - 1;
+ int subject_index = subject_reader.length() - 1;
+ for (int i = 0; i <= pattern_reader.length(); i++) { // Iterate over len + 1.
+ if (subject_index < 0) {
+ return false;
+ }
+
+ const uc32 subject_char = subject_reader.Get(subject_index);
+ if (i == pattern_reader.length()) {
+ if (subject_char != '.') return false;
+ } else if (subject_char != pattern_reader.Get(pattern_index)) {
+ return false;
+ }
+
+ pattern_index--;
+ subject_index--;
+ }
+
+ return true;
+}
+
+void AppendMethodCall(Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder* builder) {
+ Handle<Object> type_name = StackTraceFrame::GetTypeName(frame);
+ Handle<Object> method_name = StackTraceFrame::GetMethodName(frame);
+ Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+
+ if (IsNonEmptyString(function_name)) {
+ Handle<String> function_string = Handle<String>::cast(function_name);
+ if (IsNonEmptyString(type_name)) {
+ Handle<String> type_string = Handle<String>::cast(type_name);
+ bool starts_with_type_name =
+ (StringIndexOf(isolate, function_string, type_string) == 0);
+ if (!starts_with_type_name) {
+ builder->AppendString(type_string);
+ builder->AppendCharacter('.');
+ }
+ }
+ builder->AppendString(function_string);
+
+ if (IsNonEmptyString(method_name)) {
+ Handle<String> method_string = Handle<String>::cast(method_name);
+ if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
+ builder->AppendCString(" [as ");
+ builder->AppendString(method_string);
+ builder->AppendCharacter(']');
+ }
+ }
+ } else {
+ if (IsNonEmptyString(type_name)) {
+ builder->AppendString(Handle<String>::cast(type_name));
+ builder->AppendCharacter('.');
+ }
+ if (IsNonEmptyString(method_name)) {
+ builder->AppendString(Handle<String>::cast(method_name));
+ } else {
+ builder->AppendCString("<anonymous>");
+ }
+ }
+}
+
+void SerializeJSStackFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+) {
+ Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+
+ const bool is_toplevel = StackTraceFrame::IsToplevel(frame);
+ const bool is_async = StackTraceFrame::IsAsync(frame);
+ const bool is_promise_all = StackTraceFrame::IsPromiseAll(frame);
+ const bool is_constructor = StackTraceFrame::IsConstructor(frame);
+ // Note: Keep the {is_method_call} predicate in sync with the corresponding
+ // predicate in factory.cc where the StackFrameInfo is created.
+  // Otherwise necessary fields for serializing this frame might be
+ // missing.
+ const bool is_method_call = !(is_toplevel || is_constructor);
+
+ if (is_async) {
+ builder.AppendCString("async ");
+ }
+ if (is_promise_all) {
+ builder.AppendCString("Promise.all (index ");
+ builder.AppendInt(StackTraceFrame::GetPromiseAllIndex(frame));
+ builder.AppendCString(")");
+ return;
+ }
+ if (is_method_call) {
+ AppendMethodCall(isolate, frame, &builder);
+ } else if (is_constructor) {
+ builder.AppendCString("new ");
+ if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ builder.AppendCString("<anonymous>");
+ }
+ } else if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ AppendFileLocation(isolate, frame, &builder);
+ return;
+ }
+
+ builder.AppendCString(" (");
+ AppendFileLocation(isolate, frame, &builder);
+ builder.AppendCString(")");
+}
+
+void SerializeAsmJsWasmStackFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+) {
+ // The string should look exactly as the respective javascript frame string.
+  // Keep this method in line with
+ // JSStackFrame::ToString(IncrementalStringBuilder&).
+ Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+
+ if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ builder.AppendCString(" (");
+ }
+
+ AppendFileLocation(isolate, frame, &builder);
+
+ if (IsNonEmptyString(function_name)) builder.AppendCString(")");
+
+ return;
+}
+
+void SerializeWasmStackFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+) {
+ Handle<Object> module_name = StackTraceFrame::GetWasmModuleName(frame);
+ Handle<Object> function_name = StackTraceFrame::GetFunctionName(frame);
+ const bool has_name = !module_name->IsNull() || !function_name->IsNull();
+ if (has_name) {
+ if (module_name->IsNull()) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ builder.AppendString(Handle<String>::cast(module_name));
+ if (!function_name->IsNull()) {
+ builder.AppendCString(".");
+ builder.AppendString(Handle<String>::cast(function_name));
+ }
+ }
+ builder.AppendCString(" (");
+ }
+
+ const int wasm_func_index = StackTraceFrame::GetLineNumber(frame);
+
+ builder.AppendCString("wasm-function[");
+ builder.AppendInt(wasm_func_index);
+ builder.AppendCString("]:");
+
+ char buffer[16];
+ SNPrintF(ArrayVector(buffer), "0x%x",
+ StackTraceFrame::GetColumnNumber(frame));
+ builder.AppendCString(buffer);
+
+ if (has_name) builder.AppendCString(")");
+}
+
+} // namespace
+
+void SerializeStackTraceFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+) {
+ // Ordering here is important, as AsmJs frames are also marked as Wasm.
+ if (StackTraceFrame::IsAsmJsWasm(frame)) {
+ SerializeAsmJsWasmStackFrame(isolate, frame, builder);
+ } else if (StackTraceFrame::IsWasm(frame)) {
+ SerializeWasmStackFrame(isolate, frame, builder);
+ } else {
+ SerializeJSStackFrame(isolate, frame, builder);
+ }
+}
+
+MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
+ Handle<StackTraceFrame> frame) {
+ IncrementalStringBuilder builder(isolate);
+ SerializeStackTraceFrame(isolate, frame, builder);
+ return builder.Finish();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index 44826f67e6..3d91c5374f 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -25,10 +25,14 @@ class StackFrameInfo : public Struct {
DECL_ACCESSORS(script_name, Object)
DECL_ACCESSORS(script_name_or_source_url, Object)
DECL_ACCESSORS(function_name, Object)
+ DECL_ACCESSORS(method_name, Object)
+ DECL_ACCESSORS(type_name, Object)
+ DECL_ACCESSORS(eval_origin, Object)
DECL_ACCESSORS(wasm_module_name, Object)
DECL_BOOLEAN_ACCESSORS(is_eval)
DECL_BOOLEAN_ACCESSORS(is_constructor)
DECL_BOOLEAN_ACCESSORS(is_wasm)
+ DECL_BOOLEAN_ACCESSORS(is_asmjs_wasm)
DECL_BOOLEAN_ACCESSORS(is_user_java_script)
DECL_BOOLEAN_ACCESSORS(is_toplevel)
DECL_BOOLEAN_ACCESSORS(is_async)
@@ -49,10 +53,11 @@ class StackFrameInfo : public Struct {
static const int kIsEvalBit = 0;
static const int kIsConstructorBit = 1;
static const int kIsWasmBit = 2;
- static const int kIsUserJavaScriptBit = 3;
- static const int kIsToplevelBit = 4;
- static const int kIsAsyncBit = 5;
- static const int kIsPromiseAllBit = 6;
+ static const int kIsAsmJsWasmBit = 3;
+ static const int kIsUserJavaScriptBit = 4;
+ static const int kIsToplevelBit = 5;
+ static const int kIsAsyncBit = 6;
+ static const int kIsPromiseAllBit = 7;
OBJECT_CONSTRUCTORS(StackFrameInfo, Struct);
};
@@ -80,18 +85,24 @@ class StackTraceFrame : public Struct {
TORQUE_GENERATED_STACK_TRACE_FRAME_FIELDS)
static int GetLineNumber(Handle<StackTraceFrame> frame);
+ static int GetOneBasedLineNumber(Handle<StackTraceFrame> frame);
static int GetColumnNumber(Handle<StackTraceFrame> frame);
+ static int GetOneBasedColumnNumber(Handle<StackTraceFrame> frame);
static int GetScriptId(Handle<StackTraceFrame> frame);
static int GetPromiseAllIndex(Handle<StackTraceFrame> frame);
static Handle<Object> GetFileName(Handle<StackTraceFrame> frame);
static Handle<Object> GetScriptNameOrSourceUrl(Handle<StackTraceFrame> frame);
static Handle<Object> GetFunctionName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetMethodName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetTypeName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetEvalOrigin(Handle<StackTraceFrame> frame);
static Handle<Object> GetWasmModuleName(Handle<StackTraceFrame> frame);
static bool IsEval(Handle<StackTraceFrame> frame);
static bool IsConstructor(Handle<StackTraceFrame> frame);
static bool IsWasm(Handle<StackTraceFrame> frame);
+ static bool IsAsmJsWasm(Handle<StackTraceFrame> frame);
static bool IsUserJavaScript(Handle<StackTraceFrame> frame);
static bool IsToplevel(Handle<StackTraceFrame> frame);
static bool IsAsync(Handle<StackTraceFrame> frame);
@@ -104,6 +115,22 @@ class StackTraceFrame : public Struct {
static void InitializeFrameInfo(Handle<StackTraceFrame> frame);
};
+// Small helper that retrieves the FrameArray from a stack-trace
+// consisting of a FixedArray of StackTraceFrame objects.
+// This helper is only temporary until all FrameArray use-sites have
+// been converted to use StackTraceFrame and StackFrameInfo objects.
+V8_EXPORT_PRIVATE
+Handle<FrameArray> GetFrameArrayFromStackTrace(Isolate* isolate,
+ Handle<FixedArray> stack_trace);
+
+class IncrementalStringBuilder;
+void SerializeStackTraceFrame(
+ Isolate* isolate, Handle<StackTraceFrame> frame,
+ IncrementalStringBuilder& builder // NOLINT(runtime/references)
+);
+MaybeHandle<String> SerializeStackTraceFrame(Isolate* isolate,
+ Handle<StackTraceFrame> frame);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 0d8f83ca86..db724e0cf1 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -22,8 +22,6 @@
namespace v8 {
namespace internal {
-INT32_ACCESSORS(String, length, kLengthOffset)
-
int String::synchronized_length() const {
return base::AsAtomic32::Acquire_Load(
reinterpret_cast<const int32_t*>(FIELD_ADDR(*this, kLengthOffset)));
@@ -34,29 +32,21 @@ void String::synchronized_set_length(int value) {
reinterpret_cast<int32_t*>(FIELD_ADDR(*this, kLengthOffset)), value);
}
-OBJECT_CONSTRUCTORS_IMPL(String, Name)
-OBJECT_CONSTRUCTORS_IMPL(SeqString, String)
-OBJECT_CONSTRUCTORS_IMPL(SeqOneByteString, SeqString)
-OBJECT_CONSTRUCTORS_IMPL(SeqTwoByteString, SeqString)
-OBJECT_CONSTRUCTORS_IMPL(InternalizedString, String)
-OBJECT_CONSTRUCTORS_IMPL(ConsString, String)
-OBJECT_CONSTRUCTORS_IMPL(ThinString, String)
-OBJECT_CONSTRUCTORS_IMPL(SlicedString, String)
+TQ_OBJECT_CONSTRUCTORS_IMPL(String)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SeqString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SeqOneByteString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SeqTwoByteString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(InternalizedString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ConsString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(ThinString)
+TQ_OBJECT_CONSTRUCTORS_IMPL(SlicedString)
OBJECT_CONSTRUCTORS_IMPL(ExternalString, String)
OBJECT_CONSTRUCTORS_IMPL(ExternalOneByteString, ExternalString)
OBJECT_CONSTRUCTORS_IMPL(ExternalTwoByteString, ExternalString)
-CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(InternalizedString)
-CAST_ACCESSOR(SeqOneByteString)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(SlicedString)
-CAST_ACCESSOR(String)
-CAST_ACCESSOR(ThinString)
StringShape::StringShape(const String str) : type_(str.map().instance_type()) {
set_valid();
@@ -147,16 +137,17 @@ STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
-bool String::IsOneByteRepresentation() const {
- uint32_t type = map().instance_type();
+DEF_GETTER(String, IsOneByteRepresentation, bool) {
+ uint32_t type = map(isolate).instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
-bool String::IsTwoByteRepresentation() const {
- uint32_t type = map().instance_type();
+DEF_GETTER(String, IsTwoByteRepresentation, bool) {
+ uint32_t type = map(isolate).instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
+// static
bool String::IsOneByteRepresentationUnderneath(String string) {
while (true) {
uint32_t type = string.map().instance_type();
@@ -398,7 +389,7 @@ String String::GetUnderlying() {
STATIC_ASSERT(static_cast<int>(ConsString::kFirstOffset) ==
static_cast<int>(ThinString::kActualOffset));
const int kUnderlyingOffset = SlicedString::kParentOffset;
- return String::cast(READ_FIELD(*this, kUnderlyingOffset));
+ return TaggedField<String, kUnderlyingOffset>::load(*this);
}
template <class Visitor>
@@ -527,49 +518,23 @@ int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
return SizeFor(length());
}
-String SlicedString::parent() {
- return String::cast(READ_FIELD(*this, kParentOffset));
-}
-
-void SlicedString::set_parent(Isolate* isolate, String parent,
- WriteBarrierMode mode) {
+void SlicedString::set_parent(String parent, WriteBarrierMode mode) {
DCHECK(parent.IsSeqString() || parent.IsExternalString());
- WRITE_FIELD(*this, kParentOffset, parent);
- CONDITIONAL_WRITE_BARRIER(*this, kParentOffset, parent, mode);
-}
-
-SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
-
-String ConsString::first() {
- return String::cast(READ_FIELD(*this, kFirstOffset));
+ TorqueGeneratedSlicedString<SlicedString, Super>::set_parent(parent, mode);
}
-Object ConsString::unchecked_first() { return READ_FIELD(*this, kFirstOffset); }
+TQ_SMI_ACCESSORS(SlicedString, offset)
-void ConsString::set_first(Isolate* isolate, String value,
- WriteBarrierMode mode) {
- WRITE_FIELD(*this, kFirstOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kFirstOffset, value, mode);
-}
-
-String ConsString::second() {
- return String::cast(READ_FIELD(*this, kSecondOffset));
+Object ConsString::unchecked_first() {
+ return TaggedField<Object, kFirstOffset>::load(*this);
}
Object ConsString::unchecked_second() {
return RELAXED_READ_FIELD(*this, kSecondOffset);
}
-void ConsString::set_second(Isolate* isolate, String value,
- WriteBarrierMode mode) {
- WRITE_FIELD(*this, kSecondOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kSecondOffset, value, mode);
-}
-
-ACCESSORS(ThinString, actual, String, kActualOffset)
-
-HeapObject ThinString::unchecked_actual() const {
- return HeapObject::unchecked_cast(READ_FIELD(*this, kActualOffset));
+DEF_GETTER(ThinString, unchecked_actual, HeapObject) {
+ return TaggedField<HeapObject, kActualOffset>::load(isolate, *this);
}
bool ExternalString::is_uncached() const {
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index cc513f88cb..d1981fd24d 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -61,8 +61,8 @@ Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
WriteToFlat(*cons, flat->GetChars(no_gc), 0, length);
result = flat;
}
- cons->set_first(isolate, *result);
- cons->set_second(isolate, ReadOnlyRoots(isolate).empty_string());
+ cons->set_first(*result);
+ cons->set_second(ReadOnlyRoots(isolate).empty_string());
DCHECK(result->IsFlat());
return result;
}
@@ -146,15 +146,15 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kUncachedSize) return false;
- Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
- if (!GetIsolateFromWritableObject(*this, &isolate)) return false;
- Heap* heap = isolate->heap();
+ if (IsReadOnlyHeapObject(*this)) return false;
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(*this).IsIndirect();
+
if (has_pointers) {
- heap->NotifyObjectLayoutChange(*this, size, no_allocation);
+ isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -163,7 +163,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// the address of the backing store. When we encounter uncached external
// strings in generated code, we need to bailout to runtime.
Map new_map;
- ReadOnlyRoots roots(heap);
+ ReadOnlyRoots roots(isolate);
if (size < ExternalString::kSizeOfAllExternalStrings) {
if (is_internalized) {
new_map = roots.uncached_external_internalized_string_map();
@@ -177,10 +177,11 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Byte size of the external String object.
int new_size = this->SizeFromMap(new_map);
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
- ClearRecordedSlots::kNo);
+ isolate->heap()->CreateFillerObjectAt(
+ this->address() + new_size, size - new_size, ClearRecordedSlots::kNo);
if (has_pointers) {
- heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
+ isolate->heap()->ClearRecordedSlotRange(this->address(),
+ this->address() + new_size);
}
// We are storing the new map using release store after creating a filler for
@@ -189,7 +190,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
self.SetResource(isolate, resource);
- heap->RegisterExternalString(*this);
+ isolate->heap()->RegisterExternalString(*this);
if (is_internalized) self.Hash(); // Force regeneration of the hash value.
return true;
}
@@ -218,18 +219,16 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
int size = this->Size(); // Byte size of the original string.
// Abort if size does not allow in-place conversion.
if (size < ExternalString::kUncachedSize) return false;
- Isolate* isolate;
// Read-only strings cannot be made external, since that would mutate the
// string.
- if (!GetIsolateFromWritableObject(*this, &isolate)) return false;
- Heap* heap = isolate->heap();
+ if (IsReadOnlyHeapObject(*this)) return false;
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(*this).IsIndirect();
if (has_pointers) {
- heap->NotifyObjectLayoutChange(*this, size, no_allocation);
+ isolate->heap()->NotifyObjectLayoutChange(*this, size, no_allocation);
}
-
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string. Instead, we
@@ -237,7 +236,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// the address of the backing store. When we encounter uncached external
// strings in generated code, we need to bailout to runtime.
Map new_map;
- ReadOnlyRoots roots(heap);
+ ReadOnlyRoots roots(isolate);
if (size < ExternalString::kSizeOfAllExternalStrings) {
new_map = is_internalized
? roots.uncached_external_one_byte_internalized_string_map()
@@ -250,10 +249,11 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// Byte size of the external String object.
int new_size = this->SizeFromMap(new_map);
- heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
- ClearRecordedSlots::kNo);
+ isolate->heap()->CreateFillerObjectAt(
+ this->address() + new_size, size - new_size, ClearRecordedSlots::kNo);
if (has_pointers) {
- heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
+ isolate->heap()->ClearRecordedSlotRange(this->address(),
+ this->address() + new_size);
}
// We are storing the new map using release store after creating a filler for
@@ -262,7 +262,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
ExternalOneByteString self = ExternalOneByteString::cast(*this);
self.SetResource(isolate, resource);
- heap->RegisterExternalString(*this);
+ isolate->heap()->RegisterExternalString(*this);
if (is_internalized) self.Hash(); // Force regeneration of the hash value.
return true;
}
@@ -272,9 +272,8 @@ bool String::SupportsExternalization() {
return i::ThinString::cast(*this).actual().SupportsExternalization();
}
- Isolate* isolate;
// RO_SPACE strings cannot be externalized.
- if (!GetIsolateFromWritableObject(*this, &isolate)) {
+ if (IsReadOnlyHeapObject(*this)) {
return false;
}
@@ -290,6 +289,7 @@ bool String::SupportsExternalization() {
DCHECK_LE(ExternalString::kUncachedSize, this->Size());
#endif
+ Isolate* isolate = GetIsolateFromWritableObject(*this);
return !isolate->heap()->IsInGCPostProcessing();
}
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 74fc8fa763..1a826eee3b 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -79,7 +79,7 @@ class StringShape {
// ordered sequence of zero or more 16-bit unsigned integer values.
//
// All string values have a length field.
-class String : public Name {
+class String : public TorqueGeneratedString<String, Name> {
public:
enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
@@ -152,21 +152,18 @@ class String : public Name {
template <typename Char>
inline const Char* GetChars(const DisallowHeapAllocation& no_gc);
- // Get and set the length of the string.
- inline int length() const;
- inline void set_length(int value);
-
// Get and set the length of the string using acquire loads and release
// stores.
- inline int synchronized_length() const;
- inline void synchronized_set_length(int value);
+ DECL_SYNCHRONIZED_INT_ACCESSORS(length)
// Returns whether this string has only one-byte chars, i.e. all of them can
// be one-byte encoded. This might be the case even if the string is
// two-byte. Such strings may appear when the embedder prefers
// two-byte external representations even for one-byte data.
inline bool IsOneByteRepresentation() const;
+ inline bool IsOneByteRepresentation(Isolate* isolate) const;
inline bool IsTwoByteRepresentation() const;
+ inline bool IsTwoByteRepresentation(Isolate* isolate) const;
// Cons and slices have an encoding flag that may not represent the actual
// encoding of the underlying string. This is taken into account here.
@@ -320,8 +317,6 @@ class String : public Name {
static Handle<String> Trim(Isolate* isolate, Handle<String> string,
TrimMode mode);
- DECL_CAST(String)
-
V8_EXPORT_PRIVATE void PrintOn(FILE* out);
// For use during stack traces. Performs rudimentary sanity check.
@@ -338,9 +333,6 @@ class String : public Name {
inline bool IsFlat();
- DEFINE_FIELD_OFFSET_CONSTANTS(Name::kHeaderSize,
- TORQUE_GENERATED_STRING_FIELDS)
-
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
@@ -453,7 +445,7 @@ class String : public Name {
// Compute and set the hash code.
V8_EXPORT_PRIVATE uint32_t ComputeAndSetHash();
- OBJECT_CONSTRUCTORS(String, Name);
+ TQ_OBJECT_CONSTRUCTORS(String)
};
// clang-format off
@@ -477,30 +469,29 @@ class SubStringRange {
};
// The SeqString abstract class captures sequential string values.
-class SeqString : public String {
+class SeqString : public TorqueGeneratedSeqString<SeqString, String> {
public:
- DECL_CAST(SeqString)
-
// Truncate the string in-place if possible and return the result.
// In case of new_length == 0, the empty string is returned without
// truncating the original string.
V8_WARN_UNUSED_RESULT static Handle<String> Truncate(Handle<SeqString> string,
int new_length);
- OBJECT_CONSTRUCTORS(SeqString, String);
+ TQ_OBJECT_CONSTRUCTORS(SeqString)
};
-class InternalizedString : public String {
+class InternalizedString
+ : public TorqueGeneratedInternalizedString<InternalizedString, String> {
public:
- DECL_CAST(InternalizedString)
// TODO(neis): Possibly move some stuff from String here.
- OBJECT_CONSTRUCTORS(InternalizedString, String);
+ TQ_OBJECT_CONSTRUCTORS(InternalizedString)
};
// The OneByteString class captures sequential one-byte string objects.
// Each character in the OneByteString is an one-byte character.
-class SeqOneByteString : public SeqString {
+class SeqOneByteString
+ : public TorqueGeneratedSeqOneByteString<SeqOneByteString, SeqString> {
public:
static const bool kHasOneByteEncoding = true;
using Char = uint8_t;
@@ -518,8 +509,6 @@ class SeqOneByteString : public SeqString {
// is deterministic.
void clear_padding();
- DECL_CAST(SeqOneByteString)
-
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of an OneByteString
// instance.
@@ -537,12 +526,13 @@ class SeqOneByteString : public SeqString {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(SeqOneByteString, SeqString);
+ TQ_OBJECT_CONSTRUCTORS(SeqOneByteString)
};
// The TwoByteString class captures sequential unicode string objects.
// Each character in the TwoByteString is a two-byte uint16_t.
-class SeqTwoByteString : public SeqString {
+class SeqTwoByteString
+ : public TorqueGeneratedSeqTwoByteString<SeqTwoByteString, SeqString> {
public:
static const bool kHasOneByteEncoding = false;
using Char = uint16_t;
@@ -560,8 +550,6 @@ class SeqTwoByteString : public SeqString {
// is deterministic.
void clear_padding();
- DECL_CAST(SeqTwoByteString)
-
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of a TwoByteString
// instance.
@@ -580,7 +568,7 @@ class SeqTwoByteString : public SeqString {
class BodyDescriptor;
- OBJECT_CONSTRUCTORS(SeqTwoByteString, SeqString);
+ TQ_OBJECT_CONSTRUCTORS(SeqTwoByteString)
};
// The ConsString class describes string values built by using the
@@ -591,32 +579,19 @@ class SeqTwoByteString : public SeqString {
// are non-ConsString string values. The string value represented by
// a ConsString can be obtained by concatenating the leaf string
// values in a left-to-right depth-first traversal of the tree.
-class ConsString : public String {
+class ConsString : public TorqueGeneratedConsString<ConsString, String> {
public:
- // First string of the cons cell.
- inline String first();
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
inline Object unchecked_first();
- inline void set_first(Isolate* isolate, String first,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- // Second string of the cons cell.
- inline String second();
// Doesn't check that the result is a string, even in debug mode. This is
// useful during GC where the mark bits confuse the checks.
inline Object unchecked_second();
- inline void set_second(Isolate* isolate, String second,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t Get(int index);
- DECL_CAST(ConsString)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
- TORQUE_GENERATED_CONS_STRING_FIELDS)
-
// Minimum length for a cons string.
static const int kMinLength = 13;
@@ -624,7 +599,7 @@ class ConsString : public String {
DECL_VERIFIER(ConsString)
- OBJECT_CONSTRUCTORS(ConsString, String);
+ TQ_OBJECT_CONSTRUCTORS(ConsString)
};
// The ThinString class describes string objects that are just references
@@ -634,25 +609,18 @@ class ConsString : public String {
// internalized version (which is allocated as a new object).
// In terms of memory layout and most algorithms operating on strings,
// ThinStrings can be thought of as "one-part cons strings".
-class ThinString : public String {
+class ThinString : public TorqueGeneratedThinString<ThinString, String> {
public:
- // Actual string that this ThinString refers to.
- inline String actual() const;
inline HeapObject unchecked_actual() const;
- inline void set_actual(String s,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline HeapObject unchecked_actual(Isolate* isolate) const;
V8_EXPORT_PRIVATE uint16_t Get(int index);
- DECL_CAST(ThinString)
DECL_VERIFIER(ThinString)
- DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
- TORQUE_GENERATED_THIN_STRING_FIELDS)
-
using BodyDescriptor = FixedBodyDescriptor<kActualOffset, kSize, kSize>;
- OBJECT_CONSTRUCTORS(ThinString, String);
+ TQ_OBJECT_CONSTRUCTORS(ThinString)
};
// The Sliced String class describes strings that are substrings of another
@@ -667,22 +635,14 @@ class ThinString : public String {
// - handling externalized parent strings
// - external strings as parent
// - truncating sliced string to enable otherwise unneeded parent to be GC'ed.
-class SlicedString : public String {
+class SlicedString : public TorqueGeneratedSlicedString<SlicedString, String> {
public:
- inline String parent();
- inline void set_parent(Isolate* isolate, String parent,
+ inline void set_parent(String parent,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline int offset() const;
- inline void set_offset(int offset);
-
+ DECL_INT_ACCESSORS(offset)
// Dispatched behavior.
V8_EXPORT_PRIVATE uint16_t Get(int index);
- DECL_CAST(SlicedString)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
- TORQUE_GENERATED_SLICED_STRING_FIELDS)
-
// Minimum length for a sliced string.
static const int kMinLength = 13;
@@ -690,7 +650,7 @@ class SlicedString : public String {
DECL_VERIFIER(SlicedString)
- OBJECT_CONSTRUCTORS(SlicedString, String);
+ TQ_OBJECT_CONSTRUCTORS(SlicedString)
};
// The ExternalString class describes string values that are backed by
@@ -705,6 +665,7 @@ class SlicedString : public String {
class ExternalString : public String {
public:
DECL_CAST(ExternalString)
+ DECL_VERIFIER(ExternalString)
DEFINE_FIELD_OFFSET_CONSTANTS(String::kHeaderSize,
TORQUE_GENERATED_EXTERNAL_STRING_FIELDS)
diff --git a/deps/v8/src/objects/synthetic-module.cc b/deps/v8/src/objects/synthetic-module.cc
new file mode 100644
index 0000000000..0cca30a37b
--- /dev/null
+++ b/deps/v8/src/objects/synthetic-module.cc
@@ -0,0 +1,108 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/synthetic-module.h"
+
+#include "src/api/api-inl.h"
+#include "src/builtins/accessors.h"
+#include "src/objects/js-generator-inl.h"
+#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/shared-function-info.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+// Implements SetSyntheticModuleBinding:
+// https://heycam.github.io/webidl/#setsyntheticmoduleexport
+void SyntheticModule::SetExport(Isolate* isolate,
+ Handle<SyntheticModule> module,
+ Handle<String> export_name,
+ Handle<Object> export_value) {
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ Handle<Object> export_object(exports->Lookup(export_name), isolate);
+ CHECK(export_object->IsCell());
+ Handle<Cell> export_cell(Handle<Cell>::cast(export_object));
+ // Spec step 2: Set the mutable binding of export_name to export_value
+ export_cell->set_value(*export_value);
+}
+
+// Implements Synthetic Module Record's ResolveExport concrete method:
+// https://heycam.github.io/webidl/#smr-resolveexport
+MaybeHandle<Cell> SyntheticModule::ResolveExport(
+ Isolate* isolate, Handle<SyntheticModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve) {
+ Handle<Object> object(module->exports().Lookup(export_name), isolate);
+ if (object->IsCell()) {
+ return Handle<Cell>::cast(object);
+ }
+
+ if (must_resolve) {
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(MessageTemplate::kUnresolvableExport,
+ module_specifier, export_name),
+ &loc);
+ }
+
+ return MaybeHandle<Cell>();
+}
+
+// Implements Synthetic Module Record's Instantiate concrete method :
+// https://heycam.github.io/webidl/#smr-instantiate
+bool SyntheticModule::PrepareInstantiate(Isolate* isolate,
+ Handle<SyntheticModule> module,
+ v8::Local<v8::Context> context,
+ v8::Module::ResolveCallback callback) {
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ Handle<FixedArray> export_names(module->export_names(), isolate);
+ // Spec step 7: For each export_name in module->export_names...
+ for (int i = 0, n = export_names->length(); i < n; ++i) {
+ // Spec step 7.1: Create a new mutable binding for export_name.
+ // Spec step 7.2: Initialize the new mutable binding to undefined.
+ Handle<Cell> cell =
+ isolate->factory()->NewCell(isolate->factory()->undefined_value());
+ Handle<String> name(String::cast(export_names->get(i)), isolate);
+ CHECK(exports->Lookup(name).IsTheHole(isolate));
+ exports = ObjectHashTable::Put(exports, name, cell);
+ }
+ module->set_exports(*exports);
+ return true;
+}
+
+// Second step of module instantiation. No real work to do for SyntheticModule
+// as there are no imports or indirect exports to resolve;
+// just update status.
+bool SyntheticModule::FinishInstantiate(Isolate* isolate,
+ Handle<SyntheticModule> module) {
+ module->SetStatus(kInstantiated);
+ return true;
+}
+
+// Implements Synthetic Module Record's Evaluate concrete method:
+// https://heycam.github.io/webidl/#smr-evaluate
+MaybeHandle<Object> SyntheticModule::Evaluate(Isolate* isolate,
+ Handle<SyntheticModule> module) {
+ module->SetStatus(kEvaluating);
+
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps =
+ FUNCTION_CAST<v8::Module::SyntheticModuleEvaluationSteps>(
+ module->evaluation_steps().foreign_address());
+ v8::Local<v8::Value> result;
+ if (!evaluation_steps(
+ Utils::ToLocal(Handle<Context>::cast(isolate->native_context())),
+ Utils::ToLocal(Handle<Module>::cast(module)))
+ .ToLocal(&result)) {
+ isolate->PromoteScheduledException();
+ module->RecordError(isolate);
+ return MaybeHandle<Object>();
+ }
+
+ module->SetStatus(kEvaluated);
+ return Utils::OpenHandle(*result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/synthetic-module.h b/deps/v8/src/objects/synthetic-module.h
new file mode 100644
index 0000000000..9f91f2ce4a
--- /dev/null
+++ b/deps/v8/src/objects/synthetic-module.h
@@ -0,0 +1,69 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SYNTHETIC_MODULE_H_
+#define V8_OBJECTS_SYNTHETIC_MODULE_H_
+
+#include "src/objects/module.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// The runtime representation of a Synthetic Module Record, a module that can be
+// instantiated by an embedder with embedder-defined exports and evaluation
+// steps.
+// https://heycam.github.io/webidl/#synthetic-module-records
+class SyntheticModule : public Module {
+ public:
+ NEVER_READ_ONLY_SPACE
+ DECL_CAST(SyntheticModule)
+ DECL_VERIFIER(SyntheticModule)
+ DECL_PRINTER(SyntheticModule)
+
+ // The list of all names exported by this module
+ DECL_ACCESSORS(name, String)
+ DECL_ACCESSORS(export_names, FixedArray)
+ DECL_ACCESSORS(evaluation_steps, Foreign)
+
+ static void SetExport(Isolate* isolate, Handle<SyntheticModule> module,
+ Handle<String> export_name,
+ Handle<Object> export_value);
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(Module::kHeaderSize,
+ TORQUE_GENERATED_SYNTHETIC_MODULE_FIELDS)
+
+ using BodyDescriptor = SubclassBodyDescriptor<
+ Module::BodyDescriptor,
+ FixedBodyDescriptor<kExportNamesOffset, kSize, kSize>>;
+
+ private:
+ friend class Module;
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExport(
+ Isolate* isolate, Handle<SyntheticModule> module,
+ Handle<String> module_specifier, Handle<String> export_name,
+ MessageLocation loc, bool must_resolve);
+
+ static V8_WARN_UNUSED_RESULT bool PrepareInstantiate(
+ Isolate* isolate, Handle<SyntheticModule> module,
+ v8::Local<v8::Context> context, v8::Module::ResolveCallback callback);
+ static V8_WARN_UNUSED_RESULT bool FinishInstantiate(
+ Isolate* isolate, Handle<SyntheticModule> module);
+
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
+ Isolate* isolate, Handle<SyntheticModule> module);
+
+ OBJECT_CONSTRUCTORS(SyntheticModule, Module);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SYNTHETIC_MODULE_H_
diff --git a/deps/v8/src/objects/tagged-field-inl.h b/deps/v8/src/objects/tagged-field-inl.h
new file mode 100644
index 0000000000..3cce536a14
--- /dev/null
+++ b/deps/v8/src/objects/tagged-field-inl.h
@@ -0,0 +1,162 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_FIELD_INL_H_
+#define V8_OBJECTS_TAGGED_FIELD_INL_H_
+
+#include "src/objects/tagged-field.h"
+
+#include "src/common/ptr-compr-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+template <typename T, int kFieldOffset>
+Address TaggedField<T, kFieldOffset>::address(HeapObject host, int offset) {
+ return host.address() + kFieldOffset + offset;
+}
+
+// static
+template <typename T, int kFieldOffset>
+Tagged_t* TaggedField<T, kFieldOffset>::location(HeapObject host, int offset) {
+ return reinterpret_cast<Tagged_t*>(address(host, offset));
+}
+
+// static
+template <typename T, int kFieldOffset>
+template <typename TOnHeapAddress>
+Address TaggedField<T, kFieldOffset>::tagged_to_full(
+ TOnHeapAddress on_heap_addr, Tagged_t tagged_value) {
+#ifdef V8_COMPRESS_POINTERS
+ if (kIsSmi) {
+ return DecompressTaggedSigned(tagged_value);
+ } else if (kIsHeapObject) {
+ return DecompressTaggedPointer(on_heap_addr, tagged_value);
+ } else {
+ return DecompressTaggedAny(on_heap_addr, tagged_value);
+ }
+#else
+ return tagged_value;
+#endif
+}
+
+// static
+template <typename T, int kFieldOffset>
+Tagged_t TaggedField<T, kFieldOffset>::full_to_tagged(Address value) {
+#ifdef V8_COMPRESS_POINTERS
+ return CompressTagged(value);
+#else
+ return value;
+#endif
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::load(HeapObject host, int offset) {
+ Tagged_t value = *location(host, offset);
+ return T(tagged_to_full(host.ptr(), value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::load(Isolate* isolate, HeapObject host,
+ int offset) {
+ Tagged_t value = *location(host, offset);
+ return T(tagged_to_full(isolate, value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
+#ifdef V8_CONCURRENT_MARKING
+ Relaxed_Store(host, value);
+#else
+ *location(host) = full_to_tagged(value.ptr());
+#endif
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
+#ifdef V8_CONCURRENT_MARKING
+ Relaxed_Store(host, offset, value);
+#else
+ *location(host, offset) = full_to_tagged(value.ptr());
+#endif
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Relaxed_Load(HeapObject host, int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
+ return T(tagged_to_full(host.ptr(), value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Relaxed_Load(Isolate* isolate, HeapObject host,
+ int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
+ return T(tagged_to_full(isolate, value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, T value) {
+ AsAtomicTagged::Relaxed_Store(location(host), full_to_tagged(value.ptr()));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, int offset,
+ T value) {
+ AsAtomicTagged::Relaxed_Store(location(host, offset),
+ full_to_tagged(value.ptr()));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Acquire_Load(HeapObject host, int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
+ return T(tagged_to_full(host.ptr(), value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Acquire_Load(Isolate* isolate, HeapObject host,
+ int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
+ return T(tagged_to_full(isolate, value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, T value) {
+ AsAtomicTagged::Release_Store(location(host), full_to_tagged(value.ptr()));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, int offset,
+ T value) {
+ AsAtomicTagged::Release_Store(location(host, offset),
+ full_to_tagged(value.ptr()));
+}
+
+// static
+template <typename T, int kFieldOffset>
+Tagged_t TaggedField<T, kFieldOffset>::Release_CompareAndSwap(HeapObject host,
+ T old, T value) {
+ Tagged_t old_value = full_to_tagged(old.ptr());
+ Tagged_t new_value = full_to_tagged(value.ptr());
+ Tagged_t result = AsAtomicTagged::Release_CompareAndSwap(
+ location(host), old_value, new_value);
+ return result;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_FIELD_INL_H_
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
new file mode 100644
index 0000000000..fbaaee5930
--- /dev/null
+++ b/deps/v8/src/objects/tagged-field.h
@@ -0,0 +1,76 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_FIELD_H_
+#define V8_OBJECTS_TAGGED_FIELD_H_
+
+#include "src/common/globals.h"
+
+#include "src/objects/objects.h"
+#include "src/objects/tagged-value.h"
+
+namespace v8 {
+namespace internal {
+
+// This helper static class represents a tagged field of type T at offset
+// kFieldOffset inside some host HeapObject.
+// For full-pointer mode this type adds no overhead but when pointer
+// compression is enabled such class allows us to use proper decompression
+// function depending on the field type.
+template <typename T, int kFieldOffset = 0>
+class TaggedField : public AllStatic {
+ public:
+ static_assert(std::is_base_of<Object, T>::value ||
+ std::is_same<MapWord, T>::value ||
+ std::is_same<MaybeObject, T>::value,
+ "T must be strong or weak tagged type or MapWord");
+
+ // True for Smi fields.
+ static constexpr bool kIsSmi = std::is_base_of<Smi, T>::value;
+
+ // True for HeapObject and MapWord fields. The latter may look like a Smi
+ // if it contains forwarding pointer but still requires tagged pointer
+ // decompression.
+ static constexpr bool kIsHeapObject =
+ std::is_base_of<HeapObject, T>::value || std::is_same<MapWord, T>::value;
+
+ static inline Address address(HeapObject host, int offset = 0);
+
+ static inline T load(HeapObject host, int offset = 0);
+ static inline T load(Isolate* isolate, HeapObject host, int offset = 0);
+
+ static inline void store(HeapObject host, T value);
+ static inline void store(HeapObject host, int offset, T value);
+
+ static inline T Relaxed_Load(HeapObject host, int offset = 0);
+ static inline T Relaxed_Load(Isolate* isolate, HeapObject host,
+ int offset = 0);
+
+ static inline void Relaxed_Store(HeapObject host, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
+
+ static inline T Acquire_Load(HeapObject host, int offset = 0);
+ static inline T Acquire_Load(Isolate* isolate, HeapObject host,
+ int offset = 0);
+
+ static inline void Release_Store(HeapObject host, T value);
+ static inline void Release_Store(HeapObject host, int offset, T value);
+
+ static inline Tagged_t Release_CompareAndSwap(HeapObject host, T old,
+ T value);
+
+ private:
+ static inline Tagged_t* location(HeapObject host, int offset = 0);
+
+ template <typename TOnHeapAddress>
+ static inline Address tagged_to_full(TOnHeapAddress on_heap_addr,
+ Tagged_t tagged_value);
+
+ static inline Tagged_t full_to_tagged(Address value);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_FIELD_H_
diff --git a/deps/v8/src/objects/tagged-impl-inl.h b/deps/v8/src/objects/tagged-impl-inl.h
index f735a241a8..909f65a959 100644
--- a/deps/v8/src/objects/tagged-impl-inl.h
+++ b/deps/v8/src/objects/tagged-impl-inl.h
@@ -52,11 +52,11 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
template <HeapObjectReferenceType kRefType, typename StorageType>
bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
- ROOT_PARAM, HeapObject* result) const {
+ Isolate* isolate, HeapObject* result) const {
if (kIsFull) return GetHeapObject(result);
// Implementation for compressed pointers.
if (!IsStrongOrWeak()) return false;
- *result = GetHeapObject(ROOT_VALUE);
+ *result = GetHeapObject(isolate);
return true;
}
@@ -79,14 +79,14 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
template <HeapObjectReferenceType kRefType, typename StorageType>
bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
- ROOT_PARAM, HeapObject* result,
+ Isolate* isolate, HeapObject* result,
HeapObjectReferenceType* reference_type) const {
if (kIsFull) return GetHeapObject(result, reference_type);
// Implementation for compressed pointers.
if (!IsStrongOrWeak()) return false;
*reference_type = IsWeakOrCleared() ? HeapObjectReferenceType::WEAK
: HeapObjectReferenceType::STRONG;
- *result = GetHeapObject(ROOT_VALUE);
+ *result = GetHeapObject(isolate);
return true;
}
@@ -107,12 +107,12 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
template <HeapObjectReferenceType kRefType, typename StorageType>
bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
- ROOT_PARAM, HeapObject* result) const {
+ Isolate* isolate, HeapObject* result) const {
if (kIsFull) return GetHeapObjectIfStrong(result);
// Implementation for compressed pointers.
if (IsStrong()) {
- *result =
- HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ *result = HeapObject::cast(
+ Object(DecompressTaggedPointer(isolate, static_cast<Tagged_t>(ptr_))));
return true;
}
return false;
@@ -132,11 +132,12 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong()
template <HeapObjectReferenceType kRefType, typename StorageType>
HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong(
- ROOT_PARAM) const {
+ Isolate* isolate) const {
if (kIsFull) return GetHeapObjectAssumeStrong();
// Implementation for compressed pointers.
DCHECK(IsStrong());
- return HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ return HeapObject::cast(
+ Object(DecompressTaggedPointer(isolate, static_cast<Tagged_t>(ptr_))));
}
//
@@ -161,12 +162,12 @@ bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfWeak(
template <HeapObjectReferenceType kRefType, typename StorageType>
bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfWeak(
- ROOT_PARAM, HeapObject* result) const {
+ Isolate* isolate, HeapObject* result) const {
if (kIsFull) return GetHeapObjectIfWeak(result);
// Implementation for compressed pointers.
if (kCanBeWeak) {
if (IsWeak()) {
- *result = GetHeapObject(ROOT_VALUE);
+ *result = GetHeapObject(isolate);
return true;
}
return false;
@@ -189,11 +190,11 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeWeak() const {
template <HeapObjectReferenceType kRefType, typename StorageType>
HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeWeak(
- ROOT_PARAM) const {
+ Isolate* isolate) const {
if (kIsFull) return GetHeapObjectAssumeWeak();
// Implementation for compressed pointers.
DCHECK(IsWeak());
- return GetHeapObject(ROOT_VALUE);
+ return GetHeapObject(isolate);
}
//
@@ -214,17 +215,19 @@ HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject() const {
}
template <HeapObjectReferenceType kRefType, typename StorageType>
-HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(ROOT_PARAM) const {
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ Isolate* isolate) const {
if (kIsFull) return GetHeapObject();
// Implementation for compressed pointers.
DCHECK(!IsSmi());
if (kCanBeWeak) {
DCHECK(!IsCleared());
- return HeapObject::cast(Object(
- DecompressTaggedPointer(ROOT_VALUE, ptr_ & ~kWeakHeapObjectMask)));
+ return HeapObject::cast(Object(DecompressTaggedPointer(
+ isolate, static_cast<Tagged_t>(ptr_) & ~kWeakHeapObjectMask)));
} else {
DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
- return HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ return HeapObject::cast(
+ Object(DecompressTaggedPointer(isolate, static_cast<Tagged_t>(ptr_))));
}
}
@@ -242,13 +245,14 @@ Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi() const {
}
template <HeapObjectReferenceType kRefType, typename StorageType>
-Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi(ROOT_PARAM) const {
+Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi(
+ Isolate* isolate) const {
if (kIsFull) return GetHeapObjectOrSmi();
// Implementation for compressed pointers.
if (IsSmi()) {
- return Object(DecompressTaggedSigned(ptr_));
+ return Object(DecompressTaggedSigned(static_cast<Tagged_t>(ptr_)));
}
- return GetHeapObject(ROOT_VALUE);
+ return GetHeapObject(isolate);
}
} // namespace internal
diff --git a/deps/v8/src/objects/tagged-impl.h b/deps/v8/src/objects/tagged-impl.h
index e3d982565f..111eabae2f 100644
--- a/deps/v8/src/objects/tagged-impl.h
+++ b/deps/v8/src/objects/tagged-impl.h
@@ -40,16 +40,24 @@ class TaggedImpl {
// Make clang on Linux catch what MSVC complains about on Windows:
operator bool() const = delete;
- constexpr bool operator==(TaggedImpl other) const {
- return ptr_ == other.ptr_;
+ template <typename U>
+ constexpr bool operator==(TaggedImpl<kRefType, U> other) const {
+ static_assert(
+ std::is_same<U, Address>::value || std::is_same<U, Tagged_t>::value,
+ "U must be either Address or Tagged_t");
+ return static_cast<Tagged_t>(ptr_) == static_cast<Tagged_t>(other.ptr());
}
- constexpr bool operator!=(TaggedImpl other) const {
- return ptr_ != other.ptr_;
+ template <typename U>
+ constexpr bool operator!=(TaggedImpl<kRefType, U> other) const {
+ static_assert(
+ std::is_same<U, Address>::value || std::is_same<U, Tagged_t>::value,
+ "U must be either Address or Tagged_t");
+ return static_cast<Tagged_t>(ptr_) != static_cast<Tagged_t>(other.ptr());
}
// For using in std::set and std::map.
constexpr bool operator<(TaggedImpl other) const {
- return ptr_ < other.ptr();
+ return static_cast<Tagged_t>(ptr_) < static_cast<Tagged_t>(other.ptr());
}
constexpr StorageType ptr() const { return ptr_; }
@@ -99,50 +107,51 @@ class TaggedImpl {
//
// The following set of methods get HeapObject out of the tagged value
- // which may involve decompression in which case the ROOT_PARAM is required.
+ // which may involve decompression in which case the isolate root is required.
// If the pointer compression is not enabled then the variants with
- // ROOT_PARAM will be exactly the same as non-ROOT_PARAM ones.
+ // isolate parameter will be exactly the same as the ones witout isolate
+ // parameter.
//
// If this tagged value is a strong pointer to a HeapObject, returns true and
// sets *result. Otherwise returns false.
inline bool GetHeapObjectIfStrong(HeapObject* result) const;
- inline bool GetHeapObjectIfStrong(ROOT_PARAM, HeapObject* result) const;
+ inline bool GetHeapObjectIfStrong(Isolate* isolate, HeapObject* result) const;
// DCHECKs that this tagged value is a strong pointer to a HeapObject and
// returns the HeapObject.
inline HeapObject GetHeapObjectAssumeStrong() const;
- inline HeapObject GetHeapObjectAssumeStrong(ROOT_PARAM) const;
+ inline HeapObject GetHeapObjectAssumeStrong(Isolate* isolate) const;
// If this tagged value is a weak pointer to a HeapObject, returns true and
// sets *result. Otherwise returns false.
inline bool GetHeapObjectIfWeak(HeapObject* result) const;
- inline bool GetHeapObjectIfWeak(ROOT_PARAM, HeapObject* result) const;
+ inline bool GetHeapObjectIfWeak(Isolate* isolate, HeapObject* result) const;
// DCHECKs that this tagged value is a weak pointer to a HeapObject and
// returns the HeapObject.
inline HeapObject GetHeapObjectAssumeWeak() const;
- inline HeapObject GetHeapObjectAssumeWeak(ROOT_PARAM) const;
+ inline HeapObject GetHeapObjectAssumeWeak(Isolate* isolate) const;
// If this tagged value is a strong or weak pointer to a HeapObject, returns
// true and sets *result. Otherwise returns false.
inline bool GetHeapObject(HeapObject* result) const;
- inline bool GetHeapObject(ROOT_PARAM, HeapObject* result) const;
+ inline bool GetHeapObject(Isolate* isolate, HeapObject* result) const;
inline bool GetHeapObject(HeapObject* result,
HeapObjectReferenceType* reference_type) const;
- inline bool GetHeapObject(ROOT_PARAM, HeapObject* result,
+ inline bool GetHeapObject(Isolate* isolate, HeapObject* result,
HeapObjectReferenceType* reference_type) const;
// DCHECKs that this tagged value is a strong or a weak pointer to a
// HeapObject and returns the HeapObject.
inline HeapObject GetHeapObject() const;
- inline HeapObject GetHeapObject(ROOT_PARAM) const;
+ inline HeapObject GetHeapObject(Isolate* isolate) const;
// DCHECKs that this tagged value is a strong or a weak pointer to a
// HeapObject or a Smi and returns the HeapObject or Smi.
inline Object GetHeapObjectOrSmi() const;
- inline Object GetHeapObjectOrSmi(ROOT_PARAM) const;
+ inline Object GetHeapObjectOrSmi(Isolate* isolate) const;
// Cast operation is available only for full non-weak tagged values.
template <typename T>
diff --git a/deps/v8/src/objects/tagged-value-inl.h b/deps/v8/src/objects/tagged-value-inl.h
index 5eb0e20947..f409a4006b 100644
--- a/deps/v8/src/objects/tagged-value-inl.h
+++ b/deps/v8/src/objects/tagged-value-inl.h
@@ -9,7 +9,8 @@
#include "include/v8-internal.h"
#include "src/common/ptr-compr-inl.h"
-#include "src/objects/heap-object-inl.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/objects.h"
#include "src/objects/oddball.h"
#include "src/objects/tagged-impl-inl.h"
#include "src/roots/roots-inl.h"
@@ -17,17 +18,37 @@
namespace v8 {
namespace internal {
-Object StrongTaggedValue::ToObject(WITH_ROOT_PARAM(StrongTaggedValue object)) {
+inline StrongTaggedValue::StrongTaggedValue(Object o)
+ :
#ifdef V8_COMPRESS_POINTERS
- return Object(DecompressTaggedAny(ROOT_VALUE, object.ptr()));
+ TaggedImpl(CompressTagged(o.ptr()))
+#else
+ TaggedImpl(o.ptr())
+#endif
+{
+}
+
+Object StrongTaggedValue::ToObject(Isolate* isolate, StrongTaggedValue object) {
+#ifdef V8_COMPRESS_POINTERS
+ return Object(DecompressTaggedAny(isolate, object.ptr()));
#else
return Object(object.ptr());
#endif
}
-MaybeObject TaggedValue::ToMaybeObject(WITH_ROOT_PARAM(TaggedValue object)) {
+inline TaggedValue::TaggedValue(MaybeObject o)
+ :
+#ifdef V8_COMPRESS_POINTERS
+ TaggedImpl(CompressTagged(o.ptr()))
+#else
+ TaggedImpl(o.ptr())
+#endif
+{
+}
+
+MaybeObject TaggedValue::ToMaybeObject(Isolate* isolate, TaggedValue object) {
#ifdef V8_COMPRESS_POINTERS
- return MaybeObject(DecompressTaggedAny(ROOT_VALUE, object.ptr()));
+ return MaybeObject(DecompressTaggedAny(isolate, object.ptr()));
#else
return MaybeObject(object.ptr());
#endif
diff --git a/deps/v8/src/objects/tagged-value.h b/deps/v8/src/objects/tagged-value.h
index bb7609f7c3..7b6192204a 100644
--- a/deps/v8/src/objects/tagged-value.h
+++ b/deps/v8/src/objects/tagged-value.h
@@ -21,8 +21,9 @@ class StrongTaggedValue
public:
constexpr StrongTaggedValue() : TaggedImpl() {}
explicit constexpr StrongTaggedValue(Tagged_t ptr) : TaggedImpl(ptr) {}
+ explicit StrongTaggedValue(Object o);
- inline static Object ToObject(WITH_ROOT_PARAM(StrongTaggedValue object));
+ inline static Object ToObject(Isolate* isolate, StrongTaggedValue object);
};
// Almost same as MaybeObject but this one deals with in-heap and potentially
@@ -32,8 +33,9 @@ class TaggedValue : public TaggedImpl<HeapObjectReferenceType::WEAK, Tagged_t> {
public:
constexpr TaggedValue() : TaggedImpl() {}
explicit constexpr TaggedValue(Tagged_t ptr) : TaggedImpl(ptr) {}
+ explicit TaggedValue(MaybeObject o);
- inline static MaybeObject ToMaybeObject(WITH_ROOT_PARAM(TaggedValue object));
+ inline static MaybeObject ToMaybeObject(Isolate* isolate, TaggedValue object);
};
} // namespace internal
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 2f34a48a2a..d5b6293afe 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -16,11 +16,9 @@ namespace internal {
// static
Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
- Isolate* isolate, Handle<Context> native_context,
+ Isolate* isolate, Handle<NativeContext> native_context,
Handle<TemplateObjectDescription> description,
Handle<SharedFunctionInfo> shared_info, int slot_id) {
- DCHECK(native_context->IsNativeContext());
-
// Check the template weakmap to see if the template object already exists.
Handle<EphemeronHashTable> template_weakmap =
native_context->template_weakmap().IsUndefined(isolate)
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index 220f9dab1e..20ad742338 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -47,7 +47,7 @@ class TemplateObjectDescription final : public Struct {
DECL_CAST(TemplateObjectDescription)
static Handle<JSArray> GetTemplateObject(
- Isolate* isolate, Handle<Context> native_context,
+ Isolate* isolate, Handle<NativeContext> native_context,
Handle<TemplateObjectDescription> description,
Handle<SharedFunctionInfo> shared_info, int slot_id);
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index a1a098ffc0..d344174a0c 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -55,7 +55,7 @@ SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- HeapObject extra = function_template_info->rare_data();
+ HeapObject extra = function_template_info->rare_data(isolate);
if (extra.IsUndefined(isolate)) {
return AllocateFunctionTemplateRareData(isolate, function_template_info);
} else {
@@ -64,9 +64,9 @@ FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
}
#define RARE_ACCESSORS(Name, CamelName, Type) \
- Type FunctionTemplateInfo::Get##CamelName() { \
- HeapObject extra = rare_data(); \
- HeapObject undefined = GetReadOnlyRoots().undefined_value(); \
+ DEF_GETTER(FunctionTemplateInfo, Get##CamelName, Type) { \
+ HeapObject extra = rare_data(isolate); \
+ HeapObject undefined = GetReadOnlyRoots(isolate).undefined_value(); \
return extra == undefined ? undefined \
: FunctionTemplateRareData::cast(extra).Name(); \
} \
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index 66cd038114..99142266ed 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -86,7 +86,7 @@ class FunctionTemplateInfo : public TemplateInfo {
DECL_ACCESSORS(rare_data, HeapObject)
#define DECL_RARE_ACCESSORS(Name, CamelName, Type) \
- inline Type Get##CamelName(); \
+ DECL_GETTER(Get##CamelName, Type) \
static inline void Set##CamelName( \
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info, \
Handle<Type> Name);
diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index 893de78dc4..048774f49b 100644
--- a/deps/v8/src/objects/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -102,9 +102,8 @@ PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) {
return descriptors.GetDetails(descriptor);
}
-// static
PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
- return transition.GetLastDescriptorDetails();
+ return transition.GetLastDescriptorDetails(isolate_);
}
// static
@@ -195,13 +194,13 @@ void TransitionsAccessor::Reload() {
}
void TransitionsAccessor::Initialize() {
- raw_transitions_ = map_.raw_transitions();
+ raw_transitions_ = map_.raw_transitions(isolate_);
HeapObject heap_object;
if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
encoding_ = kUninitialized;
} else if (raw_transitions_->IsWeak()) {
encoding_ = kWeakRef;
- } else if (raw_transitions_->GetHeapObjectIfStrong(&heap_object)) {
+ } else if (raw_transitions_->GetHeapObjectIfStrong(isolate_, &heap_object)) {
if (heap_object.IsTransitionArray()) {
encoding_ = kFullTransitionArray;
} else if (heap_object.IsPrototypeInfo()) {
diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc
index a2cd102aaf..843b790b7d 100644
--- a/deps/v8/src/objects/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -226,7 +226,7 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
PropertyAttributes attributes = name->IsPrivate() ? DONT_ENUM : NONE;
Map target = SearchTransition(*name, kData, attributes);
if (target.is_null()) return MaybeHandle<Map>();
- PropertyDetails details = target.GetLastDescriptorDetails();
+ PropertyDetails details = target.GetLastDescriptorDetails(isolate_);
DCHECK_EQ(attributes, details.attributes());
DCHECK_EQ(kData, details.kind());
if (requested_location == kFieldOnly && details.location() != kField) {
diff --git a/deps/v8/src/objects/transitions.h b/deps/v8/src/objects/transitions.h
index b4dadcc22a..f21e8cd54e 100644
--- a/deps/v8/src/objects/transitions.h
+++ b/deps/v8/src/objects/transitions.h
@@ -147,7 +147,7 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
friend class MarkCompactCollector; // For HasSimpleTransitionTo.
friend class TransitionArray;
- static inline PropertyDetails GetSimpleTargetDetails(Map transition);
+ inline PropertyDetails GetSimpleTargetDetails(Map transition);
static inline Name GetSimpleTransitionKey(Map transition);
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 331a12b157..5a72dd6532 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -22,6 +22,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
#include "src/snapshot/code-serializer.h"
@@ -65,9 +66,6 @@ static size_t BytesNeededForVarint(T value) {
return result;
}
-// Note that some additional tag values are defined in Blink's
-// Source/bindings/core/v8/serialization/SerializationTag.h, which must
-// not clash with values defined here.
enum class SerializationTag : uint8_t {
// version:uint32_t (if at beginning of data, sets version > 0)
kVersion = 0xFF,
@@ -161,6 +159,40 @@ enum class SerializationTag : uint8_t {
// A transferred WebAssembly.Memory object. maximumPages:int32_t, then by
// SharedArrayBuffer tag and its data.
kWasmMemoryTransfer = 'm',
+ // A list of (subtag: ErrorTag, [subtag dependent data]). See ErrorTag for
+ // details.
+ kError = 'r',
+
+ // The following tags are reserved because they were in use in Chromium before
+ // the kHostObject tag was introduced in format version 13, at
+ // v8 refs/heads/master@{#43466}
+ // chromium/src refs/heads/master@{#453568}
+ //
+ // They must not be reused without a version check to prevent old values from
+ // starting to deserialize incorrectly. For simplicity, it's recommended to
+ // avoid them altogether.
+ //
+ // This is the set of tags that existed in SerializationTag.h at that time and
+ // still exist at the time of this writing (i.e., excluding those that were
+ // removed on the Chromium side because there should be no real user data
+ // containing them).
+ //
+ // It might be possible to also free up other tags which were never persisted
+ // (e.g. because they were used only for transfer) in the future.
+ kLegacyReservedMessagePort = 'M',
+ kLegacyReservedBlob = 'b',
+ kLegacyReservedBlobIndex = 'i',
+ kLegacyReservedFile = 'f',
+ kLegacyReservedFileIndex = 'e',
+ kLegacyReservedDOMFileSystem = 'd',
+ kLegacyReservedFileList = 'l',
+ kLegacyReservedFileListIndex = 'L',
+ kLegacyReservedImageData = '#',
+ kLegacyReservedImageBitmap = 'g',
+ kLegacyReservedImageBitmapTransfer = 'G',
+ kLegacyReservedOffscreenCanvas = 'H',
+ kLegacyReservedCryptoKey = 'K',
+ kLegacyReservedRTCCertificate = 'k',
};
namespace {
@@ -184,6 +216,28 @@ enum class WasmEncodingTag : uint8_t {
kRawBytes = 'y',
};
+// Sub-tags only meaningful for error serialization.
+enum class ErrorTag : uint8_t {
+ // The error is a EvalError. No accompanying data.
+ kEvalErrorPrototype = 'E',
+ // The error is a RangeError. No accompanying data.
+ kRangeErrorPrototype = 'R',
+ // The error is a ReferenceError. No accompanying data.
+ kReferenceErrorPrototype = 'F',
+ // The error is a SyntaxError. No accompanying data.
+ kSyntaxErrorPrototype = 'S',
+ // The error is a TypeError. No accompanying data.
+ kTypeErrorPrototype = 'T',
+ // The error is a URIError. No accompanying data.
+ kUriErrorPrototype = 'U',
+ // Followed by message: string.
+ kMessage = 'm',
+ // Followed by stack: string.
+ kStack = 's',
+ // The end of this error information.
+ kEnd = '.',
+};
+
} // namespace
ValueSerializer::ValueSerializer(Isolate* isolate,
@@ -505,8 +559,9 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_DATE_TYPE:
WriteJSDate(JSDate::cast(*receiver));
return ThrowIfOutOfMemory();
- case JS_VALUE_TYPE:
- return WriteJSValue(Handle<JSValue>::cast(receiver));
+ case JS_PRIMITIVE_WRAPPER_TYPE:
+ return WriteJSPrimitiveWrapper(
+ Handle<JSPrimitiveWrapper>::cast(receiver));
case JS_REGEXP_TYPE:
WriteJSRegExp(JSRegExp::cast(*receiver));
return ThrowIfOutOfMemory();
@@ -519,6 +574,8 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
+ case JS_ERROR_TYPE:
+ return WriteJSError(Handle<JSObject>::cast(receiver));
case WASM_MODULE_TYPE: {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if (!FLAG_wasm_disable_structured_cloning || enabled_features.threads) {
@@ -720,7 +777,8 @@ void ValueSerializer::WriteJSDate(JSDate date) {
WriteDouble(date.value().Number());
}
-Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
+Maybe<bool> ValueSerializer::WriteJSPrimitiveWrapper(
+ Handle<JSPrimitiveWrapper> value) {
Object inner_value = value->value();
if (inner_value.IsTrue(isolate_)) {
WriteTag(SerializationTag::kTrueObject);
@@ -874,6 +932,60 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
return ThrowIfOutOfMemory();
}
+Maybe<bool> ValueSerializer::WriteJSError(Handle<JSObject> error) {
+ Handle<Object> stack;
+ PropertyDescriptor message_desc;
+ Maybe<bool> message_found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate_, error, isolate_->factory()->message_string(), &message_desc);
+ MAYBE_RETURN(message_found, Nothing<bool>());
+
+ WriteTag(SerializationTag::kError);
+
+ Handle<HeapObject> prototype;
+ if (!JSObject::GetPrototype(isolate_, error).ToHandle(&prototype)) {
+ return Nothing<bool>();
+ }
+
+ if (*prototype == isolate_->eval_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kEvalErrorPrototype));
+ } else if (*prototype == isolate_->range_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kRangeErrorPrototype));
+ } else if (*prototype == isolate_->reference_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kReferenceErrorPrototype));
+ } else if (*prototype == isolate_->syntax_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kSyntaxErrorPrototype));
+ } else if (*prototype == isolate_->type_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kTypeErrorPrototype));
+ } else if (*prototype == isolate_->uri_error_function()->prototype()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kUriErrorPrototype));
+ } else {
+ // The default prototype in the deserialization side is Error.prototype, so
+ // we don't have to do anything here.
+ }
+
+ if (message_found.FromJust() &&
+ PropertyDescriptor::IsDataDescriptor(&message_desc)) {
+ Handle<String> message;
+ if (!Object::ToString(isolate_, message_desc.value()).ToHandle(&message)) {
+ return Nothing<bool>();
+ }
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kMessage));
+ WriteString(message);
+ }
+
+ if (!Object::GetProperty(isolate_, error, isolate_->factory()->stack_string())
+ .ToHandle(&stack)) {
+ return Nothing<bool>();
+ }
+ if (stack->IsString()) {
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kStack));
+ WriteString(Handle<String>::cast(stack));
+ }
+
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kEnd));
+ return ThrowIfOutOfMemory();
+}
+
Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
if (delegate_ != nullptr) {
// TODO(titzer): introduce a Utils::ToLocal for WasmModuleObject.
@@ -1238,7 +1350,7 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
case SerializationTag::kNumberObject:
case SerializationTag::kBigIntObject:
case SerializationTag::kStringObject:
- return ReadJSValue(tag);
+ return ReadJSPrimitiveWrapper(tag);
case SerializationTag::kRegExp:
return ReadJSRegExp();
case SerializationTag::kBeginJSMap:
@@ -1256,6 +1368,8 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
const bool is_shared = true;
return ReadJSArrayBuffer(is_shared);
}
+ case SerializationTag::kError:
+ return ReadJSError();
case SerializationTag::kWasmModule:
return ReadWasmModule();
case SerializationTag::kWasmModuleTransfer:
@@ -1519,24 +1633,25 @@ MaybeHandle<JSDate> ValueDeserializer::ReadJSDate() {
return date;
}
-MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
+MaybeHandle<JSPrimitiveWrapper> ValueDeserializer::ReadJSPrimitiveWrapper(
+ SerializationTag tag) {
uint32_t id = next_id_++;
- Handle<JSValue> value;
+ Handle<JSPrimitiveWrapper> value;
switch (tag) {
case SerializationTag::kTrueObject:
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->boolean_function(), allocation_));
value->set_value(ReadOnlyRoots(isolate_).true_value());
break;
case SerializationTag::kFalseObject:
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->boolean_function(), allocation_));
value->set_value(ReadOnlyRoots(isolate_).false_value());
break;
case SerializationTag::kNumberObject: {
double number;
- if (!ReadDouble().To(&number)) return MaybeHandle<JSValue>();
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ if (!ReadDouble().To(&number)) return MaybeHandle<JSPrimitiveWrapper>();
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->number_function(), allocation_));
Handle<Object> number_object =
isolate_->factory()->NewNumber(number, allocation_);
@@ -1545,16 +1660,18 @@ MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
}
case SerializationTag::kBigIntObject: {
Handle<BigInt> bigint;
- if (!ReadBigInt().ToHandle(&bigint)) return MaybeHandle<JSValue>();
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ if (!ReadBigInt().ToHandle(&bigint))
+ return MaybeHandle<JSPrimitiveWrapper>();
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->bigint_function(), allocation_));
value->set_value(*bigint);
break;
}
case SerializationTag::kStringObject: {
Handle<String> string;
- if (!ReadString().ToHandle(&string)) return MaybeHandle<JSValue>();
- value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ if (!ReadString().ToHandle(&string))
+ return MaybeHandle<JSPrimitiveWrapper>();
+ value = Handle<JSPrimitiveWrapper>::cast(isolate_->factory()->NewJSObject(
isolate_->string_function(), allocation_));
value->set_value(*string);
break;
@@ -1578,7 +1695,7 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
// Ensure the deserialized flags are valid.
// TODO(adamk): Can we remove this check now that dotAll is always-on?
- uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::FlagCount();
+ uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::kFlagCount;
if ((raw_flags & flags_mask) ||
!JSRegExp::New(isolate_, pattern, static_cast<JSRegExp::Flags>(raw_flags))
.ToHandle(&regexp)) {
@@ -1768,6 +1885,78 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
return typed_array;
}
+MaybeHandle<Object> ValueDeserializer::ReadJSError() {
+ Handle<Object> message = isolate_->factory()->undefined_value();
+ Handle<Object> stack = isolate_->factory()->undefined_value();
+ Handle<Object> no_caller;
+ auto constructor = isolate_->error_function();
+ bool done = false;
+
+ while (!done) {
+ uint8_t tag;
+ if (!ReadVarint<uint8_t>().To(&tag)) {
+ return MaybeHandle<JSObject>();
+ }
+ switch (static_cast<ErrorTag>(tag)) {
+ case ErrorTag::kEvalErrorPrototype:
+ constructor = isolate_->eval_error_function();
+ break;
+ case ErrorTag::kRangeErrorPrototype:
+ constructor = isolate_->range_error_function();
+ break;
+ case ErrorTag::kReferenceErrorPrototype:
+ constructor = isolate_->reference_error_function();
+ break;
+ case ErrorTag::kSyntaxErrorPrototype:
+ constructor = isolate_->syntax_error_function();
+ break;
+ case ErrorTag::kTypeErrorPrototype:
+ constructor = isolate_->type_error_function();
+ break;
+ case ErrorTag::kUriErrorPrototype:
+ constructor = isolate_->uri_error_function();
+ break;
+ case ErrorTag::kMessage: {
+ Handle<String> message_string;
+ if (!ReadString().ToHandle(&message_string)) {
+ return MaybeHandle<JSObject>();
+ }
+ message = message_string;
+ break;
+ }
+ case ErrorTag::kStack: {
+ Handle<String> stack_string;
+ if (!ReadString().ToHandle(&stack_string)) {
+ return MaybeHandle<JSObject>();
+ }
+ stack = stack_string;
+ break;
+ }
+ case ErrorTag::kEnd:
+ done = true;
+ break;
+ default:
+ return MaybeHandle<JSObject>();
+ }
+ }
+
+ Handle<Object> error;
+ if (!ErrorUtils::Construct(isolate_, constructor, constructor, message,
+ SKIP_NONE, no_caller,
+ ErrorUtils::StackTraceCollection::kNone)
+ .ToHandle(&error)) {
+ return MaybeHandle<Object>();
+ }
+
+ if (Object::SetProperty(
+ isolate_, error, isolate_->factory()->stack_trace_symbol(), stack,
+ StoreOrigin::kMaybeKeyed, Just(ShouldThrow::kThrowOnError))
+ .is_null()) {
+ return MaybeHandle<Object>();
+ }
+ return error;
+}
+
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModuleTransfer() {
auto enabled_features = wasm::WasmFeaturesFromIsolate(isolate_);
if ((FLAG_wasm_disable_structured_cloning && !enabled_features.threads) ||
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index b83227d9d3..9e381d7e76 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -11,7 +11,7 @@
#include "include/v8.h"
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/handles/maybe-handles.h"
#include "src/utils/identity-map.h"
#include "src/utils/vector.h"
@@ -27,9 +27,9 @@ class JSArrayBuffer;
class JSArrayBufferView;
class JSDate;
class JSMap;
+class JSPrimitiveWrapper;
class JSRegExp;
class JSSet;
-class JSValue;
class MutableHeapNumber;
class Object;
class Oddball;
@@ -120,13 +120,15 @@ class ValueSerializer {
Maybe<bool> WriteJSObjectSlow(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArray(Handle<JSArray> array) V8_WARN_UNUSED_RESULT;
void WriteJSDate(JSDate date);
- Maybe<bool> WriteJSValue(Handle<JSValue> value) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSPrimitiveWrapper(Handle<JSPrimitiveWrapper> value)
+ V8_WARN_UNUSED_RESULT;
void WriteJSRegExp(JSRegExp regexp);
Maybe<bool> WriteJSMap(Handle<JSMap> map) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSSet(Handle<JSSet> map) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBuffer(Handle<JSArrayBuffer> array_buffer)
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView array_buffer);
+ Maybe<bool> WriteJSError(Handle<JSObject> error) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteWasmMemory(Handle<WasmMemoryObject> object)
@@ -264,7 +266,8 @@ class ValueDeserializer {
MaybeHandle<JSArray> ReadSparseJSArray() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArray> ReadDenseJSArray() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSDate> ReadJSDate() V8_WARN_UNUSED_RESULT;
- MaybeHandle<JSValue> ReadJSValue(SerializationTag tag) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSPrimitiveWrapper> ReadJSPrimitiveWrapper(SerializationTag tag)
+ V8_WARN_UNUSED_RESULT;
MaybeHandle<JSRegExp> ReadJSRegExp() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSMap> ReadJSMap() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSSet> ReadJSSet() V8_WARN_UNUSED_RESULT;
@@ -274,6 +277,7 @@ class ValueDeserializer {
V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
Handle<JSArrayBuffer> buffer) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<Object> ReadJSError() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadWasmModule() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadWasmModuleTransfer() V8_WARN_UNUSED_RESULT;
MaybeHandle<WasmMemoryObject> ReadWasmMemory() V8_WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 177f214415..40e6e8b427 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
adamk@chromium.org
gsathya@chromium.org
leszeks@chromium.org
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index 62e8c0a47a..5a6ef376a8 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -6,7 +6,7 @@
#define V8_PARSING_EXPRESSION_SCOPE_H_
#include "src/ast/scopes.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/objects/function-kind.h"
#include "src/parsing/scanner.h"
#include "src/zone/zone.h" // For ScopedPtrList.
@@ -330,7 +330,7 @@ class VariableDeclarationParsingScope : public ExpressionScope<Types> {
// This also handles marking of loop variables in for-in and for-of
// loops, as determined by loop-nesting-depth.
DCHECK_NOT_NULL(var);
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
}
}
return var;
@@ -396,8 +396,8 @@ class ExpressionParsingScope : public ExpressionScope<Types> {
using ExpressionScopeT = ExpressionScope<Types>;
using ScopeType = typename ExpressionScopeT::ScopeType;
- ExpressionParsingScope(ParserT* parser,
- ScopeType type = ExpressionScopeT::kExpression)
+ explicit ExpressionParsingScope(
+ ParserT* parser, ScopeType type = ExpressionScopeT::kExpression)
: ExpressionScopeT(parser, type),
variable_list_(parser->variable_buffer()),
has_async_arrow_in_scope_chain_(
@@ -437,8 +437,7 @@ class ExpressionParsingScope : public ExpressionScope<Types> {
}
this->mark_verified();
return this->parser()->RewriteInvalidReferenceExpression(
- expression, beg_pos, end_pos, MessageTemplate::kInvalidLhsInFor,
- kSyntaxError);
+ expression, beg_pos, end_pos, MessageTemplate::kInvalidLhsInFor);
}
void RecordExpressionError(const Scanner::Location& loc,
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index ed9d80861b..0ae09d9897 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -97,7 +97,7 @@ ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
set_start_position(shared->StartPosition());
set_end_position(shared->EndPosition());
- function_literal_id_ = shared->FunctionLiteralId(isolate);
+ function_literal_id_ = shared->function_literal_id();
SetFunctionInfo(shared);
Handle<Script> script(Script::cast(shared->script()), isolate);
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 0ecd8ecedb..2dfb0d2461 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -16,7 +16,7 @@
#include "src/base/v8-fallthrough.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/function-kind.h"
@@ -624,6 +624,11 @@ class ParserBase {
}
}
+ RequiresBrandCheckFlag RequiresBrandCheck(ClassLiteralProperty::Kind kind) {
+ return kind == ClassLiteralProperty::Kind::FIELD ? kNoBrandCheck
+ : kRequiresBrandCheck;
+ }
+
const AstRawString* ClassFieldVariableName(AstValueFactory* ast_value_factory,
int index) {
std::string name = ".class-field-" + std::to_string(index);
@@ -780,7 +785,7 @@ class ParserBase {
if (scanner()->current_token() == Token::AWAIT && !is_async_function()) {
ReportMessageAt(scanner()->location(),
- MessageTemplate::kAwaitNotInAsyncFunction, kSyntaxError);
+ MessageTemplate::kAwaitNotInAsyncFunction);
return;
}
@@ -930,21 +935,19 @@ class ParserBase {
V8_NOINLINE void ReportMessage(MessageTemplate message) {
Scanner::Location source_location = scanner()->location();
impl()->ReportMessageAt(source_location, message,
- static_cast<const char*>(nullptr), kSyntaxError);
+ static_cast<const char*>(nullptr));
}
template <typename T>
- V8_NOINLINE void ReportMessage(MessageTemplate message, T arg,
- ParseErrorType error_type = kSyntaxError) {
+ V8_NOINLINE void ReportMessage(MessageTemplate message, T arg) {
Scanner::Location source_location = scanner()->location();
- impl()->ReportMessageAt(source_location, message, arg, error_type);
+ impl()->ReportMessageAt(source_location, message, arg);
}
V8_NOINLINE void ReportMessageAt(Scanner::Location location,
- MessageTemplate message,
- ParseErrorType error_type) {
+ MessageTemplate message) {
impl()->ReportMessageAt(location, message,
- static_cast<const char*>(nullptr), error_type);
+ static_cast<const char*>(nullptr));
}
V8_NOINLINE void ReportUnexpectedToken(Token::Value token);
@@ -1213,9 +1216,9 @@ class ParserBase {
// Checks if the expression is a valid reference expression (e.g., on the
// left-hand side of assignments). Although ruled out by ECMA as early errors,
// we allow calls for web compatibility and rewrite them to a runtime throw.
- ExpressionT RewriteInvalidReferenceExpression(
- ExpressionT expression, int beg_pos, int end_pos, MessageTemplate message,
- ParseErrorType type = kReferenceError);
+ ExpressionT RewriteInvalidReferenceExpression(ExpressionT expression,
+ int beg_pos, int end_pos,
+ MessageTemplate message);
bool IsValidReferenceExpression(ExpressionT expression);
@@ -1305,7 +1308,7 @@ class ParserBase {
return factory()->NewReturnStatement(expr, pos, end_pos);
}
- ModuleDescriptor* module() const {
+ SourceTextModuleDescriptor* module() const {
return scope()->AsModuleScope()->module();
}
Scope* scope() const { return scope_; }
@@ -1567,8 +1570,7 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
if (class_scope == nullptr) {
impl()->ReportMessageAt(Scanner::Location(pos, pos + 1),
MessageTemplate::kInvalidPrivateFieldResolution,
- impl()->GetRawNameFromIdentifier(name),
- kSyntaxError);
+ impl()->GetRawNameFromIdentifier(name));
return impl()->FailureExpression();
}
key = impl()->ExpressionFromPrivateName(class_scope, name, pos);
@@ -1590,15 +1592,14 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
}
IdentifierT js_pattern = impl()->GetNextSymbol();
- Maybe<RegExp::Flags> flags = scanner()->ScanRegExpFlags();
+ Maybe<int> flags = scanner()->ScanRegExpFlags();
if (flags.IsNothing()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
return impl()->FailureExpression();
}
- int js_flags = flags.FromJust();
Next();
- return factory()->NewRegExpLiteral(js_pattern, js_flags, pos);
+ return factory()->NewRegExpLiteral(js_pattern, flags.FromJust(), pos);
}
template <typename Impl>
@@ -2657,13 +2658,11 @@ ParserBase<Impl>::ParseAssignmentExpressionCoverGrammar() {
impl()->ReportMessageAt(loc,
MessageTemplate::kInvalidDestructuringTarget);
} else {
- // Reference Error if LHS is neither object literal nor an array literal
+ // Syntax Error if LHS is neither object literal nor an array literal
// (Parenthesized literals are
// CoverParenthesizedExpressionAndArrowParameterList).
// #sec-assignment-operators-static-semantics-early-errors
- impl()->ReportMessageAt(loc, MessageTemplate::kInvalidLhsInAssignment,
- static_cast<const char*>(nullptr),
- kReferenceError);
+ impl()->ReportMessageAt(loc, MessageTemplate::kInvalidLhsInAssignment);
}
}
expression_scope()->ValidateAsPattern(expression, lhs_beg_pos,
@@ -2905,7 +2904,7 @@ ParserBase<Impl>::ParseUnaryOrPrefixExpression() {
return impl()->FailureExpression();
}
- if (impl()->IsPropertyWithPrivateFieldKey(expression)) {
+ if (impl()->IsPrivateReference(expression)) {
ReportMessage(MessageTemplate::kDeletePrivateField);
return impl()->FailureExpression();
}
@@ -3291,7 +3290,18 @@ ParserBase<Impl>::ParseImportExpressions() {
return impl()->ImportMetaExpression(pos);
}
- Expect(Token::LPAREN);
+
+ if (V8_UNLIKELY(peek() != Token::LPAREN)) {
+ if (!parsing_module_) {
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kImportOutsideModule);
+ } else {
+ ReportUnexpectedToken(Next());
+ }
+ return impl()->FailureExpression();
+ }
+
+ Consume(Token::LPAREN);
if (peek() == Token::RPAREN) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kImportMissingSpecifier);
@@ -3332,7 +3342,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
// TODO(rossberg): This might not be the correct FunctionState for the
// method here.
expression_scope()->RecordThisUse();
- UseThis()->set_maybe_assigned();
+ UseThis()->SetMaybeAssigned();
return impl()->NewSuperCallReference(pos);
}
}
@@ -4291,7 +4301,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
impl()->ReportMessageAt(Scanner::Location(unresolvable->position(),
unresolvable->position() + 1),
MessageTemplate::kInvalidPrivateFieldResolution,
- unresolvable->raw_name(), kSyntaxError);
+ unresolvable->raw_name());
return impl()->FailureExpression();
}
@@ -4442,15 +4452,14 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::RewriteInvalidReferenceExpression(ExpressionT expression,
int beg_pos, int end_pos,
- MessageTemplate message,
- ParseErrorType type) {
+ MessageTemplate message) {
DCHECK(!IsValidReferenceExpression(expression));
if (impl()->IsIdentifier(expression)) {
DCHECK(is_strict(language_mode()));
DCHECK(impl()->IsEvalOrArguments(impl()->AsIdentifier(expression)));
ReportMessageAt(Scanner::Location(beg_pos, end_pos),
- MessageTemplate::kStrictEvalArguments, kSyntaxError);
+ MessageTemplate::kStrictEvalArguments);
return impl()->FailureExpression();
}
if (expression->IsCall() && !expression->AsCall()->is_tagged_template()) {
@@ -4467,7 +4476,7 @@ ParserBase<Impl>::RewriteInvalidReferenceExpression(ExpressionT expression,
ExpressionT error = impl()->NewThrowReferenceError(message, beg_pos);
return factory()->NewProperty(expression, error, beg_pos);
}
- ReportMessageAt(Scanner::Location(beg_pos, end_pos), message, type);
+ ReportMessageAt(Scanner::Location(beg_pos, end_pos), message);
return impl()->FailureExpression();
}
@@ -4561,7 +4570,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseV8Intrinsic() {
if (has_spread) {
ReportMessageAt(Scanner::Location(pos, position()),
- MessageTemplate::kIntrinsicWithSpread, kSyntaxError);
+ MessageTemplate::kIntrinsicWithSpread);
return impl()->FailureExpression();
}
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 380920b8ba..2a860da3d0 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -15,8 +15,8 @@
#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
#include "src/codegen/bailout-reason.h"
+#include "src/common/message-template.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
-#include "src/execution/message-template.h"
#include "src/logging/log.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/scope-info.h"
@@ -501,9 +501,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
Scope::DeserializationMode::kIncludingVariables);
scanner_.Initialize();
- if (FLAG_harmony_hashbang) {
- scanner_.SkipHashBang();
- }
+ scanner_.SkipHashBang();
FunctionLiteral* result = DoParseProgram(isolate, info);
MaybeResetCharacterStream(info, result);
MaybeProcessSourceRanges(info, result, stack_limit_);
@@ -1347,7 +1345,7 @@ Statement* Parser::ParseExportDeclaration() {
}
loc.end_pos = scanner()->location().end_pos;
- ModuleDescriptor* descriptor = module();
+ SourceTextModuleDescriptor* descriptor = module();
for (const AstRawString* name : names) {
descriptor->AddExport(name, name, loc, zone());
}
@@ -2783,13 +2781,15 @@ Variable* Parser::CreateSyntheticContextVariable(const AstRawString* name) {
return proxy->var();
}
-Variable* Parser::CreatePrivateNameVariable(ClassScope* scope,
- const AstRawString* name) {
+Variable* Parser::CreatePrivateNameVariable(
+ ClassScope* scope, RequiresBrandCheckFlag requires_brand_check,
+ const AstRawString* name) {
DCHECK_NOT_NULL(name);
int begin = position();
int end = end_position();
bool was_added = false;
- Variable* var = scope->DeclarePrivateName(name, &was_added);
+ Variable* var =
+ scope->DeclarePrivateName(name, requires_brand_check, &was_added);
if (!was_added) {
Scanner::Location loc(begin, end);
ReportMessageAt(loc, MessageTemplate::kVarRedeclaration, var->raw_name());
@@ -2841,7 +2841,8 @@ void Parser::DeclarePrivateClassMember(ClassScope* scope,
}
}
- Variable* private_name_var = CreatePrivateNameVariable(scope, property_name);
+ Variable* private_name_var =
+ CreatePrivateNameVariable(scope, RequiresBrandCheck(kind), property_name);
int pos = property->value()->position();
if (pos == kNoSourcePosition) {
pos = property->key()->position();
@@ -2950,16 +2951,6 @@ Expression* Parser::RewriteClassLiteral(ClassScope* block_scope,
return class_literal;
}
-bool Parser::IsPropertyWithPrivateFieldKey(Expression* expression) {
- if (!expression->IsProperty()) return false;
- Property* property = expression->AsProperty();
-
- if (!property->key()->IsVariableProxy()) return false;
- VariableProxy* key = property->key()->AsVariableProxy();
-
- return key->IsPrivateName();
-}
-
void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
// For each var-binding that shadows a parameter, insert an assignment
// initializing the variable with the parameter.
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index cb1c473af5..b7fb19c26f 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -297,8 +297,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
VariableKind kind, int beg_pos, int end_pos,
ZonePtrList<const AstRawString>* names);
Variable* CreateSyntheticContextVariable(const AstRawString* synthetic_name);
- Variable* CreatePrivateNameVariable(ClassScope* scope,
- const AstRawString* name);
+ Variable* CreatePrivateNameVariable(
+ ClassScope* scope, RequiresBrandCheckFlag requires_brand_check,
+ const AstRawString* name);
FunctionLiteral* CreateInitializerFunction(
const char* name, DeclarationScope* scope,
ZonePtrList<ClassLiteral::Property>* fields);
@@ -373,8 +374,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return object_literal;
}
- bool IsPropertyWithPrivateFieldKey(Expression* property);
-
// Insert initializer statements for var-bindings shadowing parameter bindings
// from a non-simple parameter list.
void InsertShadowingVarBindingInitializers(Block* block);
@@ -538,6 +537,13 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return property != nullptr && property->obj()->IsThisExpression();
}
+ // Returns true if the expression is of type "obj.#foo".
+ V8_INLINE static bool IsPrivateReference(Expression* expression) {
+ DCHECK_NOT_NULL(expression);
+ Property* property = expression->AsProperty();
+ return property != nullptr && property->IsPrivateReference();
+ }
+
// This returns true if the expression is an indentifier (wrapped
// inside a variable proxy). We exclude the case of 'this', which
// has been converted to a variable proxy.
@@ -690,11 +696,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Reporting errors.
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler()->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ MessageTemplate message, const char* arg = nullptr) {
+ pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, arg);
scanner_.set_parser_error();
}
@@ -703,11 +707,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE void ReportUnidentifiableError() { UNREACHABLE(); }
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler()->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ MessageTemplate message, const AstRawString* arg) {
+ pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, arg);
scanner_.set_parser_error();
}
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.cc b/deps/v8/src/parsing/pending-compilation-error-handler.cc
index b6331b2f9d..d792d5c184 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.cc
@@ -30,26 +30,26 @@ MessageLocation PendingCompilationErrorHandler::MessageDetails::GetLocation(
return MessageLocation(script, start_position_, end_position_);
}
-void PendingCompilationErrorHandler::ReportMessageAt(
- int start_position, int end_position, MessageTemplate message,
- const char* arg, ParseErrorType error_type) {
+void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
+ int end_position,
+ MessageTemplate message,
+ const char* arg) {
if (has_pending_error_) return;
has_pending_error_ = true;
error_details_ =
MessageDetails(start_position, end_position, message, nullptr, arg);
- error_type_ = error_type;
}
-void PendingCompilationErrorHandler::ReportMessageAt(
- int start_position, int end_position, MessageTemplate message,
- const AstRawString* arg, ParseErrorType error_type) {
+void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
+ int end_position,
+ MessageTemplate message,
+ const AstRawString* arg) {
if (has_pending_error_) return;
has_pending_error_ = true;
error_details_ =
MessageDetails(start_position, end_position, message, arg, nullptr);
- error_type_ = error_type;
}
void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
@@ -97,17 +97,8 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
isolate->debug()->OnCompileError(script);
Factory* factory = isolate->factory();
- Handle<Object> error;
- switch (error_type_) {
- case kReferenceError:
- error = factory->NewReferenceError(error_details_.message(), argument);
- break;
- case kSyntaxError:
- error = factory->NewSyntaxError(error_details_.message(), argument);
- break;
- default:
- UNREACHABLE();
- }
+ Handle<Object> error =
+ factory->NewSyntaxError(error_details_.message(), argument);
if (!error->IsJSObject()) {
isolate->Throw(*error, &location);
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.h b/deps/v8/src/parsing/pending-compilation-error-handler.h
index c6b9559931..cb2908eaf8 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.h
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.h
@@ -9,7 +9,7 @@
#include "src/base/macros.h"
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/handles/handles.h"
namespace v8 {
@@ -25,17 +25,13 @@ class Script;
class PendingCompilationErrorHandler {
public:
PendingCompilationErrorHandler()
- : has_pending_error_(false),
- stack_overflow_(false),
- error_type_(kSyntaxError) {}
+ : has_pending_error_(false), stack_overflow_(false) {}
void ReportMessageAt(int start_position, int end_position,
- MessageTemplate message, const char* arg = nullptr,
- ParseErrorType error_type = kSyntaxError);
+ MessageTemplate message, const char* arg = nullptr);
void ReportMessageAt(int start_position, int end_position,
- MessageTemplate message, const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError);
+ MessageTemplate message, const AstRawString* arg);
void ReportWarningAt(int start_position, int end_position,
MessageTemplate message, const char* arg = nullptr);
@@ -110,7 +106,6 @@ class PendingCompilationErrorHandler {
bool unidentifiable_error_ = false;
MessageDetails error_details_;
- ParseErrorType error_type_;
std::forward_list<MessageDetails> warning_messages_;
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 7f33d301cb..ea5e70a3c1 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -645,7 +645,7 @@ void BaseConsumedPreparseData<Data>::RestoreDataForVariable(Variable* var) {
#endif
uint8_t variable_data = scope_data_->ReadQuarter();
if (VariableMaybeAssignedField::decode(variable_data)) {
- var->set_maybe_assigned();
+ var->SetMaybeAssigned();
}
if (VariableContextAllocatedField::decode(variable_data)) {
var->set_is_used();
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 5d11bddb41..a078d79295 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -74,11 +74,9 @@ PreParser::PreParseResult PreParser::PreParseProgram() {
scope->set_is_being_lazily_parsed(true);
#endif
- if (FLAG_harmony_hashbang) {
- // Note: We should only skip the hashbang in non-Eval scripts
- // (currently, Eval is not handled by the PreParser).
- scanner()->SkipHashBang();
- }
+ // Note: We should only skip the hashbang in non-Eval scripts
+ // (currently, Eval is not handled by the PreParser).
+ scanner()->SkipHashBang();
// ModuleDeclarationInstantiation for Source Text Module Records creates a
// new Module Environment Record whose outer lexical environment record is
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index cca3b3675d..33c312f392 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -144,10 +144,10 @@ class PreParserExpression {
ExpressionTypeField::encode(kThisExpression));
}
- static PreParserExpression ThisPropertyWithPrivateFieldKey() {
- return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(
- kThisPropertyExpressionWithPrivateFieldKey));
+ static PreParserExpression ThisPrivateReference() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kThisPrivateReferenceExpression));
}
static PreParserExpression ThisProperty() {
@@ -162,10 +162,10 @@ class PreParserExpression {
ExpressionTypeField::encode(kPropertyExpression));
}
- static PreParserExpression PropertyWithPrivateFieldKey() {
+ static PreParserExpression PrivateReference() {
return PreParserExpression(
TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kPropertyExpressionWithPrivateFieldKey));
+ ExpressionTypeField::encode(kPrivateReferenceExpression));
}
static PreParserExpression Call() {
@@ -242,25 +242,23 @@ class PreParserExpression {
return TypeField::decode(code_) == kExpression &&
(ExpressionTypeField::decode(code_) == kThisPropertyExpression ||
ExpressionTypeField::decode(code_) ==
- kThisPropertyExpressionWithPrivateFieldKey);
+ kThisPrivateReferenceExpression);
}
bool IsProperty() const {
return TypeField::decode(code_) == kExpression &&
(ExpressionTypeField::decode(code_) == kPropertyExpression ||
ExpressionTypeField::decode(code_) == kThisPropertyExpression ||
+ ExpressionTypeField::decode(code_) == kPrivateReferenceExpression ||
ExpressionTypeField::decode(code_) ==
- kPropertyExpressionWithPrivateFieldKey ||
- ExpressionTypeField::decode(code_) ==
- kThisPropertyExpressionWithPrivateFieldKey);
+ kThisPrivateReferenceExpression);
}
- bool IsPropertyWithPrivateFieldKey() const {
+ bool IsPrivateReference() const {
return TypeField::decode(code_) == kExpression &&
- (ExpressionTypeField::decode(code_) ==
- kPropertyExpressionWithPrivateFieldKey ||
+ (ExpressionTypeField::decode(code_) == kPrivateReferenceExpression ||
ExpressionTypeField::decode(code_) ==
- kThisPropertyExpressionWithPrivateFieldKey);
+ kThisPrivateReferenceExpression);
}
bool IsCall() const {
@@ -332,9 +330,9 @@ class PreParserExpression {
enum ExpressionType {
kThisExpression,
kThisPropertyExpression,
- kThisPropertyExpressionWithPrivateFieldKey,
+ kThisPrivateReferenceExpression,
kPropertyExpression,
- kPropertyExpressionWithPrivateFieldKey,
+ kPrivateReferenceExpression,
kCallExpression,
kCallEvalExpression,
kCallTaggedTemplateExpression,
@@ -573,9 +571,9 @@ class PreParserFactory {
const PreParserExpression& key, int pos) {
if (key.IsIdentifier() && key.AsIdentifier().IsPrivateName()) {
if (obj.IsThis()) {
- return PreParserExpression::ThisPropertyWithPrivateFieldKey();
+ return PreParserExpression::ThisPrivateReference();
}
- return PreParserExpression::PropertyWithPrivateFieldKey();
+ return PreParserExpression::PrivateReference();
}
if (obj.IsThis()) {
@@ -848,7 +846,7 @@ class PreParserFuncNameInferrer {
class PreParserSourceRange {
public:
- PreParserSourceRange() {}
+ PreParserSourceRange() = default;
PreParserSourceRange(int start, int end) {}
static PreParserSourceRange Empty() { return PreParserSourceRange(); }
static PreParserSourceRange OpenEnded(int32_t start) { return Empty(); }
@@ -1045,9 +1043,8 @@ class PreParser : public ParserBase<PreParser> {
TemplateLiteralState* state, int start, const PreParserExpression& tag) {
return PreParserExpression::Default();
}
- V8_INLINE bool IsPropertyWithPrivateFieldKey(
- const PreParserExpression& expression) {
- return expression.IsPropertyWithPrivateFieldKey();
+ V8_INLINE bool IsPrivateReference(const PreParserExpression& expression) {
+ return expression.IsPrivateReference();
}
V8_INLINE void SetLanguageMode(Scope* scope, LanguageMode mode) {
scope->SetLanguageMode(mode);
@@ -1103,9 +1100,10 @@ class PreParser : public ParserBase<PreParser> {
// Don't bother actually binding the proxy.
}
- Variable* DeclarePrivateVariableName(const AstRawString* name,
- ClassScope* scope, bool* was_added) {
- return scope->DeclarePrivateName(name, was_added);
+ Variable* DeclarePrivateVariableName(
+ const AstRawString* name, ClassScope* scope,
+ RequiresBrandCheckFlag requires_brand_check, bool* was_added) {
+ return scope->DeclarePrivateName(name, requires_brand_check, was_added);
}
Variable* DeclareVariableName(const AstRawString* name, VariableMode mode,
@@ -1258,7 +1256,9 @@ class PreParser : public ParserBase<PreParser> {
return;
}
bool was_added;
- DeclarePrivateVariableName(property_name.string_, scope, &was_added);
+
+ DeclarePrivateVariableName(property_name.string_, scope,
+ RequiresBrandCheck(kind), &was_added);
if (!was_added) {
Scanner::Location loc(property.position(), property.position() + 1);
ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
@@ -1483,11 +1483,9 @@ class PreParser : public ParserBase<PreParser> {
// Reporting errors.
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler()->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ MessageTemplate message, const char* arg = nullptr) {
+ pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, arg);
scanner()->set_parser_error();
}
@@ -1498,17 +1496,14 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void ReportMessageAt(Scanner::Location source_location,
MessageTemplate message,
- const PreParserIdentifier& arg,
- ParseErrorType error_type = kSyntaxError) {
+ const PreParserIdentifier& arg) {
UNREACHABLE();
}
void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError) {
- pending_error_handler()->ReportMessageAt(source_location.beg_pos,
- source_location.end_pos, message,
- arg, error_type);
+ MessageTemplate message, const AstRawString* arg) {
+ pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, arg);
scanner()->set_parser_error();
}
@@ -1644,11 +1639,11 @@ class PreParser : public ParserBase<PreParser> {
return PreParserStatement::Jump();
}
- V8_INLINE void AddFormalParameter(PreParserFormalParameters* parameters,
- PreParserExpression& pattern,
- const PreParserExpression& initializer,
- int initializer_end_position,
- bool is_rest) {
+ V8_INLINE void AddFormalParameter(
+ PreParserFormalParameters* parameters,
+ PreParserExpression& pattern, // NOLINT(runtime/references)
+ const PreParserExpression& initializer, int initializer_end_position,
+ bool is_rest) {
DeclarationScope* scope = parameters->scope;
scope->RecordParameter(is_rest);
parameters->UpdateArityAndFunctionLength(!initializer.IsNull(), is_rest);
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 7758b2bb73..01ea0a0d02 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -590,7 +590,8 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
}
}
- while (cursor < end && output_cursor + 1 < buffer_start_ + kBufferSize) {
+ const uint16_t* max_buffer_end = buffer_start_ + kBufferSize;
+ while (cursor < end && output_cursor + 1 < max_buffer_end) {
unibrow::uchar t =
unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
if (V8_LIKELY(t <= unibrow::Utf16::kMaxNonSurrogateCharCode)) {
@@ -601,6 +602,15 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
*(output_cursor++) = unibrow::Utf16::LeadSurrogate(t);
*(output_cursor++) = unibrow::Utf16::TrailSurrogate(t);
}
+ // Fast path for ascii sequences.
+ size_t remaining = end - cursor;
+ size_t max_buffer = max_buffer_end - output_cursor;
+ int max_length = static_cast<int>(Min(remaining, max_buffer));
+ DCHECK_EQ(state, unibrow::Utf8::State::kAccept);
+ int ascii_length = NonAsciiStart(cursor, max_length);
+ CopyChars(output_cursor, cursor, ascii_length);
+ cursor += ascii_length;
+ output_cursor += ascii_length;
}
current_.pos.bytes = chunk.start.bytes + (cursor - chunk.data);
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 709f28a02d..2f74548020 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -1004,45 +1004,21 @@ bool Scanner::ScanRegExpPattern() {
return true;
}
-
-Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
+Maybe<int> Scanner::ScanRegExpFlags() {
DCHECK_EQ(Token::REGEXP_LITERAL, next().token);
// Scan regular expression flags.
- int flags = 0;
+ JSRegExp::Flags flags;
while (IsIdentifierPart(c0_)) {
- RegExp::Flags flag = RegExp::kNone;
- switch (c0_) {
- case 'g':
- flag = RegExp::kGlobal;
- break;
- case 'i':
- flag = RegExp::kIgnoreCase;
- break;
- case 'm':
- flag = RegExp::kMultiline;
- break;
- case 's':
- flag = RegExp::kDotAll;
- break;
- case 'u':
- flag = RegExp::kUnicode;
- break;
- case 'y':
- flag = RegExp::kSticky;
- break;
- default:
- return Nothing<RegExp::Flags>();
- }
- if (flags & flag) {
- return Nothing<RegExp::Flags>();
- }
+ JSRegExp::Flags flag = JSRegExp::FlagFromChar(c0_);
+ if (flag == JSRegExp::kInvalid) return Nothing<int>();
+ if (flags & flag) return Nothing<int>();
Advance();
flags |= flag;
}
next().location.end_pos = source_pos();
- return Just(RegExp::Flags(flags));
+ return Just<int>(flags);
}
const AstRawString* Scanner::CurrentSymbol(
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 449aca46ff..e2865bca1c 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -11,7 +11,7 @@
#include "src/base/logging.h"
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/parsing/literal-buffer.h"
#include "src/parsing/token.h"
#include "src/strings/char-predicates.h"
@@ -392,7 +392,7 @@ class V8_EXPORT_PRIVATE Scanner {
// Returns true if a pattern is scanned.
bool ScanRegExpPattern();
// Scans the input as regular expression flags. Returns the flags on success.
- Maybe<RegExp::Flags> ScanRegExpFlags();
+ Maybe<int> ScanRegExpFlags();
// Scans the input as a template literal
Token::Value ScanTemplateContinuation() {
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index a912c2e1b2..472dbdbb10 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -173,7 +173,8 @@ void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
HeapObject object;
- CombinedHeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
+ CombinedHeapObjectIterator iterator(heap(),
+ HeapObjectIterator::kFilterUnreachable);
// Make sure that object with the given id is still reachable.
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
@@ -202,10 +203,21 @@ Isolate* HeapProfiler::isolate() const { return heap()->isolate(); }
void HeapProfiler::QueryObjects(Handle<Context> context,
debug::QueryObjectPredicate* predicate,
PersistentValueVector<v8::Object>* objects) {
+ {
+ CombinedHeapObjectIterator function_heap_iterator(
+ heap(), HeapObjectIterator::kFilterUnreachable);
+ for (HeapObject heap_obj = function_heap_iterator.Next();
+ !heap_obj.is_null(); heap_obj = function_heap_iterator.Next()) {
+ if (heap_obj.IsFeedbackVector()) {
+ FeedbackVector::cast(heap_obj).ClearSlots(isolate());
+ }
+ }
+ }
// We should return accurate information about live objects, so we need to
// collect all garbage first.
heap()->CollectAllAvailableGarbage(GarbageCollectionReason::kHeapProfiler);
- CombinedHeapIterator heap_iterator(heap());
+ CombinedHeapObjectIterator heap_iterator(
+ heap(), HeapObjectIterator::kFilterUnreachable);
for (HeapObject heap_obj = heap_iterator.Next(); !heap_obj.is_null();
heap_obj = heap_iterator.Next()) {
if (!heap_obj.IsJSObject() || heap_obj.IsExternal(isolate())) continue;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index bc171360b5..df941eda96 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -395,7 +395,7 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
}
heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kHeapProfiler);
- CombinedHeapIterator iterator(heap_);
+ CombinedHeapObjectIterator iterator(heap_);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
FindOrAddEntry(obj.address(), obj.Size());
@@ -643,7 +643,7 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject object) {
}
int V8HeapExplorer::EstimateObjectsCount() {
- CombinedHeapIterator it(heap_, HeapIterator::kFilterUnreachable);
+ CombinedHeapObjectIterator it(heap_, HeapObjectIterator::kFilterUnreachable);
int objects_count = 0;
while (!it.Next().is_null()) ++objects_count;
return objects_count;
@@ -1446,7 +1446,8 @@ bool V8HeapExplorer::IterateAndExtractReferences(
bool interrupted = false;
- CombinedHeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
+ CombinedHeapObjectIterator iterator(heap_,
+ HeapObjectIterator::kFilterUnreachable);
// Heap iteration with filtering must be finished in any case.
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next(), progress_->ProgressStep()) {
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 756500151f..d3d3330e27 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -25,10 +25,8 @@
namespace v8 {
namespace internal {
-class AllocationTracker;
class AllocationTraceNode;
class HeapEntry;
-class HeapIterator;
class HeapProfiler;
class HeapSnapshot;
class HeapSnapshotGenerator;
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 1f2b4bc72a..b3ea07db34 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -270,9 +270,9 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
// bytecode_array might be garbage, so don't actually dereference it. We
// avoid the frame->GetXXX functions since they call BytecodeArray::cast,
// which has a heap access in its DCHECK.
- i::Address bytecode_array = i::Memory<i::Address>(
+ i::Address bytecode_array = base::Memory<i::Address>(
frame->fp() + i::InterpreterFrameConstants::kBytecodeArrayFromFp);
- i::Address bytecode_offset = i::Memory<i::Address>(
+ i::Address bytecode_offset = base::Memory<i::Address>(
frame->fp() + i::InterpreterFrameConstants::kBytecodeOffsetFromFp);
// If the bytecode array is a heap object and the bytecode offset is a
diff --git a/deps/v8/src/regexp/OWNERS b/deps/v8/src/regexp/OWNERS
index 7f916e12ea..250c8c6b88 100644
--- a/deps/v8/src/regexp/OWNERS
+++ b/deps/v8/src/regexp/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
jgruber@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
deleted file mode 100644
index b542add17b..0000000000
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-#ifndef V8_REGEXP_JSREGEXP_INL_H_
-#define V8_REGEXP_JSREGEXP_INL_H_
-
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/objects.h"
-#include "src/regexp/jsregexp.h"
-#include "src/utils/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-
-RegExpImpl::GlobalCache::~GlobalCache() {
- // Deallocate the register array if we allocated it in the constructor
- // (as opposed to using the existing jsregexp_static_offsets_vector).
- if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- DeleteArray(register_array_);
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::FetchNext() {
- current_match_index_++;
- if (current_match_index_ >= num_matches_) {
- // Current batch of results exhausted.
- // Fail if last batch was not even fully filled.
- if (num_matches_ < max_matches_) {
- num_matches_ = 0; // Signal failed match.
- return nullptr;
- }
-
- int32_t* last_match =
- &register_array_[(current_match_index_ - 1) * registers_per_match_];
- int last_end_index = last_match[1];
-
- if (regexp_->TypeTag() == JSRegExp::ATOM) {
- num_matches_ =
- RegExpImpl::AtomExecRaw(isolate_, regexp_, subject_, last_end_index,
- register_array_, register_array_size_);
- } else {
- int last_start_index = last_match[0];
- if (last_start_index == last_end_index) {
- // Zero-length match. Advance by one code point.
- last_end_index = AdvanceZeroLength(last_end_index);
- }
- if (last_end_index > subject_->length()) {
- num_matches_ = 0; // Signal failed match.
- return nullptr;
- }
- num_matches_ = RegExpImpl::IrregexpExecRaw(
- isolate_, regexp_, subject_, last_end_index, register_array_,
- register_array_size_);
- }
-
- if (num_matches_ <= 0) return nullptr;
- current_match_index_ = 0;
- return register_array_;
- } else {
- return &register_array_[current_match_index_ * registers_per_match_];
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
- int index = current_match_index_ * registers_per_match_;
- if (num_matches_ == 0) {
- // After a failed match we shift back by one result.
- index -= registers_per_match_;
- }
- return &register_array_[index];
-}
-
-RegExpEngine::CompilationResult::CompilationResult(Isolate* isolate,
- const char* error_message)
- : error_message(error_message),
- code(ReadOnlyRoots(isolate).the_hole_value()) {}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_REGEXP_JSREGEXP_INL_H_
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
deleted file mode 100644
index a6f3a5ebcb..0000000000
--- a/deps/v8/src/regexp/jsregexp.cc
+++ /dev/null
@@ -1,7055 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/regexp/jsregexp.h"
-
-#include <memory>
-#include <vector>
-
-#include "src/base/platform/platform.h"
-#include "src/codegen/compilation-cache.h"
-#include "src/diagnostics/code-tracer.h"
-#include "src/execution/execution.h"
-#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
-#include "src/heap/factory.h"
-#include "src/heap/heap-inl.h"
-#include "src/objects/elements.h"
-#include "src/regexp/interpreter-irregexp.h"
-#include "src/regexp/jsregexp-inl.h"
-#include "src/regexp/regexp-macro-assembler-irregexp.h"
-#include "src/regexp/regexp-macro-assembler-tracer.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/regexp/regexp-parser.h"
-#include "src/regexp/regexp-stack.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/regexp/special-case.h"
-#endif // V8_INTL_SUPPORT
-#include "src/runtime/runtime.h"
-#include "src/strings/string-search.h"
-#include "src/strings/unicode-decoder.h"
-#include "src/strings/unicode-inl.h"
-#include "src/utils/ostreams.h"
-#include "src/utils/splay-tree-inl.h"
-#include "src/zone/zone-list-inl.h"
-
-#ifdef V8_INTL_SUPPORT
-#include "unicode/locid.h"
-#include "unicode/uniset.h"
-#include "unicode/utypes.h"
-#endif // V8_INTL_SUPPORT
-
-#if V8_TARGET_ARCH_IA32
-#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/regexp/x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/regexp/arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/regexp/s390/regexp-macro-assembler-s390.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/regexp/mips/regexp-macro-assembler-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-V8_WARN_UNUSED_RESULT
-static inline MaybeHandle<Object> ThrowRegExpException(
- Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
- Handle<String> error_text) {
- THROW_NEW_ERROR(isolate, NewSyntaxError(MessageTemplate::kMalformedRegExp,
- pattern, error_text),
- Object);
-}
-
-inline void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> error_text) {
- USE(ThrowRegExpException(isolate, re, Handle<String>(re->Pattern(), isolate),
- error_text));
-}
-
-
-ContainedInLattice AddRange(ContainedInLattice containment,
- const int* ranges,
- int ranges_length,
- Interval new_range) {
- DCHECK_EQ(1, ranges_length & 1);
- DCHECK_EQ(String::kMaxCodePoint + 1, ranges[ranges_length - 1]);
- if (containment == kLatticeUnknown) return containment;
- bool inside = false;
- int last = 0;
- for (int i = 0; i < ranges_length; inside = !inside, last = ranges[i], i++) {
- // Consider the range from last to ranges[i].
- // We haven't got to the new range yet.
- if (ranges[i] <= new_range.from()) continue;
- // New range is wholly inside last-ranges[i]. Note that new_range.to() is
- // inclusive, but the values in ranges are not.
- if (last <= new_range.from() && new_range.to() < ranges[i]) {
- return Combine(containment, inside ? kLatticeIn : kLatticeOut);
- }
- return kLatticeUnknown;
- }
- return containment;
-}
-
-// More makes code generation slower, less makes V8 benchmark score lower.
-const int kMaxLookaheadForBoyerMoore = 8;
-// In a 3-character pattern you can maximally step forwards 3 characters
-// at a time, which is not always enough to pay for the extra logic.
-const int kPatternTooShortForBoyerMoore = 2;
-
-// Identifies the sort of regexps where the regexp engine is faster
-// than the code used for atom matches.
-static bool HasFewDifferentCharacters(Handle<String> pattern) {
- int length = Min(kMaxLookaheadForBoyerMoore, pattern->length());
- if (length <= kPatternTooShortForBoyerMoore) return false;
- const int kMod = 128;
- bool character_found[kMod];
- int different = 0;
- memset(&character_found[0], 0, sizeof(character_found));
- for (int i = 0; i < length; i++) {
- int ch = (pattern->Get(i) & (kMod - 1));
- if (!character_found[ch]) {
- character_found[ch] = true;
- different++;
- // We declare a regexp low-alphabet if it has at least 3 times as many
- // characters as it has different characters.
- if (different * 3 > length) return false;
- }
- }
- return true;
-}
-
-// Generic RegExp methods. Dispatches to implementation specific methods.
-
-MaybeHandle<Object> RegExpImpl::Compile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags) {
- DCHECK(pattern->IsFlat());
-
- Zone zone(isolate->allocator(), ZONE_NAME);
- CompilationCache* compilation_cache = isolate->compilation_cache();
- MaybeHandle<FixedArray> maybe_cached =
- compilation_cache->LookupRegExp(pattern, flags);
- Handle<FixedArray> cached;
- if (maybe_cached.ToHandle(&cached)) {
- re->set_data(*cached);
- return re;
- }
-
- PostponeInterruptsScope postpone(isolate);
- RegExpCompileData parse_result;
- FlatStringReader reader(isolate, pattern);
- DCHECK(!isolate->has_pending_exception());
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &parse_result)) {
- // Throw an exception if we fail to parse the pattern.
- return ThrowRegExpException(isolate, re, pattern, parse_result.error);
- }
-
- bool has_been_compiled = false;
-
- if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
- !HasFewDifferentCharacters(pattern)) {
- // Parse-tree is a single atom that is equal to the pattern.
- AtomCompile(isolate, re, pattern, flags, pattern);
- has_been_compiled = true;
- } else if (parse_result.tree->IsAtom() && !IsSticky(flags) &&
- parse_result.capture_count == 0) {
- RegExpAtom* atom = parse_result.tree->AsAtom();
- Vector<const uc16> atom_pattern = atom->data();
- Handle<String> atom_string;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, atom_string,
- isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
- if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) {
- AtomCompile(isolate, re, pattern, flags, atom_string);
- has_been_compiled = true;
- }
- }
- if (!has_been_compiled) {
- IrregexpInitialize(isolate, re, pattern, flags, parse_result.capture_count);
- }
- DCHECK(re->data().IsFixedArray());
- // Compilation succeeded so the data is set on the regexp
- // and we can store it in the cache.
- Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
- compilation_cache->PutRegExp(pattern, flags, data);
-
- return re;
-}
-
-MaybeHandle<Object> RegExpImpl::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info) {
- switch (regexp->TypeTag()) {
- case JSRegExp::ATOM:
- return AtomExec(isolate, regexp, subject, index, last_match_info);
- case JSRegExp::IRREGEXP: {
- return IrregexpExec(isolate, regexp, subject, index, last_match_info);
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// RegExp Atom implementation: Simple string search using indexOf.
-
-void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
- Handle<String> match_pattern) {
- isolate->factory()->SetRegExpAtomData(re, JSRegExp::ATOM, pattern, flags,
- match_pattern);
-}
-
-static void SetAtomLastCapture(Isolate* isolate,
- Handle<RegExpMatchInfo> last_match_info,
- String subject, int from, int to) {
- SealHandleScope shs(isolate);
- last_match_info->SetNumberOfCaptureRegisters(2);
- last_match_info->SetLastSubject(subject);
- last_match_info->SetLastInput(subject);
- last_match_info->SetCapture(0, from);
- last_match_info->SetCapture(1, to);
-}
-
-int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index, int32_t* output,
- int output_size) {
- DCHECK_LE(0, index);
- DCHECK_LE(index, subject->length());
-
- subject = String::Flatten(isolate, subject);
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
-
- String needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
- int needle_len = needle.length();
- DCHECK(needle.IsFlat());
- DCHECK_LT(0, needle_len);
-
- if (index + needle_len > subject->length()) {
- return RegExpImpl::RE_FAILURE;
- }
-
- for (int i = 0; i < output_size; i += 2) {
- String::FlatContent needle_content = needle.GetFlatContent(no_gc);
- String::FlatContent subject_content = subject->GetFlatContent(no_gc);
- DCHECK(needle_content.IsFlat());
- DCHECK(subject_content.IsFlat());
- // dispatch on type of strings
- index =
- (needle_content.IsOneByte()
- ? (subject_content.IsOneByte()
- ? SearchString(isolate, subject_content.ToOneByteVector(),
- needle_content.ToOneByteVector(), index)
- : SearchString(isolate, subject_content.ToUC16Vector(),
- needle_content.ToOneByteVector(), index))
- : (subject_content.IsOneByte()
- ? SearchString(isolate, subject_content.ToOneByteVector(),
- needle_content.ToUC16Vector(), index)
- : SearchString(isolate, subject_content.ToUC16Vector(),
- needle_content.ToUC16Vector(), index)));
- if (index == -1) {
- return i / 2; // Return number of matches.
- } else {
- output[i] = index;
- output[i+1] = index + needle_len;
- index += needle_len;
- }
- }
- return output_size / 2;
-}
-
-Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info) {
- static const int kNumRegisters = 2;
- STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize);
- int32_t* output_registers = isolate->jsregexp_static_offsets_vector();
-
- int res =
- AtomExecRaw(isolate, re, subject, index, output_registers, kNumRegisters);
-
- if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
-
- DCHECK_EQ(res, RegExpImpl::RE_SUCCESS);
- SealHandleScope shs(isolate);
- SetAtomLastCapture(isolate, last_match_info, *subject, output_registers[0],
- output_registers[1]);
- return last_match_info;
-}
-
-
-// Irregexp implementation.
-
-// Ensures that the regexp object contains a compiled version of the
-// source for either one-byte or two-byte subject strings.
-// If the compiled version doesn't already exist, it is compiled
-// from the source pattern.
-// If compilation fails, an exception is thrown and this function
-// returns false.
-bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> sample_subject,
- bool is_one_byte) {
- Object compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
- if (compiled_code != Smi::FromInt(JSRegExp::kUninitializedValue)) {
- DCHECK(FLAG_regexp_interpret_all ? compiled_code.IsByteArray()
- : compiled_code.IsCode());
- return true;
- }
- return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
-}
-
-bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> sample_subject,
- bool is_one_byte) {
- // Compile the RegExp.
- Zone zone(isolate->allocator(), ZONE_NAME);
- PostponeInterruptsScope postpone(isolate);
-#ifdef DEBUG
- Object entry = re->DataAt(JSRegExp::code_index(is_one_byte));
- // When arriving here entry can only be a smi representing an uncompiled
- // regexp.
- DCHECK(entry.IsSmi());
- int entry_value = Smi::ToInt(entry);
- DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
-#endif
-
- JSRegExp::Flags flags = re->GetFlags();
-
- Handle<String> pattern(re->Pattern(), isolate);
- pattern = String::Flatten(isolate, pattern);
- RegExpCompileData compile_data;
- FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &compile_data)) {
- // Throw an exception if we fail to parse the pattern.
- // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
- USE(ThrowRegExpException(isolate, re, pattern, compile_data.error));
- return false;
- }
- RegExpEngine::CompilationResult result =
- RegExpEngine::Compile(isolate, &zone, &compile_data, flags, pattern,
- sample_subject, is_one_byte);
- if (result.error_message != nullptr) {
- // Unable to compile regexp.
- if (FLAG_correctness_fuzzer_suppressions &&
- strncmp(result.error_message, "Stack overflow", 15) == 0) {
- FATAL("Aborting on stack overflow");
- }
- Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
- CStrVector(result.error_message)).ToHandleChecked();
- ThrowRegExpException(isolate, re, error_message);
- return false;
- }
-
- Handle<FixedArray> data =
- Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
- data->set(JSRegExp::code_index(is_one_byte), result.code);
- SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map);
- int register_max = IrregexpMaxRegisterCount(*data);
- if (result.num_registers > register_max) {
- SetIrregexpMaxRegisterCount(*data, result.num_registers);
- }
-
- return true;
-}
-
-int RegExpImpl::IrregexpMaxRegisterCount(FixedArray re) {
- return Smi::cast(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex)).value();
-}
-
-void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray re, int value) {
- re.set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
-}
-
-void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray re,
- Handle<FixedArray> value) {
- if (value.is_null()) {
- re.set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
- } else {
- re.set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
- }
-}
-
-int RegExpImpl::IrregexpNumberOfCaptures(FixedArray re) {
- return Smi::ToInt(re.get(JSRegExp::kIrregexpCaptureCountIndex));
-}
-
-int RegExpImpl::IrregexpNumberOfRegisters(FixedArray re) {
- return Smi::ToInt(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex));
-}
-
-ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
- return ByteArray::cast(re.get(JSRegExp::code_index(is_one_byte)));
-}
-
-Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
- return Code::cast(re.get(JSRegExp::code_index(is_one_byte)));
-}
-
-void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags, int capture_count) {
- // Initialize compiled code entries to null.
- isolate->factory()->SetRegExpIrregexpData(re, JSRegExp::IRREGEXP, pattern,
- flags, capture_count);
-}
-
-int RegExpImpl::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject) {
- DCHECK(subject->IsFlat());
-
- // Check representation of the underlying storage.
- bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
- if (!EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte)) return -1;
-
- if (FLAG_regexp_interpret_all) {
- // Byte-code regexp needs space allocated for all its registers.
- // The result captures are copied to the start of the registers array
- // if the match succeeds. This way those registers are not clobbered
- // when we set the last match info from last successful match.
- return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data())) +
- (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
- } else {
- // Native regexp only needs room to output captures. Registers are handled
- // internally.
- return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
- }
-}
-
-int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index,
- int32_t* output, int output_size) {
- Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
-
- DCHECK_LE(0, index);
- DCHECK_LE(index, subject->length());
- DCHECK(subject->IsFlat());
-
- bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
-
- if (!FLAG_regexp_interpret_all) {
- DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
- do {
- EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
- Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
- // The stack is used to allocate registers for the compiled regexp code.
- // This means that in case of failure, the output registers array is left
- // untouched and contains the capture results from the previous successful
- // match. We can use that to set the last match info lazily.
- int res = NativeRegExpMacroAssembler::Match(code, subject, output,
- output_size, index, isolate);
- if (res != NativeRegExpMacroAssembler::RETRY) {
- DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION ||
- isolate->has_pending_exception());
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) ==
- RE_SUCCESS);
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::FAILURE) ==
- RE_FAILURE);
- STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION) ==
- RE_EXCEPTION);
- return res;
- }
- // If result is RETRY, the string has changed representation, and we
- // must restart from scratch.
- // In this case, it means we must make sure we are prepared to handle
- // the, potentially, different subject (the string can switch between
- // being internal and external, and even between being Latin1 and UC16,
- // but the characters are always the same).
- IrregexpPrepare(isolate, regexp, subject);
- is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
- } while (true);
- UNREACHABLE();
- } else {
- DCHECK(FLAG_regexp_interpret_all);
- DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
- // We must have done EnsureCompiledIrregexp, so we can get the number of
- // registers.
- int number_of_capture_registers =
- (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
- int32_t* raw_output = &output[number_of_capture_registers];
-
- do {
- // We do not touch the actual capture result registers until we know there
- // has been a match so that we can use those capture results to set the
- // last match info.
- for (int i = number_of_capture_registers - 1; i >= 0; i--) {
- raw_output[i] = -1;
- }
- Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
- isolate);
-
- IrregexpInterpreter::Result result = IrregexpInterpreter::Match(
- isolate, byte_codes, subject, raw_output, index);
- DCHECK_IMPLIES(result == IrregexpInterpreter::EXCEPTION,
- isolate->has_pending_exception());
-
- switch (result) {
- case IrregexpInterpreter::SUCCESS:
- // Copy capture results to the start of the registers array.
- MemCopy(output, raw_output,
- number_of_capture_registers * sizeof(int32_t));
- return result;
- case IrregexpInterpreter::EXCEPTION:
- case IrregexpInterpreter::FAILURE:
- return result;
- case IrregexpInterpreter::RETRY:
- // The string has changed representation, and we must restart the
- // match.
- is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
- EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
- break;
- }
- } while (true);
- UNREACHABLE();
- }
-}
-
-MaybeHandle<Object> RegExpImpl::IrregexpExec(
- Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int previous_index, Handle<RegExpMatchInfo> last_match_info) {
- DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
-
- subject = String::Flatten(isolate, subject);
-
- // Prepare space for the return values.
-#ifdef DEBUG
- if (FLAG_regexp_interpret_all && FLAG_trace_regexp_bytecodes) {
- String pattern = regexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", pattern.ToCString().get());
- PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
- }
-#endif
- int required_registers =
- RegExpImpl::IrregexpPrepare(isolate, regexp, subject);
- if (required_registers < 0) {
- // Compiling failed with an exception.
- DCHECK(isolate->has_pending_exception());
- return MaybeHandle<Object>();
- }
-
- int32_t* output_registers = nullptr;
- if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- output_registers = NewArray<int32_t>(required_registers);
- }
- std::unique_ptr<int32_t[]> auto_release(output_registers);
- if (output_registers == nullptr) {
- output_registers = isolate->jsregexp_static_offsets_vector();
- }
-
- int res =
- RegExpImpl::IrregexpExecRaw(isolate, regexp, subject, previous_index,
- output_registers, required_registers);
- if (res == RE_SUCCESS) {
- int capture_count =
- IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
- return SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
- output_registers);
- }
- if (res == RE_EXCEPTION) {
- DCHECK(isolate->has_pending_exception());
- return MaybeHandle<Object>();
- }
- DCHECK(res == RE_FAILURE);
- return isolate->factory()->null_value();
-}
-
-Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo(
- Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
- Handle<String> subject, int capture_count, int32_t* match) {
- // This is the only place where match infos can grow. If, after executing the
- // regexp, RegExpExecStub finds that the match info is too small, it restarts
- // execution in RegExpImpl::Exec, which finally grows the match info right
- // here.
-
- int capture_register_count = (capture_count + 1) * 2;
- Handle<RegExpMatchInfo> result = RegExpMatchInfo::ReserveCaptures(
- isolate, last_match_info, capture_register_count);
- result->SetNumberOfCaptureRegisters(capture_register_count);
-
- if (*result != *last_match_info) {
- if (*last_match_info == *isolate->regexp_last_match_info()) {
- // This inner condition is only needed for special situations like the
- // regexp fuzzer, where we pass our own custom RegExpMatchInfo to
- // RegExpImpl::Exec; there actually want to bypass the Isolate's match
- // info and execute the regexp without side effects.
- isolate->native_context()->set_regexp_last_match_info(*result);
- }
- }
-
- DisallowHeapAllocation no_allocation;
- if (match != nullptr) {
- for (int i = 0; i < capture_register_count; i += 2) {
- result->SetCapture(i, match[i]);
- result->SetCapture(i + 1, match[i + 1]);
- }
- }
- result->SetLastSubject(*subject);
- result->SetLastInput(*subject);
- return result;
-}
-
-RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
- Handle<String> subject, Isolate* isolate)
- : register_array_(nullptr),
- register_array_size_(0),
- regexp_(regexp),
- subject_(subject),
- isolate_(isolate) {
- bool interpreted = FLAG_regexp_interpret_all;
-
- if (regexp_->TypeTag() == JSRegExp::ATOM) {
- static const int kAtomRegistersPerMatch = 2;
- registers_per_match_ = kAtomRegistersPerMatch;
- // There is no distinction between interpreted and native for atom regexps.
- interpreted = false;
- } else {
- registers_per_match_ =
- RegExpImpl::IrregexpPrepare(isolate_, regexp_, subject_);
- if (registers_per_match_ < 0) {
- num_matches_ = -1; // Signal exception.
- return;
- }
- }
-
- DCHECK(IsGlobal(regexp->GetFlags()));
- if (!interpreted) {
- register_array_size_ =
- Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
- max_matches_ = register_array_size_ / registers_per_match_;
- } else {
- // Global loop in interpreted regexp is not implemented. We choose
- // the size of the offsets vector so that it can only store one match.
- register_array_size_ = registers_per_match_;
- max_matches_ = 1;
- }
-
- if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- register_array_ = NewArray<int32_t>(register_array_size_);
- } else {
- register_array_ = isolate->jsregexp_static_offsets_vector();
- }
-
- // Set state so that fetching the results the first time triggers a call
- // to the compiled regexp.
- current_match_index_ = max_matches_ - 1;
- num_matches_ = max_matches_;
- DCHECK_LE(2, registers_per_match_); // Each match has at least one capture.
- DCHECK_GE(register_array_size_, registers_per_match_);
- int32_t* last_match =
- &register_array_[current_match_index_ * registers_per_match_];
- last_match[0] = -1;
- last_match[1] = 0;
-}
-
-int RegExpImpl::GlobalCache::AdvanceZeroLength(int last_index) {
- if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() &&
- unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
- unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
- // Advance over the surrogate pair.
- return last_index + 2;
- }
- return last_index + 1;
-}
-
-// -------------------------------------------------------------------
-// Implementation of the Irregexp regular expression engine.
-//
-// The Irregexp regular expression engine is intended to be a complete
-// implementation of ECMAScript regular expressions. It generates either
-// bytecodes or native code.
-
-// The Irregexp regexp engine is structured in three steps.
-// 1) The parser generates an abstract syntax tree. See ast.cc.
-// 2) From the AST a node network is created. The nodes are all
-// subclasses of RegExpNode. The nodes represent states when
-// executing a regular expression. Several optimizations are
-// performed on the node network.
-// 3) From the nodes we generate either byte codes or native code
-// that can actually execute the regular expression (perform
-// the search). The code generation step is described in more
-// detail below.
-
-// Code generation.
-//
-// The nodes are divided into four main categories.
-// * Choice nodes
-// These represent places where the regular expression can
-// match in more than one way. For example on entry to an
-// alternation (foo|bar) or a repetition (*, +, ? or {}).
-// * Action nodes
-// These represent places where some action should be
-// performed. Examples include recording the current position
-// in the input string to a register (in order to implement
-// captures) or other actions on register for example in order
-// to implement the counters needed for {} repetitions.
-// * Matching nodes
-// These attempt to match some element part of the input string.
-// Examples of elements include character classes, plain strings
-// or back references.
-// * End nodes
-// These are used to implement the actions required on finding
-// a successful match or failing to find a match.
-//
-// The code generated (whether as byte codes or native code) maintains
-// some state as it runs. This consists of the following elements:
-//
-// * The capture registers. Used for string captures.
-// * Other registers. Used for counters etc.
-// * The current position.
-// * The stack of backtracking information. Used when a matching node
-// fails to find a match and needs to try an alternative.
-//
-// Conceptual regular expression execution model:
-//
-// There is a simple conceptual model of regular expression execution
-// which will be presented first. The actual code generated is a more
-// efficient simulation of the simple conceptual model:
-//
-// * Choice nodes are implemented as follows:
-// For each choice except the last {
-// push current position
-// push backtrack code location
-// <generate code to test for choice>
-// backtrack code location:
-// pop current position
-// }
-// <generate code to test for last choice>
-//
-// * Actions nodes are generated as follows
-// <push affected registers on backtrack stack>
-// <generate code to perform action>
-// push backtrack code location
-// <generate code to test for following nodes>
-// backtrack code location:
-// <pop affected registers to restore their state>
-// <pop backtrack location from stack and go to it>
-//
-// * Matching nodes are generated as follows:
-// if input string matches at current position
-// update current position
-// <generate code to test for following nodes>
-// else
-// <pop backtrack location from stack and go to it>
-//
-// Thus it can be seen that the current position is saved and restored
-// by the choice nodes, whereas the registers are saved and restored by
-// by the action nodes that manipulate them.
-//
-// The other interesting aspect of this model is that nodes are generated
-// at the point where they are needed by a recursive call to Emit(). If
-// the node has already been code generated then the Emit() call will
-// generate a jump to the previously generated code instead. In order to
-// limit recursion it is possible for the Emit() function to put the node
-// on a work list for later generation and instead generate a jump. The
-// destination of the jump is resolved later when the code is generated.
-//
-// Actual regular expression code generation.
-//
-// Code generation is actually more complicated than the above. In order
-// to improve the efficiency of the generated code some optimizations are
-// performed
-//
-// * Choice nodes have 1-character lookahead.
-// A choice node looks at the following character and eliminates some of
-// the choices immediately based on that character. This is not yet
-// implemented.
-// * Simple greedy loops store reduced backtracking information.
-// A quantifier like /.*foo/m will greedily match the whole input. It will
-// then need to backtrack to a point where it can match "foo". The naive
-// implementation of this would push each character position onto the
-// backtracking stack, then pop them off one by one. This would use space
-// proportional to the length of the input string. However since the "."
-// can only match in one way and always has a constant length (in this case
-// of 1) it suffices to store the current position on the top of the stack
-// once. Matching now becomes merely incrementing the current position and
-// backtracking becomes decrementing the current position and checking the
-// result against the stored current position. This is faster and saves
-// space.
-// * The current state is virtualized.
-// This is used to defer expensive operations until it is clear that they
-// are needed and to generate code for a node more than once, allowing
-// specialized an efficient versions of the code to be created. This is
-// explained in the section below.
-//
-// Execution state virtualization.
-//
-// Instead of emitting code, nodes that manipulate the state can record their
-// manipulation in an object called the Trace. The Trace object can record a
-// current position offset, an optional backtrack code location on the top of
-// the virtualized backtrack stack and some register changes. When a node is
-// to be emitted it can flush the Trace or update it. Flushing the Trace
-// will emit code to bring the actual state into line with the virtual state.
-// Avoiding flushing the state can postpone some work (e.g. updates of capture
-// registers). Postponing work can save time when executing the regular
-// expression since it may be found that the work never has to be done as a
-// failure to match can occur. In addition it is much faster to jump to a
-// known backtrack code location than it is to pop an unknown backtrack
-// location from the stack and jump there.
-//
-// The virtual state found in the Trace affects code generation. For example
-// the virtual state contains the difference between the actual current
-// position and the virtual current position, and matching code needs to use
-// this offset to attempt a match in the correct location of the input
-// string. Therefore code generated for a non-trivial trace is specialized
-// to that trace. The code generator therefore has the ability to generate
-// code for each node several times. In order to limit the size of the
-// generated code there is an arbitrary limit on how many specialized sets of
-// code may be generated for a given node. If the limit is reached, the
-// trace is flushed and a generic version of the code for a node is emitted.
-// This is subsequently used for that node. The code emitted for non-generic
-// trace is not recorded in the node and so it cannot currently be reused in
-// the event that code generation is requested for an identical trace.
-
-
-void RegExpTree::AppendToText(RegExpText* text, Zone* zone) {
- UNREACHABLE();
-}
-
-
-void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) {
- text->AddElement(TextElement::Atom(this), zone);
-}
-
-
-void RegExpCharacterClass::AppendToText(RegExpText* text, Zone* zone) {
- text->AddElement(TextElement::CharClass(this), zone);
-}
-
-
-void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
- for (int i = 0; i < elements()->length(); i++)
- text->AddElement(elements()->at(i), zone);
-}
-
-
-TextElement TextElement::Atom(RegExpAtom* atom) {
- return TextElement(ATOM, atom);
-}
-
-
-TextElement TextElement::CharClass(RegExpCharacterClass* char_class) {
- return TextElement(CHAR_CLASS, char_class);
-}
-
-
-int TextElement::length() const {
- switch (text_type()) {
- case ATOM:
- return atom()->length();
-
- case CHAR_CLASS:
- return 1;
- }
- UNREACHABLE();
-}
-
-
-DispatchTable* ChoiceNode::GetTable(bool ignore_case) {
- if (table_ == nullptr) {
- table_ = new(zone()) DispatchTable(zone());
- DispatchTableConstructor cons(table_, ignore_case, zone());
- cons.BuildTable(this);
- }
- return table_;
-}
-
-
-class FrequencyCollator {
- public:
- FrequencyCollator() : total_samples_(0) {
- for (int i = 0; i < RegExpMacroAssembler::kTableSize; i++) {
- frequencies_[i] = CharacterFrequency(i);
- }
- }
-
- void CountCharacter(int character) {
- int index = (character & RegExpMacroAssembler::kTableMask);
- frequencies_[index].Increment();
- total_samples_++;
- }
-
- // Does not measure in percent, but rather per-128 (the table size from the
- // regexp macro assembler).
- int Frequency(int in_character) {
- DCHECK((in_character & RegExpMacroAssembler::kTableMask) == in_character);
- if (total_samples_ < 1) return 1; // Division by zero.
- int freq_in_per128 =
- (frequencies_[in_character].counter() * 128) / total_samples_;
- return freq_in_per128;
- }
-
- private:
- class CharacterFrequency {
- public:
- CharacterFrequency() : counter_(0), character_(-1) { }
- explicit CharacterFrequency(int character)
- : counter_(0), character_(character) { }
-
- void Increment() { counter_++; }
- int counter() { return counter_; }
- int character() { return character_; }
-
- private:
- int counter_;
- int character_;
- };
-
-
- private:
- CharacterFrequency frequencies_[RegExpMacroAssembler::kTableSize];
- int total_samples_;
-};
-
-
-class RegExpCompiler {
- public:
- RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- bool is_one_byte);
-
- int AllocateRegister() {
- if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
- reg_exp_too_big_ = true;
- return next_register_;
- }
- return next_register_++;
- }
-
- // Lookarounds to match lone surrogates for unicode character class matches
- // are never nested. We can therefore reuse registers.
- int UnicodeLookaroundStackRegister() {
- if (unicode_lookaround_stack_register_ == kNoRegister) {
- unicode_lookaround_stack_register_ = AllocateRegister();
- }
- return unicode_lookaround_stack_register_;
- }
-
- int UnicodeLookaroundPositionRegister() {
- if (unicode_lookaround_position_register_ == kNoRegister) {
- unicode_lookaround_position_register_ = AllocateRegister();
- }
- return unicode_lookaround_position_register_;
- }
-
- RegExpEngine::CompilationResult Assemble(Isolate* isolate,
- RegExpMacroAssembler* assembler,
- RegExpNode* start, int capture_count,
- Handle<String> pattern);
-
- inline void AddWork(RegExpNode* node) {
- if (!node->on_work_list() && !node->label()->is_bound()) {
- node->set_on_work_list(true);
- work_list_->push_back(node);
- }
- }
-
- static const int kImplementationOffset = 0;
- static const int kNumberOfRegistersOffset = 0;
- static const int kCodeOffset = 1;
-
- RegExpMacroAssembler* macro_assembler() { return macro_assembler_; }
- EndNode* accept() { return accept_; }
-
- static const int kMaxRecursion = 100;
- inline int recursion_depth() { return recursion_depth_; }
- inline void IncrementRecursionDepth() { recursion_depth_++; }
- inline void DecrementRecursionDepth() { recursion_depth_--; }
-
- void SetRegExpTooBig() { reg_exp_too_big_ = true; }
-
- inline bool one_byte() { return one_byte_; }
- inline bool optimize() { return optimize_; }
- inline void set_optimize(bool value) { optimize_ = value; }
- inline bool limiting_recursion() { return limiting_recursion_; }
- inline void set_limiting_recursion(bool value) {
- limiting_recursion_ = value;
- }
- bool read_backward() { return read_backward_; }
- void set_read_backward(bool value) { read_backward_ = value; }
- FrequencyCollator* frequency_collator() { return &frequency_collator_; }
-
- int current_expansion_factor() { return current_expansion_factor_; }
- void set_current_expansion_factor(int value) {
- current_expansion_factor_ = value;
- }
-
- Isolate* isolate() const { return isolate_; }
- Zone* zone() const { return zone_; }
-
- static const int kNoRegister = -1;
-
- private:
- EndNode* accept_;
- int next_register_;
- int unicode_lookaround_stack_register_;
- int unicode_lookaround_position_register_;
- std::vector<RegExpNode*>* work_list_;
- int recursion_depth_;
- RegExpMacroAssembler* macro_assembler_;
- bool one_byte_;
- bool reg_exp_too_big_;
- bool limiting_recursion_;
- bool optimize_;
- bool read_backward_;
- int current_expansion_factor_;
- FrequencyCollator frequency_collator_;
- Isolate* isolate_;
- Zone* zone_;
-};
-
-
-class RecursionCheck {
- public:
- explicit RecursionCheck(RegExpCompiler* compiler) : compiler_(compiler) {
- compiler->IncrementRecursionDepth();
- }
- ~RecursionCheck() { compiler_->DecrementRecursionDepth(); }
- private:
- RegExpCompiler* compiler_;
-};
-
-
-static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
- return RegExpEngine::CompilationResult(isolate, "RegExp too big");
-}
-
-
-// Attempts to compile the regexp using an Irregexp code generator. Returns
-// a fixed array or a null handle depending on whether it succeeded.
-RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- bool one_byte)
- : next_register_(2 * (capture_count + 1)),
- unicode_lookaround_stack_register_(kNoRegister),
- unicode_lookaround_position_register_(kNoRegister),
- work_list_(nullptr),
- recursion_depth_(0),
- one_byte_(one_byte),
- reg_exp_too_big_(false),
- limiting_recursion_(false),
- optimize_(FLAG_regexp_optimization),
- read_backward_(false),
- current_expansion_factor_(1),
- frequency_collator_(),
- isolate_(isolate),
- zone_(zone) {
- accept_ = new(zone) EndNode(EndNode::ACCEPT, zone);
- DCHECK_GE(RegExpMacroAssembler::kMaxRegister, next_register_ - 1);
-}
-
-RegExpEngine::CompilationResult RegExpCompiler::Assemble(
- Isolate* isolate, RegExpMacroAssembler* macro_assembler, RegExpNode* start,
- int capture_count, Handle<String> pattern) {
-#ifdef DEBUG
- if (FLAG_trace_regexp_assembler)
- macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler);
- else
-#endif
- macro_assembler_ = macro_assembler;
-
- std::vector<RegExpNode*> work_list;
- work_list_ = &work_list;
- Label fail;
- macro_assembler_->PushBacktrack(&fail);
- Trace new_trace;
- start->Emit(this, &new_trace);
- macro_assembler_->Bind(&fail);
- macro_assembler_->Fail();
- while (!work_list.empty()) {
- RegExpNode* node = work_list.back();
- work_list.pop_back();
- node->set_on_work_list(false);
- if (!node->label()->is_bound()) node->Emit(this, &new_trace);
- }
- if (reg_exp_too_big_) {
- macro_assembler_->AbortedCodeGeneration();
- return IrregexpRegExpTooBig(isolate_);
- }
-
- Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
- isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
- work_list_ = nullptr;
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code && !FLAG_regexp_interpret_all) {
- CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
- OFStream os(trace_scope.file());
- Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
- }
-#endif
-#ifdef DEBUG
- if (FLAG_trace_regexp_assembler) {
- delete macro_assembler_;
- }
-#endif
- return RegExpEngine::CompilationResult(*code, next_register_);
-}
-
-
-bool Trace::DeferredAction::Mentions(int that) {
- if (action_type() == ActionNode::CLEAR_CAPTURES) {
- Interval range = static_cast<DeferredClearCaptures*>(this)->range();
- return range.Contains(that);
- } else {
- return reg() == that;
- }
-}
-
-
-bool Trace::mentions_reg(int reg) {
- for (DeferredAction* action = actions_; action != nullptr;
- action = action->next()) {
- if (action->Mentions(reg))
- return true;
- }
- return false;
-}
-
-
-bool Trace::GetStoredPosition(int reg, int* cp_offset) {
- DCHECK_EQ(0, *cp_offset);
- for (DeferredAction* action = actions_; action != nullptr;
- action = action->next()) {
- if (action->Mentions(reg)) {
- if (action->action_type() == ActionNode::STORE_POSITION) {
- *cp_offset = static_cast<DeferredCapture*>(action)->cp_offset();
- return true;
- } else {
- return false;
- }
- }
- }
- return false;
-}
-
-
-int Trace::FindAffectedRegisters(OutSet* affected_registers,
- Zone* zone) {
- int max_register = RegExpCompiler::kNoRegister;
- for (DeferredAction* action = actions_; action != nullptr;
- action = action->next()) {
- if (action->action_type() == ActionNode::CLEAR_CAPTURES) {
- Interval range = static_cast<DeferredClearCaptures*>(action)->range();
- for (int i = range.from(); i <= range.to(); i++)
- affected_registers->Set(i, zone);
- if (range.to() > max_register) max_register = range.to();
- } else {
- affected_registers->Set(action->reg(), zone);
- if (action->reg() > max_register) max_register = action->reg();
- }
- }
- return max_register;
-}
-
-
-void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
- int max_register,
- const OutSet& registers_to_pop,
- const OutSet& registers_to_clear) {
- for (int reg = max_register; reg >= 0; reg--) {
- if (registers_to_pop.Get(reg)) {
- assembler->PopRegister(reg);
- } else if (registers_to_clear.Get(reg)) {
- int clear_to = reg;
- while (reg > 0 && registers_to_clear.Get(reg - 1)) {
- reg--;
- }
- assembler->ClearRegisters(reg, clear_to);
- }
- }
-}
-
-
-void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
- int max_register,
- const OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear,
- Zone* zone) {
- // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
- const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
-
- // Count pushes performed to force a stack limit check occasionally.
- int pushes = 0;
-
- for (int reg = 0; reg <= max_register; reg++) {
- if (!affected_registers.Get(reg)) {
- continue;
- }
-
- // The chronologically first deferred action in the trace
- // is used to infer the action needed to restore a register
- // to its previous state (or not, if it's safe to ignore it).
- enum DeferredActionUndoType { IGNORE, RESTORE, CLEAR };
- DeferredActionUndoType undo_action = IGNORE;
-
- int value = 0;
- bool absolute = false;
- bool clear = false;
- static const int kNoStore = kMinInt;
- int store_position = kNoStore;
- // This is a little tricky because we are scanning the actions in reverse
- // historical order (newest first).
- for (DeferredAction* action = actions_; action != nullptr;
- action = action->next()) {
- if (action->Mentions(reg)) {
- switch (action->action_type()) {
- case ActionNode::SET_REGISTER: {
- Trace::DeferredSetRegister* psr =
- static_cast<Trace::DeferredSetRegister*>(action);
- if (!absolute) {
- value += psr->value();
- absolute = true;
- }
- // SET_REGISTER is currently only used for newly introduced loop
- // counters. They can have a significant previous value if they
- // occur in a loop. TODO(lrn): Propagate this information, so
- // we can set undo_action to IGNORE if we know there is no value to
- // restore.
- undo_action = RESTORE;
- DCHECK_EQ(store_position, kNoStore);
- DCHECK(!clear);
- break;
- }
- case ActionNode::INCREMENT_REGISTER:
- if (!absolute) {
- value++;
- }
- DCHECK_EQ(store_position, kNoStore);
- DCHECK(!clear);
- undo_action = RESTORE;
- break;
- case ActionNode::STORE_POSITION: {
- Trace::DeferredCapture* pc =
- static_cast<Trace::DeferredCapture*>(action);
- if (!clear && store_position == kNoStore) {
- store_position = pc->cp_offset();
- }
-
- // For captures we know that stores and clears alternate.
- // Other register, are never cleared, and if the occur
- // inside a loop, they might be assigned more than once.
- if (reg <= 1) {
- // Registers zero and one, aka "capture zero", is
- // always set correctly if we succeed. There is no
- // need to undo a setting on backtrack, because we
- // will set it again or fail.
- undo_action = IGNORE;
- } else {
- undo_action = pc->is_capture() ? CLEAR : RESTORE;
- }
- DCHECK(!absolute);
- DCHECK_EQ(value, 0);
- break;
- }
- case ActionNode::CLEAR_CAPTURES: {
- // Since we're scanning in reverse order, if we've already
- // set the position we have to ignore historically earlier
- // clearing operations.
- if (store_position == kNoStore) {
- clear = true;
- }
- undo_action = RESTORE;
- DCHECK(!absolute);
- DCHECK_EQ(value, 0);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- // Prepare for the undo-action (e.g., push if it's going to be popped).
- if (undo_action == RESTORE) {
- pushes++;
- RegExpMacroAssembler::StackCheckFlag stack_check =
- RegExpMacroAssembler::kNoStackLimitCheck;
- if (pushes == push_limit) {
- stack_check = RegExpMacroAssembler::kCheckStackLimit;
- pushes = 0;
- }
-
- assembler->PushRegister(reg, stack_check);
- registers_to_pop->Set(reg, zone);
- } else if (undo_action == CLEAR) {
- registers_to_clear->Set(reg, zone);
- }
- // Perform the chronologically last action (or accumulated increment)
- // for the register.
- if (store_position != kNoStore) {
- assembler->WriteCurrentPositionToRegister(reg, store_position);
- } else if (clear) {
- assembler->ClearRegisters(reg, reg);
- } else if (absolute) {
- assembler->SetRegister(reg, value);
- } else if (value != 0) {
- assembler->AdvanceRegister(reg, value);
- }
- }
-}
-
-
-// This is called as we come into a loop choice node and some other tricky
-// nodes. It normalizes the state of the code generator to ensure we can
-// generate generic code.
-void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- DCHECK(!is_trivial());
-
- if (actions_ == nullptr && backtrack() == nullptr) {
- // Here we just have some deferred cp advances to fix and we are back to
- // a normal situation. We may also have to forget some information gained
- // through a quick check that was already performed.
- if (cp_offset_ != 0) assembler->AdvanceCurrentPosition(cp_offset_);
- // Create a new trivial state and generate the node with that.
- Trace new_state;
- successor->Emit(compiler, &new_state);
- return;
- }
-
- // Generate deferred actions here along with code to undo them again.
- OutSet affected_registers;
-
- if (backtrack() != nullptr) {
- // Here we have a concrete backtrack location. These are set up by choice
- // nodes and so they indicate that we have a deferred save of the current
- // position which we may need to emit here.
- assembler->PushCurrentPosition();
- }
-
- int max_register = FindAffectedRegisters(&affected_registers,
- compiler->zone());
- OutSet registers_to_pop;
- OutSet registers_to_clear;
- PerformDeferredActions(assembler,
- max_register,
- affected_registers,
- &registers_to_pop,
- &registers_to_clear,
- compiler->zone());
- if (cp_offset_ != 0) {
- assembler->AdvanceCurrentPosition(cp_offset_);
- }
-
- // Create a new trivial state and generate the node with that.
- Label undo;
- assembler->PushBacktrack(&undo);
- if (successor->KeepRecursing(compiler)) {
- Trace new_state;
- successor->Emit(compiler, &new_state);
- } else {
- compiler->AddWork(successor);
- assembler->GoTo(successor->label());
- }
-
- // On backtrack we need to restore state.
- assembler->Bind(&undo);
- RestoreAffectedRegisters(assembler,
- max_register,
- registers_to_pop,
- registers_to_clear);
- if (backtrack() == nullptr) {
- assembler->Backtrack();
- } else {
- assembler->PopCurrentPosition();
- assembler->GoTo(backtrack());
- }
-}
-
-
-void NegativeSubmatchSuccess::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- // Omit flushing the trace. We discard the entire stack frame anyway.
-
- if (!label()->is_bound()) {
- // We are completely independent of the trace, since we ignore it,
- // so this code can be used as the generic version.
- assembler->Bind(label());
- }
-
- // Throw away everything on the backtrack stack since the start
- // of the negative submatch and restore the character position.
- assembler->ReadCurrentPositionFromRegister(current_position_register_);
- assembler->ReadStackPointerFromRegister(stack_pointer_register_);
- if (clear_capture_count_ > 0) {
- // Clear any captures that might have been performed during the success
- // of the body of the negative look-ahead.
- int clear_capture_end = clear_capture_start_ + clear_capture_count_ - 1;
- assembler->ClearRegisters(clear_capture_start_, clear_capture_end);
- }
- // Now that we have unwound the stack we find at the top of the stack the
- // backtrack that the BeginSubmatch node got.
- assembler->Backtrack();
-}
-
-
-void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- if (!label()->is_bound()) {
- assembler->Bind(label());
- }
- switch (action_) {
- case ACCEPT:
- assembler->Succeed();
- return;
- case BACKTRACK:
- assembler->GoTo(trace->backtrack());
- return;
- case NEGATIVE_SUBMATCH_SUCCESS:
- // This case is handled in a different virtual method.
- UNREACHABLE();
- }
- UNIMPLEMENTED();
-}
-
-
-void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
- if (guards_ == nullptr) guards_ = new (zone) ZoneList<Guard*>(1, zone);
- guards_->Add(guard, zone);
-}
-
-
-ActionNode* ActionNode::SetRegister(int reg,
- int val,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(SET_REGISTER, on_success);
- result->data_.u_store_register.reg = reg;
- result->data_.u_store_register.value = val;
- return result;
-}
-
-
-ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(INCREMENT_REGISTER, on_success);
- result->data_.u_increment_register.reg = reg;
- return result;
-}
-
-
-ActionNode* ActionNode::StorePosition(int reg,
- bool is_capture,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(STORE_POSITION, on_success);
- result->data_.u_position_register.reg = reg;
- result->data_.u_position_register.is_capture = is_capture;
- return result;
-}
-
-
-ActionNode* ActionNode::ClearCaptures(Interval range,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(CLEAR_CAPTURES, on_success);
- result->data_.u_clear_captures.range_from = range.from();
- result->data_.u_clear_captures.range_to = range.to();
- return result;
-}
-
-
-ActionNode* ActionNode::BeginSubmatch(int stack_reg,
- int position_reg,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(BEGIN_SUBMATCH, on_success);
- result->data_.u_submatch.stack_pointer_register = stack_reg;
- result->data_.u_submatch.current_position_register = position_reg;
- return result;
-}
-
-
-ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg,
- int position_reg,
- int clear_register_count,
- int clear_register_from,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
- result->data_.u_submatch.stack_pointer_register = stack_reg;
- result->data_.u_submatch.current_position_register = position_reg;
- result->data_.u_submatch.clear_register_count = clear_register_count;
- result->data_.u_submatch.clear_register_from = clear_register_from;
- return result;
-}
-
-
-ActionNode* ActionNode::EmptyMatchCheck(int start_register,
- int repetition_register,
- int repetition_limit,
- RegExpNode* on_success) {
- ActionNode* result =
- new(on_success->zone()) ActionNode(EMPTY_MATCH_CHECK, on_success);
- result->data_.u_empty_match_check.start_register = start_register;
- result->data_.u_empty_match_check.repetition_register = repetition_register;
- result->data_.u_empty_match_check.repetition_limit = repetition_limit;
- return result;
-}
-
-
-#define DEFINE_ACCEPT(Type) \
- void Type##Node::Accept(NodeVisitor* visitor) { \
- visitor->Visit##Type(this); \
- }
-FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
-#undef DEFINE_ACCEPT
-
-
-void LoopChoiceNode::Accept(NodeVisitor* visitor) {
- visitor->VisitLoopChoice(this);
-}
-
-
-// -------------------------------------------------------------------
-// Emit code.
-
-
-void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
- Guard* guard,
- Trace* trace) {
- switch (guard->op()) {
- case Guard::LT:
- DCHECK(!trace->mentions_reg(guard->reg()));
- macro_assembler->IfRegisterGE(guard->reg(),
- guard->value(),
- trace->backtrack());
- break;
- case Guard::GEQ:
- DCHECK(!trace->mentions_reg(guard->reg()));
- macro_assembler->IfRegisterLT(guard->reg(),
- guard->value(),
- trace->backtrack());
- break;
- }
-}
-
-
-// Returns the number of characters in the equivalence class, omitting those
-// that cannot occur in the source string because it is Latin1.
-static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
- bool one_byte_subject,
- unibrow::uchar* letters,
- int letter_length) {
-#ifdef V8_INTL_SUPPORT
- icu::UnicodeSet set;
- set.add(character);
- set = set.closeOver(USET_CASE_INSENSITIVE);
- int32_t range_count = set.getRangeCount();
- int items = 0;
- for (int32_t i = 0; i < range_count; i++) {
- UChar32 start = set.getRangeStart(i);
- UChar32 end = set.getRangeEnd(i);
- CHECK(end - start + items <= letter_length);
- while (start <= end) {
- if (one_byte_subject && start > String::kMaxOneByteCharCode) break;
- letters[items++] = (unibrow::uchar)(start);
- start++;
- }
- }
- return items;
-#else
- int length =
- isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
- // Unibrow returns 0 or 1 for characters where case independence is
- // trivial.
- if (length == 0) {
- letters[0] = character;
- length = 1;
- }
-
- if (one_byte_subject) {
- int new_length = 0;
- for (int i = 0; i < length; i++) {
- if (letters[i] <= String::kMaxOneByteCharCode) {
- letters[new_length++] = letters[i];
- }
- }
- length = new_length;
- }
-
- return length;
-#endif // V8_INTL_SUPPORT
-}
-
-static inline bool EmitSimpleCharacter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- bool bound_checked = false;
- if (!preloaded) {
- assembler->LoadCurrentCharacter(
- cp_offset,
- on_failure,
- check);
- bound_checked = true;
- }
- assembler->CheckNotCharacter(c, on_failure);
- return bound_checked;
-}
-
-
-// Only emits non-letters (things that don't have case). Only used for case
-// independent matches.
-static inline bool EmitAtomNonLetter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool one_byte = compiler->one_byte();
- unibrow::uchar chars[4];
- int length = GetCaseIndependentLetters(isolate, c, one_byte, chars, 4);
- if (length < 1) {
- // This can't match. Must be an one-byte subject and a non-one-byte
- // character. We do not need to do anything since the one-byte pass
- // already handled this.
- return false; // Bounds not checked.
- }
- bool checked = false;
- // We handle the length > 1 case in a later pass.
- if (length == 1) {
- if (one_byte && c > String::kMaxOneByteCharCodeU) {
- // Can't match - see above.
- return false; // Bounds not checked.
- }
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
- checked = check;
- }
- macro_assembler->CheckNotCharacter(c, on_failure);
- }
- return checked;
-}
-
-
-static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
- bool one_byte, uc16 c1, uc16 c2,
- Label* on_failure) {
- uc16 char_mask;
- if (one_byte) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- uc16 exor = c1 ^ c2;
- // Check whether exor has only one bit set.
- if (((exor - 1) & exor) == 0) {
- // If c1 and c2 differ only by one bit.
- // Ecma262UnCanonicalize always gives the highest number last.
- DCHECK(c2 > c1);
- uc16 mask = char_mask ^ exor;
- macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure);
- return true;
- }
- DCHECK(c2 > c1);
- uc16 diff = c2 - c1;
- if (((diff - 1) & diff) == 0 && c1 >= diff) {
- // If the characters differ by 2^n but don't differ by one bit then
- // subtract the difference from the found character, then do the or
- // trick. We avoid the theoretical case where negative numbers are
- // involved in order to simplify code generation.
- uc16 mask = char_mask ^ diff;
- macro_assembler->CheckNotCharacterAfterMinusAnd(c1 - diff,
- diff,
- mask,
- on_failure);
- return true;
- }
- return false;
-}
-
-using EmitCharacterFunction = bool(Isolate* isolate, RegExpCompiler* compiler,
- uc16 c, Label* on_failure, int cp_offset,
- bool check, bool preloaded);
-
-// Only emits letters (things that have case). Only used for case independent
-// matches.
-static inline bool EmitAtomLetter(Isolate* isolate,
- RegExpCompiler* compiler,
- uc16 c,
- Label* on_failure,
- int cp_offset,
- bool check,
- bool preloaded) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- bool one_byte = compiler->one_byte();
- unibrow::uchar chars[4];
- int length = GetCaseIndependentLetters(isolate, c, one_byte, chars, 4);
- if (length <= 1) return false;
- // We may not need to check against the end of the input string
- // if this character lies before a character that matched.
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
- }
- Label ok;
- switch (length) {
- case 2: {
- if (ShortCutEmitCharacterPair(macro_assembler, one_byte, chars[0],
- chars[1], on_failure)) {
- } else {
- macro_assembler->CheckCharacter(chars[0], &ok);
- macro_assembler->CheckNotCharacter(chars[1], on_failure);
- macro_assembler->Bind(&ok);
- }
- break;
- }
- case 4:
- macro_assembler->CheckCharacter(chars[3], &ok);
- V8_FALLTHROUGH;
- case 3:
- macro_assembler->CheckCharacter(chars[0], &ok);
- macro_assembler->CheckCharacter(chars[1], &ok);
- macro_assembler->CheckNotCharacter(chars[2], on_failure);
- macro_assembler->Bind(&ok);
- break;
- default:
- UNREACHABLE();
- }
- return true;
-}
-
-
-static void EmitBoundaryTest(RegExpMacroAssembler* masm,
- int border,
- Label* fall_through,
- Label* above_or_equal,
- Label* below) {
- if (below != fall_through) {
- masm->CheckCharacterLT(border, below);
- if (above_or_equal != fall_through) masm->GoTo(above_or_equal);
- } else {
- masm->CheckCharacterGT(border - 1, above_or_equal);
- }
-}
-
-
-static void EmitDoubleBoundaryTest(RegExpMacroAssembler* masm,
- int first,
- int last,
- Label* fall_through,
- Label* in_range,
- Label* out_of_range) {
- if (in_range == fall_through) {
- if (first == last) {
- masm->CheckNotCharacter(first, out_of_range);
- } else {
- masm->CheckCharacterNotInRange(first, last, out_of_range);
- }
- } else {
- if (first == last) {
- masm->CheckCharacter(first, in_range);
- } else {
- masm->CheckCharacterInRange(first, last, in_range);
- }
- if (out_of_range != fall_through) masm->GoTo(out_of_range);
- }
-}
-
-
-// even_label is for ranges[i] to ranges[i + 1] where i - start_index is even.
-// odd_label is for ranges[i] to ranges[i + 1] where i - start_index is odd.
-static void EmitUseLookupTable(
- RegExpMacroAssembler* masm,
- ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int min_char,
- Label* fall_through,
- Label* even_label,
- Label* odd_label) {
- static const int kSize = RegExpMacroAssembler::kTableSize;
- static const int kMask = RegExpMacroAssembler::kTableMask;
-
- int base = (min_char & ~kMask);
- USE(base);
-
- // Assert that everything is on one kTableSize page.
- for (int i = start_index; i <= end_index; i++) {
- DCHECK_EQ(ranges->at(i) & ~kMask, base);
- }
- DCHECK(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
-
- char templ[kSize];
- Label* on_bit_set;
- Label* on_bit_clear;
- int bit;
- if (even_label == fall_through) {
- on_bit_set = odd_label;
- on_bit_clear = even_label;
- bit = 1;
- } else {
- on_bit_set = even_label;
- on_bit_clear = odd_label;
- bit = 0;
- }
- for (int i = 0; i < (ranges->at(start_index) & kMask) && i < kSize; i++) {
- templ[i] = bit;
- }
- int j = 0;
- bit ^= 1;
- for (int i = start_index; i < end_index; i++) {
- for (j = (ranges->at(i) & kMask); j < (ranges->at(i + 1) & kMask); j++) {
- templ[j] = bit;
- }
- bit ^= 1;
- }
- for (int i = j; i < kSize; i++) {
- templ[i] = bit;
- }
- Factory* factory = masm->isolate()->factory();
- // TODO(erikcorry): Cache these.
- Handle<ByteArray> ba = factory->NewByteArray(kSize, AllocationType::kOld);
- for (int i = 0; i < kSize; i++) {
- ba->set(i, templ[i]);
- }
- masm->CheckBitInTable(ba, on_bit_set);
- if (on_bit_clear != fall_through) masm->GoTo(on_bit_clear);
-}
-
-
-static void CutOutRange(RegExpMacroAssembler* masm,
- ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int cut_index,
- Label* even_label,
- Label* odd_label) {
- bool odd = (((cut_index - start_index) & 1) == 1);
- Label* in_range_label = odd ? odd_label : even_label;
- Label dummy;
- EmitDoubleBoundaryTest(masm,
- ranges->at(cut_index),
- ranges->at(cut_index + 1) - 1,
- &dummy,
- in_range_label,
- &dummy);
- DCHECK(!dummy.is_linked());
- // Cut out the single range by rewriting the array. This creates a new
- // range that is a merger of the two ranges on either side of the one we
- // are cutting out. The oddity of the labels is preserved.
- for (int j = cut_index; j > start_index; j--) {
- ranges->at(j) = ranges->at(j - 1);
- }
- for (int j = cut_index + 1; j < end_index; j++) {
- ranges->at(j) = ranges->at(j + 1);
- }
-}
-
-
-// Unicode case. Split the search space into kSize spaces that are handled
-// with recursion.
-static void SplitSearchSpace(ZoneList<int>* ranges,
- int start_index,
- int end_index,
- int* new_start_index,
- int* new_end_index,
- int* border) {
- static const int kSize = RegExpMacroAssembler::kTableSize;
- static const int kMask = RegExpMacroAssembler::kTableMask;
-
- int first = ranges->at(start_index);
- int last = ranges->at(end_index) - 1;
-
- *new_start_index = start_index;
- *border = (ranges->at(start_index) & ~kMask) + kSize;
- while (*new_start_index < end_index) {
- if (ranges->at(*new_start_index) > *border) break;
- (*new_start_index)++;
- }
- // new_start_index is the index of the first edge that is beyond the
- // current kSize space.
-
- // For very large search spaces we do a binary chop search of the non-Latin1
- // space instead of just going to the end of the current kSize space. The
- // heuristics are complicated a little by the fact that any 128-character
- // encoding space can be quickly tested with a table lookup, so we don't
- // wish to do binary chop search at a smaller granularity than that. A
- // 128-character space can take up a lot of space in the ranges array if,
- // for example, we only want to match every second character (eg. the lower
- // case characters on some Unicode pages).
- int binary_chop_index = (end_index + start_index) / 2;
- // The first test ensures that we get to the code that handles the Latin1
- // range with a single not-taken branch, speeding up this important
- // character range (even non-Latin1 charset-based text has spaces and
- // punctuation).
- if (*border - 1 > String::kMaxOneByteCharCode && // Latin1 case.
- end_index - start_index > (*new_start_index - start_index) * 2 &&
- last - first > kSize * 2 && binary_chop_index > *new_start_index &&
- ranges->at(binary_chop_index) >= first + 2 * kSize) {
- int scan_forward_for_section_border = binary_chop_index;;
- int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
-
- while (scan_forward_for_section_border < end_index) {
- if (ranges->at(scan_forward_for_section_border) > new_border) {
- *new_start_index = scan_forward_for_section_border;
- *border = new_border;
- break;
- }
- scan_forward_for_section_border++;
- }
- }
-
- DCHECK(*new_start_index > start_index);
- *new_end_index = *new_start_index - 1;
- if (ranges->at(*new_end_index) == *border) {
- (*new_end_index)--;
- }
- if (*border >= ranges->at(end_index)) {
- *border = ranges->at(end_index);
- *new_start_index = end_index; // Won't be used.
- *new_end_index = end_index - 1;
- }
-}
-
-// Gets a series of segment boundaries representing a character class. If the
-// character is in the range between an even and an odd boundary (counting from
-// start_index) then go to even_label, otherwise go to odd_label. We already
-// know that the character is in the range of min_char to max_char inclusive.
-// Either label can be nullptr indicating backtracking. Either label can also
-// be equal to the fall_through label.
-static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
- int start_index, int end_index, uc32 min_char,
- uc32 max_char, Label* fall_through,
- Label* even_label, Label* odd_label) {
- DCHECK_LE(min_char, String::kMaxUtf16CodeUnit);
- DCHECK_LE(max_char, String::kMaxUtf16CodeUnit);
-
- int first = ranges->at(start_index);
- int last = ranges->at(end_index) - 1;
-
- DCHECK_LT(min_char, first);
-
- // Just need to test if the character is before or on-or-after
- // a particular character.
- if (start_index == end_index) {
- EmitBoundaryTest(masm, first, fall_through, even_label, odd_label);
- return;
- }
-
- // Another almost trivial case: There is one interval in the middle that is
- // different from the end intervals.
- if (start_index + 1 == end_index) {
- EmitDoubleBoundaryTest(
- masm, first, last, fall_through, even_label, odd_label);
- return;
- }
-
- // It's not worth using table lookup if there are very few intervals in the
- // character class.
- if (end_index - start_index <= 6) {
- // It is faster to test for individual characters, so we look for those
- // first, then try arbitrary ranges in the second round.
- static int kNoCutIndex = -1;
- int cut = kNoCutIndex;
- for (int i = start_index; i < end_index; i++) {
- if (ranges->at(i) == ranges->at(i + 1) - 1) {
- cut = i;
- break;
- }
- }
- if (cut == kNoCutIndex) cut = start_index;
- CutOutRange(
- masm, ranges, start_index, end_index, cut, even_label, odd_label);
- DCHECK_GE(end_index - start_index, 2);
- GenerateBranches(masm,
- ranges,
- start_index + 1,
- end_index - 1,
- min_char,
- max_char,
- fall_through,
- even_label,
- odd_label);
- return;
- }
-
- // If there are a lot of intervals in the regexp, then we will use tables to
- // determine whether the character is inside or outside the character class.
- static const int kBits = RegExpMacroAssembler::kTableSizeBits;
-
- if ((max_char >> kBits) == (min_char >> kBits)) {
- EmitUseLookupTable(masm,
- ranges,
- start_index,
- end_index,
- min_char,
- fall_through,
- even_label,
- odd_label);
- return;
- }
-
- if ((min_char >> kBits) != (first >> kBits)) {
- masm->CheckCharacterLT(first, odd_label);
- GenerateBranches(masm,
- ranges,
- start_index + 1,
- end_index,
- first,
- max_char,
- fall_through,
- odd_label,
- even_label);
- return;
- }
-
- int new_start_index = 0;
- int new_end_index = 0;
- int border = 0;
-
- SplitSearchSpace(ranges,
- start_index,
- end_index,
- &new_start_index,
- &new_end_index,
- &border);
-
- Label handle_rest;
- Label* above = &handle_rest;
- if (border == last + 1) {
- // We didn't find any section that started after the limit, so everything
- // above the border is one of the terminal labels.
- above = (end_index & 1) != (start_index & 1) ? odd_label : even_label;
- DCHECK(new_end_index == end_index - 1);
- }
-
- DCHECK_LE(start_index, new_end_index);
- DCHECK_LE(new_start_index, end_index);
- DCHECK_LT(start_index, new_start_index);
- DCHECK_LT(new_end_index, end_index);
- DCHECK(new_end_index + 1 == new_start_index ||
- (new_end_index + 2 == new_start_index &&
- border == ranges->at(new_end_index + 1)));
- DCHECK_LT(min_char, border - 1);
- DCHECK_LT(border, max_char);
- DCHECK_LT(ranges->at(new_end_index), border);
- DCHECK(border < ranges->at(new_start_index) ||
- (border == ranges->at(new_start_index) &&
- new_start_index == end_index &&
- new_end_index == end_index - 1 &&
- border == last + 1));
- DCHECK(new_start_index == 0 || border >= ranges->at(new_start_index - 1));
-
- masm->CheckCharacterGT(border - 1, above);
- Label dummy;
- GenerateBranches(masm,
- ranges,
- start_index,
- new_end_index,
- min_char,
- border - 1,
- &dummy,
- even_label,
- odd_label);
- if (handle_rest.is_linked()) {
- masm->Bind(&handle_rest);
- bool flip = (new_start_index & 1) != (start_index & 1);
- GenerateBranches(masm,
- ranges,
- new_start_index,
- end_index,
- border,
- max_char,
- &dummy,
- flip ? odd_label : even_label,
- flip ? even_label : odd_label);
- }
-}
-
-
-static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
- RegExpCharacterClass* cc, bool one_byte,
- Label* on_failure, int cp_offset, bool check_offset,
- bool preloaded, Zone* zone) {
- ZoneList<CharacterRange>* ranges = cc->ranges(zone);
- CharacterRange::Canonicalize(ranges);
-
- int max_char;
- if (one_byte) {
- max_char = String::kMaxOneByteCharCode;
- } else {
- max_char = String::kMaxUtf16CodeUnit;
- }
-
- int range_count = ranges->length();
-
- int last_valid_range = range_count - 1;
- while (last_valid_range >= 0) {
- CharacterRange& range = ranges->at(last_valid_range);
- if (range.from() <= max_char) {
- break;
- }
- last_valid_range--;
- }
-
- if (last_valid_range < 0) {
- if (!cc->is_negated()) {
- macro_assembler->GoTo(on_failure);
- }
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- return;
- }
-
- if (last_valid_range == 0 &&
- ranges->at(0).IsEverything(max_char)) {
- if (cc->is_negated()) {
- macro_assembler->GoTo(on_failure);
- } else {
- // This is a common case hit by non-anchored expressions.
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- }
- return;
- }
-
- if (!preloaded) {
- macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
- }
-
- if (cc->is_standard(zone) &&
- macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
- on_failure)) {
- return;
- }
-
-
- // A new list with ascending entries. Each entry is a code unit
- // where there is a boundary between code units that are part of
- // the class and code units that are not. Normally we insert an
- // entry at zero which goes to the failure label, but if there
- // was already one there we fall through for success on that entry.
- // Subsequent entries have alternating meaning (success/failure).
- ZoneList<int>* range_boundaries =
- new(zone) ZoneList<int>(last_valid_range, zone);
-
- bool zeroth_entry_is_failure = !cc->is_negated();
-
- for (int i = 0; i <= last_valid_range; i++) {
- CharacterRange& range = ranges->at(i);
- if (range.from() == 0) {
- DCHECK_EQ(i, 0);
- zeroth_entry_is_failure = !zeroth_entry_is_failure;
- } else {
- range_boundaries->Add(range.from(), zone);
- }
- range_boundaries->Add(range.to() + 1, zone);
- }
- int end_index = range_boundaries->length() - 1;
- if (range_boundaries->at(end_index) > max_char) {
- end_index--;
- }
-
- Label fall_through;
- GenerateBranches(macro_assembler,
- range_boundaries,
- 0, // start_index.
- end_index,
- 0, // min_char.
- max_char,
- &fall_through,
- zeroth_entry_is_failure ? &fall_through : on_failure,
- zeroth_entry_is_failure ? on_failure : &fall_through);
- macro_assembler->Bind(&fall_through);
-}
-
-RegExpNode::~RegExpNode() = default;
-
-RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
- Trace* trace) {
- // If we are generating a greedy loop then don't stop and don't reuse code.
- if (trace->stop_node() != nullptr) {
- return CONTINUE;
- }
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- if (trace->is_trivial()) {
- if (label_.is_bound() || on_work_list() || !KeepRecursing(compiler)) {
- // If a generic version is already scheduled to be generated or we have
- // recursed too deeply then just generate a jump to that code.
- macro_assembler->GoTo(&label_);
- // This will queue it up for generation of a generic version if it hasn't
- // already been queued.
- compiler->AddWork(this);
- return DONE;
- }
- // Generate generic version of the node and bind the label for later use.
- macro_assembler->Bind(&label_);
- return CONTINUE;
- }
-
- // We are being asked to make a non-generic version. Keep track of how many
- // non-generic versions we generate so as not to overdo it.
- trace_count_++;
- if (KeepRecursing(compiler) && compiler->optimize() &&
- trace_count_ < kMaxCopiesCodeGenerated) {
- return CONTINUE;
- }
-
- // If we get here code has been generated for this node too many times or
- // recursion is too deep. Time to switch to a generic version. The code for
- // generic versions above can handle deep recursion properly.
- bool was_limiting = compiler->limiting_recursion();
- compiler->set_limiting_recursion(true);
- trace->Flush(compiler, this);
- compiler->set_limiting_recursion(was_limiting);
- return DONE;
-}
-
-
-bool RegExpNode::KeepRecursing(RegExpCompiler* compiler) {
- return !compiler->limiting_recursion() &&
- compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion;
-}
-
-
-int ActionNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-void ActionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- if (action_type_ != POSITIVE_SUBMATCH_SUCCESS) {
- on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
- }
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-int AssertionNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- // If we know we are not at the start and we are asked "how many characters
- // will you match if you succeed?" then we can answer anything since false
- // implies false. So lets just return the max answer (still_to_find) since
- // that won't prevent us from preloading a lot of characters for the other
- // branches in the node graph.
- if (assertion_type() == AT_START && not_at_start) return still_to_find;
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- // Match the behaviour of EatsAtLeast on this node.
- if (assertion_type() == AT_START && not_at_start) return;
- on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-int BackReferenceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (read_backward()) return 0;
- if (budget <= 0) return 0;
- return on_success()->EatsAtLeast(still_to_find,
- budget - 1,
- not_at_start);
-}
-
-
-int TextNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- if (read_backward()) return 0;
- int answer = Length();
- if (answer >= still_to_find) return answer;
- if (budget <= 0) return answer;
- // We are not at start after this node so we set the last argument to 'true'.
- return answer + on_success()->EatsAtLeast(still_to_find - answer,
- budget - 1,
- true);
-}
-
-
-int NegativeLookaroundChoiceNode::EatsAtLeast(int still_to_find, int budget,
- bool not_at_start) {
- if (budget <= 0) return 0;
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- return node->EatsAtLeast(still_to_find, budget - 1, not_at_start);
-}
-
-
-void NegativeLookaroundChoiceNode::GetQuickCheckDetails(
- QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in,
- bool not_at_start) {
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start);
-}
-
-
-int ChoiceNode::EatsAtLeastHelper(int still_to_find,
- int budget,
- RegExpNode* ignore_this_node,
- bool not_at_start) {
- if (budget <= 0) return 0;
- int min = 100;
- int choice_count = alternatives_->length();
- budget = (budget - 1) / choice_count;
- for (int i = 0; i < choice_count; i++) {
- RegExpNode* node = alternatives_->at(i).node();
- if (node == ignore_this_node) continue;
- int node_eats_at_least =
- node->EatsAtLeast(still_to_find, budget, not_at_start);
- if (node_eats_at_least < min) min = node_eats_at_least;
- if (min == 0) return 0;
- }
- return min;
-}
-
-
-int LoopChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- return EatsAtLeastHelper(still_to_find,
- budget - 1,
- loop_node_,
- not_at_start);
-}
-
-
-int ChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
- return EatsAtLeastHelper(still_to_find, budget, nullptr, not_at_start);
-}
-
-
-// Takes the left-most 1-bit and smears it out, setting all bits to its right.
-static inline uint32_t SmearBitsRight(uint32_t v) {
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return v;
-}
-
-
-bool QuickCheckDetails::Rationalize(bool asc) {
- bool found_useful_op = false;
- uint32_t char_mask;
- if (asc) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- mask_ = 0;
- value_ = 0;
- int char_shift = 0;
- for (int i = 0; i < characters_; i++) {
- Position* pos = &positions_[i];
- if ((pos->mask & String::kMaxOneByteCharCode) != 0) {
- found_useful_op = true;
- }
- mask_ |= (pos->mask & char_mask) << char_shift;
- value_ |= (pos->value & char_mask) << char_shift;
- char_shift += asc ? 8 : 16;
- }
- return found_useful_op;
-}
-
-
-bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
- Trace* bounds_check_trace,
- Trace* trace,
- bool preload_has_checked_bounds,
- Label* on_possible_success,
- QuickCheckDetails* details,
- bool fall_through_on_failure) {
- if (details->characters() == 0) return false;
- GetQuickCheckDetails(
- details, compiler, 0, trace->at_start() == Trace::FALSE_VALUE);
- if (details->cannot_match()) return false;
- if (!details->Rationalize(compiler->one_byte())) return false;
- DCHECK(details->characters() == 1 ||
- compiler->macro_assembler()->CanReadUnaligned());
- uint32_t mask = details->mask();
- uint32_t value = details->value();
-
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
-
- if (trace->characters_preloaded() != details->characters()) {
- DCHECK(trace->cp_offset() == bounds_check_trace->cp_offset());
- // We are attempting to preload the minimum number of characters
- // any choice would eat, so if the bounds check fails, then none of the
- // choices can succeed, so we can just immediately backtrack, rather
- // than go to the next choice.
- assembler->LoadCurrentCharacter(trace->cp_offset(),
- bounds_check_trace->backtrack(),
- !preload_has_checked_bounds,
- details->characters());
- }
-
-
- bool need_mask = true;
-
- if (details->characters() == 1) {
- // If number of characters preloaded is 1 then we used a byte or 16 bit
- // load so the value is already masked down.
- uint32_t char_mask;
- if (compiler->one_byte()) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- if ((mask & char_mask) == char_mask) need_mask = false;
- mask &= char_mask;
- } else {
- // For 2-character preloads in one-byte mode or 1-character preloads in
- // two-byte mode we also use a 16 bit load with zero extend.
- static const uint32_t kTwoByteMask = 0xFFFF;
- static const uint32_t kFourByteMask = 0xFFFFFFFF;
- if (details->characters() == 2 && compiler->one_byte()) {
- if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
- } else if (details->characters() == 1 && !compiler->one_byte()) {
- if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
- } else {
- if (mask == kFourByteMask) need_mask = false;
- }
- }
-
- if (fall_through_on_failure) {
- if (need_mask) {
- assembler->CheckCharacterAfterAnd(value, mask, on_possible_success);
- } else {
- assembler->CheckCharacter(value, on_possible_success);
- }
- } else {
- if (need_mask) {
- assembler->CheckNotCharacterAfterAnd(value, mask, trace->backtrack());
- } else {
- assembler->CheckNotCharacter(value, trace->backtrack());
- }
- }
- return true;
-}
-
-
-// Here is the meat of GetQuickCheckDetails (see also the comment on the
-// super-class in the .h file).
-//
-// We iterate along the text object, building up for each character a
-// mask and value that can be used to test for a quick failure to match.
-// The masks and values for the positions will be combined into a single
-// machine word for the current character width in order to be used in
-// generating a quick check.
-void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- // Do not collect any quick check details if the text node reads backward,
- // since it reads in the opposite direction than we use for quick checks.
- if (read_backward()) return;
- Isolate* isolate = compiler->macro_assembler()->isolate();
- DCHECK(characters_filled_in < details->characters());
- int characters = details->characters();
- int char_mask;
- if (compiler->one_byte()) {
- char_mask = String::kMaxOneByteCharCode;
- } else {
- char_mask = String::kMaxUtf16CodeUnit;
- }
- for (int k = 0; k < elements()->length(); k++) {
- TextElement elm = elements()->at(k);
- if (elm.text_type() == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.atom()->data();
- for (int i = 0; i < characters && i < quarks.length(); i++) {
- QuickCheckDetails::Position* pos =
- details->positions(characters_filled_in);
- uc16 c = quarks[i];
- if (elm.atom()->ignore_case()) {
- unibrow::uchar chars[4];
- int length = GetCaseIndependentLetters(
- isolate, c, compiler->one_byte(), chars, 4);
- if (length == 0) {
- // This can happen because all case variants are non-Latin1, but we
- // know the input is Latin1.
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- if (length == 1) {
- // This letter has no case equivalents, so it's nice and simple
- // and the mask-compare will determine definitely whether we have
- // a match at this character position.
- pos->mask = char_mask;
- pos->value = c;
- pos->determines_perfectly = true;
- } else {
- uint32_t common_bits = char_mask;
- uint32_t bits = chars[0];
- for (int j = 1; j < length; j++) {
- uint32_t differing_bits = ((chars[j] & common_bits) ^ bits);
- common_bits ^= differing_bits;
- bits &= common_bits;
- }
- // If length is 2 and common bits has only one zero in it then
- // our mask and compare instruction will determine definitely
- // whether we have a match at this character position. Otherwise
- // it can only be an approximate check.
- uint32_t one_zero = (common_bits | ~char_mask);
- if (length == 2 && ((~one_zero) & ((~one_zero) - 1)) == 0) {
- pos->determines_perfectly = true;
- }
- pos->mask = common_bits;
- pos->value = bits;
- }
- } else {
- // Don't ignore case. Nice simple case where the mask-compare will
- // determine definitely whether we have a match at this character
- // position.
- if (c > char_mask) {
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- pos->mask = char_mask;
- pos->value = c;
- pos->determines_perfectly = true;
- }
- characters_filled_in++;
- DCHECK(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
- }
- } else {
- QuickCheckDetails::Position* pos =
- details->positions(characters_filled_in);
- RegExpCharacterClass* tree = elm.char_class();
- ZoneList<CharacterRange>* ranges = tree->ranges(zone());
- DCHECK(!ranges->is_empty());
- if (tree->is_negated()) {
- // A quick check uses multi-character mask and compare. There is no
- // useful way to incorporate a negative char class into this scheme
- // so we just conservatively create a mask and value that will always
- // succeed.
- pos->mask = 0;
- pos->value = 0;
- } else {
- int first_range = 0;
- while (ranges->at(first_range).from() > char_mask) {
- first_range++;
- if (first_range == ranges->length()) {
- details->set_cannot_match();
- pos->determines_perfectly = false;
- return;
- }
- }
- CharacterRange range = ranges->at(first_range);
- uc16 from = range.from();
- uc16 to = range.to();
- if (to > char_mask) {
- to = char_mask;
- }
- uint32_t differing_bits = (from ^ to);
- // A mask and compare is only perfect if the differing bits form a
- // number like 00011111 with one single block of trailing 1s.
- if ((differing_bits & (differing_bits + 1)) == 0 &&
- from + differing_bits == to) {
- pos->determines_perfectly = true;
- }
- uint32_t common_bits = ~SmearBitsRight(differing_bits);
- uint32_t bits = (from & common_bits);
- for (int i = first_range + 1; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
- uc16 from = range.from();
- uc16 to = range.to();
- if (from > char_mask) continue;
- if (to > char_mask) to = char_mask;
- // Here we are combining more ranges into the mask and compare
- // value. With each new range the mask becomes more sparse and
- // so the chances of a false positive rise. A character class
- // with multiple ranges is assumed never to be equivalent to a
- // mask and compare operation.
- pos->determines_perfectly = false;
- uint32_t new_common_bits = (from ^ to);
- new_common_bits = ~SmearBitsRight(new_common_bits);
- common_bits &= new_common_bits;
- bits &= new_common_bits;
- uint32_t differing_bits = (from & common_bits) ^ bits;
- common_bits ^= differing_bits;
- bits &= common_bits;
- }
- pos->mask = common_bits;
- pos->value = bits;
- }
- characters_filled_in++;
- DCHECK(characters_filled_in <= details->characters());
- if (characters_filled_in == details->characters()) {
- return;
- }
- }
- }
- DCHECK(characters_filled_in != details->characters());
- if (!details->cannot_match()) {
- on_success()-> GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- true);
- }
-}
-
-
-void QuickCheckDetails::Clear() {
- for (int i = 0; i < characters_; i++) {
- positions_[i].mask = 0;
- positions_[i].value = 0;
- positions_[i].determines_perfectly = false;
- }
- characters_ = 0;
-}
-
-
-void QuickCheckDetails::Advance(int by, bool one_byte) {
- if (by >= characters_ || by < 0) {
- DCHECK_IMPLIES(by < 0, characters_ == 0);
- Clear();
- return;
- }
- DCHECK_LE(characters_ - by, 4);
- DCHECK_LE(characters_, 4);
- for (int i = 0; i < characters_ - by; i++) {
- positions_[i] = positions_[by + i];
- }
- for (int i = characters_ - by; i < characters_; i++) {
- positions_[i].mask = 0;
- positions_[i].value = 0;
- positions_[i].determines_perfectly = false;
- }
- characters_ -= by;
- // We could change mask_ and value_ here but we would never advance unless
- // they had already been used in a check and they won't be used again because
- // it would gain us nothing. So there's no point.
-}
-
-
-void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
- DCHECK(characters_ == other->characters_);
- if (other->cannot_match_) {
- return;
- }
- if (cannot_match_) {
- *this = *other;
- return;
- }
- for (int i = from_index; i < characters_; i++) {
- QuickCheckDetails::Position* pos = positions(i);
- QuickCheckDetails::Position* other_pos = other->positions(i);
- if (pos->mask != other_pos->mask ||
- pos->value != other_pos->value ||
- !other_pos->determines_perfectly) {
- // Our mask-compare operation will be approximate unless we have the
- // exact same operation on both sides of the alternation.
- pos->determines_perfectly = false;
- }
- pos->mask &= other_pos->mask;
- pos->value &= pos->mask;
- other_pos->value &= pos->mask;
- uc16 differing_bits = (pos->value ^ other_pos->value);
- pos->mask &= ~differing_bits;
- pos->value &= pos->mask;
- }
-}
-
-
-class VisitMarker {
- public:
- explicit VisitMarker(NodeInfo* info) : info_(info) {
- DCHECK(!info->visited);
- info->visited = true;
- }
- ~VisitMarker() {
- info_->visited = false;
- }
- private:
- NodeInfo* info_;
-};
-
-RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- DCHECK(!info()->visited);
- VisitMarker marker(info());
- return FilterSuccessor(depth - 1);
-}
-
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
- RegExpNode* next = on_success_->FilterOneByte(depth - 1);
- if (next == nullptr) return set_replacement(nullptr);
- on_success_ = next;
- return set_replacement(this);
-}
-
-// We need to check for the following characters: 0x39C 0x3BC 0x178.
-static inline bool RangeContainsLatin1Equivalents(CharacterRange range) {
- // TODO(dcarney): this could be a lot more efficient.
- return range.Contains(0x039C) || range.Contains(0x03BC) ||
- range.Contains(0x0178);
-}
-
-
-static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
- for (int i = 0; i < ranges->length(); i++) {
- // TODO(dcarney): this could be a lot more efficient.
- if (RangeContainsLatin1Equivalents(ranges->at(i))) return true;
- }
- return false;
-}
-
-RegExpNode* TextNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- DCHECK(!info()->visited);
- VisitMarker marker(info());
- int element_count = elements()->length();
- for (int i = 0; i < element_count; i++) {
- TextElement elm = elements()->at(i);
- if (elm.text_type() == TextElement::ATOM) {
- Vector<const uc16> quarks = elm.atom()->data();
- for (int j = 0; j < quarks.length(); j++) {
- uint16_t c = quarks[j];
- if (elm.atom()->ignore_case()) {
- c = unibrow::Latin1::TryConvertToLatin1(c);
- }
- if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
- // Replace quark in case we converted to Latin-1.
- uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.begin());
- writable_quarks[j] = c;
- }
- } else {
- DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
- RegExpCharacterClass* cc = elm.char_class();
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- CharacterRange::Canonicalize(ranges);
- // Now they are in order so we only need to look at the first.
- int range_count = ranges->length();
- if (cc->is_negated()) {
- if (range_count != 0 &&
- ranges->at(0).from() == 0 &&
- ranges->at(0).to() >= String::kMaxOneByteCharCode) {
- // This will be handled in a later filter.
- if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
- continue;
- return set_replacement(nullptr);
- }
- } else {
- if (range_count == 0 ||
- ranges->at(0).from() > String::kMaxOneByteCharCode) {
- // This will be handled in a later filter.
- if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
- continue;
- return set_replacement(nullptr);
- }
- }
- }
- }
- return FilterSuccessor(depth - 1);
-}
-
-RegExpNode* LoopChoiceNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- {
- VisitMarker marker(info());
-
- RegExpNode* continue_replacement = continue_node_->FilterOneByte(depth - 1);
- // If we can't continue after the loop then there is no sense in doing the
- // loop.
- if (continue_replacement == nullptr) return set_replacement(nullptr);
- }
-
- return ChoiceNode::FilterOneByte(depth - 1);
-}
-
-RegExpNode* ChoiceNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- VisitMarker marker(info());
- int choice_count = alternatives_->length();
-
- for (int i = 0; i < choice_count; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- if (alternative.guards() != nullptr &&
- alternative.guards()->length() != 0) {
- set_replacement(this);
- return this;
- }
- }
-
- int surviving = 0;
- RegExpNode* survivor = nullptr;
- for (int i = 0; i < choice_count; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- RegExpNode* replacement = alternative.node()->FilterOneByte(depth - 1);
- DCHECK(replacement != this); // No missing EMPTY_MATCH_CHECK.
- if (replacement != nullptr) {
- alternatives_->at(i).set_node(replacement);
- surviving++;
- survivor = replacement;
- }
- }
- if (surviving < 2) return set_replacement(survivor);
-
- set_replacement(this);
- if (surviving == choice_count) {
- return this;
- }
- // Only some of the nodes survived the filtering. We need to rebuild the
- // alternatives list.
- ZoneList<GuardedAlternative>* new_alternatives =
- new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
- for (int i = 0; i < choice_count; i++) {
- RegExpNode* replacement =
- alternatives_->at(i).node()->FilterOneByte(depth - 1);
- if (replacement != nullptr) {
- alternatives_->at(i).set_node(replacement);
- new_alternatives->Add(alternatives_->at(i), zone());
- }
- }
- alternatives_ = new_alternatives;
- return this;
-}
-
-RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth) {
- if (info()->replacement_calculated) return replacement();
- if (depth < 0) return this;
- if (info()->visited) return this;
- VisitMarker marker(info());
- // Alternative 0 is the negative lookahead, alternative 1 is what comes
- // afterwards.
- RegExpNode* node = alternatives_->at(1).node();
- RegExpNode* replacement = node->FilterOneByte(depth - 1);
- if (replacement == nullptr) return set_replacement(nullptr);
- alternatives_->at(1).set_node(replacement);
-
- RegExpNode* neg_node = alternatives_->at(0).node();
- RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1);
- // If the negative lookahead is always going to fail then
- // we don't need to check it.
- if (neg_replacement == nullptr) return set_replacement(replacement);
- alternatives_->at(0).set_node(neg_replacement);
- return set_replacement(this);
-}
-
-
-void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- if (body_can_be_zero_length_ || info()->visited) return;
- VisitMarker marker(info());
- return ChoiceNode::GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- not_at_start);
-}
-
-
-void LoopChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- if (body_can_be_zero_length_ || budget <= 0) {
- bm->SetRest(offset);
- SaveBMInfo(bm, not_at_start, offset);
- return;
- }
- ChoiceNode::FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) {
- not_at_start = (not_at_start || not_at_start_);
- int choice_count = alternatives_->length();
- DCHECK_LT(0, choice_count);
- alternatives_->at(0).node()->GetQuickCheckDetails(details,
- compiler,
- characters_filled_in,
- not_at_start);
- for (int i = 1; i < choice_count; i++) {
- QuickCheckDetails new_details(details->characters());
- RegExpNode* node = alternatives_->at(i).node();
- node->GetQuickCheckDetails(&new_details, compiler,
- characters_filled_in,
- not_at_start);
- // Here we merge the quick match details of the two branches.
- details->Merge(&new_details, characters_filled_in);
- }
-}
-
-
-// Check for [0-9A-Z_a-z].
-static void EmitWordCheck(RegExpMacroAssembler* assembler,
- Label* word,
- Label* non_word,
- bool fall_through_on_word) {
- if (assembler->CheckSpecialCharacterClass(
- fall_through_on_word ? 'w' : 'W',
- fall_through_on_word ? non_word : word)) {
- // Optimized implementation available.
- return;
- }
- assembler->CheckCharacterGT('z', non_word);
- assembler->CheckCharacterLT('0', non_word);
- assembler->CheckCharacterGT('a' - 1, word);
- assembler->CheckCharacterLT('9' + 1, word);
- assembler->CheckCharacterLT('A', non_word);
- assembler->CheckCharacterLT('Z' + 1, word);
- if (fall_through_on_word) {
- assembler->CheckNotCharacter('_', non_word);
- } else {
- assembler->CheckCharacter('_', word);
- }
-}
-
-
-// Emit the code to check for a ^ in multiline mode (1-character lookbehind
-// that matches newline or the start of input).
-static void EmitHat(RegExpCompiler* compiler,
- RegExpNode* on_success,
- Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- // We will be loading the previous character into the current character
- // register.
- Trace new_trace(*trace);
- new_trace.InvalidateCurrentCharacter();
-
- Label ok;
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a newline in this context, so skip to
- // ok if we are at the start.
- assembler->CheckAtStart(&ok);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() -1,
- new_trace.backtrack(),
- false);
- if (!assembler->CheckSpecialCharacterClass('n',
- new_trace.backtrack())) {
- // Newline means \n, \r, 0x2028 or 0x2029.
- if (!compiler->one_byte()) {
- assembler->CheckCharacterAfterAnd(0x2028, 0xFFFE, &ok);
- }
- assembler->CheckCharacter('\n', &ok);
- assembler->CheckNotCharacter('\r', new_trace.backtrack());
- }
- assembler->Bind(&ok);
- on_success->Emit(compiler, &new_trace);
-}
-
-
-// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
-void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Isolate* isolate = assembler->isolate();
- Trace::TriBool next_is_word_character = Trace::UNKNOWN;
- bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
- BoyerMooreLookahead* lookahead = bm_info(not_at_start);
- if (lookahead == nullptr) {
- int eats_at_least =
- Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(kMaxLookaheadForBoyerMoore,
- kRecursionBudget,
- not_at_start));
- if (eats_at_least >= 1) {
- BoyerMooreLookahead* bm =
- new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
- FillInBMInfo(isolate, 0, kRecursionBudget, bm, not_at_start);
- if (bm->at(0)->is_non_word())
- next_is_word_character = Trace::FALSE_VALUE;
- if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
- }
- } else {
- if (lookahead->at(0)->is_non_word())
- next_is_word_character = Trace::FALSE_VALUE;
- if (lookahead->at(0)->is_word())
- next_is_word_character = Trace::TRUE_VALUE;
- }
- bool at_boundary = (assertion_type_ == AssertionNode::AT_BOUNDARY);
- if (next_is_word_character == Trace::UNKNOWN) {
- Label before_non_word;
- Label before_word;
- if (trace->characters_preloaded() != 1) {
- assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
- }
- // Fall through on non-word.
- EmitWordCheck(assembler, &before_word, &before_non_word, false);
- // Next character is not a word character.
- assembler->Bind(&before_non_word);
- Label ok;
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
- assembler->GoTo(&ok);
-
- assembler->Bind(&before_word);
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
- assembler->Bind(&ok);
- } else if (next_is_word_character == Trace::TRUE_VALUE) {
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
- } else {
- DCHECK(next_is_word_character == Trace::FALSE_VALUE);
- BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
- }
-}
-
-
-void AssertionNode::BacktrackIfPrevious(
- RegExpCompiler* compiler,
- Trace* trace,
- AssertionNode::IfPrevious backtrack_if_previous) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Trace new_trace(*trace);
- new_trace.InvalidateCurrentCharacter();
-
- Label fall_through, dummy;
-
- Label* non_word = backtrack_if_previous == kIsNonWord ?
- new_trace.backtrack() :
- &fall_through;
- Label* word = backtrack_if_previous == kIsNonWord ?
- &fall_through :
- new_trace.backtrack();
-
- if (new_trace.cp_offset() == 0) {
- // The start of input counts as a non-word character, so the question is
- // decided if we are at the start.
- assembler->CheckAtStart(non_word);
- }
- // We already checked that we are not at the start of input so it must be
- // OK to load the previous character.
- assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1, &dummy, false);
- EmitWordCheck(assembler, word, non_word, backtrack_if_previous == kIsNonWord);
-
- assembler->Bind(&fall_through);
- on_success()->Emit(compiler, &new_trace);
-}
-
-
-void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
- bool not_at_start) {
- if (assertion_type_ == AT_START && not_at_start) {
- details->set_cannot_match();
- return;
- }
- return on_success()->GetQuickCheckDetails(details,
- compiler,
- filled_in,
- not_at_start);
-}
-
-
-void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- switch (assertion_type_) {
- case AT_END: {
- Label ok;
- assembler->CheckPosition(trace->cp_offset(), &ok);
- assembler->GoTo(trace->backtrack());
- assembler->Bind(&ok);
- break;
- }
- case AT_START: {
- if (trace->at_start() == Trace::FALSE_VALUE) {
- assembler->GoTo(trace->backtrack());
- return;
- }
- if (trace->at_start() == Trace::UNKNOWN) {
- assembler->CheckNotAtStart(trace->cp_offset(), trace->backtrack());
- Trace at_start_trace = *trace;
- at_start_trace.set_at_start(Trace::TRUE_VALUE);
- on_success()->Emit(compiler, &at_start_trace);
- return;
- }
- }
- break;
- case AFTER_NEWLINE:
- EmitHat(compiler, on_success(), trace);
- return;
- case AT_BOUNDARY:
- case AT_NON_BOUNDARY: {
- EmitBoundaryCheck(compiler, trace);
- return;
- }
- }
- on_success()->Emit(compiler, trace);
-}
-
-
-static bool DeterminedAlready(QuickCheckDetails* quick_check, int offset) {
- if (quick_check == nullptr) return false;
- if (offset >= quick_check->characters()) return false;
- return quick_check->positions(offset)->determines_perfectly;
-}
-
-
-static void UpdateBoundsCheck(int index, int* checked_up_to) {
- if (index > *checked_up_to) {
- *checked_up_to = index;
- }
-}
-
-
-// We call this repeatedly to generate code for each pass over the text node.
-// The passes are in increasing order of difficulty because we hope one
-// of the first passes will fail in which case we are saved the work of the
-// later passes. for example for the case independent regexp /%[asdfghjkl]a/
-// we will check the '%' in the first pass, the case independent 'a' in the
-// second pass and the character class in the last pass.
-//
-// The passes are done from right to left, so for example to test for /bar/
-// we will first test for an 'r' with offset 2, then an 'a' with offset 1
-// and then a 'b' with offset 0. This means we can avoid the end-of-input
-// bounds check most of the time. In the example we only need to check for
-// end-of-input when loading the putative 'r'.
-//
-// A slight complication involves the fact that the first character may already
-// be fetched into a register by the previous node. In this case we want to
-// do the test for that character first. We do this in separate passes. The
-// 'preloaded' argument indicates that we are doing such a 'pass'. If such a
-// pass has been performed then subsequent passes will have true in
-// first_element_checked to indicate that that character does not need to be
-// checked again.
-//
-// In addition to all this we are passed a Trace, which can
-// contain an AlternativeGeneration object. In this AlternativeGeneration
-// object we can see details of any quick check that was already passed in
-// order to get to the code we are now generating. The quick check can involve
-// loading characters, which means we do not need to recheck the bounds
-// up to the limit the quick check already checked. In addition the quick
-// check can have involved a mask and compare operation which may simplify
-// or obviate the need for further checks at some character positions.
-void TextNode::TextEmitPass(RegExpCompiler* compiler,
- TextEmitPassType pass,
- bool preloaded,
- Trace* trace,
- bool first_element_checked,
- int* checked_up_to) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- Isolate* isolate = assembler->isolate();
- bool one_byte = compiler->one_byte();
- Label* backtrack = trace->backtrack();
- QuickCheckDetails* quick_check = trace->quick_check_performed();
- int element_count = elements()->length();
- int backward_offset = read_backward() ? -Length() : 0;
- for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
- TextElement elm = elements()->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
- if (elm.text_type() == TextElement::ATOM) {
- if (SkipPass(pass, elm.atom()->ignore_case())) continue;
- Vector<const uc16> quarks = elm.atom()->data();
- for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
- if (first_element_checked && i == 0 && j == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
- EmitCharacterFunction* emit_function = nullptr;
- uc16 quark = quarks[j];
- if (elm.atom()->ignore_case()) {
- // Everywhere else we assume that a non-Latin-1 character cannot match
- // a Latin-1 character. Avoid the cases where this is assumption is
- // invalid by using the Latin1 equivalent instead.
- quark = unibrow::Latin1::TryConvertToLatin1(quark);
- }
- switch (pass) {
- case NON_LATIN1_MATCH:
- DCHECK(one_byte);
- if (quark > String::kMaxOneByteCharCode) {
- assembler->GoTo(backtrack);
- return;
- }
- break;
- case NON_LETTER_CHARACTER_MATCH:
- emit_function = &EmitAtomNonLetter;
- break;
- case SIMPLE_CHARACTER_MATCH:
- emit_function = &EmitSimpleCharacter;
- break;
- case CASE_CHARACTER_MATCH:
- emit_function = &EmitAtomLetter;
- break;
- default:
- break;
- }
- if (emit_function != nullptr) {
- bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
- bool bound_checked =
- emit_function(isolate, compiler, quark, backtrack, cp_offset + j,
- bounds_check, preloaded);
- if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
- }
- }
- } else {
- DCHECK_EQ(TextElement::CHAR_CLASS, elm.text_type());
- if (pass == CHARACTER_CLASS_MATCH) {
- if (first_element_checked && i == 0) continue;
- if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
- RegExpCharacterClass* cc = elm.char_class();
- bool bounds_check = *checked_up_to < cp_offset || read_backward();
- EmitCharClass(assembler, cc, one_byte, backtrack, cp_offset,
- bounds_check, preloaded, zone());
- UpdateBoundsCheck(cp_offset, checked_up_to);
- }
- }
- }
-}
-
-
-int TextNode::Length() {
- TextElement elm = elements()->last();
- DCHECK_LE(0, elm.cp_offset());
- return elm.cp_offset() + elm.length();
-}
-
-bool TextNode::SkipPass(TextEmitPassType pass, bool ignore_case) {
- if (ignore_case) {
- return pass == SIMPLE_CHARACTER_MATCH;
- } else {
- return pass == NON_LETTER_CHARACTER_MATCH || pass == CASE_CHARACTER_MATCH;
- }
-}
-
-TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
- ZoneList<CharacterRange>* ranges,
- bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags) {
- DCHECK_NOT_NULL(ranges);
- ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
- elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(zone, ranges, flags)),
- zone);
- return new (zone) TextNode(elms, read_backward, on_success);
-}
-
-TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
- CharacterRange trail,
- bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags) {
- ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
- ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
- ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
- elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(zone, lead_ranges, flags)),
- zone);
- elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(zone, trail_ranges, flags)),
- zone);
- return new (zone) TextNode(elms, read_backward, on_success);
-}
-
-
-// This generates the code to match a text node. A text node can contain
-// straight character sequences (possibly to be matched in a case-independent
-// way) and character classes. For efficiency we do not do this in a single
-// pass from left to right. Instead we pass over the text node several times,
-// emitting code for some character positions every time. See the comment on
-// TextEmitPass for details.
-void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- DCHECK(limit_result == CONTINUE);
-
- if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) {
- compiler->SetRegExpTooBig();
- return;
- }
-
- if (compiler->one_byte()) {
- int dummy = 0;
- TextEmitPass(compiler, NON_LATIN1_MATCH, false, trace, false, &dummy);
- }
-
- bool first_elt_done = false;
- int bound_checked_to = trace->cp_offset() - 1;
- bound_checked_to += trace->bound_checked_up_to();
-
- // If a character is preloaded into the current character register then
- // check that now.
- if (trace->characters_preloaded() == 1) {
- for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), true, trace,
- false, &bound_checked_to);
- }
- first_elt_done = true;
- }
-
- for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
- TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), false, trace,
- first_elt_done, &bound_checked_to);
- }
-
- Trace successor_trace(*trace);
- // If we advance backward, we may end up at the start.
- successor_trace.AdvanceCurrentPositionInTrace(
- read_backward() ? -Length() : Length(), compiler);
- successor_trace.set_at_start(read_backward() ? Trace::UNKNOWN
- : Trace::FALSE_VALUE);
- RecursionCheck rc(compiler);
- on_success()->Emit(compiler, &successor_trace);
-}
-
-
-void Trace::InvalidateCurrentCharacter() {
- characters_preloaded_ = 0;
-}
-
-
-void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
- // We don't have an instruction for shifting the current character register
- // down or for using a shifted value for anything so lets just forget that
- // we preloaded any characters into it.
- characters_preloaded_ = 0;
- // Adjust the offsets of the quick check performed information. This
- // information is used to find out what we already determined about the
- // characters by means of mask and compare.
- quick_check_performed_.Advance(by, compiler->one_byte());
- cp_offset_ += by;
- if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
- compiler->SetRegExpTooBig();
- cp_offset_ = 0;
- }
- bound_checked_up_to_ = Max(0, bound_checked_up_to_ - by);
-}
-
-
-void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
- int element_count = elements()->length();
- for (int i = 0; i < element_count; i++) {
- TextElement elm = elements()->at(i);
- if (elm.text_type() == TextElement::CHAR_CLASS) {
- RegExpCharacterClass* cc = elm.char_class();
-#ifdef V8_INTL_SUPPORT
- bool case_equivalents_already_added =
- NeedsUnicodeCaseEquivalents(cc->flags());
-#else
- bool case_equivalents_already_added = false;
-#endif
- if (IgnoreCase(cc->flags()) && !case_equivalents_already_added) {
- // None of the standard character classes is different in the case
- // independent case and it slows us down if we don't know that.
- if (cc->is_standard(zone())) continue;
- ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- CharacterRange::AddCaseEquivalents(isolate, zone(), ranges,
- is_one_byte);
- }
- }
- }
-}
-
-
-int TextNode::GreedyLoopTextLength() { return Length(); }
-
-
-RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler) {
- if (read_backward()) return nullptr;
- if (elements()->length() != 1) return nullptr;
- TextElement elm = elements()->at(0);
- if (elm.text_type() != TextElement::CHAR_CLASS) return nullptr;
- RegExpCharacterClass* node = elm.char_class();
- ZoneList<CharacterRange>* ranges = node->ranges(zone());
- CharacterRange::Canonicalize(ranges);
- if (node->is_negated()) {
- return ranges->length() == 0 ? on_success() : nullptr;
- }
- if (ranges->length() != 1) return nullptr;
- uint32_t max_char;
- if (compiler->one_byte()) {
- max_char = String::kMaxOneByteCharCode;
- } else {
- max_char = String::kMaxUtf16CodeUnit;
- }
- return ranges->at(0).IsEverything(max_char) ? on_success() : nullptr;
-}
-
-
-// Finds the fixed match length of a sequence of nodes that goes from
-// this alternative and back to this choice node. If there are variable
-// length nodes or other complications in the way then return a sentinel
-// value indicating that a greedy loop cannot be constructed.
-int ChoiceNode::GreedyLoopTextLengthForAlternative(
- GuardedAlternative* alternative) {
- int length = 0;
- RegExpNode* node = alternative->node();
- // Later we will generate code for all these text nodes using recursion
- // so we have to limit the max number.
- int recursion_depth = 0;
- while (node != this) {
- if (recursion_depth++ > RegExpCompiler::kMaxRecursion) {
- return kNodeIsTooComplexForGreedyLoops;
- }
- int node_length = node->GreedyLoopTextLength();
- if (node_length == kNodeIsTooComplexForGreedyLoops) {
- return kNodeIsTooComplexForGreedyLoops;
- }
- length += node_length;
- SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
- node = seq_node->on_success();
- }
- return read_backward() ? -length : length;
-}
-
-
-void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
- DCHECK_NULL(loop_node_);
- AddAlternative(alt);
- loop_node_ = alt.node();
-}
-
-
-void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
- DCHECK_NULL(continue_node_);
- AddAlternative(alt);
- continue_node_ = alt.node();
-}
-
-
-void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- if (trace->stop_node() == this) {
- // Back edge of greedy optimized loop node graph.
- int text_length =
- GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
- DCHECK_NE(kNodeIsTooComplexForGreedyLoops, text_length);
- // Update the counter-based backtracking info on the stack. This is an
- // optimization for greedy loops (see below).
- DCHECK(trace->cp_offset() == text_length);
- macro_assembler->AdvanceCurrentPosition(text_length);
- macro_assembler->GoTo(trace->loop_label());
- return;
- }
- DCHECK_NULL(trace->stop_node());
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- ChoiceNode::Emit(compiler, trace);
-}
-
-
-int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
- int eats_at_least) {
- int preload_characters = Min(4, eats_at_least);
- DCHECK_LE(preload_characters, 4);
- if (compiler->macro_assembler()->CanReadUnaligned()) {
- bool one_byte = compiler->one_byte();
- if (one_byte) {
- // We can't preload 3 characters because there is no machine instruction
- // to do that. We can't just load 4 because we could be reading
- // beyond the end of the string, which could cause a memory fault.
- if (preload_characters == 3) preload_characters = 2;
- } else {
- if (preload_characters > 2) preload_characters = 2;
- }
- } else {
- if (preload_characters > 1) preload_characters = 1;
- }
- return preload_characters;
-}
-
-
-// This class is used when generating the alternatives in a choice node. It
-// records the way the alternative is being code generated.
-class AlternativeGeneration: public Malloced {
- public:
- AlternativeGeneration()
- : possible_success(),
- expects_preload(false),
- after(),
- quick_check_details() { }
- Label possible_success;
- bool expects_preload;
- Label after;
- QuickCheckDetails quick_check_details;
-};
-
-
-// Creates a list of AlternativeGenerations. If the list has a reasonable
-// size then it is on the stack, otherwise the excess is on the heap.
-class AlternativeGenerationList {
- public:
- AlternativeGenerationList(int count, Zone* zone)
- : alt_gens_(count, zone) {
- for (int i = 0; i < count && i < kAFew; i++) {
- alt_gens_.Add(a_few_alt_gens_ + i, zone);
- }
- for (int i = kAFew; i < count; i++) {
- alt_gens_.Add(new AlternativeGeneration(), zone);
- }
- }
- ~AlternativeGenerationList() {
- for (int i = kAFew; i < alt_gens_.length(); i++) {
- delete alt_gens_[i];
- alt_gens_[i] = nullptr;
- }
- }
-
- AlternativeGeneration* at(int i) {
- return alt_gens_[i];
- }
-
- private:
- static const int kAFew = 10;
- ZoneList<AlternativeGeneration*> alt_gens_;
- AlternativeGeneration a_few_alt_gens_[kAFew];
-};
-
-
-static const uc32 kRangeEndMarker = 0x110000;
-
-// The '2' variant is has inclusive from and exclusive to.
-// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
-// which include WhiteSpace (7.2) or LineTerminator (7.3) values.
-static const int kSpaceRanges[] = {
- '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0, 0x00A1, 0x1680,
- 0x1681, 0x2000, 0x200B, 0x2028, 0x202A, 0x202F, 0x2030,
- 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, kRangeEndMarker};
-static const int kSpaceRangeCount = arraysize(kSpaceRanges);
-
-static const int kWordRanges[] = {
- '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, kRangeEndMarker};
-static const int kWordRangeCount = arraysize(kWordRanges);
-static const int kDigitRanges[] = {'0', '9' + 1, kRangeEndMarker};
-static const int kDigitRangeCount = arraysize(kDigitRanges);
-static const int kSurrogateRanges[] = {
- kLeadSurrogateStart, kLeadSurrogateStart + 1, kRangeEndMarker};
-static const int kSurrogateRangeCount = arraysize(kSurrogateRanges);
-static const int kLineTerminatorRanges[] = {
- 0x000A, 0x000B, 0x000D, 0x000E, 0x2028, 0x202A, kRangeEndMarker};
-static const int kLineTerminatorRangeCount = arraysize(kLineTerminatorRanges);
-
-void BoyerMoorePositionInfo::Set(int character) {
- SetInterval(Interval(character, character));
-}
-
-
-void BoyerMoorePositionInfo::SetInterval(const Interval& interval) {
- s_ = AddRange(s_, kSpaceRanges, kSpaceRangeCount, interval);
- w_ = AddRange(w_, kWordRanges, kWordRangeCount, interval);
- d_ = AddRange(d_, kDigitRanges, kDigitRangeCount, interval);
- surrogate_ =
- AddRange(surrogate_, kSurrogateRanges, kSurrogateRangeCount, interval);
- if (interval.to() - interval.from() >= kMapSize - 1) {
- if (map_count_ != kMapSize) {
- map_count_ = kMapSize;
- for (int i = 0; i < kMapSize; i++) map_->at(i) = true;
- }
- return;
- }
- for (int i = interval.from(); i <= interval.to(); i++) {
- int mod_character = (i & kMask);
- if (!map_->at(mod_character)) {
- map_count_++;
- map_->at(mod_character) = true;
- }
- if (map_count_ == kMapSize) return;
- }
-}
-
-
-void BoyerMoorePositionInfo::SetAll() {
- s_ = w_ = d_ = kLatticeUnknown;
- if (map_count_ != kMapSize) {
- map_count_ = kMapSize;
- for (int i = 0; i < kMapSize; i++) map_->at(i) = true;
- }
-}
-
-
-BoyerMooreLookahead::BoyerMooreLookahead(
- int length, RegExpCompiler* compiler, Zone* zone)
- : length_(length),
- compiler_(compiler) {
- if (compiler->one_byte()) {
- max_char_ = String::kMaxOneByteCharCode;
- } else {
- max_char_ = String::kMaxUtf16CodeUnit;
- }
- bitmaps_ = new(zone) ZoneList<BoyerMoorePositionInfo*>(length, zone);
- for (int i = 0; i < length; i++) {
- bitmaps_->Add(new(zone) BoyerMoorePositionInfo(zone), zone);
- }
-}
-
-
-// Find the longest range of lookahead that has the fewest number of different
-// characters that can occur at a given position. Since we are optimizing two
-// different parameters at once this is a tradeoff.
-bool BoyerMooreLookahead::FindWorthwhileInterval(int* from, int* to) {
- int biggest_points = 0;
- // If more than 32 characters out of 128 can occur it is unlikely that we can
- // be lucky enough to step forwards much of the time.
- const int kMaxMax = 32;
- for (int max_number_of_chars = 4;
- max_number_of_chars < kMaxMax;
- max_number_of_chars *= 2) {
- biggest_points =
- FindBestInterval(max_number_of_chars, biggest_points, from, to);
- }
- if (biggest_points == 0) return false;
- return true;
-}
-
-
-// Find the highest-points range between 0 and length_ where the character
-// information is not too vague. 'Too vague' means that there are more than
-// max_number_of_chars that can occur at this position. Calculates the number
-// of points as the product of width-of-the-range and
-// probability-of-finding-one-of-the-characters, where the probability is
-// calculated using the frequency distribution of the sample subject string.
-int BoyerMooreLookahead::FindBestInterval(
- int max_number_of_chars, int old_biggest_points, int* from, int* to) {
- int biggest_points = old_biggest_points;
- static const int kSize = RegExpMacroAssembler::kTableSize;
- for (int i = 0; i < length_; ) {
- while (i < length_ && Count(i) > max_number_of_chars) i++;
- if (i == length_) break;
- int remembered_from = i;
- bool union_map[kSize];
- for (int j = 0; j < kSize; j++) union_map[j] = false;
- while (i < length_ && Count(i) <= max_number_of_chars) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- for (int j = 0; j < kSize; j++) union_map[j] |= map->at(j);
- i++;
- }
- int frequency = 0;
- for (int j = 0; j < kSize; j++) {
- if (union_map[j]) {
- // Add 1 to the frequency to give a small per-character boost for
- // the cases where our sampling is not good enough and many
- // characters have a frequency of zero. This means the frequency
- // can theoretically be up to 2*kSize though we treat it mostly as
- // a fraction of kSize.
- frequency += compiler_->frequency_collator()->Frequency(j) + 1;
- }
- }
- // We use the probability of skipping times the distance we are skipping to
- // judge the effectiveness of this. Actually we have a cut-off: By
- // dividing by 2 we switch off the skipping if the probability of skipping
- // is less than 50%. This is because the multibyte mask-and-compare
- // skipping in quickcheck is more likely to do well on this case.
- bool in_quickcheck_range =
- ((i - remembered_from < 4) ||
- (compiler_->one_byte() ? remembered_from <= 4 : remembered_from <= 2));
- // Called 'probability' but it is only a rough estimate and can actually
- // be outside the 0-kSize range.
- int probability = (in_quickcheck_range ? kSize / 2 : kSize) - frequency;
- int points = (i - remembered_from) * probability;
- if (points > biggest_points) {
- *from = remembered_from;
- *to = i - 1;
- biggest_points = points;
- }
- }
- return biggest_points;
-}
-
-
-// Take all the characters that will not prevent a successful match if they
-// occur in the subject string in the range between min_lookahead and
-// max_lookahead (inclusive) measured from the current position. If the
-// character at max_lookahead offset is not one of these characters, then we
-// can safely skip forwards by the number of characters in the range.
-int BoyerMooreLookahead::GetSkipTable(int min_lookahead,
- int max_lookahead,
- Handle<ByteArray> boolean_skip_table) {
- const int kSize = RegExpMacroAssembler::kTableSize;
-
- const int kSkipArrayEntry = 0;
- const int kDontSkipArrayEntry = 1;
-
- for (int i = 0; i < kSize; i++) {
- boolean_skip_table->set(i, kSkipArrayEntry);
- }
- int skip = max_lookahead + 1 - min_lookahead;
-
- for (int i = max_lookahead; i >= min_lookahead; i--) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- for (int j = 0; j < kSize; j++) {
- if (map->at(j)) {
- boolean_skip_table->set(j, kDontSkipArrayEntry);
- }
- }
- }
-
- return skip;
-}
-
-
-// See comment above on the implementation of GetSkipTable.
-void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
- const int kSize = RegExpMacroAssembler::kTableSize;
-
- int min_lookahead = 0;
- int max_lookahead = 0;
-
- if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return;
-
- bool found_single_character = false;
- int single_character = 0;
- for (int i = max_lookahead; i >= min_lookahead; i--) {
- BoyerMoorePositionInfo* map = bitmaps_->at(i);
- if (map->map_count() > 1 ||
- (found_single_character && map->map_count() != 0)) {
- found_single_character = false;
- break;
- }
- for (int j = 0; j < kSize; j++) {
- if (map->at(j)) {
- found_single_character = true;
- single_character = j;
- break;
- }
- }
- }
-
- int lookahead_width = max_lookahead + 1 - min_lookahead;
-
- if (found_single_character && lookahead_width == 1 && max_lookahead < 3) {
- // The mask-compare can probably handle this better.
- return;
- }
-
- if (found_single_character) {
- Label cont, again;
- masm->Bind(&again);
- masm->LoadCurrentCharacter(max_lookahead, &cont, true);
- if (max_char_ > kSize) {
- masm->CheckCharacterAfterAnd(single_character,
- RegExpMacroAssembler::kTableMask,
- &cont);
- } else {
- masm->CheckCharacter(single_character, &cont);
- }
- masm->AdvanceCurrentPosition(lookahead_width);
- masm->GoTo(&again);
- masm->Bind(&cont);
- return;
- }
-
- Factory* factory = masm->isolate()->factory();
- Handle<ByteArray> boolean_skip_table =
- factory->NewByteArray(kSize, AllocationType::kOld);
- int skip_distance = GetSkipTable(
- min_lookahead, max_lookahead, boolean_skip_table);
- DCHECK_NE(0, skip_distance);
-
- Label cont, again;
- masm->Bind(&again);
- masm->LoadCurrentCharacter(max_lookahead, &cont, true);
- masm->CheckBitInTable(boolean_skip_table, &cont);
- masm->AdvanceCurrentPosition(skip_distance);
- masm->GoTo(&again);
- masm->Bind(&cont);
-}
-
-
-/* Code generation for choice nodes.
- *
- * We generate quick checks that do a mask and compare to eliminate a
- * choice. If the quick check succeeds then it jumps to the continuation to
- * do slow checks and check subsequent nodes. If it fails (the common case)
- * it falls through to the next choice.
- *
- * Here is the desired flow graph. Nodes directly below each other imply
- * fallthrough. Alternatives 1 and 2 have quick checks. Alternative
- * 3 doesn't have a quick check so we have to call the slow check.
- * Nodes are marked Qn for quick checks and Sn for slow checks. The entire
- * regexp continuation is generated directly after the Sn node, up to the
- * next GoTo if we decide to reuse some already generated code. Some
- * nodes expect preload_characters to be preloaded into the current
- * character register. R nodes do this preloading. Vertices are marked
- * F for failures and S for success (possible success in the case of quick
- * nodes). L, V, < and > are used as arrow heads.
- *
- * ----------> R
- * |
- * V
- * Q1 -----> S1
- * | S /
- * F| /
- * | F/
- * | /
- * | R
- * | /
- * V L
- * Q2 -----> S2
- * | S /
- * F| /
- * | F/
- * | /
- * | R
- * | /
- * V L
- * S3
- * |
- * F|
- * |
- * R
- * |
- * backtrack V
- * <----------Q4
- * \ F |
- * \ |S
- * \ F V
- * \-----S4
- *
- * For greedy loops we push the current position, then generate the code that
- * eats the input specially in EmitGreedyLoop. The other choice (the
- * continuation) is generated by the normal code in EmitChoices, and steps back
- * in the input to the starting position when it fails to match. The loop code
- * looks like this (U is the unwind code that steps back in the greedy loop).
- *
- * _____
- * / \
- * V |
- * ----------> S1 |
- * /| |
- * / |S |
- * F/ \_____/
- * /
- * |<-----
- * | \
- * V |S
- * Q2 ---> U----->backtrack
- * | F /
- * S| /
- * V F /
- * S2--/
- */
-
-GreedyLoopState::GreedyLoopState(bool not_at_start) {
- counter_backtrack_trace_.set_backtrack(&label_);
- if (not_at_start) counter_backtrack_trace_.set_at_start(Trace::FALSE_VALUE);
-}
-
-
-void ChoiceNode::AssertGuardsMentionRegisters(Trace* trace) {
-#ifdef DEBUG
- int choice_count = alternatives_->length();
- for (int i = 0; i < choice_count - 1; i++) {
- GuardedAlternative alternative = alternatives_->at(i);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == nullptr) ? 0 : guards->length();
- for (int j = 0; j < guard_count; j++) {
- DCHECK(!trace->mentions_reg(guards->at(j)->reg()));
- }
- }
-#endif
-}
-
-
-void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
- Trace* current_trace,
- PreloadState* state) {
- if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
- // Save some time by looking at most one machine word ahead.
- state->eats_at_least_ =
- EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
- current_trace->at_start() == Trace::FALSE_VALUE);
- }
- state->preload_characters_ =
- CalculatePreloadCharacters(compiler, state->eats_at_least_);
-
- state->preload_is_current_ =
- (current_trace->characters_preloaded() == state->preload_characters_);
- state->preload_has_checked_bounds_ = state->preload_is_current_;
-}
-
-
-void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- int choice_count = alternatives_->length();
-
- if (choice_count == 1 && alternatives_->at(0).guards() == nullptr) {
- alternatives_->at(0).node()->Emit(compiler, trace);
- return;
- }
-
- AssertGuardsMentionRegisters(trace);
-
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- DCHECK(limit_result == CONTINUE);
-
- // For loop nodes we already flushed (see LoopChoiceNode::Emit), but for
- // other choice nodes we only flush if we are out of code size budget.
- if (trace->flush_budget() == 0 && trace->actions() != nullptr) {
- trace->Flush(compiler, this);
- return;
- }
-
- RecursionCheck rc(compiler);
-
- PreloadState preload;
- preload.init();
- GreedyLoopState greedy_loop_state(not_at_start());
-
- int text_length = GreedyLoopTextLengthForAlternative(&alternatives_->at(0));
- AlternativeGenerationList alt_gens(choice_count, zone());
-
- if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
- trace = EmitGreedyLoop(compiler,
- trace,
- &alt_gens,
- &preload,
- &greedy_loop_state,
- text_length);
- } else {
- // TODO(erikcorry): Delete this. We don't need this label, but it makes us
- // match the traces produced pre-cleanup.
- Label second_choice;
- compiler->macro_assembler()->Bind(&second_choice);
-
- preload.eats_at_least_ = EmitOptimizedUnanchoredSearch(compiler, trace);
-
- EmitChoices(compiler,
- &alt_gens,
- 0,
- trace,
- &preload);
- }
-
- // At this point we need to generate slow checks for the alternatives where
- // the quick check was inlined. We can recognize these because the associated
- // label was bound.
- int new_flush_budget = trace->flush_budget() / choice_count;
- for (int i = 0; i < choice_count; i++) {
- AlternativeGeneration* alt_gen = alt_gens.at(i);
- Trace new_trace(*trace);
- // If there are actions to be flushed we have to limit how many times
- // they are flushed. Take the budget of the parent trace and distribute
- // it fairly amongst the children.
- if (new_trace.actions() != nullptr) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- bool next_expects_preload =
- i == choice_count - 1 ? false : alt_gens.at(i + 1)->expects_preload;
- EmitOutOfLineContinuation(compiler,
- &new_trace,
- alternatives_->at(i),
- alt_gen,
- preload.preload_characters_,
- next_expects_preload);
- }
-}
-
-
-Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler,
- Trace* trace,
- AlternativeGenerationList* alt_gens,
- PreloadState* preload,
- GreedyLoopState* greedy_loop_state,
- int text_length) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- // Here we have special handling for greedy loops containing only text nodes
- // and other simple nodes. These are handled by pushing the current
- // position on the stack and then incrementing the current position each
- // time around the switch. On backtrack we decrement the current position
- // and check it against the pushed value. This avoids pushing backtrack
- // information for each iteration of the loop, which could take up a lot of
- // space.
- DCHECK(trace->stop_node() == nullptr);
- macro_assembler->PushCurrentPosition();
- Label greedy_match_failed;
- Trace greedy_match_trace;
- if (not_at_start()) greedy_match_trace.set_at_start(Trace::FALSE_VALUE);
- greedy_match_trace.set_backtrack(&greedy_match_failed);
- Label loop_label;
- macro_assembler->Bind(&loop_label);
- greedy_match_trace.set_stop_node(this);
- greedy_match_trace.set_loop_label(&loop_label);
- alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
- macro_assembler->Bind(&greedy_match_failed);
-
- Label second_choice; // For use in greedy matches.
- macro_assembler->Bind(&second_choice);
-
- Trace* new_trace = greedy_loop_state->counter_backtrack_trace();
-
- EmitChoices(compiler,
- alt_gens,
- 1,
- new_trace,
- preload);
-
- macro_assembler->Bind(greedy_loop_state->label());
- // If we have unwound to the bottom then backtrack.
- macro_assembler->CheckGreedyLoop(trace->backtrack());
- // Otherwise try the second priority at an earlier position.
- macro_assembler->AdvanceCurrentPosition(-text_length);
- macro_assembler->GoTo(&second_choice);
- return new_trace;
-}
-
-int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
- Trace* trace) {
- int eats_at_least = PreloadState::kEatsAtLeastNotYetInitialized;
- if (alternatives_->length() != 2) return eats_at_least;
-
- GuardedAlternative alt1 = alternatives_->at(1);
- if (alt1.guards() != nullptr && alt1.guards()->length() != 0) {
- return eats_at_least;
- }
- RegExpNode* eats_anything_node = alt1.node();
- if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) != this) {
- return eats_at_least;
- }
-
- // Really we should be creating a new trace when we execute this function,
- // but there is no need, because the code it generates cannot backtrack, and
- // we always arrive here with a trivial trace (since it's the entry to a
- // loop. That also implies that there are no preloaded characters, which is
- // good, because it means we won't be violating any assumptions by
- // overwriting those characters with new load instructions.
- DCHECK(trace->is_trivial());
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- Isolate* isolate = macro_assembler->isolate();
- // At this point we know that we are at a non-greedy loop that will eat
- // any character one at a time. Any non-anchored regexp has such a
- // loop prepended to it in order to find where it starts. We look for
- // a pattern of the form ...abc... where we can look 6 characters ahead
- // and step forwards 3 if the character is not one of abc. Abc need
- // not be atoms, they can be any reasonably limited character class or
- // small alternation.
- BoyerMooreLookahead* bm = bm_info(false);
- if (bm == nullptr) {
- eats_at_least = Min(kMaxLookaheadForBoyerMoore,
- EatsAtLeast(kMaxLookaheadForBoyerMoore,
- kRecursionBudget,
- false));
- if (eats_at_least >= 1) {
- bm = new(zone()) BoyerMooreLookahead(eats_at_least,
- compiler,
- zone());
- GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(isolate, 0, kRecursionBudget, bm, false);
- }
- }
- if (bm != nullptr) {
- bm->EmitSkipInstructions(macro_assembler);
- }
- return eats_at_least;
-}
-
-
-void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
- AlternativeGenerationList* alt_gens,
- int first_choice,
- Trace* trace,
- PreloadState* preload) {
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- SetUpPreLoad(compiler, trace, preload);
-
- // For now we just call all choices one after the other. The idea ultimately
- // is to use the Dispatch table to try only the relevant ones.
- int choice_count = alternatives_->length();
-
- int new_flush_budget = trace->flush_budget() / choice_count;
-
- for (int i = first_choice; i < choice_count; i++) {
- bool is_last = i == choice_count - 1;
- bool fall_through_on_failure = !is_last;
- GuardedAlternative alternative = alternatives_->at(i);
- AlternativeGeneration* alt_gen = alt_gens->at(i);
- alt_gen->quick_check_details.set_characters(preload->preload_characters_);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == nullptr) ? 0 : guards->length();
- Trace new_trace(*trace);
- new_trace.set_characters_preloaded(preload->preload_is_current_ ?
- preload->preload_characters_ :
- 0);
- if (preload->preload_has_checked_bounds_) {
- new_trace.set_bound_checked_up_to(preload->preload_characters_);
- }
- new_trace.quick_check_performed()->Clear();
- if (not_at_start_) new_trace.set_at_start(Trace::FALSE_VALUE);
- if (!is_last) {
- new_trace.set_backtrack(&alt_gen->after);
- }
- alt_gen->expects_preload = preload->preload_is_current_;
- bool generate_full_check_inline = false;
- if (compiler->optimize() &&
- try_to_emit_quick_check_for_alternative(i == 0) &&
- alternative.node()->EmitQuickCheck(
- compiler, trace, &new_trace, preload->preload_has_checked_bounds_,
- &alt_gen->possible_success, &alt_gen->quick_check_details,
- fall_through_on_failure)) {
- // Quick check was generated for this choice.
- preload->preload_is_current_ = true;
- preload->preload_has_checked_bounds_ = true;
- // If we generated the quick check to fall through on possible success,
- // we now need to generate the full check inline.
- if (!fall_through_on_failure) {
- macro_assembler->Bind(&alt_gen->possible_success);
- new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- new_trace.set_characters_preloaded(preload->preload_characters_);
- new_trace.set_bound_checked_up_to(preload->preload_characters_);
- generate_full_check_inline = true;
- }
- } else if (alt_gen->quick_check_details.cannot_match()) {
- if (!fall_through_on_failure) {
- macro_assembler->GoTo(trace->backtrack());
- }
- continue;
- } else {
- // No quick check was generated. Put the full code here.
- // If this is not the first choice then there could be slow checks from
- // previous cases that go here when they fail. There's no reason to
- // insist that they preload characters since the slow check we are about
- // to generate probably can't use it.
- if (i != first_choice) {
- alt_gen->expects_preload = false;
- new_trace.InvalidateCurrentCharacter();
- }
- generate_full_check_inline = true;
- }
- if (generate_full_check_inline) {
- if (new_trace.actions() != nullptr) {
- new_trace.set_flush_budget(new_flush_budget);
- }
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &new_trace);
- }
- alternative.node()->Emit(compiler, &new_trace);
- preload->preload_is_current_ = false;
- }
- macro_assembler->Bind(&alt_gen->after);
- }
-}
-
-
-void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
- Trace* trace,
- GuardedAlternative alternative,
- AlternativeGeneration* alt_gen,
- int preload_characters,
- bool next_expects_preload) {
- if (!alt_gen->possible_success.is_linked()) return;
-
- RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
- macro_assembler->Bind(&alt_gen->possible_success);
- Trace out_of_line_trace(*trace);
- out_of_line_trace.set_characters_preloaded(preload_characters);
- out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
- if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE_VALUE);
- ZoneList<Guard*>* guards = alternative.guards();
- int guard_count = (guards == nullptr) ? 0 : guards->length();
- if (next_expects_preload) {
- Label reload_current_char;
- out_of_line_trace.set_backtrack(&reload_current_char);
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
- }
- alternative.node()->Emit(compiler, &out_of_line_trace);
- macro_assembler->Bind(&reload_current_char);
- // Reload the current character, since the next quick check expects that.
- // We don't need to check bounds here because we only get into this
- // code through a quick check which already did the checked load.
- macro_assembler->LoadCurrentCharacter(trace->cp_offset(), nullptr, false,
- preload_characters);
- macro_assembler->GoTo(&(alt_gen->after));
- } else {
- out_of_line_trace.set_backtrack(&(alt_gen->after));
- for (int j = 0; j < guard_count; j++) {
- GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
- }
- alternative.node()->Emit(compiler, &out_of_line_trace);
- }
-}
-
-
-void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- DCHECK(limit_result == CONTINUE);
-
- RecursionCheck rc(compiler);
-
- switch (action_type_) {
- case STORE_POSITION: {
- Trace::DeferredCapture
- new_capture(data_.u_position_register.reg,
- data_.u_position_register.is_capture,
- trace);
- Trace new_trace = *trace;
- new_trace.add_action(&new_capture);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case INCREMENT_REGISTER: {
- Trace::DeferredIncrementRegister
- new_increment(data_.u_increment_register.reg);
- Trace new_trace = *trace;
- new_trace.add_action(&new_increment);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case SET_REGISTER: {
- Trace::DeferredSetRegister
- new_set(data_.u_store_register.reg, data_.u_store_register.value);
- Trace new_trace = *trace;
- new_trace.add_action(&new_set);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case CLEAR_CAPTURES: {
- Trace::DeferredClearCaptures
- new_capture(Interval(data_.u_clear_captures.range_from,
- data_.u_clear_captures.range_to));
- Trace new_trace = *trace;
- new_trace.add_action(&new_capture);
- on_success()->Emit(compiler, &new_trace);
- break;
- }
- case BEGIN_SUBMATCH:
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- } else {
- assembler->WriteCurrentPositionToRegister(
- data_.u_submatch.current_position_register, 0);
- assembler->WriteStackPointerToRegister(
- data_.u_submatch.stack_pointer_register);
- on_success()->Emit(compiler, trace);
- }
- break;
- case EMPTY_MATCH_CHECK: {
- int start_pos_reg = data_.u_empty_match_check.start_register;
- int stored_pos = 0;
- int rep_reg = data_.u_empty_match_check.repetition_register;
- bool has_minimum = (rep_reg != RegExpCompiler::kNoRegister);
- bool know_dist = trace->GetStoredPosition(start_pos_reg, &stored_pos);
- if (know_dist && !has_minimum && stored_pos == trace->cp_offset()) {
- // If we know we haven't advanced and there is no minimum we
- // can just backtrack immediately.
- assembler->GoTo(trace->backtrack());
- } else if (know_dist && stored_pos < trace->cp_offset()) {
- // If we know we've advanced we can generate the continuation
- // immediately.
- on_success()->Emit(compiler, trace);
- } else if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- } else {
- Label skip_empty_check;
- // If we have a minimum number of repetitions we check the current
- // number first and skip the empty check if it's not enough.
- if (has_minimum) {
- int limit = data_.u_empty_match_check.repetition_limit;
- assembler->IfRegisterLT(rep_reg, limit, &skip_empty_check);
- }
- // If the match is empty we bail out, otherwise we fall through
- // to the on-success continuation.
- assembler->IfRegisterEqPos(data_.u_empty_match_check.start_register,
- trace->backtrack());
- assembler->Bind(&skip_empty_check);
- on_success()->Emit(compiler, trace);
- }
- break;
- }
- case POSITIVE_SUBMATCH_SUCCESS: {
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
- assembler->ReadCurrentPositionFromRegister(
- data_.u_submatch.current_position_register);
- assembler->ReadStackPointerFromRegister(
- data_.u_submatch.stack_pointer_register);
- int clear_register_count = data_.u_submatch.clear_register_count;
- if (clear_register_count == 0) {
- on_success()->Emit(compiler, trace);
- return;
- }
- int clear_registers_from = data_.u_submatch.clear_register_from;
- Label clear_registers_backtrack;
- Trace new_trace = *trace;
- new_trace.set_backtrack(&clear_registers_backtrack);
- on_success()->Emit(compiler, &new_trace);
-
- assembler->Bind(&clear_registers_backtrack);
- int clear_registers_to = clear_registers_from + clear_register_count - 1;
- assembler->ClearRegisters(clear_registers_from, clear_registers_to);
-
- DCHECK(trace->backtrack() == nullptr);
- assembler->Backtrack();
- return;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
- RegExpMacroAssembler* assembler = compiler->macro_assembler();
- if (!trace->is_trivial()) {
- trace->Flush(compiler, this);
- return;
- }
-
- LimitResult limit_result = LimitVersions(compiler, trace);
- if (limit_result == DONE) return;
- DCHECK(limit_result == CONTINUE);
-
- RecursionCheck rc(compiler);
-
- DCHECK_EQ(start_reg_ + 1, end_reg_);
- if (IgnoreCase(flags_)) {
- assembler->CheckNotBackReferenceIgnoreCase(
- start_reg_, read_backward(), IsUnicode(flags_), trace->backtrack());
- } else {
- assembler->CheckNotBackReference(start_reg_, read_backward(),
- trace->backtrack());
- }
- // We are going to advance backward, so we may end up at the start.
- if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
-
- // Check that the back reference does not end inside a surrogate pair.
- if (IsUnicode(flags_) && !compiler->one_byte()) {
- assembler->CheckNotInSurrogatePair(trace->cp_offset(), trace->backtrack());
- }
- on_success()->Emit(compiler, trace);
-}
-
-
-// -------------------------------------------------------------------
-// Dot/dotty output
-
-
-#ifdef DEBUG
-
-
-class DotPrinter: public NodeVisitor {
- public:
- DotPrinter(std::ostream& os, bool ignore_case) // NOLINT
- : os_(os),
- ignore_case_(ignore_case) {}
- void PrintNode(const char* label, RegExpNode* node);
- void Visit(RegExpNode* node);
- void PrintAttributes(RegExpNode* from);
- void PrintOnFailure(RegExpNode* from, RegExpNode* to);
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- private:
- std::ostream& os_;
- bool ignore_case_;
-};
-
-
-void DotPrinter::PrintNode(const char* label, RegExpNode* node) {
- os_ << "digraph G {\n graph [label=\"";
- for (int i = 0; label[i]; i++) {
- switch (label[i]) {
- case '\\':
- os_ << "\\\\";
- break;
- case '"':
- os_ << "\"";
- break;
- default:
- os_ << label[i];
- break;
- }
- }
- os_ << "\"];\n";
- Visit(node);
- os_ << "}" << std::endl;
-}
-
-
-void DotPrinter::Visit(RegExpNode* node) {
- if (node->info()->visited) return;
- node->info()->visited = true;
- node->Accept(this);
-}
-
-
-void DotPrinter::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
- os_ << " n" << from << " -> n" << on_failure << " [style=dotted];\n";
- Visit(on_failure);
-}
-
-
-class TableEntryBodyPrinter {
- public:
- TableEntryBodyPrinter(std::ostream& os, ChoiceNode* choice) // NOLINT
- : os_(os),
- choice_(choice) {}
- void Call(uc16 from, DispatchTable::Entry entry) {
- OutSet* out_set = entry.out_set();
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (out_set->Get(i)) {
- os_ << " n" << choice() << ":s" << from << "o" << i << " -> n"
- << choice()->alternatives()->at(i).node() << ";\n";
- }
- }
- }
- private:
- ChoiceNode* choice() { return choice_; }
- std::ostream& os_;
- ChoiceNode* choice_;
-};
-
-
-class TableEntryHeaderPrinter {
- public:
- explicit TableEntryHeaderPrinter(std::ostream& os) // NOLINT
- : first_(true),
- os_(os) {}
- void Call(uc16 from, DispatchTable::Entry entry) {
- if (first_) {
- first_ = false;
- } else {
- os_ << "|";
- }
- os_ << "{\\" << AsUC16(from) << "-\\" << AsUC16(entry.to()) << "|{";
- OutSet* out_set = entry.out_set();
- int priority = 0;
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (out_set->Get(i)) {
- if (priority > 0) os_ << "|";
- os_ << "<s" << from << "o" << i << "> " << priority;
- priority++;
- }
- }
- os_ << "}}";
- }
-
- private:
- bool first_;
- std::ostream& os_;
-};
-
-
-class AttributePrinter {
- public:
- explicit AttributePrinter(std::ostream& os) // NOLINT
- : os_(os),
- first_(true) {}
- void PrintSeparator() {
- if (first_) {
- first_ = false;
- } else {
- os_ << "|";
- }
- }
- void PrintBit(const char* name, bool value) {
- if (!value) return;
- PrintSeparator();
- os_ << "{" << name << "}";
- }
- void PrintPositive(const char* name, int value) {
- if (value < 0) return;
- PrintSeparator();
- os_ << "{" << name << "|" << value << "}";
- }
-
- private:
- std::ostream& os_;
- bool first_;
-};
-
-
-void DotPrinter::PrintAttributes(RegExpNode* that) {
- os_ << " a" << that << " [shape=Mrecord, color=grey, fontcolor=grey, "
- << "margin=0.1, fontsize=10, label=\"{";
- AttributePrinter printer(os_);
- NodeInfo* info = that->info();
- printer.PrintBit("NI", info->follows_newline_interest);
- printer.PrintBit("WI", info->follows_word_interest);
- printer.PrintBit("SI", info->follows_start_interest);
- Label* label = that->label();
- if (label->is_bound())
- printer.PrintPositive("@", label->pos());
- os_ << "}\"];\n"
- << " a" << that << " -> n" << that
- << " [style=dashed, color=grey, arrowhead=none];\n";
-}
-
-
-static const bool kPrintDispatchTable = false;
-void DotPrinter::VisitChoice(ChoiceNode* that) {
- if (kPrintDispatchTable) {
- os_ << " n" << that << " [shape=Mrecord, label=\"";
- TableEntryHeaderPrinter header_printer(os_);
- that->GetTable(ignore_case_)->ForEach(&header_printer);
- os_ << "\"]\n";
- PrintAttributes(that);
- TableEntryBodyPrinter body_printer(os_, that);
- that->GetTable(ignore_case_)->ForEach(&body_printer);
- } else {
- os_ << " n" << that << " [shape=Mrecord, label=\"?\"];\n";
- for (int i = 0; i < that->alternatives()->length(); i++) {
- GuardedAlternative alt = that->alternatives()->at(i);
- os_ << " n" << that << " -> n" << alt.node();
- }
- }
- for (int i = 0; i < that->alternatives()->length(); i++) {
- GuardedAlternative alt = that->alternatives()->at(i);
- alt.node()->Accept(this);
- }
-}
-
-
-void DotPrinter::VisitText(TextNode* that) {
- Zone* zone = that->zone();
- os_ << " n" << that << " [label=\"";
- for (int i = 0; i < that->elements()->length(); i++) {
- if (i > 0) os_ << " ";
- TextElement elm = that->elements()->at(i);
- switch (elm.text_type()) {
- case TextElement::ATOM: {
- Vector<const uc16> data = elm.atom()->data();
- for (int i = 0; i < data.length(); i++) {
- os_ << static_cast<char>(data[i]);
- }
- break;
- }
- case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* node = elm.char_class();
- os_ << "[";
- if (node->is_negated()) os_ << "^";
- for (int j = 0; j < node->ranges(zone)->length(); j++) {
- CharacterRange range = node->ranges(zone)->at(j);
- os_ << AsUC16(range.from()) << "-" << AsUC16(range.to());
- }
- os_ << "]";
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- os_ << "\", shape=box, peripheries=2];\n";
- PrintAttributes(that);
- os_ << " n" << that << " -> n" << that->on_success() << ";\n";
- Visit(that->on_success());
-}
-
-
-void DotPrinter::VisitBackReference(BackReferenceNode* that) {
- os_ << " n" << that << " [label=\"$" << that->start_register() << "..$"
- << that->end_register() << "\", shape=doubleoctagon];\n";
- PrintAttributes(that);
- os_ << " n" << that << " -> n" << that->on_success() << ";\n";
- Visit(that->on_success());
-}
-
-
-void DotPrinter::VisitEnd(EndNode* that) {
- os_ << " n" << that << " [style=bold, shape=point];\n";
- PrintAttributes(that);
-}
-
-
-void DotPrinter::VisitAssertion(AssertionNode* that) {
- os_ << " n" << that << " [";
- switch (that->assertion_type()) {
- case AssertionNode::AT_END:
- os_ << "label=\"$\", shape=septagon";
- break;
- case AssertionNode::AT_START:
- os_ << "label=\"^\", shape=septagon";
- break;
- case AssertionNode::AT_BOUNDARY:
- os_ << "label=\"\\b\", shape=septagon";
- break;
- case AssertionNode::AT_NON_BOUNDARY:
- os_ << "label=\"\\B\", shape=septagon";
- break;
- case AssertionNode::AFTER_NEWLINE:
- os_ << "label=\"(?<=\\n)\", shape=septagon";
- break;
- }
- os_ << "];\n";
- PrintAttributes(that);
- RegExpNode* successor = that->on_success();
- os_ << " n" << that << " -> n" << successor << ";\n";
- Visit(successor);
-}
-
-
-void DotPrinter::VisitAction(ActionNode* that) {
- os_ << " n" << that << " [";
- switch (that->action_type_) {
- case ActionNode::SET_REGISTER:
- os_ << "label=\"$" << that->data_.u_store_register.reg
- << ":=" << that->data_.u_store_register.value << "\", shape=octagon";
- break;
- case ActionNode::INCREMENT_REGISTER:
- os_ << "label=\"$" << that->data_.u_increment_register.reg
- << "++\", shape=octagon";
- break;
- case ActionNode::STORE_POSITION:
- os_ << "label=\"$" << that->data_.u_position_register.reg
- << ":=$pos\", shape=octagon";
- break;
- case ActionNode::BEGIN_SUBMATCH:
- os_ << "label=\"$" << that->data_.u_submatch.current_position_register
- << ":=$pos,begin\", shape=septagon";
- break;
- case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
- os_ << "label=\"escape\", shape=septagon";
- break;
- case ActionNode::EMPTY_MATCH_CHECK:
- os_ << "label=\"$" << that->data_.u_empty_match_check.start_register
- << "=$pos?,$" << that->data_.u_empty_match_check.repetition_register
- << "<" << that->data_.u_empty_match_check.repetition_limit
- << "?\", shape=septagon";
- break;
- case ActionNode::CLEAR_CAPTURES: {
- os_ << "label=\"clear $" << that->data_.u_clear_captures.range_from
- << " to $" << that->data_.u_clear_captures.range_to
- << "\", shape=septagon";
- break;
- }
- }
- os_ << "];\n";
- PrintAttributes(that);
- RegExpNode* successor = that->on_success();
- os_ << " n" << that << " -> n" << successor << ";\n";
- Visit(successor);
-}
-
-
-class DispatchTableDumper {
- public:
- explicit DispatchTableDumper(std::ostream& os) : os_(os) {}
- void Call(uc16 key, DispatchTable::Entry entry);
- private:
- std::ostream& os_;
-};
-
-
-void DispatchTableDumper::Call(uc16 key, DispatchTable::Entry entry) {
- os_ << "[" << AsUC16(key) << "-" << AsUC16(entry.to()) << "]: {";
- OutSet* set = entry.out_set();
- bool first = true;
- for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
- if (set->Get(i)) {
- if (first) {
- first = false;
- } else {
- os_ << ", ";
- }
- os_ << i;
- }
- }
- os_ << "}\n";
-}
-
-
-void DispatchTable::Dump() {
- OFStream os(stderr);
- DispatchTableDumper dumper(os);
- tree()->ForEach(&dumper);
-}
-
-
-void RegExpEngine::DotPrint(const char* label,
- RegExpNode* node,
- bool ignore_case) {
- StdoutStream os;
- DotPrinter printer(os, ignore_case);
- printer.PrintNode(label, node);
-}
-
-
-#endif // DEBUG
-
-
-// -------------------------------------------------------------------
-// Tree to graph conversion
-
-RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<TextElement>* elms =
- new(compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
- elms->Add(TextElement::Atom(this), compiler->zone());
- return new (compiler->zone())
- TextNode(elms, compiler->read_backward(), on_success);
-}
-
-
-RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new (compiler->zone())
- TextNode(elements(), compiler->read_backward(), on_success);
-}
-
-
-static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
- const int* special_class,
- int length) {
- length--; // Remove final marker.
- DCHECK_EQ(kRangeEndMarker, special_class[length]);
- DCHECK_NE(0, ranges->length());
- DCHECK_NE(0, length);
- DCHECK_NE(0, special_class[0]);
- if (ranges->length() != (length >> 1) + 1) {
- return false;
- }
- CharacterRange range = ranges->at(0);
- if (range.from() != 0) {
- return false;
- }
- for (int i = 0; i < length; i += 2) {
- if (special_class[i] != (range.to() + 1)) {
- return false;
- }
- range = ranges->at((i >> 1) + 1);
- if (special_class[i+1] != range.from()) {
- return false;
- }
- }
- if (range.to() != String::kMaxCodePoint) {
- return false;
- }
- return true;
-}
-
-
-static bool CompareRanges(ZoneList<CharacterRange>* ranges,
- const int* special_class,
- int length) {
- length--; // Remove final marker.
- DCHECK_EQ(kRangeEndMarker, special_class[length]);
- if (ranges->length() * 2 != length) {
- return false;
- }
- for (int i = 0; i < length; i += 2) {
- CharacterRange range = ranges->at(i >> 1);
- if (range.from() != special_class[i] ||
- range.to() != special_class[i + 1] - 1) {
- return false;
- }
- }
- return true;
-}
-
-
-bool RegExpCharacterClass::is_standard(Zone* zone) {
- // TODO(lrn): Remove need for this function, by not throwing away information
- // along the way.
- if (is_negated()) {
- return false;
- }
- if (set_.is_standard()) {
- return true;
- }
- if (CompareRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
- set_.set_standard_set_type('s');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
- set_.set_standard_set_type('S');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone),
- kLineTerminatorRanges,
- kLineTerminatorRangeCount)) {
- set_.set_standard_set_type('.');
- return true;
- }
- if (CompareRanges(set_.ranges(zone),
- kLineTerminatorRanges,
- kLineTerminatorRangeCount)) {
- set_.set_standard_set_type('n');
- return true;
- }
- if (CompareRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
- set_.set_standard_set_type('w');
- return true;
- }
- if (CompareInverseRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
- set_.set_standard_set_type('W');
- return true;
- }
- return false;
-}
-
-
-UnicodeRangeSplitter::UnicodeRangeSplitter(Zone* zone,
- ZoneList<CharacterRange>* base)
- : zone_(zone),
- table_(zone),
- bmp_(nullptr),
- lead_surrogates_(nullptr),
- trail_surrogates_(nullptr),
- non_bmp_(nullptr) {
- // The unicode range splitter categorizes given character ranges into:
- // - Code points from the BMP representable by one code unit.
- // - Code points outside the BMP that need to be split into surrogate pairs.
- // - Lone lead surrogates.
- // - Lone trail surrogates.
- // Lone surrogates are valid code points, even though no actual characters.
- // They require special matching to make sure we do not split surrogate pairs.
- // We use the dispatch table to accomplish this. The base range is split up
- // by the table by the overlay ranges, and the Call callback is used to
- // filter and collect ranges for each category.
- for (int i = 0; i < base->length(); i++) {
- table_.AddRange(base->at(i), kBase, zone_);
- }
- // Add overlay ranges.
- table_.AddRange(CharacterRange::Range(0, kLeadSurrogateStart - 1),
- kBmpCodePoints, zone_);
- table_.AddRange(CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd),
- kLeadSurrogates, zone_);
- table_.AddRange(
- CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
- kTrailSurrogates, zone_);
- table_.AddRange(
- CharacterRange::Range(kTrailSurrogateEnd + 1, kNonBmpStart - 1),
- kBmpCodePoints, zone_);
- table_.AddRange(CharacterRange::Range(kNonBmpStart, kNonBmpEnd),
- kNonBmpCodePoints, zone_);
- table_.ForEach(this);
-}
-
-
-void UnicodeRangeSplitter::Call(uc32 from, DispatchTable::Entry entry) {
- OutSet* outset = entry.out_set();
- if (!outset->Get(kBase)) return;
- ZoneList<CharacterRange>** target = nullptr;
- if (outset->Get(kBmpCodePoints)) {
- target = &bmp_;
- } else if (outset->Get(kLeadSurrogates)) {
- target = &lead_surrogates_;
- } else if (outset->Get(kTrailSurrogates)) {
- target = &trail_surrogates_;
- } else {
- DCHECK(outset->Get(kNonBmpCodePoints));
- target = &non_bmp_;
- }
- if (*target == nullptr)
- *target = new (zone_) ZoneList<CharacterRange>(2, zone_);
- (*target)->Add(CharacterRange::Range(entry.from(), entry.to()), zone_);
-}
-
-void AddBmpCharacters(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success, UnicodeRangeSplitter* splitter) {
- ZoneList<CharacterRange>* bmp = splitter->bmp();
- if (bmp == nullptr) return;
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- result->AddAlternative(GuardedAlternative(TextNode::CreateForCharacterRanges(
- compiler->zone(), bmp, compiler->read_backward(), on_success,
- default_flags)));
-}
-
-void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success,
- UnicodeRangeSplitter* splitter) {
- ZoneList<CharacterRange>* non_bmp = splitter->non_bmp();
- if (non_bmp == nullptr) return;
- DCHECK(!compiler->one_byte());
- Zone* zone = compiler->zone();
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- CharacterRange::Canonicalize(non_bmp);
- for (int i = 0; i < non_bmp->length(); i++) {
- // Match surrogate pair.
- // E.g. [\u10005-\u11005] becomes
- // \ud800[\udc05-\udfff]|
- // [\ud801-\ud803][\udc00-\udfff]|
- // \ud804[\udc00-\udc05]
- uc32 from = non_bmp->at(i).from();
- uc32 to = non_bmp->at(i).to();
- uc16 from_l = unibrow::Utf16::LeadSurrogate(from);
- uc16 from_t = unibrow::Utf16::TrailSurrogate(from);
- uc16 to_l = unibrow::Utf16::LeadSurrogate(to);
- uc16 to_t = unibrow::Utf16::TrailSurrogate(to);
- if (from_l == to_l) {
- // The lead surrogate is the same.
- result->AddAlternative(
- GuardedAlternative(TextNode::CreateForSurrogatePair(
- zone, CharacterRange::Singleton(from_l),
- CharacterRange::Range(from_t, to_t), compiler->read_backward(),
- on_success, default_flags)));
- } else {
- if (from_t != kTrailSurrogateStart) {
- // Add [from_l][from_t-\udfff]
- result->AddAlternative(
- GuardedAlternative(TextNode::CreateForSurrogatePair(
- zone, CharacterRange::Singleton(from_l),
- CharacterRange::Range(from_t, kTrailSurrogateEnd),
- compiler->read_backward(), on_success, default_flags)));
- from_l++;
- }
- if (to_t != kTrailSurrogateEnd) {
- // Add [to_l][\udc00-to_t]
- result->AddAlternative(
- GuardedAlternative(TextNode::CreateForSurrogatePair(
- zone, CharacterRange::Singleton(to_l),
- CharacterRange::Range(kTrailSurrogateStart, to_t),
- compiler->read_backward(), on_success, default_flags)));
- to_l--;
- }
- if (from_l <= to_l) {
- // Add [from_l-to_l][\udc00-\udfff]
- result->AddAlternative(
- GuardedAlternative(TextNode::CreateForSurrogatePair(
- zone, CharacterRange::Range(from_l, to_l),
- CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
- compiler->read_backward(), on_success, default_flags)));
- }
- }
- }
-}
-
-RegExpNode* NegativeLookaroundAgainstReadDirectionAndMatch(
- RegExpCompiler* compiler, ZoneList<CharacterRange>* lookbehind,
- ZoneList<CharacterRange>* match, RegExpNode* on_success, bool read_backward,
- JSRegExp::Flags flags) {
- Zone* zone = compiler->zone();
- RegExpNode* match_node = TextNode::CreateForCharacterRanges(
- zone, match, read_backward, on_success, flags);
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
- RegExpLookaround::Builder lookaround(false, match_node, stack_register,
- position_register);
- RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
- zone, lookbehind, !read_backward, lookaround.on_match_success(), flags);
- return lookaround.ForMatch(negative_match);
-}
-
-RegExpNode* MatchAndNegativeLookaroundInReadDirection(
- RegExpCompiler* compiler, ZoneList<CharacterRange>* match,
- ZoneList<CharacterRange>* lookahead, RegExpNode* on_success,
- bool read_backward, JSRegExp::Flags flags) {
- Zone* zone = compiler->zone();
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
- RegExpLookaround::Builder lookaround(false, on_success, stack_register,
- position_register);
- RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
- zone, lookahead, read_backward, lookaround.on_match_success(), flags);
- return TextNode::CreateForCharacterRanges(
- zone, match, read_backward, lookaround.ForMatch(negative_match), flags);
-}
-
-void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success,
- UnicodeRangeSplitter* splitter) {
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- ZoneList<CharacterRange>* lead_surrogates = splitter->lead_surrogates();
- if (lead_surrogates == nullptr) return;
- Zone* zone = compiler->zone();
- // E.g. \ud801 becomes \ud801(?![\udc00-\udfff]).
- ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
-
- RegExpNode* match;
- if (compiler->read_backward()) {
- // Reading backward. Assert that reading forward, there is no trail
- // surrogate, and then backward match the lead surrogate.
- match = NegativeLookaroundAgainstReadDirectionAndMatch(
- compiler, trail_surrogates, lead_surrogates, on_success, true,
- default_flags);
- } else {
- // Reading forward. Forward match the lead surrogate and assert that
- // no trail surrogate follows.
- match = MatchAndNegativeLookaroundInReadDirection(
- compiler, lead_surrogates, trail_surrogates, on_success, false,
- default_flags);
- }
- result->AddAlternative(GuardedAlternative(match));
-}
-
-void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
- RegExpNode* on_success,
- UnicodeRangeSplitter* splitter) {
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- ZoneList<CharacterRange>* trail_surrogates = splitter->trail_surrogates();
- if (trail_surrogates == nullptr) return;
- Zone* zone = compiler->zone();
- // E.g. \udc01 becomes (?<![\ud800-\udbff])\udc01
- ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
-
- RegExpNode* match;
- if (compiler->read_backward()) {
- // Reading backward. Backward match the trail surrogate and assert that no
- // lead surrogate precedes it.
- match = MatchAndNegativeLookaroundInReadDirection(
- compiler, trail_surrogates, lead_surrogates, on_success, true,
- default_flags);
- } else {
- // Reading forward. Assert that reading backward, there is no lead
- // surrogate, and then forward match the trail surrogate.
- match = NegativeLookaroundAgainstReadDirectionAndMatch(
- compiler, lead_surrogates, trail_surrogates, on_success, false,
- default_flags);
- }
- result->AddAlternative(GuardedAlternative(match));
-}
-
-RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- // This implements ES2015 21.2.5.2.3, AdvanceStringIndex.
- DCHECK(!compiler->read_backward());
- Zone* zone = compiler->zone();
- // Advance any character. If the character happens to be a lead surrogate and
- // we advanced into the middle of a surrogate pair, it will work out, as
- // nothing will match from there. We will have to advance again, consuming
- // the associated trail surrogate.
- ZoneList<CharacterRange>* range = CharacterRange::List(
- zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- return TextNode::CreateForCharacterRanges(zone, range, false, on_success,
- default_flags);
-}
-
-void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges, Zone* zone) {
-#ifdef V8_INTL_SUPPORT
- DCHECK(CharacterRange::IsCanonical(ranges));
-
- // Micro-optimization to avoid passing large ranges to UnicodeSet::closeOver.
- // See also https://crbug.com/v8/6727.
- // TODO(jgruber): This only covers the special case of the {0,0x10FFFF} range,
- // which we use frequently internally. But large ranges can also easily be
- // created by the user. We might want to have a more general caching mechanism
- // for such ranges.
- if (ranges->length() == 1 && ranges->at(0).IsEverything(kNonBmpEnd)) return;
-
- // Use ICU to compute the case fold closure over the ranges.
- icu::UnicodeSet set;
- for (int i = 0; i < ranges->length(); i++) {
- set.add(ranges->at(i).from(), ranges->at(i).to());
- }
- ranges->Clear();
- set.closeOver(USET_CASE_INSENSITIVE);
- // Full case mapping map single characters to multiple characters.
- // Those are represented as strings in the set. Remove them so that
- // we end up with only simple and common case mappings.
- set.removeAllStrings();
- for (int i = 0; i < set.getRangeCount(); i++) {
- ranges->Add(CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
- zone);
- }
- // No errors and everything we collected have been ranges.
- CharacterRange::Canonicalize(ranges);
-#endif // V8_INTL_SUPPORT
-}
-
-
-RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- set_.Canonicalize();
- Zone* zone = compiler->zone();
- ZoneList<CharacterRange>* ranges = this->ranges(zone);
- if (NeedsUnicodeCaseEquivalents(flags_)) {
- AddUnicodeCaseEquivalents(ranges, zone);
- }
- if (IsUnicode(flags_) && !compiler->one_byte() &&
- !contains_split_surrogate()) {
- if (is_negated()) {
- ZoneList<CharacterRange>* negated =
- new (zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::Negate(ranges, negated, zone);
- ranges = negated;
- }
- if (ranges->length() == 0) {
- JSRegExp::Flags default_flags;
- RegExpCharacterClass* fail =
- new (zone) RegExpCharacterClass(zone, ranges, default_flags);
- return new (zone) TextNode(fail, compiler->read_backward(), on_success);
- }
- if (standard_type() == '*') {
- return UnanchoredAdvance(compiler, on_success);
- } else {
- ChoiceNode* result = new (zone) ChoiceNode(2, zone);
- UnicodeRangeSplitter splitter(zone, ranges);
- AddBmpCharacters(compiler, result, on_success, &splitter);
- AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter);
- AddLoneLeadSurrogates(compiler, result, on_success, &splitter);
- AddLoneTrailSurrogates(compiler, result, on_success, &splitter);
- return result;
- }
- } else {
- return new (zone) TextNode(this, compiler->read_backward(), on_success);
- }
-}
-
-
-int CompareFirstChar(RegExpTree* const* a, RegExpTree* const* b) {
- RegExpAtom* atom1 = (*a)->AsAtom();
- RegExpAtom* atom2 = (*b)->AsAtom();
- uc16 character1 = atom1->data().at(0);
- uc16 character2 = atom2->data().at(0);
- if (character1 < character2) return -1;
- if (character1 > character2) return 1;
- return 0;
-}
-
-#ifdef V8_INTL_SUPPORT
-
-// Case Insensitve comparesion
-int CompareFirstCharCaseInsensitve(RegExpTree* const* a, RegExpTree* const* b) {
- RegExpAtom* atom1 = (*a)->AsAtom();
- RegExpAtom* atom2 = (*b)->AsAtom();
- icu::UnicodeString character1(atom1->data().at(0));
- return character1.caseCompare(atom2->data().at(0), U_FOLD_CASE_DEFAULT);
-}
-
-#else
-
-static unibrow::uchar Canonical(
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
- unibrow::uchar c) {
- unibrow::uchar chars[unibrow::Ecma262Canonicalize::kMaxWidth];
- int length = canonicalize->get(c, '\0', chars);
- DCHECK_LE(length, 1);
- unibrow::uchar canonical = c;
- if (length == 1) canonical = chars[0];
- return canonical;
-}
-
-int CompareFirstCharCaseIndependent(
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
- RegExpTree* const* a, RegExpTree* const* b) {
- RegExpAtom* atom1 = (*a)->AsAtom();
- RegExpAtom* atom2 = (*b)->AsAtom();
- unibrow::uchar character1 = atom1->data().at(0);
- unibrow::uchar character2 = atom2->data().at(0);
- if (character1 == character2) return 0;
- if (character1 >= 'a' || character2 >= 'a') {
- character1 = Canonical(canonicalize, character1);
- character2 = Canonical(canonicalize, character2);
- }
- return static_cast<int>(character1) - static_cast<int>(character2);
-}
-#endif // V8_INTL_SUPPORT
-
-// We can stable sort runs of atoms, since the order does not matter if they
-// start with different characters.
-// Returns true if any consecutive atoms were found.
-bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- int length = alternatives->length();
- bool found_consecutive_atoms = false;
- for (int i = 0; i < length; i++) {
- while (i < length) {
- RegExpTree* alternative = alternatives->at(i);
- if (alternative->IsAtom()) break;
- i++;
- }
- // i is length or it is the index of an atom.
- if (i == length) break;
- int first_atom = i;
- JSRegExp::Flags flags = alternatives->at(i)->AsAtom()->flags();
- i++;
- while (i < length) {
- RegExpTree* alternative = alternatives->at(i);
- if (!alternative->IsAtom()) break;
- if (alternative->AsAtom()->flags() != flags) break;
- i++;
- }
- // Sort atoms to get ones with common prefixes together.
- // This step is more tricky if we are in a case-independent regexp,
- // because it would change /is|I/ to /I|is/, and order matters when
- // the regexp parts don't match only disjoint starting points. To fix
- // this we have a version of CompareFirstChar that uses case-
- // independent character classes for comparison.
- DCHECK_LT(first_atom, alternatives->length());
- DCHECK_LE(i, alternatives->length());
- DCHECK_LE(first_atom, i);
- if (IgnoreCase(flags)) {
-#ifdef V8_INTL_SUPPORT
- alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
- i - first_atom);
-#else
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- compiler->isolate()->regexp_macro_assembler_canonicalize();
- auto compare_closure =
- [canonicalize](RegExpTree* const* a, RegExpTree* const* b) {
- return CompareFirstCharCaseIndependent(canonicalize, a, b);
- };
- alternatives->StableSort(compare_closure, first_atom, i - first_atom);
-#endif // V8_INTL_SUPPORT
- } else {
- alternatives->StableSort(CompareFirstChar, first_atom, i - first_atom);
- }
- if (i - first_atom > 1) found_consecutive_atoms = true;
- }
- return found_consecutive_atoms;
-}
-
-
-// Optimizes ab|ac|az to a(?:b|c|d).
-void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
- Zone* zone = compiler->zone();
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- int length = alternatives->length();
-
- int write_posn = 0;
- int i = 0;
- while (i < length) {
- RegExpTree* alternative = alternatives->at(i);
- if (!alternative->IsAtom()) {
- alternatives->at(write_posn++) = alternatives->at(i);
- i++;
- continue;
- }
- RegExpAtom* const atom = alternative->AsAtom();
- JSRegExp::Flags flags = atom->flags();
-#ifdef V8_INTL_SUPPORT
- icu::UnicodeString common_prefix(atom->data().at(0));
-#else
- unibrow::uchar common_prefix = atom->data().at(0);
-#endif // V8_INTL_SUPPORT
- int first_with_prefix = i;
- int prefix_length = atom->length();
- i++;
- while (i < length) {
- alternative = alternatives->at(i);
- if (!alternative->IsAtom()) break;
- RegExpAtom* const atom = alternative->AsAtom();
- if (atom->flags() != flags) break;
-#ifdef V8_INTL_SUPPORT
- icu::UnicodeString new_prefix(atom->data().at(0));
- if (new_prefix != common_prefix) {
- if (!IgnoreCase(flags)) break;
- if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
- break;
- }
-#else
- unibrow::uchar new_prefix = atom->data().at(0);
- if (new_prefix != common_prefix) {
- if (!IgnoreCase(flags)) break;
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- compiler->isolate()->regexp_macro_assembler_canonicalize();
- new_prefix = Canonical(canonicalize, new_prefix);
- common_prefix = Canonical(canonicalize, common_prefix);
- if (new_prefix != common_prefix) break;
- }
-#endif // V8_INTL_SUPPORT
- prefix_length = Min(prefix_length, atom->length());
- i++;
- }
- if (i > first_with_prefix + 2) {
- // Found worthwhile run of alternatives with common prefix of at least one
- // character. The sorting function above did not sort on more than one
- // character for reasons of correctness, but there may still be a longer
- // common prefix if the terms were similar or presorted in the input.
- // Find out how long the common prefix is.
- int run_length = i - first_with_prefix;
- RegExpAtom* const atom = alternatives->at(first_with_prefix)->AsAtom();
- for (int j = 1; j < run_length && prefix_length > 1; j++) {
- RegExpAtom* old_atom =
- alternatives->at(j + first_with_prefix)->AsAtom();
- for (int k = 1; k < prefix_length; k++) {
- if (atom->data().at(k) != old_atom->data().at(k)) {
- prefix_length = k;
- break;
- }
- }
- }
- RegExpAtom* prefix = new (zone)
- RegExpAtom(atom->data().SubVector(0, prefix_length), flags);
- ZoneList<RegExpTree*>* pair = new (zone) ZoneList<RegExpTree*>(2, zone);
- pair->Add(prefix, zone);
- ZoneList<RegExpTree*>* suffixes =
- new (zone) ZoneList<RegExpTree*>(run_length, zone);
- for (int j = 0; j < run_length; j++) {
- RegExpAtom* old_atom =
- alternatives->at(j + first_with_prefix)->AsAtom();
- int len = old_atom->length();
- if (len == prefix_length) {
- suffixes->Add(new (zone) RegExpEmpty(), zone);
- } else {
- RegExpTree* suffix = new (zone) RegExpAtom(
- old_atom->data().SubVector(prefix_length, old_atom->length()),
- flags);
- suffixes->Add(suffix, zone);
- }
- }
- pair->Add(new (zone) RegExpDisjunction(suffixes), zone);
- alternatives->at(write_posn++) = new (zone) RegExpAlternative(pair);
- } else {
- // Just copy any non-worthwhile alternatives.
- for (int j = first_with_prefix; j < i; j++) {
- alternatives->at(write_posn++) = alternatives->at(j);
- }
- }
- }
- alternatives->Rewind(write_posn); // Trim end of array.
-}
-
-
-// Optimizes b|c|z to [bcz].
-void RegExpDisjunction::FixSingleCharacterDisjunctions(
- RegExpCompiler* compiler) {
- Zone* zone = compiler->zone();
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- int length = alternatives->length();
-
- int write_posn = 0;
- int i = 0;
- while (i < length) {
- RegExpTree* alternative = alternatives->at(i);
- if (!alternative->IsAtom()) {
- alternatives->at(write_posn++) = alternatives->at(i);
- i++;
- continue;
- }
- RegExpAtom* const atom = alternative->AsAtom();
- if (atom->length() != 1) {
- alternatives->at(write_posn++) = alternatives->at(i);
- i++;
- continue;
- }
- JSRegExp::Flags flags = atom->flags();
- DCHECK_IMPLIES(IsUnicode(flags),
- !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
- bool contains_trail_surrogate =
- unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
- int first_in_run = i;
- i++;
- // Find a run of single-character atom alternatives that have identical
- // flags (case independence and unicode-ness).
- while (i < length) {
- alternative = alternatives->at(i);
- if (!alternative->IsAtom()) break;
- RegExpAtom* const atom = alternative->AsAtom();
- if (atom->length() != 1) break;
- if (atom->flags() != flags) break;
- DCHECK_IMPLIES(IsUnicode(flags),
- !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
- contains_trail_surrogate |=
- unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
- i++;
- }
- if (i > first_in_run + 1) {
- // Found non-trivial run of single-character alternatives.
- int run_length = i - first_in_run;
- ZoneList<CharacterRange>* ranges =
- new (zone) ZoneList<CharacterRange>(2, zone);
- for (int j = 0; j < run_length; j++) {
- RegExpAtom* old_atom = alternatives->at(j + first_in_run)->AsAtom();
- DCHECK_EQ(old_atom->length(), 1);
- ranges->Add(CharacterRange::Singleton(old_atom->data().at(0)), zone);
- }
- RegExpCharacterClass::CharacterClassFlags character_class_flags;
- if (IsUnicode(flags) && contains_trail_surrogate) {
- character_class_flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
- }
- alternatives->at(write_posn++) = new (zone)
- RegExpCharacterClass(zone, ranges, flags, character_class_flags);
- } else {
- // Just copy any trivial alternatives.
- for (int j = first_in_run; j < i; j++) {
- alternatives->at(write_posn++) = alternatives->at(j);
- }
- }
- }
- alternatives->Rewind(write_posn); // Trim end of array.
-}
-
-
-RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
-
- if (alternatives->length() > 2) {
- bool found_consecutive_atoms = SortConsecutiveAtoms(compiler);
- if (found_consecutive_atoms) RationalizeConsecutiveAtoms(compiler);
- FixSingleCharacterDisjunctions(compiler);
- if (alternatives->length() == 1) {
- return alternatives->at(0)->ToNode(compiler, on_success);
- }
- }
-
- int length = alternatives->length();
-
- ChoiceNode* result =
- new(compiler->zone()) ChoiceNode(length, compiler->zone());
- for (int i = 0; i < length; i++) {
- GuardedAlternative alternative(alternatives->at(i)->ToNode(compiler,
- on_success));
- result->AddAlternative(alternative);
- }
- return result;
-}
-
-
-RegExpNode* RegExpQuantifier::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return ToNode(min(),
- max(),
- is_greedy(),
- body(),
- compiler,
- on_success);
-}
-
-
-// Scoped object to keep track of how much we unroll quantifier loops in the
-// regexp graph generator.
-class RegExpExpansionLimiter {
- public:
- static const int kMaxExpansionFactor = 6;
- RegExpExpansionLimiter(RegExpCompiler* compiler, int factor)
- : compiler_(compiler),
- saved_expansion_factor_(compiler->current_expansion_factor()),
- ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
- DCHECK_LT(0, factor);
- if (ok_to_expand_) {
- if (factor > kMaxExpansionFactor) {
- // Avoid integer overflow of the current expansion factor.
- ok_to_expand_ = false;
- compiler->set_current_expansion_factor(kMaxExpansionFactor + 1);
- } else {
- int new_factor = saved_expansion_factor_ * factor;
- ok_to_expand_ = (new_factor <= kMaxExpansionFactor);
- compiler->set_current_expansion_factor(new_factor);
- }
- }
- }
-
- ~RegExpExpansionLimiter() {
- compiler_->set_current_expansion_factor(saved_expansion_factor_);
- }
-
- bool ok_to_expand() { return ok_to_expand_; }
-
- private:
- RegExpCompiler* compiler_;
- int saved_expansion_factor_;
- bool ok_to_expand_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpExpansionLimiter);
-};
-
-
-RegExpNode* RegExpQuantifier::ToNode(int min,
- int max,
- bool is_greedy,
- RegExpTree* body,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- bool not_at_start) {
- // x{f, t} becomes this:
- //
- // (r++)<-.
- // | `
- // | (x)
- // v ^
- // (r=0)-->(?)---/ [if r < t]
- // |
- // [if r >= f] \----> ...
- //
-
- // 15.10.2.5 RepeatMatcher algorithm.
- // The parser has already eliminated the case where max is 0. In the case
- // where max_match is zero the parser has removed the quantifier if min was
- // > 0 and removed the atom if min was 0. See AddQuantifierToAtom.
-
- // If we know that we cannot match zero length then things are a little
- // simpler since we don't need to make the special zero length match check
- // from step 2.1. If the min and max are small we can unroll a little in
- // this case.
- static const int kMaxUnrolledMinMatches = 3; // Unroll (foo)+ and (foo){3,}
- static const int kMaxUnrolledMaxMatches = 3; // Unroll (foo)? and (foo){x,3}
- if (max == 0) return on_success; // This can happen due to recursion.
- bool body_can_be_empty = (body->min_match() == 0);
- int body_start_reg = RegExpCompiler::kNoRegister;
- Interval capture_registers = body->CaptureRegisters();
- bool needs_capture_clearing = !capture_registers.is_empty();
- Zone* zone = compiler->zone();
-
- if (body_can_be_empty) {
- body_start_reg = compiler->AllocateRegister();
- } else if (compiler->optimize() && !needs_capture_clearing) {
- // Only unroll if there are no captures and the body can't be
- // empty.
- {
- RegExpExpansionLimiter limiter(
- compiler, min + ((max != min) ? 1 : 0));
- if (min > 0 && min <= kMaxUnrolledMinMatches && limiter.ok_to_expand()) {
- int new_max = (max == kInfinity) ? max : max - min;
- // Recurse once to get the loop or optional matches after the fixed
- // ones.
- RegExpNode* answer = ToNode(
- 0, new_max, is_greedy, body, compiler, on_success, true);
- // Unroll the forced matches from 0 to min. This can cause chains of
- // TextNodes (which the parser does not generate). These should be
- // combined if it turns out they hinder good code generation.
- for (int i = 0; i < min; i++) {
- answer = body->ToNode(compiler, answer);
- }
- return answer;
- }
- }
- if (max <= kMaxUnrolledMaxMatches && min == 0) {
- DCHECK_LT(0, max); // Due to the 'if' above.
- RegExpExpansionLimiter limiter(compiler, max);
- if (limiter.ok_to_expand()) {
- // Unroll the optional matches up to max.
- RegExpNode* answer = on_success;
- for (int i = 0; i < max; i++) {
- ChoiceNode* alternation = new(zone) ChoiceNode(2, zone);
- if (is_greedy) {
- alternation->AddAlternative(
- GuardedAlternative(body->ToNode(compiler, answer)));
- alternation->AddAlternative(GuardedAlternative(on_success));
- } else {
- alternation->AddAlternative(GuardedAlternative(on_success));
- alternation->AddAlternative(
- GuardedAlternative(body->ToNode(compiler, answer)));
- }
- answer = alternation;
- if (not_at_start && !compiler->read_backward()) {
- alternation->set_not_at_start();
- }
- }
- return answer;
- }
- }
- }
- bool has_min = min > 0;
- bool has_max = max < RegExpTree::kInfinity;
- bool needs_counter = has_min || has_max;
- int reg_ctr = needs_counter
- ? compiler->AllocateRegister()
- : RegExpCompiler::kNoRegister;
- LoopChoiceNode* center = new (zone)
- LoopChoiceNode(body->min_match() == 0, compiler->read_backward(), zone);
- if (not_at_start && !compiler->read_backward()) center->set_not_at_start();
- RegExpNode* loop_return = needs_counter
- ? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
- : static_cast<RegExpNode*>(center);
- if (body_can_be_empty) {
- // If the body can be empty we need to check if it was and then
- // backtrack.
- loop_return = ActionNode::EmptyMatchCheck(body_start_reg,
- reg_ctr,
- min,
- loop_return);
- }
- RegExpNode* body_node = body->ToNode(compiler, loop_return);
- if (body_can_be_empty) {
- // If the body can be empty we need to store the start position
- // so we can bail out if it was empty.
- body_node = ActionNode::StorePosition(body_start_reg, false, body_node);
- }
- if (needs_capture_clearing) {
- // Before entering the body of this loop we need to clear captures.
- body_node = ActionNode::ClearCaptures(capture_registers, body_node);
- }
- GuardedAlternative body_alt(body_node);
- if (has_max) {
- Guard* body_guard =
- new(zone) Guard(reg_ctr, Guard::LT, max);
- body_alt.AddGuard(body_guard, zone);
- }
- GuardedAlternative rest_alt(on_success);
- if (has_min) {
- Guard* rest_guard = new(compiler->zone()) Guard(reg_ctr, Guard::GEQ, min);
- rest_alt.AddGuard(rest_guard, zone);
- }
- if (is_greedy) {
- center->AddLoopAlternative(body_alt);
- center->AddContinueAlternative(rest_alt);
- } else {
- center->AddContinueAlternative(rest_alt);
- center->AddLoopAlternative(body_alt);
- }
- if (needs_counter) {
- return ActionNode::SetRegister(reg_ctr, 0, center);
- } else {
- return center;
- }
-}
-
-namespace {
-// Desugar \b to (?<=\w)(?=\W)|(?<=\W)(?=\w) and
-// \B to (?<=\w)(?=\w)|(?<=\W)(?=\W)
-RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
- RegExpNode* on_success,
- RegExpAssertion::AssertionType type,
- JSRegExp::Flags flags) {
- DCHECK(NeedsUnicodeCaseEquivalents(flags));
- Zone* zone = compiler->zone();
- ZoneList<CharacterRange>* word_range =
- new (zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::AddClassEscape('w', word_range, true, zone);
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
- ChoiceNode* result = new (zone) ChoiceNode(2, zone);
- // Add two choices. The (non-)boundary could start with a word or
- // a non-word-character.
- for (int i = 0; i < 2; i++) {
- bool lookbehind_for_word = i == 0;
- bool lookahead_for_word =
- (type == RegExpAssertion::BOUNDARY) ^ lookbehind_for_word;
- // Look to the left.
- RegExpLookaround::Builder lookbehind(lookbehind_for_word, on_success,
- stack_register, position_register);
- RegExpNode* backward = TextNode::CreateForCharacterRanges(
- zone, word_range, true, lookbehind.on_match_success(), flags);
- // Look to the right.
- RegExpLookaround::Builder lookahead(lookahead_for_word,
- lookbehind.ForMatch(backward),
- stack_register, position_register);
- RegExpNode* forward = TextNode::CreateForCharacterRanges(
- zone, word_range, false, lookahead.on_match_success(), flags);
- result->AddAlternative(GuardedAlternative(lookahead.ForMatch(forward)));
- }
- return result;
-}
-} // anonymous namespace
-
-RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- NodeInfo info;
- Zone* zone = compiler->zone();
-
- switch (assertion_type()) {
- case START_OF_LINE:
- return AssertionNode::AfterNewline(on_success);
- case START_OF_INPUT:
- return AssertionNode::AtStart(on_success);
- case BOUNDARY:
- return NeedsUnicodeCaseEquivalents(flags_)
- ? BoundaryAssertionAsLookaround(compiler, on_success, BOUNDARY,
- flags_)
- : AssertionNode::AtBoundary(on_success);
- case NON_BOUNDARY:
- return NeedsUnicodeCaseEquivalents(flags_)
- ? BoundaryAssertionAsLookaround(compiler, on_success,
- NON_BOUNDARY, flags_)
- : AssertionNode::AtNonBoundary(on_success);
- case END_OF_INPUT:
- return AssertionNode::AtEnd(on_success);
- case END_OF_LINE: {
- // Compile $ in multiline regexps as an alternation with a positive
- // lookahead in one side and an end-of-input on the other side.
- // We need two registers for the lookahead.
- int stack_pointer_register = compiler->AllocateRegister();
- int position_register = compiler->AllocateRegister();
- // The ChoiceNode to distinguish between a newline and end-of-input.
- ChoiceNode* result = new(zone) ChoiceNode(2, zone);
- // Create a newline atom.
- ZoneList<CharacterRange>* newline_ranges =
- new(zone) ZoneList<CharacterRange>(3, zone);
- CharacterRange::AddClassEscape('n', newline_ranges, false, zone);
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- RegExpCharacterClass* newline_atom =
- new (zone) RegExpCharacterClass('n', default_flags);
- TextNode* newline_matcher = new (zone) TextNode(
- newline_atom, false, ActionNode::PositiveSubmatchSuccess(
- stack_pointer_register, position_register,
- 0, // No captures inside.
- -1, // Ignored if no captures.
- on_success));
- // Create an end-of-input matcher.
- RegExpNode* end_of_line = ActionNode::BeginSubmatch(
- stack_pointer_register,
- position_register,
- newline_matcher);
- // Add the two alternatives to the ChoiceNode.
- GuardedAlternative eol_alternative(end_of_line);
- result->AddAlternative(eol_alternative);
- GuardedAlternative end_alternative(AssertionNode::AtEnd(on_success));
- result->AddAlternative(end_alternative);
- return result;
- }
- default:
- UNREACHABLE();
- }
- return on_success;
-}
-
-
-RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return new (compiler->zone())
- BackReferenceNode(RegExpCapture::StartRegister(index()),
- RegExpCapture::EndRegister(index()), flags_,
- compiler->read_backward(), on_success);
-}
-
-
-RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return on_success;
-}
-
-
-RegExpLookaround::Builder::Builder(bool is_positive, RegExpNode* on_success,
- int stack_pointer_register,
- int position_register,
- int capture_register_count,
- int capture_register_start)
- : is_positive_(is_positive),
- on_success_(on_success),
- stack_pointer_register_(stack_pointer_register),
- position_register_(position_register) {
- if (is_positive_) {
- on_match_success_ = ActionNode::PositiveSubmatchSuccess(
- stack_pointer_register, position_register, capture_register_count,
- capture_register_start, on_success_);
- } else {
- Zone* zone = on_success_->zone();
- on_match_success_ = new (zone) NegativeSubmatchSuccess(
- stack_pointer_register, position_register, capture_register_count,
- capture_register_start, zone);
- }
-}
-
-
-RegExpNode* RegExpLookaround::Builder::ForMatch(RegExpNode* match) {
- if (is_positive_) {
- return ActionNode::BeginSubmatch(stack_pointer_register_,
- position_register_, match);
- } else {
- Zone* zone = on_success_->zone();
- // We use a ChoiceNode to represent the negative lookaround. The first
- // alternative is the negative match. On success, the end node backtracks.
- // On failure, the second alternative is tried and leads to success.
- // NegativeLookaheadChoiceNode is a special ChoiceNode that ignores the
- // first exit when calculating quick checks.
- ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
- GuardedAlternative(match), GuardedAlternative(on_success_), zone);
- return ActionNode::BeginSubmatch(stack_pointer_register_,
- position_register_, choice_node);
- }
-}
-
-
-RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- int stack_pointer_register = compiler->AllocateRegister();
- int position_register = compiler->AllocateRegister();
-
- const int registers_per_capture = 2;
- const int register_of_first_capture = 2;
- int register_count = capture_count_ * registers_per_capture;
- int register_start =
- register_of_first_capture + capture_from_ * registers_per_capture;
-
- RegExpNode* result;
- bool was_reading_backward = compiler->read_backward();
- compiler->set_read_backward(type() == LOOKBEHIND);
- Builder builder(is_positive(), on_success, stack_pointer_register,
- position_register, register_count, register_start);
- RegExpNode* match = body_->ToNode(compiler, builder.on_match_success());
- result = builder.ForMatch(match);
- compiler->set_read_backward(was_reading_backward);
- return result;
-}
-
-
-RegExpNode* RegExpCapture::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- return ToNode(body(), index(), compiler, on_success);
-}
-
-
-RegExpNode* RegExpCapture::ToNode(RegExpTree* body,
- int index,
- RegExpCompiler* compiler,
- RegExpNode* on_success) {
- DCHECK_NOT_NULL(body);
- int start_reg = RegExpCapture::StartRegister(index);
- int end_reg = RegExpCapture::EndRegister(index);
- if (compiler->read_backward()) std::swap(start_reg, end_reg);
- RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
- RegExpNode* body_node = body->ToNode(compiler, store_end);
- return ActionNode::StorePosition(start_reg, true, body_node);
-}
-
-
-RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
- ZoneList<RegExpTree*>* children = nodes();
- RegExpNode* current = on_success;
- if (compiler->read_backward()) {
- for (int i = 0; i < children->length(); i++) {
- current = children->at(i)->ToNode(compiler, current);
- }
- } else {
- for (int i = children->length() - 1; i >= 0; i--) {
- current = children->at(i)->ToNode(compiler, current);
- }
- }
- return current;
-}
-
-
-static void AddClass(const int* elmv,
- int elmc,
- ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- elmc--;
- DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
- for (int i = 0; i < elmc; i += 2) {
- DCHECK(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange::Range(elmv[i], elmv[i + 1] - 1), zone);
- }
-}
-
-
-static void AddClassNegated(const int *elmv,
- int elmc,
- ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- elmc--;
- DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
- DCHECK_NE(0x0000, elmv[0]);
- DCHECK_NE(String::kMaxCodePoint, elmv[elmc - 1]);
- uc16 last = 0x0000;
- for (int i = 0; i < elmc; i += 2) {
- DCHECK(last <= elmv[i] - 1);
- DCHECK(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
- last = elmv[i + 1];
- }
- ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
-}
-
-void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
- bool add_unicode_case_equivalents,
- Zone* zone) {
- if (add_unicode_case_equivalents && (type == 'w' || type == 'W')) {
- // See #sec-runtime-semantics-wordcharacters-abstract-operation
- // In case of unicode and ignore_case, we need to create the closure over
- // case equivalent characters before negating.
- ZoneList<CharacterRange>* new_ranges =
- new (zone) ZoneList<CharacterRange>(2, zone);
- AddClass(kWordRanges, kWordRangeCount, new_ranges, zone);
- AddUnicodeCaseEquivalents(new_ranges, zone);
- if (type == 'W') {
- ZoneList<CharacterRange>* negated =
- new (zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::Negate(new_ranges, negated, zone);
- new_ranges = negated;
- }
- ranges->AddAll(*new_ranges, zone);
- return;
- }
- AddClassEscape(type, ranges, zone);
-}
-
-void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
- Zone* zone) {
- switch (type) {
- case 's':
- AddClass(kSpaceRanges, kSpaceRangeCount, ranges, zone);
- break;
- case 'S':
- AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges, zone);
- break;
- case 'w':
- AddClass(kWordRanges, kWordRangeCount, ranges, zone);
- break;
- case 'W':
- AddClassNegated(kWordRanges, kWordRangeCount, ranges, zone);
- break;
- case 'd':
- AddClass(kDigitRanges, kDigitRangeCount, ranges, zone);
- break;
- case 'D':
- AddClassNegated(kDigitRanges, kDigitRangeCount, ranges, zone);
- break;
- case '.':
- AddClassNegated(kLineTerminatorRanges,
- kLineTerminatorRangeCount,
- ranges,
- zone);
- break;
- // This is not a character range as defined by the spec but a
- // convenient shorthand for a character class that matches any
- // character.
- case '*':
- ranges->Add(CharacterRange::Everything(), zone);
- break;
- // This is the set of characters matched by the $ and ^ symbols
- // in multiline mode.
- case 'n':
- AddClass(kLineTerminatorRanges,
- kLineTerminatorRangeCount,
- ranges,
- zone);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-Vector<const int> CharacterRange::GetWordBounds() {
- return Vector<const int>(kWordRanges, kWordRangeCount - 1);
-}
-
-#ifdef V8_INTL_SUPPORT
-struct IgnoreSet {
- IgnoreSet() : set(BuildIgnoreSet()) {}
- const icu::UnicodeSet set;
-};
-
-struct SpecialAddSet {
- SpecialAddSet() : set(BuildSpecialAddSet()) {}
- const icu::UnicodeSet set;
-};
-
-icu::UnicodeSet BuildAsciiAToZSet() {
- icu::UnicodeSet set('a', 'z');
- set.add('A', 'Z');
- set.freeze();
- return set;
-}
-
-struct AsciiAToZSet {
- AsciiAToZSet() : set(BuildAsciiAToZSet()) {}
- const icu::UnicodeSet set;
-};
-
-static base::LazyInstance<IgnoreSet>::type ignore_set =
- LAZY_INSTANCE_INITIALIZER;
-
-static base::LazyInstance<SpecialAddSet>::type special_add_set =
- LAZY_INSTANCE_INITIALIZER;
-
-static base::LazyInstance<AsciiAToZSet>::type ascii_a_to_z_set =
- LAZY_INSTANCE_INITIALIZER;
-#endif // V8_INTL_SUPPORT
-
-// static
-void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
- ZoneList<CharacterRange>* ranges,
- bool is_one_byte) {
- CharacterRange::Canonicalize(ranges);
- int range_count = ranges->length();
-#ifdef V8_INTL_SUPPORT
- icu::UnicodeSet others;
- for (int i = 0; i < range_count; i++) {
- CharacterRange range = ranges->at(i);
- uc32 from = range.from();
- if (from > String::kMaxUtf16CodeUnit) continue;
- uc32 to = Min(range.to(), String::kMaxUtf16CodeUnit);
- // Nothing to be done for surrogates.
- if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
- if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (from > String::kMaxOneByteCharCode) continue;
- if (to > String::kMaxOneByteCharCode) to = String::kMaxOneByteCharCode;
- }
- others.add(from, to);
- }
-
- // Set of characters already added to ranges that do not need to be added
- // again.
- icu::UnicodeSet already_added(others);
-
- // Set of characters in ranges that are in the 52 ASCII characters [a-zA-Z].
- icu::UnicodeSet in_ascii_a_to_z(others);
- in_ascii_a_to_z.retainAll(ascii_a_to_z_set.Pointer()->set);
-
- // Remove all chars in [a-zA-Z] from others.
- others.removeAll(in_ascii_a_to_z);
-
- // Set of characters in ranges that are overlapping with special add set.
- icu::UnicodeSet in_special_add(others);
- in_special_add.retainAll(special_add_set.Pointer()->set);
-
- others.removeAll(in_special_add);
-
- // Ignore all chars in ignore set.
- others.removeAll(ignore_set.Pointer()->set);
-
- // For most of the chars in ranges that is still in others, find the case
- // equivlant set by calling closeOver(USET_CASE_INSENSITIVE).
- others.closeOver(USET_CASE_INSENSITIVE);
-
- // Because closeOver(USET_CASE_INSENSITIVE) may add ASCII [a-zA-Z] to others,
- // but ECMA262 "i" mode won't consider that, remove them from others.
- // Ex: U+017F add 'S' and 's' to others.
- others.removeAll(ascii_a_to_z_set.Pointer()->set);
-
- // Special handling for in_ascii_a_to_z.
- for (int32_t i = 0; i < in_ascii_a_to_z.getRangeCount(); i++) {
- UChar32 start = in_ascii_a_to_z.getRangeStart(i);
- UChar32 end = in_ascii_a_to_z.getRangeEnd(i);
- // Check if it is uppercase A-Z by checking bit 6.
- if (start & 0x0020) {
- // Add the lowercases
- others.add(start & 0x005F, end & 0x005F);
- } else {
- // Add the uppercases
- others.add(start | 0x0020, end | 0x0020);
- }
- }
-
- // Special handling for chars in "Special Add" set.
- for (int32_t i = 0; i < in_special_add.getRangeCount(); i++) {
- UChar32 end = in_special_add.getRangeEnd(i);
- for (UChar32 ch = in_special_add.getRangeStart(i); ch <= end; ch++) {
- // Add the uppercase of this character if itself is not an uppercase
- // character.
- // Note: The if condiction cannot be u_islower(ch) because ch could be
- // neither uppercase nor lowercase but Mn.
- if (!u_isupper(ch)) {
- others.add(u_toupper(ch));
- }
- icu::UnicodeSet candidates(ch, ch);
- candidates.closeOver(USET_CASE_INSENSITIVE);
- for (int32_t j = 0; j < candidates.getRangeCount(); j++) {
- UChar32 end2 = candidates.getRangeEnd(j);
- for (UChar32 ch2 = candidates.getRangeStart(j); ch2 <= end2; ch2++) {
- // Add character that is not uppercase to others.
- if (!u_isupper(ch2)) {
- others.add(ch2);
- }
- }
- }
- }
- }
-
- // Remove all characters which already in the ranges.
- others.removeAll(already_added);
-
- // Add others to the ranges
- for (int32_t i = 0; i < others.getRangeCount(); i++) {
- UChar32 from = others.getRangeStart(i);
- UChar32 to = others.getRangeEnd(i);
- if (from == to) {
- ranges->Add(CharacterRange::Singleton(from), zone);
- } else {
- ranges->Add(CharacterRange::Range(from, to), zone);
- }
- }
-#else
- for (int i = 0; i < range_count; i++) {
- CharacterRange range = ranges->at(i);
- uc32 bottom = range.from();
- if (bottom > String::kMaxUtf16CodeUnit) continue;
- uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
- // Nothing to be done for surrogates.
- if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
- if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (bottom > String::kMaxOneByteCharCode) continue;
- if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
- }
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- if (top == bottom) {
- // If this is a singleton we just expand the one character.
- int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
- for (int i = 0; i < length; i++) {
- uc32 chr = chars[i];
- if (chr != bottom) {
- ranges->Add(CharacterRange::Singleton(chars[i]), zone);
- }
- }
- } else {
- // If this is a range we expand the characters block by block, expanding
- // contiguous subranges (blocks) one at a time. The approach is as
- // follows. For a given start character we look up the remainder of the
- // block that contains it (represented by the end point), for instance we
- // find 'z' if the character is 'c'. A block is characterized by the
- // property that all characters uncanonicalize in the same way, except
- // that each entry in the result is incremented by the distance from the
- // first element. So a-z is a block because 'a' uncanonicalizes to ['a',
- // 'A'] and the k'th letter uncanonicalizes to ['a' + k, 'A' + k]. Once
- // we've found the end point we look up its uncanonicalization and
- // produce a range for each element. For instance for [c-f] we look up
- // ['z', 'Z'] and produce [c-f] and [C-F]. We then only add a range if
- // it is not already contained in the input, so [c-f] will be skipped but
- // [C-F] will be added. If this range is not completely contained in a
- // block we do this for all the blocks covered by the range (handling
- // characters that is not in a block as a "singleton block").
- unibrow::uchar equivalents[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int pos = bottom;
- while (pos <= top) {
- int length =
- isolate->jsregexp_canonrange()->get(pos, '\0', equivalents);
- uc32 block_end;
- if (length == 0) {
- block_end = pos;
- } else {
- DCHECK_EQ(1, length);
- block_end = equivalents[0];
- }
- int end = (block_end > top) ? top : block_end;
- length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0',
- equivalents);
- for (int i = 0; i < length; i++) {
- uc32 c = equivalents[i];
- uc32 range_from = c - (block_end - pos);
- uc32 range_to = c - (block_end - end);
- if (!(bottom <= range_from && range_to <= top)) {
- ranges->Add(CharacterRange::Range(range_from, range_to), zone);
- }
- }
- pos = end + 1;
- }
- }
- }
-#endif // V8_INTL_SUPPORT
-}
-
-bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
- DCHECK_NOT_NULL(ranges);
- int n = ranges->length();
- if (n <= 1) return true;
- int max = ranges->at(0).to();
- for (int i = 1; i < n; i++) {
- CharacterRange next_range = ranges->at(i);
- if (next_range.from() <= max + 1) return false;
- max = next_range.to();
- }
- return true;
-}
-
-
-ZoneList<CharacterRange>* CharacterSet::ranges(Zone* zone) {
- if (ranges_ == nullptr) {
- ranges_ = new(zone) ZoneList<CharacterRange>(2, zone);
- CharacterRange::AddClassEscape(standard_set_type_, ranges_, false, zone);
- }
- return ranges_;
-}
-
-
-// Move a number of elements in a zonelist to another position
-// in the same list. Handles overlapping source and target areas.
-static void MoveRanges(ZoneList<CharacterRange>* list,
- int from,
- int to,
- int count) {
- // Ranges are potentially overlapping.
- if (from < to) {
- for (int i = count - 1; i >= 0; i--) {
- list->at(to + i) = list->at(from + i);
- }
- } else {
- for (int i = 0; i < count; i++) {
- list->at(to + i) = list->at(from + i);
- }
- }
-}
-
-
-static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
- int count,
- CharacterRange insert) {
- // Inserts a range into list[0..count[, which must be sorted
- // by from value and non-overlapping and non-adjacent, using at most
- // list[0..count] for the result. Returns the number of resulting
- // canonicalized ranges. Inserting a range may collapse existing ranges into
- // fewer ranges, so the return value can be anything in the range 1..count+1.
- uc32 from = insert.from();
- uc32 to = insert.to();
- int start_pos = 0;
- int end_pos = count;
- for (int i = count - 1; i >= 0; i--) {
- CharacterRange current = list->at(i);
- if (current.from() > to + 1) {
- end_pos = i;
- } else if (current.to() + 1 < from) {
- start_pos = i + 1;
- break;
- }
- }
-
- // Inserted range overlaps, or is adjacent to, ranges at positions
- // [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
- // not affected by the insertion.
- // If start_pos == end_pos, the range must be inserted before start_pos.
- // if start_pos < end_pos, the entire range from start_pos to end_pos
- // must be merged with the insert range.
-
- if (start_pos == end_pos) {
- // Insert between existing ranges at position start_pos.
- if (start_pos < count) {
- MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
- }
- list->at(start_pos) = insert;
- return count + 1;
- }
- if (start_pos + 1 == end_pos) {
- // Replace single existing range at position start_pos.
- CharacterRange to_replace = list->at(start_pos);
- int new_from = Min(to_replace.from(), from);
- int new_to = Max(to_replace.to(), to);
- list->at(start_pos) = CharacterRange::Range(new_from, new_to);
- return count;
- }
- // Replace a number of existing ranges from start_pos to end_pos - 1.
- // Move the remaining ranges down.
-
- int new_from = Min(list->at(start_pos).from(), from);
- int new_to = Max(list->at(end_pos - 1).to(), to);
- if (end_pos < count) {
- MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
- }
- list->at(start_pos) = CharacterRange::Range(new_from, new_to);
- return count - (end_pos - start_pos) + 1;
-}
-
-
-void CharacterSet::Canonicalize() {
- // Special/default classes are always considered canonical. The result
- // of calling ranges() will be sorted.
- if (ranges_ == nullptr) return;
- CharacterRange::Canonicalize(ranges_);
-}
-
-
-void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
- if (character_ranges->length() <= 1) return;
- // Check whether ranges are already canonical (increasing, non-overlapping,
- // non-adjacent).
- int n = character_ranges->length();
- int max = character_ranges->at(0).to();
- int i = 1;
- while (i < n) {
- CharacterRange current = character_ranges->at(i);
- if (current.from() <= max + 1) {
- break;
- }
- max = current.to();
- i++;
- }
- // Canonical until the i'th range. If that's all of them, we are done.
- if (i == n) return;
-
- // The ranges at index i and forward are not canonicalized. Make them so by
- // doing the equivalent of insertion sort (inserting each into the previous
- // list, in order).
- // Notice that inserting a range can reduce the number of ranges in the
- // result due to combining of adjacent and overlapping ranges.
- int read = i; // Range to insert.
- int num_canonical = i; // Length of canonicalized part of list.
- do {
- num_canonical = InsertRangeInCanonicalList(character_ranges,
- num_canonical,
- character_ranges->at(read));
- read++;
- } while (read < n);
- character_ranges->Rewind(num_canonical);
-
- DCHECK(CharacterRange::IsCanonical(character_ranges));
-}
-
-
-void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
- ZoneList<CharacterRange>* negated_ranges,
- Zone* zone) {
- DCHECK(CharacterRange::IsCanonical(ranges));
- DCHECK_EQ(0, negated_ranges->length());
- int range_count = ranges->length();
- uc32 from = 0;
- int i = 0;
- if (range_count > 0 && ranges->at(0).from() == 0) {
- from = ranges->at(0).to() + 1;
- i = 1;
- }
- while (i < range_count) {
- CharacterRange range = ranges->at(i);
- negated_ranges->Add(CharacterRange::Range(from, range.from() - 1), zone);
- from = range.to() + 1;
- i++;
- }
- if (from < String::kMaxCodePoint) {
- negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
- zone);
- }
-}
-
-
-// -------------------------------------------------------------------
-// Splay tree
-
-
-OutSet* OutSet::Extend(unsigned value, Zone* zone) {
- if (Get(value))
- return this;
- if (successors(zone) != nullptr) {
- for (int i = 0; i < successors(zone)->length(); i++) {
- OutSet* successor = successors(zone)->at(i);
- if (successor->Get(value))
- return successor;
- }
- } else {
- successors_ = new(zone) ZoneList<OutSet*>(2, zone);
- }
- OutSet* result = new(zone) OutSet(first_, remaining_);
- result->Set(value, zone);
- successors(zone)->Add(result, zone);
- return result;
-}
-
-
-void OutSet::Set(unsigned value, Zone *zone) {
- if (value < kFirstLimit) {
- first_ |= (1 << value);
- } else {
- if (remaining_ == nullptr)
- remaining_ = new(zone) ZoneList<unsigned>(1, zone);
- if (remaining_->is_empty() || !remaining_->Contains(value))
- remaining_->Add(value, zone);
- }
-}
-
-
-bool OutSet::Get(unsigned value) const {
- if (value < kFirstLimit) {
- return (first_ & (1 << value)) != 0;
- } else if (remaining_ == nullptr) {
- return false;
- } else {
- return remaining_->Contains(value);
- }
-}
-
-
-const uc32 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
-
-
-void DispatchTable::AddRange(CharacterRange full_range, int value,
- Zone* zone) {
- CharacterRange current = full_range;
- if (tree()->is_empty()) {
- // If this is the first range we just insert into the table.
- ZoneSplayTree<Config>::Locator loc;
- bool inserted = tree()->Insert(current.from(), &loc);
- DCHECK(inserted);
- USE(inserted);
- loc.set_value(Entry(current.from(), current.to(),
- empty()->Extend(value, zone)));
- return;
- }
- // First see if there is a range to the left of this one that
- // overlaps.
- ZoneSplayTree<Config>::Locator loc;
- if (tree()->FindGreatestLessThan(current.from(), &loc)) {
- Entry* entry = &loc.value();
- // If we've found a range that overlaps with this one, and it
- // starts strictly to the left of this one, we have to fix it
- // because the following code only handles ranges that start on
- // or after the start point of the range we're adding.
- if (entry->from() < current.from() && entry->to() >= current.from()) {
- // Snap the overlapping range in half around the start point of
- // the range we're adding.
- CharacterRange left =
- CharacterRange::Range(entry->from(), current.from() - 1);
- CharacterRange right = CharacterRange::Range(current.from(), entry->to());
- // The left part of the overlapping range doesn't overlap.
- // Truncate the whole entry to be just the left part.
- entry->set_to(left.to());
- // The right part is the one that overlaps. We add this part
- // to the map and let the next step deal with merging it with
- // the range we're adding.
- ZoneSplayTree<Config>::Locator loc;
- bool inserted = tree()->Insert(right.from(), &loc);
- DCHECK(inserted);
- USE(inserted);
- loc.set_value(Entry(right.from(),
- right.to(),
- entry->out_set()));
- }
- }
- while (current.is_valid()) {
- if (tree()->FindLeastGreaterThan(current.from(), &loc) &&
- (loc.value().from() <= current.to()) &&
- (loc.value().to() >= current.from())) {
- Entry* entry = &loc.value();
- // We have overlap. If there is space between the start point of
- // the range we're adding and where the overlapping range starts
- // then we have to add a range covering just that space.
- if (current.from() < entry->from()) {
- ZoneSplayTree<Config>::Locator ins;
- bool inserted = tree()->Insert(current.from(), &ins);
- DCHECK(inserted);
- USE(inserted);
- ins.set_value(Entry(current.from(),
- entry->from() - 1,
- empty()->Extend(value, zone)));
- current.set_from(entry->from());
- }
- DCHECK_EQ(current.from(), entry->from());
- // If the overlapping range extends beyond the one we want to add
- // we have to snap the right part off and add it separately.
- if (entry->to() > current.to()) {
- ZoneSplayTree<Config>::Locator ins;
- bool inserted = tree()->Insert(current.to() + 1, &ins);
- DCHECK(inserted);
- USE(inserted);
- ins.set_value(Entry(current.to() + 1,
- entry->to(),
- entry->out_set()));
- entry->set_to(current.to());
- }
- DCHECK(entry->to() <= current.to());
- // The overlapping range is now completely contained by the range
- // we're adding so we can just update it and move the start point
- // of the range we're adding just past it.
- entry->AddValue(value, zone);
- DCHECK(entry->to() + 1 > current.from());
- current.set_from(entry->to() + 1);
- } else {
- // There is no overlap so we can just add the range
- ZoneSplayTree<Config>::Locator ins;
- bool inserted = tree()->Insert(current.from(), &ins);
- DCHECK(inserted);
- USE(inserted);
- ins.set_value(Entry(current.from(),
- current.to(),
- empty()->Extend(value, zone)));
- break;
- }
- }
-}
-
-
-OutSet* DispatchTable::Get(uc32 value) {
- ZoneSplayTree<Config>::Locator loc;
- if (!tree()->FindGreatestLessThan(value, &loc))
- return empty();
- Entry* entry = &loc.value();
- if (value <= entry->to())
- return entry->out_set();
- else
- return empty();
-}
-
-
-// -------------------------------------------------------------------
-// Analysis
-
-
-void Analysis::EnsureAnalyzed(RegExpNode* that) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
- fail("Stack overflow");
- return;
- }
- if (that->info()->been_analyzed || that->info()->being_analyzed)
- return;
- that->info()->being_analyzed = true;
- that->Accept(this);
- that->info()->being_analyzed = false;
- that->info()->been_analyzed = true;
-}
-
-
-void Analysis::VisitEnd(EndNode* that) {
- // nothing to do
-}
-
-
-void TextNode::CalculateOffsets() {
- int element_count = elements()->length();
- // Set up the offsets of the elements relative to the start. This is a fixed
- // quantity since a TextNode can only contain fixed-width things.
- int cp_offset = 0;
- for (int i = 0; i < element_count; i++) {
- TextElement& elm = elements()->at(i);
- elm.set_cp_offset(cp_offset);
- cp_offset += elm.length();
- }
-}
-
-
-void Analysis::VisitText(TextNode* that) {
- that->MakeCaseIndependent(isolate(), is_one_byte_);
- EnsureAnalyzed(that->on_success());
- if (!has_failed()) {
- that->CalculateOffsets();
- }
-}
-
-
-void Analysis::VisitAction(ActionNode* that) {
- RegExpNode* target = that->on_success();
- EnsureAnalyzed(target);
- if (!has_failed()) {
- // If the next node is interested in what it follows then this node
- // has to be interested too so it can pass the information on.
- that->info()->AddFromFollowing(target->info());
- }
-}
-
-
-void Analysis::VisitChoice(ChoiceNode* that) {
- NodeInfo* info = that->info();
- for (int i = 0; i < that->alternatives()->length(); i++) {
- RegExpNode* node = that->alternatives()->at(i).node();
- EnsureAnalyzed(node);
- if (has_failed()) return;
- // Anything the following nodes need to know has to be known by
- // this node also, so it can pass it on.
- info->AddFromFollowing(node->info());
- }
-}
-
-
-void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
- NodeInfo* info = that->info();
- for (int i = 0; i < that->alternatives()->length(); i++) {
- RegExpNode* node = that->alternatives()->at(i).node();
- if (node != that->loop_node()) {
- EnsureAnalyzed(node);
- if (has_failed()) return;
- info->AddFromFollowing(node->info());
- }
- }
- // Check the loop last since it may need the value of this node
- // to get a correct result.
- EnsureAnalyzed(that->loop_node());
- if (!has_failed()) {
- info->AddFromFollowing(that->loop_node()->info());
- }
-}
-
-
-void Analysis::VisitBackReference(BackReferenceNode* that) {
- EnsureAnalyzed(that->on_success());
-}
-
-
-void Analysis::VisitAssertion(AssertionNode* that) {
- EnsureAnalyzed(that->on_success());
-}
-
-
-void BackReferenceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm,
- bool not_at_start) {
- // Working out the set of characters that a backreference can match is too
- // hard, so we just say that any character can match.
- bm->SetRest(offset);
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
- RegExpMacroAssembler::kTableSize);
-
-
-void ChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- ZoneList<GuardedAlternative>* alts = alternatives();
- budget = (budget - 1) / alts->length();
- for (int i = 0; i < alts->length(); i++) {
- GuardedAlternative& alt = alts->at(i);
- if (alt.guards() != nullptr && alt.guards()->length() != 0) {
- bm->SetRest(offset); // Give up trying to fill in info.
- SaveBMInfo(bm, not_at_start, offset);
- return;
- }
- alt.node()->FillInBMInfo(isolate, offset, budget, bm, not_at_start);
- }
- SaveBMInfo(bm, not_at_start, offset);
-}
-
-
-void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- if (initial_offset >= bm->length()) return;
- int offset = initial_offset;
- int max_char = bm->max_char();
- for (int i = 0; i < elements()->length(); i++) {
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- TextElement text = elements()->at(i);
- if (text.text_type() == TextElement::ATOM) {
- RegExpAtom* atom = text.atom();
- for (int j = 0; j < atom->length(); j++, offset++) {
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- uc16 character = atom->data()[j];
- if (IgnoreCase(atom->flags())) {
- unibrow::uchar chars[4];
- int length = GetCaseIndependentLetters(
- isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
- chars, 4);
- for (int j = 0; j < length; j++) {
- bm->Set(offset, chars[j]);
- }
- } else {
- if (character <= max_char) bm->Set(offset, character);
- }
- }
- } else {
- DCHECK_EQ(TextElement::CHAR_CLASS, text.text_type());
- RegExpCharacterClass* char_class = text.char_class();
- ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
- if (char_class->is_negated()) {
- bm->SetAll(offset);
- } else {
- for (int k = 0; k < ranges->length(); k++) {
- CharacterRange& range = ranges->at(k);
- if (range.from() > max_char) continue;
- int to = Min(max_char, static_cast<int>(range.to()));
- bm->SetInterval(offset, Interval(range.from(), to));
- }
- }
- offset++;
- }
- }
- if (offset >= bm->length()) {
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
- return;
- }
- on_success()->FillInBMInfo(isolate, offset, budget - 1, bm,
- true); // Not at start after a text node.
- if (initial_offset == 0) set_bm_info(not_at_start, bm);
-}
-
-
-// -------------------------------------------------------------------
-// Dispatch table construction
-
-
-void DispatchTableConstructor::VisitEnd(EndNode* that) {
- AddRange(CharacterRange::Everything());
-}
-
-
-void DispatchTableConstructor::BuildTable(ChoiceNode* node) {
- node->set_being_calculated(true);
- ZoneList<GuardedAlternative>* alternatives = node->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- set_choice_index(i);
- alternatives->at(i).node()->Accept(this);
- }
- node->set_being_calculated(false);
-}
-
-
-class AddDispatchRange {
- public:
- explicit AddDispatchRange(DispatchTableConstructor* constructor)
- : constructor_(constructor) { }
- void Call(uc32 from, DispatchTable::Entry entry);
- private:
- DispatchTableConstructor* constructor_;
-};
-
-
-void AddDispatchRange::Call(uc32 from, DispatchTable::Entry entry) {
- constructor_->AddRange(CharacterRange::Range(from, entry.to()));
-}
-
-
-void DispatchTableConstructor::VisitChoice(ChoiceNode* node) {
- if (node->being_calculated())
- return;
- DispatchTable* table = node->GetTable(ignore_case_);
- AddDispatchRange adder(this);
- table->ForEach(&adder);
-}
-
-
-void DispatchTableConstructor::VisitBackReference(BackReferenceNode* that) {
- // TODO(160): Find the node that we refer back to and propagate its start
- // set back to here. For now we just accept anything.
- AddRange(CharacterRange::Everything());
-}
-
-
-void DispatchTableConstructor::VisitAssertion(AssertionNode* that) {
- RegExpNode* target = that->on_success();
- target->Accept(this);
-}
-
-
-static int CompareRangeByFrom(const CharacterRange* a,
- const CharacterRange* b) {
- return Compare<uc16>(a->from(), b->from());
-}
-
-
-void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
- ranges->Sort(CompareRangeByFrom);
- uc16 last = 0;
- for (int i = 0; i < ranges->length(); i++) {
- CharacterRange range = ranges->at(i);
- if (last < range.from())
- AddRange(CharacterRange::Range(last, range.from() - 1));
- if (range.to() >= last) {
- if (range.to() == String::kMaxCodePoint) {
- return;
- } else {
- last = range.to() + 1;
- }
- }
- }
- AddRange(CharacterRange::Range(last, String::kMaxCodePoint));
-}
-
-
-void DispatchTableConstructor::VisitText(TextNode* that) {
- TextElement elm = that->elements()->at(0);
- switch (elm.text_type()) {
- case TextElement::ATOM: {
- uc16 c = elm.atom()->data()[0];
- AddRange(CharacterRange::Range(c, c));
- break;
- }
- case TextElement::CHAR_CLASS: {
- RegExpCharacterClass* tree = elm.char_class();
- ZoneList<CharacterRange>* ranges = tree->ranges(that->zone());
- if (tree->is_negated()) {
- AddInverse(ranges);
- } else {
- for (int i = 0; i < ranges->length(); i++)
- AddRange(ranges->at(i));
- }
- break;
- }
- default: {
- UNIMPLEMENTED();
- }
- }
-}
-
-
-void DispatchTableConstructor::VisitAction(ActionNode* that) {
- RegExpNode* target = that->on_success();
- target->Accept(this);
-}
-
-RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
- RegExpNode* on_success,
- JSRegExp::Flags flags) {
- // If the regexp matching starts within a surrogate pair, step back
- // to the lead surrogate and start matching from there.
- DCHECK(!compiler->read_backward());
- Zone* zone = compiler->zone();
- ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
- ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
- zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
-
- ChoiceNode* optional_step_back = new (zone) ChoiceNode(2, zone);
-
- int stack_register = compiler->UnicodeLookaroundStackRegister();
- int position_register = compiler->UnicodeLookaroundPositionRegister();
- RegExpNode* step_back = TextNode::CreateForCharacterRanges(
- zone, lead_surrogates, true, on_success, flags);
- RegExpLookaround::Builder builder(true, step_back, stack_register,
- position_register);
- RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
- zone, trail_surrogates, false, builder.on_match_success(), flags);
-
- optional_step_back->AddAlternative(
- GuardedAlternative(builder.ForMatch(match_trail)));
- optional_step_back->AddAlternative(GuardedAlternative(on_success));
-
- return optional_step_back;
-}
-
-
-RegExpEngine::CompilationResult RegExpEngine::Compile(
- Isolate* isolate, Zone* zone, RegExpCompileData* data,
- JSRegExp::Flags flags, Handle<String> pattern,
- Handle<String> sample_subject, bool is_one_byte) {
- if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
- return IrregexpRegExpTooBig(isolate);
- }
- bool is_sticky = IsSticky(flags);
- bool is_global = IsGlobal(flags);
- bool is_unicode = IsUnicode(flags);
- RegExpCompiler compiler(isolate, zone, data->capture_count, is_one_byte);
-
- if (compiler.optimize())
- compiler.set_optimize(!TooMuchRegExpCode(isolate, pattern));
-
- // Sample some characters from the middle of the string.
- static const int kSampleSize = 128;
-
- sample_subject = String::Flatten(isolate, sample_subject);
- int chars_sampled = 0;
- int half_way = (sample_subject->length() - kSampleSize) / 2;
- for (int i = Max(0, half_way);
- i < sample_subject->length() && chars_sampled < kSampleSize;
- i++, chars_sampled++) {
- compiler.frequency_collator()->CountCharacter(sample_subject->Get(i));
- }
-
- // Wrap the body of the regexp in capture #0.
- RegExpNode* captured_body = RegExpCapture::ToNode(data->tree,
- 0,
- &compiler,
- compiler.accept());
- RegExpNode* node = captured_body;
- bool is_end_anchored = data->tree->IsAnchoredAtEnd();
- bool is_start_anchored = data->tree->IsAnchoredAtStart();
- int max_length = data->tree->max_match();
- if (!is_start_anchored && !is_sticky) {
- // Add a .*? at the beginning, outside the body capture, unless
- // this expression is anchored at the beginning or sticky.
- JSRegExp::Flags default_flags = JSRegExp::Flags();
- RegExpNode* loop_node = RegExpQuantifier::ToNode(
- 0, RegExpTree::kInfinity, false,
- new (zone) RegExpCharacterClass('*', default_flags), &compiler,
- captured_body, data->contains_anchor);
-
- if (data->contains_anchor) {
- // Unroll loop once, to take care of the case that might start
- // at the start of input.
- ChoiceNode* first_step_node = new(zone) ChoiceNode(2, zone);
- first_step_node->AddAlternative(GuardedAlternative(captured_body));
- first_step_node->AddAlternative(GuardedAlternative(new (zone) TextNode(
- new (zone) RegExpCharacterClass('*', default_flags), false,
- loop_node)));
- node = first_step_node;
- } else {
- node = loop_node;
- }
- }
- if (is_one_byte) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
- // Do it again to propagate the new nodes to places where they were not
- // put because they had not been calculated yet.
- if (node != nullptr) {
- node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
- }
- } else if (is_unicode && (is_global || is_sticky)) {
- node = OptionallyStepBackToLeadSurrogate(&compiler, node, flags);
- }
-
- if (node == nullptr) node = new (zone) EndNode(EndNode::BACKTRACK, zone);
- data->node = node;
- Analysis analysis(isolate, is_one_byte);
- analysis.EnsureAnalyzed(node);
- if (analysis.has_failed()) {
- const char* error_message = analysis.error_message();
- return CompilationResult(isolate, error_message);
- }
-
- // Create the correct assembler for the architecture.
- std::unique_ptr<RegExpMacroAssembler> macro_assembler;
- if (!FLAG_regexp_interpret_all) {
- // Native regexp implementation.
- DCHECK(!FLAG_jitless);
-
- NativeRegExpMacroAssembler::Mode mode =
- is_one_byte ? NativeRegExpMacroAssembler::LATIN1
- : NativeRegExpMacroAssembler::UC16;
-
-#if V8_TARGET_ARCH_IA32
- macro_assembler.reset(new RegExpMacroAssemblerIA32(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_X64
- macro_assembler.reset(new RegExpMacroAssemblerX64(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_ARM
- macro_assembler.reset(new RegExpMacroAssemblerARM(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_ARM64
- macro_assembler.reset(new RegExpMacroAssemblerARM64(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_S390
- macro_assembler.reset(new RegExpMacroAssemblerS390(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_PPC
- macro_assembler.reset(new RegExpMacroAssemblerPPC(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_MIPS
- macro_assembler.reset(new RegExpMacroAssemblerMIPS(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#elif V8_TARGET_ARCH_MIPS64
- macro_assembler.reset(new RegExpMacroAssemblerMIPS(
- isolate, zone, mode, (data->capture_count + 1) * 2));
-#else
-#error "Unsupported architecture"
-#endif
- } else {
- DCHECK(FLAG_regexp_interpret_all);
-
- // Interpreted regexp implementation.
- macro_assembler.reset(new RegExpMacroAssemblerIrregexp(isolate, zone));
- }
-
- macro_assembler->set_slow_safe(TooMuchRegExpCode(isolate, pattern));
-
- // Inserted here, instead of in Assembler, because it depends on information
- // in the AST that isn't replicated in the Node structure.
- static const int kMaxBacksearchLimit = 1024;
- if (is_end_anchored && !is_start_anchored && !is_sticky &&
- max_length < kMaxBacksearchLimit) {
- macro_assembler->SetCurrentPositionFromEnd(max_length);
- }
-
- if (is_global) {
- RegExpMacroAssembler::GlobalMode mode = RegExpMacroAssembler::GLOBAL;
- if (data->tree->min_match() > 0) {
- mode = RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK;
- } else if (is_unicode) {
- mode = RegExpMacroAssembler::GLOBAL_UNICODE;
- }
- macro_assembler->set_global_mode(mode);
- }
-
- return compiler.Assemble(isolate, macro_assembler.get(), node,
- data->capture_count, pattern);
-}
-
-bool RegExpEngine::TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
- Heap* heap = isolate->heap();
- bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
- if (isolate->total_regexp_code_generated() >
- RegExpImpl::kRegExpCompiledLimit &&
- heap->CommittedMemoryExecutable() >
- RegExpImpl::kRegExpExecutableMemoryLimit) {
- too_much = true;
- }
- return too_much;
-}
-
-Object RegExpResultsCache::Lookup(Heap* heap, String key_string,
- Object key_pattern,
- FixedArray* last_match_cache,
- ResultsCacheType type) {
- FixedArray cache;
- if (!key_string.IsInternalizedString()) return Smi::kZero;
- if (type == STRING_SPLIT_SUBSTRINGS) {
- DCHECK(key_pattern.IsString());
- if (!key_pattern.IsInternalizedString()) return Smi::kZero;
- cache = heap->string_split_cache();
- } else {
- DCHECK(type == REGEXP_MULTIPLE_INDICES);
- DCHECK(key_pattern.IsFixedArray());
- cache = heap->regexp_multiple_cache();
- }
-
- uint32_t hash = key_string.Hash();
- uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
- if (cache.get(index + kStringOffset) != key_string ||
- cache.get(index + kPatternOffset) != key_pattern) {
- index =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache.get(index + kStringOffset) != key_string ||
- cache.get(index + kPatternOffset) != key_pattern) {
- return Smi::kZero;
- }
- }
-
- *last_match_cache = FixedArray::cast(cache.get(index + kLastMatchOffset));
- return cache.get(index + kArrayOffset);
-}
-
-void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
- Handle<Object> key_pattern,
- Handle<FixedArray> value_array,
- Handle<FixedArray> last_match_cache,
- ResultsCacheType type) {
- Factory* factory = isolate->factory();
- Handle<FixedArray> cache;
- if (!key_string->IsInternalizedString()) return;
- if (type == STRING_SPLIT_SUBSTRINGS) {
- DCHECK(key_pattern->IsString());
- if (!key_pattern->IsInternalizedString()) return;
- cache = factory->string_split_cache();
- } else {
- DCHECK(type == REGEXP_MULTIPLE_INDICES);
- DCHECK(key_pattern->IsFixedArray());
- cache = factory->regexp_multiple_cache();
- }
-
- uint32_t hash = key_string->Hash();
- uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == Smi::kZero) {
- cache->set(index + kStringOffset, *key_string);
- cache->set(index + kPatternOffset, *key_pattern);
- cache->set(index + kArrayOffset, *value_array);
- cache->set(index + kLastMatchOffset, *last_match_cache);
- } else {
- uint32_t index2 =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index2 + kStringOffset) == Smi::kZero) {
- cache->set(index2 + kStringOffset, *key_string);
- cache->set(index2 + kPatternOffset, *key_pattern);
- cache->set(index2 + kArrayOffset, *value_array);
- cache->set(index2 + kLastMatchOffset, *last_match_cache);
- } else {
- cache->set(index2 + kStringOffset, Smi::kZero);
- cache->set(index2 + kPatternOffset, Smi::kZero);
- cache->set(index2 + kArrayOffset, Smi::kZero);
- cache->set(index2 + kLastMatchOffset, Smi::kZero);
- cache->set(index + kStringOffset, *key_string);
- cache->set(index + kPatternOffset, *key_pattern);
- cache->set(index + kArrayOffset, *value_array);
- cache->set(index + kLastMatchOffset, *last_match_cache);
- }
- }
- // If the array is a reasonably short list of substrings, convert it into a
- // list of internalized strings.
- if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
- for (int i = 0; i < value_array->length(); i++) {
- Handle<String> str(String::cast(value_array->get(i)), isolate);
- Handle<String> internalized_str = factory->InternalizeString(str);
- value_array->set(i, *internalized_str);
- }
- }
- // Convert backing store to a copy-on-write array.
- value_array->set_map_no_write_barrier(
- ReadOnlyRoots(isolate).fixed_cow_array_map());
-}
-
-void RegExpResultsCache::Clear(FixedArray cache) {
- for (int i = 0; i < kRegExpResultsCacheSize; i++) {
- cache.set(i, Smi::kZero);
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
deleted file mode 100644
index 832c7e3aa5..0000000000
--- a/deps/v8/src/regexp/jsregexp.h
+++ /dev/null
@@ -1,1548 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_REGEXP_JSREGEXP_H_
-#define V8_REGEXP_JSREGEXP_H_
-
-#include "src/execution/isolate.h"
-#include "src/objects/js-regexp.h"
-#include "src/regexp/regexp-ast.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/utils/allocation.h"
-#include "src/zone/zone-splay-tree.h"
-
-namespace v8 {
-namespace internal {
-
-class NodeVisitor;
-class RegExpCompiler;
-class RegExpMacroAssembler;
-class RegExpNode;
-class RegExpTree;
-class BoyerMooreLookahead;
-
-inline bool IgnoreCase(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kIgnoreCase) != 0;
-}
-
-inline bool IsUnicode(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kUnicode) != 0;
-}
-
-inline bool IsSticky(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kSticky) != 0;
-}
-
-inline bool IsGlobal(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kGlobal) != 0;
-}
-
-inline bool DotAll(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kDotAll) != 0;
-}
-
-inline bool Multiline(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kMultiline) != 0;
-}
-
-inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
- // Both unicode and ignore_case flags are set. We need to use ICU to find
- // the closure over case equivalents.
- return IsUnicode(flags) && IgnoreCase(flags);
-}
-
-class RegExpImpl {
- public:
- // Whether the irregexp engine generates native code or interpreter bytecode.
- static bool UsesNativeRegExp() { return !FLAG_regexp_interpret_all; }
-
- // Returns a string representation of a regular expression.
- // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
- // This function calls the garbage collector if necessary.
- static Handle<String> ToString(Handle<Object> value);
-
- // Parses the RegExp pattern and prepares the JSRegExp object with
- // generic data and choice of implementation - as well as what
- // the implementation wants to store in the data field.
- // Returns false if compilation fails.
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
- Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
- JSRegExp::Flags flags);
-
- // See ECMA-262 section 15.10.6.2.
- // This function calls the garbage collector if necessary.
- V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Exec(
- Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int index, Handle<RegExpMatchInfo> last_match_info);
-
- // Prepares a JSRegExp object with Irregexp-specific data.
- static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
- int capture_register_count);
-
- static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
- Handle<String> match_pattern);
-
- static int AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index, int32_t* output,
- int output_size);
-
- static Handle<Object> AtomExec(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index,
- Handle<RegExpMatchInfo> last_match_info);
-
- enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
-
- // Prepare a RegExp for being executed one or more times (using
- // IrregexpExecOnce) on the subject.
- // This ensures that the regexp is compiled for the subject, and that
- // the subject is flat.
- // Returns the number of integer spaces required by IrregexpExecOnce
- // as its "registers" argument. If the regexp cannot be compiled,
- // an exception is set as pending, and this function returns negative.
- static int IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject);
-
- // Execute a regular expression on the subject, starting from index.
- // If matching succeeds, return the number of matches. This can be larger
- // than one in the case of global regular expressions.
- // The captures and subcaptures are stored into the registers vector.
- // If matching fails, returns RE_FAILURE.
- // If execution fails, sets a pending exception and returns RE_EXCEPTION.
- static int IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> subject, int index, int32_t* output,
- int output_size);
-
- // Execute an Irregexp bytecode pattern.
- // On a successful match, the result is a JSArray containing
- // captured positions. On a failure, the result is the null value.
- // Returns an empty handle in case of an exception.
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> IrregexpExec(
- Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
- int index, Handle<RegExpMatchInfo> last_match_info);
-
- // Set last match info. If match is nullptr, then setting captures is
- // omitted.
- static Handle<RegExpMatchInfo> SetLastMatchInfo(
- Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
- Handle<String> subject, int capture_count, int32_t* match);
-
- class GlobalCache {
- public:
- GlobalCache(Handle<JSRegExp> regexp,
- Handle<String> subject,
- Isolate* isolate);
-
- V8_INLINE ~GlobalCache();
-
- // Fetch the next entry in the cache for global regexp match results.
- // This does not set the last match info. Upon failure, nullptr is
- // returned. The cause can be checked with Result(). The previous result is
- // still in available in memory when a failure happens.
- V8_INLINE int32_t* FetchNext();
-
- V8_INLINE int32_t* LastSuccessfulMatch();
-
- V8_INLINE bool HasException() { return num_matches_ < 0; }
-
- private:
- int AdvanceZeroLength(int last_index);
-
- int num_matches_;
- int max_matches_;
- int current_match_index_;
- int registers_per_match_;
- // Pointer to the last set of captures.
- int32_t* register_array_;
- int register_array_size_;
- Handle<JSRegExp> regexp_;
- Handle<String> subject_;
- Isolate* isolate_;
- };
-
- // For acting on the JSRegExp data FixedArray.
- static int IrregexpMaxRegisterCount(FixedArray re);
- static void SetIrregexpMaxRegisterCount(FixedArray re, int value);
- static void SetIrregexpCaptureNameMap(FixedArray re,
- Handle<FixedArray> value);
- static int IrregexpNumberOfCaptures(FixedArray re);
- static int IrregexpNumberOfRegisters(FixedArray re);
- static ByteArray IrregexpByteCode(FixedArray re, bool is_one_byte);
- static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
-
- // Limit the space regexps take up on the heap. In order to limit this we
- // would like to keep track of the amount of regexp code on the heap. This
- // is not tracked, however. As a conservative approximation we track the
- // total regexp code compiled including code that has subsequently been freed
- // and the total executable memory at any point.
- static const size_t kRegExpExecutableMemoryLimit = 16 * MB;
- static const size_t kRegExpCompiledLimit = 1 * MB;
- static const int kRegExpTooLargeToOptimize = 20 * KB;
-
- private:
- static bool CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> sample_subject, bool is_one_byte);
- static inline bool EnsureCompiledIrregexp(Isolate* isolate,
- Handle<JSRegExp> re,
- Handle<String> sample_subject,
- bool is_one_byte);
-};
-
-
-// Represents the location of one element relative to the intersection of
-// two sets. Corresponds to the four areas of a Venn diagram.
-enum ElementInSetsRelation {
- kInsideNone = 0,
- kInsideFirst = 1,
- kInsideSecond = 2,
- kInsideBoth = 3
-};
-
-
-// A set of unsigned integers that behaves especially well on small
-// integers (< 32). May do zone-allocation.
-class OutSet: public ZoneObject {
- public:
- OutSet() : first_(0), remaining_(nullptr), successors_(nullptr) {}
- OutSet* Extend(unsigned value, Zone* zone);
- V8_EXPORT_PRIVATE bool Get(unsigned value) const;
- static const unsigned kFirstLimit = 32;
-
- private:
- // Destructively set a value in this set. In most cases you want
- // to use Extend instead to ensure that only one instance exists
- // that contains the same values.
- void Set(unsigned value, Zone* zone);
-
- // The successors are a list of sets that contain the same values
- // as this set and the one more value that is not present in this
- // set.
- ZoneList<OutSet*>* successors(Zone* zone) { return successors_; }
-
- OutSet(uint32_t first, ZoneList<unsigned>* remaining)
- : first_(first), remaining_(remaining), successors_(nullptr) {}
- uint32_t first_;
- ZoneList<unsigned>* remaining_;
- ZoneList<OutSet*>* successors_;
- friend class Trace;
-};
-
-
-// A mapping from integers, specified as ranges, to a set of integers.
-// Used for mapping character ranges to choices.
-class DispatchTable : public ZoneObject {
- public:
- explicit DispatchTable(Zone* zone) : tree_(zone) { }
-
- class Entry {
- public:
- Entry() : from_(0), to_(0), out_set_(nullptr) {}
- Entry(uc32 from, uc32 to, OutSet* out_set)
- : from_(from), to_(to), out_set_(out_set) {
- DCHECK(from <= to);
- }
- uc32 from() { return from_; }
- uc32 to() { return to_; }
- void set_to(uc32 value) { to_ = value; }
- void AddValue(int value, Zone* zone) {
- out_set_ = out_set_->Extend(value, zone);
- }
- OutSet* out_set() { return out_set_; }
- private:
- uc32 from_;
- uc32 to_;
- OutSet* out_set_;
- };
-
- class Config {
- public:
- using Key = uc32;
- using Value = Entry;
- static const uc32 kNoKey;
- static const Entry NoValue() { return Value(); }
- static inline int Compare(uc32 a, uc32 b) {
- if (a == b)
- return 0;
- else if (a < b)
- return -1;
- else
- return 1;
- }
- };
-
- V8_EXPORT_PRIVATE void AddRange(CharacterRange range, int value, Zone* zone);
- V8_EXPORT_PRIVATE OutSet* Get(uc32 value);
- void Dump();
-
- template <typename Callback>
- void ForEach(Callback* callback) {
- return tree()->ForEach(callback);
- }
-
- private:
- // There can't be a static empty set since it allocates its
- // successors in a zone and caches them.
- OutSet* empty() { return &empty_; }
- OutSet empty_;
- ZoneSplayTree<Config>* tree() { return &tree_; }
- ZoneSplayTree<Config> tree_;
-};
-
-
-// Categorizes character ranges into BMP, non-BMP, lead, and trail surrogates.
-class UnicodeRangeSplitter {
- public:
- V8_EXPORT_PRIVATE UnicodeRangeSplitter(Zone* zone,
- ZoneList<CharacterRange>* base);
- void Call(uc32 from, DispatchTable::Entry entry);
-
- ZoneList<CharacterRange>* bmp() { return bmp_; }
- ZoneList<CharacterRange>* lead_surrogates() { return lead_surrogates_; }
- ZoneList<CharacterRange>* trail_surrogates() { return trail_surrogates_; }
- ZoneList<CharacterRange>* non_bmp() const { return non_bmp_; }
-
- private:
- static const int kBase = 0;
- // Separate ranges into
- static const int kBmpCodePoints = 1;
- static const int kLeadSurrogates = 2;
- static const int kTrailSurrogates = 3;
- static const int kNonBmpCodePoints = 4;
-
- Zone* zone_;
- DispatchTable table_;
- ZoneList<CharacterRange>* bmp_;
- ZoneList<CharacterRange>* lead_surrogates_;
- ZoneList<CharacterRange>* trail_surrogates_;
- ZoneList<CharacterRange>* non_bmp_;
-};
-
-#define FOR_EACH_NODE_TYPE(VISIT) \
- VISIT(End) \
- VISIT(Action) \
- VISIT(Choice) \
- VISIT(BackReference) \
- VISIT(Assertion) \
- VISIT(Text)
-
-
-class Trace;
-struct PreloadState;
-class GreedyLoopState;
-class AlternativeGenerationList;
-
-struct NodeInfo {
- NodeInfo()
- : being_analyzed(false),
- been_analyzed(false),
- follows_word_interest(false),
- follows_newline_interest(false),
- follows_start_interest(false),
- at_end(false),
- visited(false),
- replacement_calculated(false) { }
-
- // Returns true if the interests and assumptions of this node
- // matches the given one.
- bool Matches(NodeInfo* that) {
- return (at_end == that->at_end) &&
- (follows_word_interest == that->follows_word_interest) &&
- (follows_newline_interest == that->follows_newline_interest) &&
- (follows_start_interest == that->follows_start_interest);
- }
-
- // Updates the interests of this node given the interests of the
- // node preceding it.
- void AddFromPreceding(NodeInfo* that) {
- at_end |= that->at_end;
- follows_word_interest |= that->follows_word_interest;
- follows_newline_interest |= that->follows_newline_interest;
- follows_start_interest |= that->follows_start_interest;
- }
-
- bool HasLookbehind() {
- return follows_word_interest ||
- follows_newline_interest ||
- follows_start_interest;
- }
-
- // Sets the interests of this node to include the interests of the
- // following node.
- void AddFromFollowing(NodeInfo* that) {
- follows_word_interest |= that->follows_word_interest;
- follows_newline_interest |= that->follows_newline_interest;
- follows_start_interest |= that->follows_start_interest;
- }
-
- void ResetCompilationState() {
- being_analyzed = false;
- been_analyzed = false;
- }
-
- bool being_analyzed: 1;
- bool been_analyzed: 1;
-
- // These bits are set of this node has to know what the preceding
- // character was.
- bool follows_word_interest: 1;
- bool follows_newline_interest: 1;
- bool follows_start_interest: 1;
-
- bool at_end: 1;
- bool visited: 1;
- bool replacement_calculated: 1;
-};
-
-
-// Details of a quick mask-compare check that can look ahead in the
-// input stream.
-class QuickCheckDetails {
- public:
- QuickCheckDetails()
- : characters_(0),
- mask_(0),
- value_(0),
- cannot_match_(false) { }
- explicit QuickCheckDetails(int characters)
- : characters_(characters),
- mask_(0),
- value_(0),
- cannot_match_(false) { }
- bool Rationalize(bool one_byte);
- // Merge in the information from another branch of an alternation.
- void Merge(QuickCheckDetails* other, int from_index);
- // Advance the current position by some amount.
- void Advance(int by, bool one_byte);
- void Clear();
- bool cannot_match() { return cannot_match_; }
- void set_cannot_match() { cannot_match_ = true; }
- struct Position {
- Position() : mask(0), value(0), determines_perfectly(false) { }
- uc16 mask;
- uc16 value;
- bool determines_perfectly;
- };
- int characters() { return characters_; }
- void set_characters(int characters) { characters_ = characters; }
- Position* positions(int index) {
- DCHECK_LE(0, index);
- DCHECK_GT(characters_, index);
- return positions_ + index;
- }
- uint32_t mask() { return mask_; }
- uint32_t value() { return value_; }
-
- private:
- // How many characters do we have quick check information from. This is
- // the same for all branches of a choice node.
- int characters_;
- Position positions_[4];
- // These values are the condensate of the above array after Rationalize().
- uint32_t mask_;
- uint32_t value_;
- // If set to true, there is no way this quick check can match at all.
- // E.g., if it requires to be at the start of the input, and isn't.
- bool cannot_match_;
-};
-
-
-extern int kUninitializedRegExpNodePlaceHolder;
-
-
-class RegExpNode: public ZoneObject {
- public:
- explicit RegExpNode(Zone* zone)
- : replacement_(nullptr),
- on_work_list_(false),
- trace_count_(0),
- zone_(zone) {
- bm_info_[0] = bm_info_[1] = nullptr;
- }
- virtual ~RegExpNode();
- virtual void Accept(NodeVisitor* visitor) = 0;
- // Generates a goto to this node or actually generates the code at this point.
- virtual void Emit(RegExpCompiler* compiler, Trace* trace) = 0;
- // How many characters must this node consume at a minimum in order to
- // succeed. If we have found at least 'still_to_find' characters that
- // must be consumed there is no need to ask any following nodes whether
- // they are sure to eat any more characters. The not_at_start argument is
- // used to indicate that we know we are not at the start of the input. In
- // this case anchored branches will always fail and can be ignored when
- // determining how many characters are consumed on success.
- virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0;
- // Emits some quick code that checks whether the preloaded characters match.
- // Falls through on certain failure, jumps to the label on possible success.
- // If the node cannot make a quick check it does nothing and returns false.
- bool EmitQuickCheck(RegExpCompiler* compiler,
- Trace* bounds_check_trace,
- Trace* trace,
- bool preload_has_checked_bounds,
- Label* on_possible_success,
- QuickCheckDetails* details_return,
- bool fall_through_on_failure);
- // For a given number of characters this returns a mask and a value. The
- // next n characters are anded with the mask and compared with the value.
- // A comparison failure indicates the node cannot match the next n characters.
- // A comparison success indicates the node may match.
- virtual void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int characters_filled_in,
- bool not_at_start) = 0;
- static const int kNodeIsTooComplexForGreedyLoops = kMinInt;
- virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
- // Only returns the successor for a text node of length 1 that matches any
- // character and that has no guards on it.
- virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler) {
- return nullptr;
- }
-
- // Collects information on the possible code units (mod 128) that can match if
- // we look forward. This is used for a Boyer-Moore-like string searching
- // implementation. TODO(erikcorry): This should share more code with
- // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
- // the number of nodes we are willing to look at in order to create this data.
- static const int kRecursionBudget = 200;
- bool KeepRecursing(RegExpCompiler* compiler);
- virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) {
- UNREACHABLE();
- }
-
- // If we know that the input is one-byte then there are some nodes that can
- // never match. This method returns a node that can be substituted for
- // itself, or nullptr if the node can never match.
- virtual RegExpNode* FilterOneByte(int depth) { return this; }
- // Helper for FilterOneByte.
- RegExpNode* replacement() {
- DCHECK(info()->replacement_calculated);
- return replacement_;
- }
- RegExpNode* set_replacement(RegExpNode* replacement) {
- info()->replacement_calculated = true;
- replacement_ = replacement;
- return replacement; // For convenience.
- }
-
- // We want to avoid recalculating the lookahead info, so we store it on the
- // node. Only info that is for this node is stored. We can tell that the
- // info is for this node when offset == 0, so the information is calculated
- // relative to this node.
- void SaveBMInfo(BoyerMooreLookahead* bm, bool not_at_start, int offset) {
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
-
- Label* label() { return &label_; }
- // If non-generic code is generated for a node (i.e. the node is not at the
- // start of the trace) then it cannot be reused. This variable sets a limit
- // on how often we allow that to happen before we insist on starting a new
- // trace and generating generic code for a node that can be reused by flushing
- // the deferred actions in the current trace and generating a goto.
- static const int kMaxCopiesCodeGenerated = 10;
-
- bool on_work_list() { return on_work_list_; }
- void set_on_work_list(bool value) { on_work_list_ = value; }
-
- NodeInfo* info() { return &info_; }
-
- BoyerMooreLookahead* bm_info(bool not_at_start) {
- return bm_info_[not_at_start ? 1 : 0];
- }
-
- Zone* zone() const { return zone_; }
-
- protected:
- enum LimitResult { DONE, CONTINUE };
- RegExpNode* replacement_;
-
- LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
-
- void set_bm_info(bool not_at_start, BoyerMooreLookahead* bm) {
- bm_info_[not_at_start ? 1 : 0] = bm;
- }
-
- private:
- static const int kFirstCharBudget = 10;
- Label label_;
- bool on_work_list_;
- NodeInfo info_;
- // This variable keeps track of how many times code has been generated for
- // this node (in different traces). We don't keep track of where the
- // generated code is located unless the code is generated at the start of
- // a trace, in which case it is generic and can be reused by flushing the
- // deferred operations in the current trace and generating a goto.
- int trace_count_;
- BoyerMooreLookahead* bm_info_[2];
-
- Zone* zone_;
-};
-
-
-class SeqRegExpNode: public RegExpNode {
- public:
- explicit SeqRegExpNode(RegExpNode* on_success)
- : RegExpNode(on_success->zone()), on_success_(on_success) { }
- RegExpNode* on_success() { return on_success_; }
- void set_on_success(RegExpNode* node) { on_success_ = node; }
- RegExpNode* FilterOneByte(int depth) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override {
- on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
-
- protected:
- RegExpNode* FilterSuccessor(int depth);
-
- private:
- RegExpNode* on_success_;
-};
-
-
-class ActionNode: public SeqRegExpNode {
- public:
- enum ActionType {
- SET_REGISTER,
- INCREMENT_REGISTER,
- STORE_POSITION,
- BEGIN_SUBMATCH,
- POSITIVE_SUBMATCH_SUCCESS,
- EMPTY_MATCH_CHECK,
- CLEAR_CAPTURES
- };
- static ActionNode* SetRegister(int reg, int val, RegExpNode* on_success);
- static ActionNode* IncrementRegister(int reg, RegExpNode* on_success);
- static ActionNode* StorePosition(int reg,
- bool is_capture,
- RegExpNode* on_success);
- static ActionNode* ClearCaptures(Interval range, RegExpNode* on_success);
- static ActionNode* BeginSubmatch(int stack_pointer_reg,
- int position_reg,
- RegExpNode* on_success);
- static ActionNode* PositiveSubmatchSuccess(int stack_pointer_reg,
- int restore_reg,
- int clear_capture_count,
- int clear_capture_from,
- RegExpNode* on_success);
- static ActionNode* EmptyMatchCheck(int start_register,
- int repetition_register,
- int repetition_limit,
- RegExpNode* on_success);
- void Accept(NodeVisitor* visitor) override;
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int filled_in,
- bool not_at_start) override {
- return on_success()->GetQuickCheckDetails(
- details, compiler, filled_in, not_at_start);
- }
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
- ActionType action_type() { return action_type_; }
- // TODO(erikcorry): We should allow some action nodes in greedy loops.
- int GreedyLoopTextLength() override {
- return kNodeIsTooComplexForGreedyLoops;
- }
-
- private:
- union {
- struct {
- int reg;
- int value;
- } u_store_register;
- struct {
- int reg;
- } u_increment_register;
- struct {
- int reg;
- bool is_capture;
- } u_position_register;
- struct {
- int stack_pointer_register;
- int current_position_register;
- int clear_register_count;
- int clear_register_from;
- } u_submatch;
- struct {
- int start_register;
- int repetition_register;
- int repetition_limit;
- } u_empty_match_check;
- struct {
- int range_from;
- int range_to;
- } u_clear_captures;
- } data_;
- ActionNode(ActionType action_type, RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- action_type_(action_type) { }
- ActionType action_type_;
- friend class DotPrinter;
-};
-
-
-class TextNode: public SeqRegExpNode {
- public:
- TextNode(ZoneList<TextElement>* elms, bool read_backward,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success), elms_(elms), read_backward_(read_backward) {}
- TextNode(RegExpCharacterClass* that, bool read_backward,
- RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- elms_(new (zone()) ZoneList<TextElement>(1, zone())),
- read_backward_(read_backward) {
- elms_->Add(TextElement::CharClass(that), zone());
- }
- // Create TextNode for a single character class for the given ranges.
- static TextNode* CreateForCharacterRanges(Zone* zone,
- ZoneList<CharacterRange>* ranges,
- bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags);
- // Create TextNode for a surrogate pair with a range given for the
- // lead and the trail surrogate each.
- static TextNode* CreateForSurrogatePair(Zone* zone, CharacterRange lead,
- CharacterRange trail,
- bool read_backward,
- RegExpNode* on_success,
- JSRegExp::Flags flags);
- void Accept(NodeVisitor* visitor) override;
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override;
- ZoneList<TextElement>* elements() { return elms_; }
- bool read_backward() { return read_backward_; }
- void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
- int GreedyLoopTextLength() override;
- RegExpNode* GetSuccessorOfOmnivorousTextNode(
- RegExpCompiler* compiler) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
- void CalculateOffsets();
- RegExpNode* FilterOneByte(int depth) override;
-
- private:
- enum TextEmitPassType {
- NON_LATIN1_MATCH, // Check for characters that can't match.
- SIMPLE_CHARACTER_MATCH, // Case-dependent single character check.
- NON_LETTER_CHARACTER_MATCH, // Check characters that have no case equivs.
- CASE_CHARACTER_MATCH, // Case-independent single character check.
- CHARACTER_CLASS_MATCH // Character class.
- };
- static bool SkipPass(TextEmitPassType pass, bool ignore_case);
- static const int kFirstRealPass = SIMPLE_CHARACTER_MATCH;
- static const int kLastPass = CHARACTER_CLASS_MATCH;
- void TextEmitPass(RegExpCompiler* compiler,
- TextEmitPassType pass,
- bool preloaded,
- Trace* trace,
- bool first_element_checked,
- int* checked_up_to);
- int Length();
- ZoneList<TextElement>* elms_;
- bool read_backward_;
-};
-
-
-class AssertionNode: public SeqRegExpNode {
- public:
- enum AssertionType {
- AT_END,
- AT_START,
- AT_BOUNDARY,
- AT_NON_BOUNDARY,
- AFTER_NEWLINE
- };
- static AssertionNode* AtEnd(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_END, on_success);
- }
- static AssertionNode* AtStart(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_START, on_success);
- }
- static AssertionNode* AtBoundary(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_BOUNDARY, on_success);
- }
- static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AT_NON_BOUNDARY, on_success);
- }
- static AssertionNode* AfterNewline(RegExpNode* on_success) {
- return new(on_success->zone()) AssertionNode(AFTER_NEWLINE, on_success);
- }
- void Accept(NodeVisitor* visitor) override;
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int filled_in,
- bool not_at_start) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
- AssertionType assertion_type() { return assertion_type_; }
-
- private:
- void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
- enum IfPrevious { kIsNonWord, kIsWord };
- void BacktrackIfPrevious(RegExpCompiler* compiler,
- Trace* trace,
- IfPrevious backtrack_if_previous);
- AssertionNode(AssertionType t, RegExpNode* on_success)
- : SeqRegExpNode(on_success), assertion_type_(t) { }
- AssertionType assertion_type_;
-};
-
-
-class BackReferenceNode: public SeqRegExpNode {
- public:
- BackReferenceNode(int start_reg, int end_reg, JSRegExp::Flags flags,
- bool read_backward, RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- start_reg_(start_reg),
- end_reg_(end_reg),
- flags_(flags),
- read_backward_(read_backward) {}
- void Accept(NodeVisitor* visitor) override;
- int start_register() { return start_reg_; }
- int end_register() { return end_reg_; }
- bool read_backward() { return read_backward_; }
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int recursion_depth,
- bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override {
- return;
- }
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
-
- private:
- int start_reg_;
- int end_reg_;
- JSRegExp::Flags flags_;
- bool read_backward_;
-};
-
-
-class EndNode: public RegExpNode {
- public:
- enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
- EndNode(Action action, Zone* zone) : RegExpNode(zone), action_(action) {}
- void Accept(NodeVisitor* visitor) override;
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int recursion_depth,
- bool not_at_start) override {
- return 0;
- }
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override {
- // Returning 0 from EatsAtLeast should ensure we never get here.
- UNREACHABLE();
- }
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override {
- // Returning 0 from EatsAtLeast should ensure we never get here.
- UNREACHABLE();
- }
-
- private:
- Action action_;
-};
-
-
-class NegativeSubmatchSuccess: public EndNode {
- public:
- NegativeSubmatchSuccess(int stack_pointer_reg,
- int position_reg,
- int clear_capture_count,
- int clear_capture_start,
- Zone* zone)
- : EndNode(NEGATIVE_SUBMATCH_SUCCESS, zone),
- stack_pointer_register_(stack_pointer_reg),
- current_position_register_(position_reg),
- clear_capture_count_(clear_capture_count),
- clear_capture_start_(clear_capture_start) { }
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
-
- private:
- int stack_pointer_register_;
- int current_position_register_;
- int clear_capture_count_;
- int clear_capture_start_;
-};
-
-
-class Guard: public ZoneObject {
- public:
- enum Relation { LT, GEQ };
- Guard(int reg, Relation op, int value)
- : reg_(reg),
- op_(op),
- value_(value) { }
- int reg() { return reg_; }
- Relation op() { return op_; }
- int value() { return value_; }
-
- private:
- int reg_;
- Relation op_;
- int value_;
-};
-
-
-class GuardedAlternative {
- public:
- explicit GuardedAlternative(RegExpNode* node)
- : node_(node), guards_(nullptr) {}
- void AddGuard(Guard* guard, Zone* zone);
- RegExpNode* node() { return node_; }
- void set_node(RegExpNode* node) { node_ = node; }
- ZoneList<Guard*>* guards() { return guards_; }
-
- private:
- RegExpNode* node_;
- ZoneList<Guard*>* guards_;
-};
-
-
-class AlternativeGeneration;
-
-
-class ChoiceNode: public RegExpNode {
- public:
- explicit ChoiceNode(int expected_size, Zone* zone)
- : RegExpNode(zone),
- alternatives_(new (zone)
- ZoneList<GuardedAlternative>(expected_size, zone)),
- table_(nullptr),
- not_at_start_(false),
- being_calculated_(false) {}
- void Accept(NodeVisitor* visitor) override;
- void AddAlternative(GuardedAlternative node) {
- alternatives()->Add(node, zone());
- }
- ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
- DispatchTable* GetTable(bool ignore_case);
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- int EatsAtLeastHelper(int still_to_find,
- int budget,
- RegExpNode* ignore_this_node,
- bool not_at_start);
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
-
- bool being_calculated() { return being_calculated_; }
- bool not_at_start() { return not_at_start_; }
- void set_not_at_start() { not_at_start_ = true; }
- void set_being_calculated(bool b) { being_calculated_ = b; }
- virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
- return true;
- }
- RegExpNode* FilterOneByte(int depth) override;
- virtual bool read_backward() { return false; }
-
- protected:
- int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
- ZoneList<GuardedAlternative>* alternatives_;
-
- private:
- friend class DispatchTableConstructor;
- friend class Analysis;
- void GenerateGuard(RegExpMacroAssembler* macro_assembler,
- Guard* guard,
- Trace* trace);
- int CalculatePreloadCharacters(RegExpCompiler* compiler, int eats_at_least);
- void EmitOutOfLineContinuation(RegExpCompiler* compiler,
- Trace* trace,
- GuardedAlternative alternative,
- AlternativeGeneration* alt_gen,
- int preload_characters,
- bool next_expects_preload);
- void SetUpPreLoad(RegExpCompiler* compiler,
- Trace* current_trace,
- PreloadState* preloads);
- void AssertGuardsMentionRegisters(Trace* trace);
- int EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler, Trace* trace);
- Trace* EmitGreedyLoop(RegExpCompiler* compiler,
- Trace* trace,
- AlternativeGenerationList* alt_gens,
- PreloadState* preloads,
- GreedyLoopState* greedy_loop_state,
- int text_length);
- void EmitChoices(RegExpCompiler* compiler,
- AlternativeGenerationList* alt_gens,
- int first_choice,
- Trace* trace,
- PreloadState* preloads);
- DispatchTable* table_;
- // If true, this node is never checked at the start of the input.
- // Allows a new trace to start with at_start() set to false.
- bool not_at_start_;
- bool being_calculated_;
-};
-
-
-class NegativeLookaroundChoiceNode : public ChoiceNode {
- public:
- explicit NegativeLookaroundChoiceNode(GuardedAlternative this_must_fail,
- GuardedAlternative then_do_this,
- Zone* zone)
- : ChoiceNode(2, zone) {
- AddAlternative(this_must_fail);
- AddAlternative(then_do_this);
- }
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override {
- alternatives_->at(1).node()->FillInBMInfo(isolate, offset, budget - 1, bm,
- not_at_start);
- if (offset == 0) set_bm_info(not_at_start, bm);
- }
- // For a negative lookahead we don't emit the quick check for the
- // alternative that is expected to fail. This is because quick check code
- // starts by loading enough characters for the alternative that takes fewest
- // characters, but on a negative lookahead the negative branch did not take
- // part in that calculation (EatsAtLeast) so the assumptions don't hold.
- bool try_to_emit_quick_check_for_alternative(bool is_first) override {
- return !is_first;
- }
- RegExpNode* FilterOneByte(int depth) override;
-};
-
-
-class LoopChoiceNode: public ChoiceNode {
- public:
- LoopChoiceNode(bool body_can_be_zero_length, bool read_backward, Zone* zone)
- : ChoiceNode(2, zone),
- loop_node_(nullptr),
- continue_node_(nullptr),
- body_can_be_zero_length_(body_can_be_zero_length),
- read_backward_(read_backward) {}
- void AddLoopAlternative(GuardedAlternative alt);
- void AddContinueAlternative(GuardedAlternative alt);
- void Emit(RegExpCompiler* compiler, Trace* trace) override;
- int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
- void GetQuickCheckDetails(QuickCheckDetails* details,
- RegExpCompiler* compiler, int characters_filled_in,
- bool not_at_start) override;
- void FillInBMInfo(Isolate* isolate, int offset, int budget,
- BoyerMooreLookahead* bm, bool not_at_start) override;
- RegExpNode* loop_node() { return loop_node_; }
- RegExpNode* continue_node() { return continue_node_; }
- bool body_can_be_zero_length() { return body_can_be_zero_length_; }
- bool read_backward() override { return read_backward_; }
- void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth) override;
-
- private:
- // AddAlternative is made private for loop nodes because alternatives
- // should not be added freely, we need to keep track of which node
- // goes back to the node itself.
- void AddAlternative(GuardedAlternative node) {
- ChoiceNode::AddAlternative(node);
- }
-
- RegExpNode* loop_node_;
- RegExpNode* continue_node_;
- bool body_can_be_zero_length_;
- bool read_backward_;
-};
-
-
-// Improve the speed that we scan for an initial point where a non-anchored
-// regexp can match by using a Boyer-Moore-like table. This is done by
-// identifying non-greedy non-capturing loops in the nodes that eat any
-// character one at a time. For example in the middle of the regexp
-// /foo[\s\S]*?bar/ we find such a loop. There is also such a loop implicitly
-// inserted at the start of any non-anchored regexp.
-//
-// When we have found such a loop we look ahead in the nodes to find the set of
-// characters that can come at given distances. For example for the regexp
-// /.?foo/ we know that there are at least 3 characters ahead of us, and the
-// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
-// the lookahead info where the set of characters is reasonably constrained. In
-// our example this is from index 1 to 2 (0 is not constrained). We can now
-// look 3 characters ahead and if we don't find one of [f, o] (the union of
-// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
-//
-// For Unicode input strings we do the same, but modulo 128.
-//
-// We also look at the first string fed to the regexp and use that to get a hint
-// of the character frequencies in the inputs. This affects the assessment of
-// whether the set of characters is 'reasonably constrained'.
-//
-// We also have another lookahead mechanism (called quick check in the code),
-// which uses a wide load of multiple characters followed by a mask and compare
-// to determine whether a match is possible at this point.
-enum ContainedInLattice {
- kNotYet = 0,
- kLatticeIn = 1,
- kLatticeOut = 2,
- kLatticeUnknown = 3 // Can also mean both in and out.
-};
-
-
-inline ContainedInLattice Combine(ContainedInLattice a, ContainedInLattice b) {
- return static_cast<ContainedInLattice>(a | b);
-}
-
-
-ContainedInLattice AddRange(ContainedInLattice a,
- const int* ranges,
- int ranges_size,
- Interval new_range);
-
-
-class BoyerMoorePositionInfo : public ZoneObject {
- public:
- explicit BoyerMoorePositionInfo(Zone* zone)
- : map_(new(zone) ZoneList<bool>(kMapSize, zone)),
- map_count_(0),
- w_(kNotYet),
- s_(kNotYet),
- d_(kNotYet),
- surrogate_(kNotYet) {
- for (int i = 0; i < kMapSize; i++) {
- map_->Add(false, zone);
- }
- }
-
- bool& at(int i) { return map_->at(i); }
-
- static const int kMapSize = 128;
- static const int kMask = kMapSize - 1;
-
- int map_count() const { return map_count_; }
-
- void Set(int character);
- void SetInterval(const Interval& interval);
- void SetAll();
- bool is_non_word() { return w_ == kLatticeOut; }
- bool is_word() { return w_ == kLatticeIn; }
-
- private:
- ZoneList<bool>* map_;
- int map_count_; // Number of set bits in the map.
- ContainedInLattice w_; // The \w character class.
- ContainedInLattice s_; // The \s character class.
- ContainedInLattice d_; // The \d character class.
- ContainedInLattice surrogate_; // Surrogate UTF-16 code units.
-};
-
-
-class BoyerMooreLookahead : public ZoneObject {
- public:
- BoyerMooreLookahead(int length, RegExpCompiler* compiler, Zone* zone);
-
- int length() { return length_; }
- int max_char() { return max_char_; }
- RegExpCompiler* compiler() { return compiler_; }
-
- int Count(int map_number) {
- return bitmaps_->at(map_number)->map_count();
- }
-
- BoyerMoorePositionInfo* at(int i) { return bitmaps_->at(i); }
-
- void Set(int map_number, int character) {
- if (character > max_char_) return;
- BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
- info->Set(character);
- }
-
- void SetInterval(int map_number, const Interval& interval) {
- if (interval.from() > max_char_) return;
- BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
- if (interval.to() > max_char_) {
- info->SetInterval(Interval(interval.from(), max_char_));
- } else {
- info->SetInterval(interval);
- }
- }
-
- void SetAll(int map_number) {
- bitmaps_->at(map_number)->SetAll();
- }
-
- void SetRest(int from_map) {
- for (int i = from_map; i < length_; i++) SetAll(i);
- }
- void EmitSkipInstructions(RegExpMacroAssembler* masm);
-
- private:
- // This is the value obtained by EatsAtLeast. If we do not have at least this
- // many characters left in the sample string then the match is bound to fail.
- // Therefore it is OK to read a character this far ahead of the current match
- // point.
- int length_;
- RegExpCompiler* compiler_;
- // 0xff for Latin1, 0xffff for UTF-16.
- int max_char_;
- ZoneList<BoyerMoorePositionInfo*>* bitmaps_;
-
- int GetSkipTable(int min_lookahead,
- int max_lookahead,
- Handle<ByteArray> boolean_skip_table);
- bool FindWorthwhileInterval(int* from, int* to);
- int FindBestInterval(
- int max_number_of_chars, int old_biggest_points, int* from, int* to);
-};
-
-
-// There are many ways to generate code for a node. This class encapsulates
-// the current way we should be generating. In other words it encapsulates
-// the current state of the code generator. The effect of this is that we
-// generate code for paths that the matcher can take through the regular
-// expression. A given node in the regexp can be code-generated several times
-// as it can be part of several traces. For example for the regexp:
-// /foo(bar|ip)baz/ the code to match baz will be generated twice, once as part
-// of the foo-bar-baz trace and once as part of the foo-ip-baz trace. The code
-// to match foo is generated only once (the traces have a common prefix). The
-// code to store the capture is deferred and generated (twice) after the places
-// where baz has been matched.
-class Trace {
- public:
- // A value for a property that is either known to be true, know to be false,
- // or not known.
- enum TriBool {
- UNKNOWN = -1, FALSE_VALUE = 0, TRUE_VALUE = 1
- };
-
- class DeferredAction {
- public:
- DeferredAction(ActionNode::ActionType action_type, int reg)
- : action_type_(action_type), reg_(reg), next_(nullptr) {}
- DeferredAction* next() { return next_; }
- bool Mentions(int reg);
- int reg() { return reg_; }
- ActionNode::ActionType action_type() { return action_type_; }
- private:
- ActionNode::ActionType action_type_;
- int reg_;
- DeferredAction* next_;
- friend class Trace;
- };
-
- class DeferredCapture : public DeferredAction {
- public:
- DeferredCapture(int reg, bool is_capture, Trace* trace)
- : DeferredAction(ActionNode::STORE_POSITION, reg),
- cp_offset_(trace->cp_offset()),
- is_capture_(is_capture) { }
- int cp_offset() { return cp_offset_; }
- bool is_capture() { return is_capture_; }
- private:
- int cp_offset_;
- bool is_capture_;
- void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
- };
-
- class DeferredSetRegister : public DeferredAction {
- public:
- DeferredSetRegister(int reg, int value)
- : DeferredAction(ActionNode::SET_REGISTER, reg),
- value_(value) { }
- int value() { return value_; }
- private:
- int value_;
- };
-
- class DeferredClearCaptures : public DeferredAction {
- public:
- explicit DeferredClearCaptures(Interval range)
- : DeferredAction(ActionNode::CLEAR_CAPTURES, -1),
- range_(range) { }
- Interval range() { return range_; }
- private:
- Interval range_;
- };
-
- class DeferredIncrementRegister : public DeferredAction {
- public:
- explicit DeferredIncrementRegister(int reg)
- : DeferredAction(ActionNode::INCREMENT_REGISTER, reg) { }
- };
-
- Trace()
- : cp_offset_(0),
- actions_(nullptr),
- backtrack_(nullptr),
- stop_node_(nullptr),
- loop_label_(nullptr),
- characters_preloaded_(0),
- bound_checked_up_to_(0),
- flush_budget_(100),
- at_start_(UNKNOWN) {}
-
- // End the trace. This involves flushing the deferred actions in the trace
- // and pushing a backtrack location onto the backtrack stack. Once this is
- // done we can start a new trace or go to one that has already been
- // generated.
- void Flush(RegExpCompiler* compiler, RegExpNode* successor);
- int cp_offset() { return cp_offset_; }
- DeferredAction* actions() { return actions_; }
- // A trivial trace is one that has no deferred actions or other state that
- // affects the assumptions used when generating code. There is no recorded
- // backtrack location in a trivial trace, so with a trivial trace we will
- // generate code that, on a failure to match, gets the backtrack location
- // from the backtrack stack rather than using a direct jump instruction. We
- // always start code generation with a trivial trace and non-trivial traces
- // are created as we emit code for nodes or add to the list of deferred
- // actions in the trace. The location of the code generated for a node using
- // a trivial trace is recorded in a label in the node so that gotos can be
- // generated to that code.
- bool is_trivial() {
- return backtrack_ == nullptr && actions_ == nullptr && cp_offset_ == 0 &&
- characters_preloaded_ == 0 && bound_checked_up_to_ == 0 &&
- quick_check_performed_.characters() == 0 && at_start_ == UNKNOWN;
- }
- TriBool at_start() { return at_start_; }
- void set_at_start(TriBool at_start) { at_start_ = at_start; }
- Label* backtrack() { return backtrack_; }
- Label* loop_label() { return loop_label_; }
- RegExpNode* stop_node() { return stop_node_; }
- int characters_preloaded() { return characters_preloaded_; }
- int bound_checked_up_to() { return bound_checked_up_to_; }
- int flush_budget() { return flush_budget_; }
- QuickCheckDetails* quick_check_performed() { return &quick_check_performed_; }
- bool mentions_reg(int reg);
- // Returns true if a deferred position store exists to the specified
- // register and stores the offset in the out-parameter. Otherwise
- // returns false.
- bool GetStoredPosition(int reg, int* cp_offset);
- // These set methods and AdvanceCurrentPositionInTrace should be used only on
- // new traces - the intention is that traces are immutable after creation.
- void add_action(DeferredAction* new_action) {
- DCHECK(new_action->next_ == nullptr);
- new_action->next_ = actions_;
- actions_ = new_action;
- }
- void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
- void set_stop_node(RegExpNode* node) { stop_node_ = node; }
- void set_loop_label(Label* label) { loop_label_ = label; }
- void set_characters_preloaded(int count) { characters_preloaded_ = count; }
- void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
- void set_flush_budget(int to) { flush_budget_ = to; }
- void set_quick_check_performed(QuickCheckDetails* d) {
- quick_check_performed_ = *d;
- }
- void InvalidateCurrentCharacter();
- void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
-
- private:
- int FindAffectedRegisters(OutSet* affected_registers, Zone* zone);
- void PerformDeferredActions(RegExpMacroAssembler* macro,
- int max_register,
- const OutSet& affected_registers,
- OutSet* registers_to_pop,
- OutSet* registers_to_clear,
- Zone* zone);
- void RestoreAffectedRegisters(RegExpMacroAssembler* macro,
- int max_register,
- const OutSet& registers_to_pop,
- const OutSet& registers_to_clear);
- int cp_offset_;
- DeferredAction* actions_;
- Label* backtrack_;
- RegExpNode* stop_node_;
- Label* loop_label_;
- int characters_preloaded_;
- int bound_checked_up_to_;
- QuickCheckDetails quick_check_performed_;
- int flush_budget_;
- TriBool at_start_;
-};
-
-
-class GreedyLoopState {
- public:
- explicit GreedyLoopState(bool not_at_start);
-
- Label* label() { return &label_; }
- Trace* counter_backtrack_trace() { return &counter_backtrack_trace_; }
-
- private:
- Label label_;
- Trace counter_backtrack_trace_;
-};
-
-
-struct PreloadState {
- static const int kEatsAtLeastNotYetInitialized = -1;
- bool preload_is_current_;
- bool preload_has_checked_bounds_;
- int preload_characters_;
- int eats_at_least_;
- void init() {
- eats_at_least_ = kEatsAtLeastNotYetInitialized;
- }
-};
-
-
-class NodeVisitor {
- public:
- virtual ~NodeVisitor() = default;
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that) = 0;
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- virtual void VisitLoopChoice(LoopChoiceNode* that) { VisitChoice(that); }
-};
-
-
-// Node visitor used to add the start set of the alternatives to the
-// dispatch table of a choice node.
-class V8_EXPORT_PRIVATE DispatchTableConstructor : public NodeVisitor {
- public:
- DispatchTableConstructor(DispatchTable* table, bool ignore_case,
- Zone* zone)
- : table_(table),
- choice_index_(-1),
- ignore_case_(ignore_case),
- zone_(zone) { }
-
- void BuildTable(ChoiceNode* node);
-
- void AddRange(CharacterRange range) {
- table()->AddRange(range, choice_index_, zone_);
- }
-
- void AddInverse(ZoneList<CharacterRange>* ranges);
-
-#define DECLARE_VISIT(Type) \
- virtual void Visit##Type(Type##Node* that);
-FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DispatchTable* table() { return table_; }
- void set_choice_index(int value) { choice_index_ = value; }
-
- protected:
- DispatchTable* table_;
- int choice_index_;
- bool ignore_case_;
- Zone* zone_;
-};
-
-// Assertion propagation moves information about assertions such as
-// \b to the affected nodes. For instance, in /.\b./ information must
-// be propagated to the first '.' that whatever follows needs to know
-// if it matched a word or a non-word, and to the second '.' that it
-// has to check if it succeeds a word or non-word. In this case the
-// result will be something like:
-//
-// +-------+ +------------+
-// | . | | . |
-// +-------+ ---> +------------+
-// | word? | | check word |
-// +-------+ +------------+
-class Analysis: public NodeVisitor {
- public:
- Analysis(Isolate* isolate, bool is_one_byte)
- : isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
- void EnsureAnalyzed(RegExpNode* node);
-
-#define DECLARE_VISIT(Type) void Visit##Type(Type##Node* that) override;
- FOR_EACH_NODE_TYPE(DECLARE_VISIT)
-#undef DECLARE_VISIT
- void VisitLoopChoice(LoopChoiceNode* that) override;
-
- bool has_failed() { return error_message_ != nullptr; }
- const char* error_message() {
- DCHECK(error_message_ != nullptr);
- return error_message_;
- }
- void fail(const char* error_message) {
- error_message_ = error_message;
- }
-
- Isolate* isolate() const { return isolate_; }
-
- private:
- Isolate* isolate_;
- bool is_one_byte_;
- const char* error_message_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
-};
-
-
-struct RegExpCompileData {
- RegExpCompileData()
- : tree(nullptr),
- node(nullptr),
- simple(true),
- contains_anchor(false),
- capture_count(0) {}
- RegExpTree* tree;
- RegExpNode* node;
- bool simple;
- bool contains_anchor;
- Handle<FixedArray> capture_name_map;
- Handle<String> error;
- int capture_count;
-};
-
-
-class RegExpEngine: public AllStatic {
- public:
- struct CompilationResult {
- inline CompilationResult(Isolate* isolate, const char* error_message);
- CompilationResult(Object code, int registers)
- : code(code), num_registers(registers) {}
- const char* const error_message = nullptr;
- Object const code;
- int const num_registers = 0;
- };
-
- V8_EXPORT_PRIVATE static CompilationResult Compile(
- Isolate* isolate, Zone* zone, RegExpCompileData* input,
- JSRegExp::Flags flags, Handle<String> pattern,
- Handle<String> sample_subject, bool is_one_byte);
-
- static bool TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern);
-
- V8_EXPORT_PRIVATE static void DotPrint(const char* label, RegExpNode* node,
- bool ignore_case);
-};
-
-
-class RegExpResultsCache : public AllStatic {
- public:
- enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
-
- // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
- // On success, the returned result is guaranteed to be a COW-array.
- static Object Lookup(Heap* heap, String key_string, Object key_pattern,
- FixedArray* last_match_out, ResultsCacheType type);
- // Attempt to add value_array to the cache specified by type. On success,
- // value_array is turned into a COW-array.
- static void Enter(Isolate* isolate, Handle<String> key_string,
- Handle<Object> key_pattern, Handle<FixedArray> value_array,
- Handle<FixedArray> last_match_cache, ResultsCacheType type);
- static void Clear(FixedArray cache);
- static const int kRegExpResultsCacheSize = 0x100;
-
- private:
- static const int kArrayEntriesPerCacheEntry = 4;
- static const int kStringOffset = 0;
- static const int kPatternOffset = 1;
- static const int kArrayOffset = 2;
- static const int kLastMatchOffset = 3;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_REGEXP_JSREGEXP_H_
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 1fa9f7a35b..aab67cad15 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -50,7 +50,7 @@ class RegExpVisitor {
// A simple closed interval.
class Interval {
public:
- Interval() : from_(kNone), to_(kNone) {}
+ Interval() : from_(kNone), to_(kNone - 1) {} // '- 1' for branchless size().
Interval(int from, int to) : from_(from), to_(to) {}
Interval Union(Interval that) {
if (that.from_ == kNone)
@@ -60,12 +60,16 @@ class Interval {
else
return Interval(Min(from_, that.from_), Max(to_, that.to_));
}
+
bool Contains(int value) { return (from_ <= value) && (value <= to_); }
bool is_empty() { return from_ == kNone; }
int from() const { return from_; }
int to() const { return to_; }
+ int size() const { return to_ - from_ + 1; }
+
static Interval Empty() { return Interval(); }
- static const int kNone = -1;
+
+ static constexpr int kNone = -1;
private:
int from_;
@@ -268,12 +272,13 @@ class RegExpAlternative final : public RegExpTree {
class RegExpAssertion final : public RegExpTree {
public:
enum AssertionType {
- START_OF_LINE,
- START_OF_INPUT,
- END_OF_LINE,
- END_OF_INPUT,
- BOUNDARY,
- NON_BOUNDARY
+ START_OF_LINE = 0,
+ START_OF_INPUT = 1,
+ END_OF_LINE = 2,
+ END_OF_INPUT = 3,
+ BOUNDARY = 4,
+ NON_BOUNDARY = 5,
+ LAST_TYPE = NON_BOUNDARY,
};
RegExpAssertion(AssertionType type, JSRegExp::Flags flags)
: assertion_type_(type), flags_(flags) {}
@@ -285,7 +290,8 @@ class RegExpAssertion final : public RegExpTree {
bool IsAnchoredAtEnd() override;
int min_match() override { return 0; }
int max_match() override { return 0; }
- AssertionType assertion_type() { return assertion_type_; }
+ AssertionType assertion_type() const { return assertion_type_; }
+ JSRegExp::Flags flags() const { return flags_; }
private:
const AssertionType assertion_type_;
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
index cda48aa00b..bd906fea15 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
@@ -2,30 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#ifndef V8_REGEXP_REGEXP_BYTECODE_GENERATOR_INL_H_
+#define V8_REGEXP_REGEXP_BYTECODE_GENERATOR_INL_H_
-#include "src/regexp/regexp-macro-assembler-irregexp.h"
+#include "src/regexp/regexp-bytecode-generator.h"
#include "src/ast/ast.h"
-#include "src/regexp/bytecodes-irregexp.h"
+#include "src/regexp/regexp-bytecodes.h"
namespace v8 {
namespace internal {
-void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
- uint32_t twenty_four_bits) {
+void RegExpBytecodeGenerator::Emit(uint32_t byte, uint32_t twenty_four_bits) {
uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
DCHECK(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
+ if (pc_ + 3 >= buffer_.length()) {
Expand();
}
*reinterpret_cast<uint32_t*>(buffer_.begin() + pc_) = word;
pc_ += 4;
}
-
-void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
+void RegExpBytecodeGenerator::Emit16(uint32_t word) {
DCHECK(pc_ <= buffer_.length());
if (pc_ + 1 >= buffer_.length()) {
Expand();
@@ -34,8 +32,7 @@ void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
pc_ += 2;
}
-
-void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) {
+void RegExpBytecodeGenerator::Emit8(uint32_t word) {
DCHECK(pc_ <= buffer_.length());
if (pc_ == buffer_.length()) {
Expand();
@@ -44,8 +41,7 @@ void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) {
pc_ += 1;
}
-
-void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
+void RegExpBytecodeGenerator::Emit32(uint32_t word) {
DCHECK(pc_ <= buffer_.length());
if (pc_ + 3 >= buffer_.length()) {
Expand();
@@ -57,4 +53,4 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#endif // V8_REGEXP_REGEXP_BYTECODE_GENERATOR_INL_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 712f00e509..ee3b4015d5 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -2,39 +2,35 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/regexp/regexp-macro-assembler-irregexp.h"
+#include "src/regexp/regexp-bytecode-generator.h"
#include "src/ast/ast.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/bytecodes-irregexp.h"
-#include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
+#include "src/regexp/regexp-bytecode-generator-inl.h"
+#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
-RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Isolate* isolate,
- Zone* zone)
+RegExpBytecodeGenerator::RegExpBytecodeGenerator(Isolate* isolate, Zone* zone)
: RegExpMacroAssembler(isolate, zone),
buffer_(Vector<byte>::New(1024)),
pc_(0),
- own_buffer_(true),
advance_current_end_(kInvalidPC),
isolate_(isolate) {}
-RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
+RegExpBytecodeGenerator::~RegExpBytecodeGenerator() {
if (backtrack_.is_linked()) backtrack_.Unuse();
- if (own_buffer_) buffer_.Dispose();
+ buffer_.Dispose();
}
-
-RegExpMacroAssemblerIrregexp::IrregexpImplementation
-RegExpMacroAssemblerIrregexp::Implementation() {
+RegExpBytecodeGenerator::IrregexpImplementation
+RegExpBytecodeGenerator::Implementation() {
return kBytecodeImplementation;
}
-
-void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
+void RegExpBytecodeGenerator::Bind(Label* l) {
advance_current_end_ = kInvalidPC;
DCHECK(!l->is_bound());
if (l->is_linked()) {
@@ -48,8 +44,7 @@ void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
l->bind_to(pc_);
}
-
-void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
+void RegExpBytecodeGenerator::EmitOrLink(Label* l) {
if (l == nullptr) l = &backtrack_;
if (l->is_bound()) {
Emit32(l->pos());
@@ -63,102 +58,79 @@ void RegExpMacroAssemblerIrregexp::EmitOrLink(Label* l) {
}
}
-
-void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) {
+void RegExpBytecodeGenerator::PopRegister(int register_index) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_POP_REGISTER, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::PushRegister(
- int register_index,
- StackCheckFlag check_stack_limit) {
+void RegExpBytecodeGenerator::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_PUSH_REGISTER, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::WriteCurrentPositionToRegister(
- int register_index, int cp_offset) {
+void RegExpBytecodeGenerator::WriteCurrentPositionToRegister(int register_index,
+ int cp_offset) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER_TO_CP, register_index);
Emit32(cp_offset); // Current position offset.
}
-
-void RegExpMacroAssemblerIrregexp::ClearRegisters(int reg_from, int reg_to) {
+void RegExpBytecodeGenerator::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
for (int reg = reg_from; reg <= reg_to; reg++) {
SetRegister(reg, -1);
}
}
-
-void RegExpMacroAssemblerIrregexp::ReadCurrentPositionFromRegister(
+void RegExpBytecodeGenerator::ReadCurrentPositionFromRegister(
int register_index) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_CP_TO_REGISTER, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::WriteStackPointerToRegister(
- int register_index) {
+void RegExpBytecodeGenerator::WriteStackPointerToRegister(int register_index) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER_TO_SP, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister(
- int register_index) {
+void RegExpBytecodeGenerator::ReadStackPointerFromRegister(int register_index) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_SP_TO_REGISTER, register_index);
}
-
-void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
+void RegExpBytecodeGenerator::SetCurrentPositionFromEnd(int by) {
DCHECK(is_uint24(by));
Emit(BC_SET_CURRENT_POSITION_FROM_END, by);
}
-
-void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
+void RegExpBytecodeGenerator::SetRegister(int register_index, int to) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_SET_REGISTER, register_index);
Emit32(to);
}
-
-void RegExpMacroAssemblerIrregexp::AdvanceRegister(int register_index, int by) {
+void RegExpBytecodeGenerator::AdvanceRegister(int register_index, int by) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_ADVANCE_REGISTER, register_index);
Emit32(by);
}
+void RegExpBytecodeGenerator::PopCurrentPosition() { Emit(BC_POP_CP, 0); }
-void RegExpMacroAssemblerIrregexp::PopCurrentPosition() {
- Emit(BC_POP_CP, 0);
-}
+void RegExpBytecodeGenerator::PushCurrentPosition() { Emit(BC_PUSH_CP, 0); }
+void RegExpBytecodeGenerator::Backtrack() { Emit(BC_POP_BT, 0); }
-void RegExpMacroAssemblerIrregexp::PushCurrentPosition() {
- Emit(BC_PUSH_CP, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::Backtrack() {
- Emit(BC_POP_BT, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::GoTo(Label* l) {
+void RegExpBytecodeGenerator::GoTo(Label* l) {
if (advance_current_end_ == pc_) {
// Combine advance current and goto.
pc_ = advance_current_start_;
@@ -172,25 +144,19 @@ void RegExpMacroAssemblerIrregexp::GoTo(Label* l) {
}
}
-
-void RegExpMacroAssemblerIrregexp::PushBacktrack(Label* l) {
+void RegExpBytecodeGenerator::PushBacktrack(Label* l) {
Emit(BC_PUSH_BT, 0);
EmitOrLink(l);
}
-
-bool RegExpMacroAssemblerIrregexp::Succeed() {
+bool RegExpBytecodeGenerator::Succeed() {
Emit(BC_SUCCEED, 0);
return false; // Restart matching for global regexp not supported.
}
+void RegExpBytecodeGenerator::Fail() { Emit(BC_FAIL, 0); }
-void RegExpMacroAssemblerIrregexp::Fail() {
- Emit(BC_FAIL, 0);
-}
-
-
-void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
+void RegExpBytecodeGenerator::AdvanceCurrentPosition(int by) {
DCHECK_LE(kMinCPOffset, by);
DCHECK_GE(kMaxCPOffset, by);
advance_current_start_ = pc_;
@@ -199,18 +165,16 @@ void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
advance_current_end_ = pc_;
}
-
-void RegExpMacroAssemblerIrregexp::CheckGreedyLoop(
- Label* on_tos_equals_current_position) {
+void RegExpBytecodeGenerator::CheckGreedyLoop(
+ Label* on_tos_equals_current_position) {
Emit(BC_CHECK_GREEDY, 0);
EmitOrLink(on_tos_equals_current_position);
}
-
-void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
- Label* on_failure,
- bool check_bounds,
- int characters) {
+void RegExpBytecodeGenerator::LoadCurrentCharacter(int cp_offset,
+ Label* on_failure,
+ bool check_bounds,
+ int characters) {
DCHECK_LE(kMinCPOffset, cp_offset);
DCHECK_GE(kMaxCPOffset, cp_offset);
int bytecode;
@@ -237,22 +201,17 @@ void RegExpMacroAssemblerIrregexp::LoadCurrentCharacter(int cp_offset,
if (check_bounds) EmitOrLink(on_failure);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterLT(uc16 limit,
- Label* on_less) {
+void RegExpBytecodeGenerator::CheckCharacterLT(uc16 limit, Label* on_less) {
Emit(BC_CHECK_LT, limit);
EmitOrLink(on_less);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterGT(uc16 limit,
- Label* on_greater) {
+void RegExpBytecodeGenerator::CheckCharacterGT(uc16 limit, Label* on_greater) {
Emit(BC_CHECK_GT, limit);
EmitOrLink(on_greater);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacter(uint32_t c, Label* on_equal) {
+void RegExpBytecodeGenerator::CheckCharacter(uint32_t c, Label* on_equal) {
if (c > MAX_FIRST_ARG) {
Emit(BC_CHECK_4_CHARS, 0);
Emit32(c);
@@ -262,22 +221,19 @@ void RegExpMacroAssemblerIrregexp::CheckCharacter(uint32_t c, Label* on_equal) {
EmitOrLink(on_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckAtStart(Label* on_at_start) {
+void RegExpBytecodeGenerator::CheckAtStart(Label* on_at_start) {
Emit(BC_CHECK_AT_START, 0);
EmitOrLink(on_at_start);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotAtStart(int cp_offset,
- Label* on_not_at_start) {
+void RegExpBytecodeGenerator::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
Emit(BC_CHECK_NOT_AT_START, cp_offset);
EmitOrLink(on_not_at_start);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
+void RegExpBytecodeGenerator::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
if (c > MAX_FIRST_ARG) {
Emit(BC_CHECK_NOT_4_CHARS, 0);
Emit32(c);
@@ -287,11 +243,8 @@ void RegExpMacroAssemblerIrregexp::CheckNotCharacter(uint32_t c,
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
- Label* on_equal) {
+void RegExpBytecodeGenerator::CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal) {
if (c > MAX_FIRST_ARG) {
Emit(BC_AND_CHECK_4_CHARS, 0);
Emit32(c);
@@ -302,11 +255,9 @@ void RegExpMacroAssemblerIrregexp::CheckCharacterAfterAnd(
EmitOrLink(on_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterAnd(
- uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
+void RegExpBytecodeGenerator::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
if (c > MAX_FIRST_ARG) {
Emit(BC_AND_CHECK_NOT_4_CHARS, 0);
Emit32(c);
@@ -317,43 +268,32 @@ void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterAnd(
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
+void RegExpBytecodeGenerator::CheckNotCharacterAfterMinusAnd(
+ uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
Emit(BC_MINUS_AND_CHECK_NOT_CHAR, c);
Emit16(minus);
Emit16(mask);
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
+void RegExpBytecodeGenerator::CheckCharacterInRange(uc16 from, uc16 to,
+ Label* on_in_range) {
Emit(BC_CHECK_CHAR_IN_RANGE, 0);
Emit16(from);
Emit16(to);
EmitOrLink(on_in_range);
}
-
-void RegExpMacroAssemblerIrregexp::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
+void RegExpBytecodeGenerator::CheckCharacterNotInRange(uc16 from, uc16 to,
+ Label* on_not_in_range) {
Emit(BC_CHECK_CHAR_NOT_IN_RANGE, 0);
Emit16(from);
Emit16(to);
EmitOrLink(on_not_in_range);
}
-
-void RegExpMacroAssemblerIrregexp::CheckBitInTable(
- Handle<ByteArray> table, Label* on_bit_set) {
+void RegExpBytecodeGenerator::CheckBitInTable(Handle<ByteArray> table,
+ Label* on_bit_set) {
Emit(BC_CHECK_BIT_IN_TABLE, 0);
EmitOrLink(on_bit_set);
for (int i = 0; i < kTableSize; i += kBitsPerByte) {
@@ -365,10 +305,9 @@ void RegExpMacroAssemblerIrregexp::CheckBitInTable(
}
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
- bool read_backward,
- Label* on_not_equal) {
+void RegExpBytecodeGenerator::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_not_equal) {
DCHECK_LE(0, start_reg);
DCHECK_GE(kMaxRegister, start_reg);
Emit(read_backward ? BC_CHECK_NOT_BACK_REF_BACKWARD : BC_CHECK_NOT_BACK_REF,
@@ -376,8 +315,7 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
+void RegExpBytecodeGenerator::CheckNotBackReferenceIgnoreCase(
int start_reg, bool read_backward, bool unicode, Label* on_not_equal) {
DCHECK_LE(0, start_reg);
DCHECK_GE(kMaxRegister, start_reg);
@@ -389,10 +327,8 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
EmitOrLink(on_not_equal);
}
-
-void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
- int comparand,
- Label* on_less_than) {
+void RegExpBytecodeGenerator::IfRegisterLT(int register_index, int comparand,
+ Label* on_less_than) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_LT, register_index);
@@ -400,10 +336,8 @@ void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
EmitOrLink(on_less_than);
}
-
-void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
- int comparand,
- Label* on_greater_or_equal) {
+void RegExpBytecodeGenerator::IfRegisterGE(int register_index, int comparand,
+ Label* on_greater_or_equal) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_GE, register_index);
@@ -411,18 +345,15 @@ void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
EmitOrLink(on_greater_or_equal);
}
-
-void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index,
- Label* on_eq) {
+void RegExpBytecodeGenerator::IfRegisterEqPos(int register_index,
+ Label* on_eq) {
DCHECK_LE(0, register_index);
DCHECK_GE(kMaxRegister, register_index);
Emit(BC_CHECK_REGISTER_EQ_POS, register_index);
EmitOrLink(on_eq);
}
-
-Handle<HeapObject> RegExpMacroAssemblerIrregexp::GetCode(
- Handle<String> source) {
+Handle<HeapObject> RegExpBytecodeGenerator::GetCode(Handle<String> source) {
Bind(&backtrack_);
Emit(BC_POP_BT, 0);
Handle<ByteArray> array = isolate_->factory()->NewByteArray(length());
@@ -430,25 +361,17 @@ Handle<HeapObject> RegExpMacroAssemblerIrregexp::GetCode(
return array;
}
+int RegExpBytecodeGenerator::length() { return pc_; }
-int RegExpMacroAssemblerIrregexp::length() {
- return pc_;
-}
-
-void RegExpMacroAssemblerIrregexp::Copy(byte* a) {
+void RegExpBytecodeGenerator::Copy(byte* a) {
MemCopy(a, buffer_.begin(), length());
}
-
-void RegExpMacroAssemblerIrregexp::Expand() {
- bool old_buffer_was_our_own = own_buffer_;
+void RegExpBytecodeGenerator::Expand() {
Vector<byte> old_buffer = buffer_;
buffer_ = Vector<byte>::New(old_buffer.length() * 2);
- own_buffer_ = true;
MemCopy(buffer_.begin(), old_buffer.begin(), old_buffer.length());
- if (old_buffer_was_our_own) {
- old_buffer.Dispose();
- }
+ old_buffer.Dispose();
}
} // namespace internal
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index 9e17dca415..b7207e977c 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
-#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#ifndef V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
+#define V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
-// A light-weight assembler for the Irregexp byte code.
-class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
- : public RegExpMacroAssembler {
+// An assembler/generator for the Irregexp byte code.
+class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -22,8 +21,8 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
// The assembler allocates and grows its own buffer, and buffer_size
// determines the initial buffer size. The buffer is owned by the assembler
// and deallocated upon destruction of the assembler.
- RegExpMacroAssemblerIrregexp(Isolate* isolate, Zone* zone);
- virtual ~RegExpMacroAssemblerIrregexp();
+ RegExpBytecodeGenerator(Isolate* isolate, Zone* zone);
+ virtual ~RegExpBytecodeGenerator();
// The byte-code interpreter checks on each push anyway.
virtual int stack_limit_slack() { return 1; }
virtual bool CanReadUnaligned() { return false; }
@@ -47,13 +46,11 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void WriteStackPointerToRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
+ virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
+ virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
@@ -61,18 +58,12 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
+ virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
+ virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
+ virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
virtual void CheckNotBackReference(int start_reg, bool read_backward,
@@ -103,8 +94,6 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
Vector<byte> buffer_;
// The program counter.
int pc_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
Label backtrack_;
int advance_current_start_;
@@ -115,10 +104,10 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerIrregexp
static const int kInvalidPC = -1;
- DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpBytecodeGenerator);
};
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#endif // V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
diff --git a/deps/v8/src/regexp/bytecodes-irregexp.h b/deps/v8/src/regexp/regexp-bytecodes.h
index a27c9a0a2b..8b1468c1bf 100644
--- a/deps/v8/src/regexp/bytecodes-irregexp.h
+++ b/deps/v8/src/regexp/regexp-bytecodes.h
@@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#ifndef V8_REGEXP_BYTECODES_IRREGEXP_H_
-#define V8_REGEXP_BYTECODES_IRREGEXP_H_
+#ifndef V8_REGEXP_REGEXP_BYTECODES_H_
+#define V8_REGEXP_REGEXP_BYTECODES_H_
namespace v8 {
namespace internal {
-
const int BYTECODE_MASK = 0xff;
// The first argument is packed in with the byte code in one word, but so it
// has 24 bits, but it can be positive and negative so only use 23 bits for
@@ -71,8 +69,7 @@ const int BYTECODE_SHIFT = 8;
V(ADVANCE_CP_AND_GOTO, 50, 8) /* bc8 offset24 addr32 */ \
V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */
-#define DECLARE_BYTECODES(name, code, length) \
- static const int BC_##name = code;
+#define DECLARE_BYTECODES(name, code, length) static const int BC_##name = code;
BYTECODE_ITERATOR(DECLARE_BYTECODES)
#undef DECLARE_BYTECODES
@@ -84,4 +81,4 @@ BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_BYTECODES_IRREGEXP_H_
+#endif // V8_REGEXP_REGEXP_BYTECODES_H_
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
new file mode 100644
index 0000000000..d12c35682e
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -0,0 +1,1678 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-compiler.h"
+
+#include "src/execution/isolate.h"
+#include "src/regexp/regexp.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/regexp/special-case.h"
+#endif // V8_INTL_SUPPORT
+#include "src/strings/unicode-inl.h"
+#include "src/zone/zone-list-inl.h"
+
+#ifdef V8_INTL_SUPPORT
+#include "unicode/locid.h"
+#include "unicode/uniset.h"
+#include "unicode/utypes.h"
+#endif // V8_INTL_SUPPORT
+
+namespace v8 {
+namespace internal {
+
+using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+
+// -------------------------------------------------------------------
+// Tree to graph conversion
+
+RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ ZoneList<TextElement>* elms =
+ new (compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
+ elms->Add(TextElement::Atom(this), compiler->zone());
+ return new (compiler->zone())
+ TextNode(elms, compiler->read_backward(), on_success);
+}
+
+RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ return new (compiler->zone())
+ TextNode(elements(), compiler->read_backward(), on_success);
+}
+
+static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
+ const int* special_class, int length) {
+ length--; // Remove final marker.
+ DCHECK_EQ(kRangeEndMarker, special_class[length]);
+ DCHECK_NE(0, ranges->length());
+ DCHECK_NE(0, length);
+ DCHECK_NE(0, special_class[0]);
+ if (ranges->length() != (length >> 1) + 1) {
+ return false;
+ }
+ CharacterRange range = ranges->at(0);
+ if (range.from() != 0) {
+ return false;
+ }
+ for (int i = 0; i < length; i += 2) {
+ if (special_class[i] != (range.to() + 1)) {
+ return false;
+ }
+ range = ranges->at((i >> 1) + 1);
+ if (special_class[i + 1] != range.from()) {
+ return false;
+ }
+ }
+ if (range.to() != String::kMaxCodePoint) {
+ return false;
+ }
+ return true;
+}
+
+static bool CompareRanges(ZoneList<CharacterRange>* ranges,
+ const int* special_class, int length) {
+ length--; // Remove final marker.
+ DCHECK_EQ(kRangeEndMarker, special_class[length]);
+ if (ranges->length() * 2 != length) {
+ return false;
+ }
+ for (int i = 0; i < length; i += 2) {
+ CharacterRange range = ranges->at(i >> 1);
+ if (range.from() != special_class[i] ||
+ range.to() != special_class[i + 1] - 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool RegExpCharacterClass::is_standard(Zone* zone) {
+ // TODO(lrn): Remove need for this function, by not throwing away information
+ // along the way.
+ if (is_negated()) {
+ return false;
+ }
+ if (set_.is_standard()) {
+ return true;
+ }
+ if (CompareRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
+ set_.set_standard_set_type('s');
+ return true;
+ }
+ if (CompareInverseRanges(set_.ranges(zone), kSpaceRanges, kSpaceRangeCount)) {
+ set_.set_standard_set_type('S');
+ return true;
+ }
+ if (CompareInverseRanges(set_.ranges(zone), kLineTerminatorRanges,
+ kLineTerminatorRangeCount)) {
+ set_.set_standard_set_type('.');
+ return true;
+ }
+ if (CompareRanges(set_.ranges(zone), kLineTerminatorRanges,
+ kLineTerminatorRangeCount)) {
+ set_.set_standard_set_type('n');
+ return true;
+ }
+ if (CompareRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
+ set_.set_standard_set_type('w');
+ return true;
+ }
+ if (CompareInverseRanges(set_.ranges(zone), kWordRanges, kWordRangeCount)) {
+ set_.set_standard_set_type('W');
+ return true;
+ }
+ return false;
+}
+
+UnicodeRangeSplitter::UnicodeRangeSplitter(ZoneList<CharacterRange>* base) {
+ // The unicode range splitter categorizes given character ranges into:
+ // - Code points from the BMP representable by one code unit.
+ // - Code points outside the BMP that need to be split into surrogate pairs.
+ // - Lone lead surrogates.
+ // - Lone trail surrogates.
+ // Lone surrogates are valid code points, even though no actual characters.
+ // They require special matching to make sure we do not split surrogate pairs.
+
+ for (int i = 0; i < base->length(); i++) AddRange(base->at(i));
+}
+
+void UnicodeRangeSplitter::AddRange(CharacterRange range) {
+ static constexpr uc32 kBmp1Start = 0;
+ static constexpr uc32 kBmp1End = kLeadSurrogateStart - 1;
+ static constexpr uc32 kBmp2Start = kTrailSurrogateEnd + 1;
+ static constexpr uc32 kBmp2End = kNonBmpStart - 1;
+
+ // Ends are all inclusive.
+ STATIC_ASSERT(kBmp1Start == 0);
+ STATIC_ASSERT(kBmp1Start < kBmp1End);
+ STATIC_ASSERT(kBmp1End + 1 == kLeadSurrogateStart);
+ STATIC_ASSERT(kLeadSurrogateStart < kLeadSurrogateEnd);
+ STATIC_ASSERT(kLeadSurrogateEnd + 1 == kTrailSurrogateStart);
+ STATIC_ASSERT(kTrailSurrogateStart < kTrailSurrogateEnd);
+ STATIC_ASSERT(kTrailSurrogateEnd + 1 == kBmp2Start);
+ STATIC_ASSERT(kBmp2Start < kBmp2End);
+ STATIC_ASSERT(kBmp2End + 1 == kNonBmpStart);
+ STATIC_ASSERT(kNonBmpStart < kNonBmpEnd);
+
+ static constexpr uc32 kStarts[] = {
+ kBmp1Start, kLeadSurrogateStart, kTrailSurrogateStart,
+ kBmp2Start, kNonBmpStart,
+ };
+
+ static constexpr uc32 kEnds[] = {
+ kBmp1End, kLeadSurrogateEnd, kTrailSurrogateEnd, kBmp2End, kNonBmpEnd,
+ };
+
+ CharacterRangeVector* const kTargets[] = {
+ &bmp_, &lead_surrogates_, &trail_surrogates_, &bmp_, &non_bmp_,
+ };
+
+ static constexpr int kCount = arraysize(kStarts);
+ STATIC_ASSERT(kCount == arraysize(kEnds));
+ STATIC_ASSERT(kCount == arraysize(kTargets));
+
+ for (int i = 0; i < kCount; i++) {
+ if (kStarts[i] > range.to()) break;
+ const uc32 from = std::max(kStarts[i], range.from());
+ const uc32 to = std::min(kEnds[i], range.to());
+ if (from > to) continue;
+ kTargets[i]->emplace_back(CharacterRange::Range(from, to));
+ }
+}
+
+namespace {
+
+// Translates between new and old V8-isms (SmallVector, ZoneList).
+ZoneList<CharacterRange>* ToCanonicalZoneList(
+ const UnicodeRangeSplitter::CharacterRangeVector* v, Zone* zone) {
+ if (v->empty()) return nullptr;
+
+ ZoneList<CharacterRange>* result =
+ new (zone) ZoneList<CharacterRange>(static_cast<int>(v->size()), zone);
+ for (size_t i = 0; i < v->size(); i++) {
+ result->Add(v->at(i), zone);
+ }
+
+ CharacterRange::Canonicalize(result);
+ return result;
+}
+
+void AddBmpCharacters(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success, UnicodeRangeSplitter* splitter) {
+ ZoneList<CharacterRange>* bmp =
+ ToCanonicalZoneList(splitter->bmp(), compiler->zone());
+ if (bmp == nullptr) return;
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ result->AddAlternative(GuardedAlternative(TextNode::CreateForCharacterRanges(
+ compiler->zone(), bmp, compiler->read_backward(), on_success,
+ default_flags)));
+}
+
+// Adds alternatives to |result| matching the non-BMP (astral) portion of
+// |splitter|'s ranges. Each astral range is decomposed into up to three
+// surrogate-pair alternatives: a head with a fixed lead surrogate, a middle
+// block of full lead surrogates, and a tail with a fixed lead surrogate.
+// No-op when the splitter holds no non-BMP ranges.
+void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
+                             RegExpNode* on_success,
+                             UnicodeRangeSplitter* splitter) {
+  ZoneList<CharacterRange>* non_bmp =
+      ToCanonicalZoneList(splitter->non_bmp(), compiler->zone());
+  if (non_bmp == nullptr) return;
+  DCHECK(!compiler->one_byte());
+  Zone* zone = compiler->zone();
+  JSRegExp::Flags default_flags = JSRegExp::Flags();
+  // |non_bmp| was already canonicalized by ToCanonicalZoneList above, so the
+  // previously duplicated CharacterRange::Canonicalize call has been dropped.
+  for (int i = 0; i < non_bmp->length(); i++) {
+    // Match surrogate pair.
+    // E.g. [\u10005-\u11005] becomes
+    //      \ud800[\udc05-\udfff]|
+    //      [\ud801-\ud803][\udc00-\udfff]|
+    //      \ud804[\udc00-\udc05]
+    uc32 from = non_bmp->at(i).from();
+    uc32 to = non_bmp->at(i).to();
+    uc16 from_l = unibrow::Utf16::LeadSurrogate(from);
+    uc16 from_t = unibrow::Utf16::TrailSurrogate(from);
+    uc16 to_l = unibrow::Utf16::LeadSurrogate(to);
+    uc16 to_t = unibrow::Utf16::TrailSurrogate(to);
+    if (from_l == to_l) {
+      // The lead surrogate is the same.
+      result->AddAlternative(
+          GuardedAlternative(TextNode::CreateForSurrogatePair(
+              zone, CharacterRange::Singleton(from_l),
+              CharacterRange::Range(from_t, to_t), compiler->read_backward(),
+              on_success, default_flags)));
+    } else {
+      if (from_t != kTrailSurrogateStart) {
+        // Add [from_l][from_t-\udfff]
+        result->AddAlternative(
+            GuardedAlternative(TextNode::CreateForSurrogatePair(
+                zone, CharacterRange::Singleton(from_l),
+                CharacterRange::Range(from_t, kTrailSurrogateEnd),
+                compiler->read_backward(), on_success, default_flags)));
+        from_l++;
+      }
+      if (to_t != kTrailSurrogateEnd) {
+        // Add [to_l][\udc00-to_t]
+        result->AddAlternative(
+            GuardedAlternative(TextNode::CreateForSurrogatePair(
+                zone, CharacterRange::Singleton(to_l),
+                CharacterRange::Range(kTrailSurrogateStart, to_t),
+                compiler->read_backward(), on_success, default_flags)));
+        to_l--;
+      }
+      if (from_l <= to_l) {
+        // Add [from_l-to_l][\udc00-\udfff]
+        result->AddAlternative(
+            GuardedAlternative(TextNode::CreateForSurrogatePair(
+                zone, CharacterRange::Range(from_l, to_l),
+                CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
+                compiler->read_backward(), on_success, default_flags)));
+      }
+    }
+  }
+}
+
+// Builds a node that first asserts — via a negative lookaround reading
+// *against* the current direction — that |lookbehind| does not match, and on
+// success matches |match| in the current read direction.
+RegExpNode* NegativeLookaroundAgainstReadDirectionAndMatch(
+    RegExpCompiler* compiler, ZoneList<CharacterRange>* lookbehind,
+    ZoneList<CharacterRange>* match, RegExpNode* on_success, bool read_backward,
+    JSRegExp::Flags flags) {
+  Zone* zone = compiler->zone();
+  RegExpNode* match_node = TextNode::CreateForCharacterRanges(
+      zone, match, read_backward, on_success, flags);
+  int stack_register = compiler->UnicodeLookaroundStackRegister();
+  int position_register = compiler->UnicodeLookaroundPositionRegister();
+  // false => negative lookaround; the lookaround body reads in the direction
+  // opposite to |read_backward| (note the negation below).
+  RegExpLookaround::Builder lookaround(false, match_node, stack_register,
+                                       position_register);
+  RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
+      zone, lookbehind, !read_backward, lookaround.on_match_success(), flags);
+  return lookaround.ForMatch(negative_match);
+}
+
+// Builds a node that matches |match| in the current read direction and then
+// asserts — via a negative lookaround, also in the read direction — that
+// |lookahead| does not match.
+RegExpNode* MatchAndNegativeLookaroundInReadDirection(
+    RegExpCompiler* compiler, ZoneList<CharacterRange>* match,
+    ZoneList<CharacterRange>* lookahead, RegExpNode* on_success,
+    bool read_backward, JSRegExp::Flags flags) {
+  Zone* zone = compiler->zone();
+  int stack_register = compiler->UnicodeLookaroundStackRegister();
+  int position_register = compiler->UnicodeLookaroundPositionRegister();
+  // false => negative lookaround.
+  RegExpLookaround::Builder lookaround(false, on_success, stack_register,
+                                       position_register);
+  RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
+      zone, lookahead, read_backward, lookaround.on_match_success(), flags);
+  return TextNode::CreateForCharacterRanges(
+      zone, match, read_backward, lookaround.ForMatch(negative_match), flags);
+}
+
+// Adds an alternative matching lead surrogates that do NOT form a surrogate
+// pair, i.e. are not immediately followed by a trail surrogate. No-op when
+// |splitter| holds no lead-surrogate ranges.
+void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
+                           RegExpNode* on_success,
+                           UnicodeRangeSplitter* splitter) {
+  JSRegExp::Flags default_flags = JSRegExp::Flags();
+  ZoneList<CharacterRange>* lead_surrogates =
+      ToCanonicalZoneList(splitter->lead_surrogates(), compiler->zone());
+  if (lead_surrogates == nullptr) return;
+  Zone* zone = compiler->zone();
+  // E.g. \ud801 becomes \ud801(?![\udc00-\udfff]).
+  ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
+      zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+
+  RegExpNode* match;
+  if (compiler->read_backward()) {
+    // Reading backward. Assert that reading forward, there is no trail
+    // surrogate, and then backward match the lead surrogate.
+    match = NegativeLookaroundAgainstReadDirectionAndMatch(
+        compiler, trail_surrogates, lead_surrogates, on_success, true,
+        default_flags);
+  } else {
+    // Reading forward. Forward match the lead surrogate and assert that
+    // no trail surrogate follows.
+    match = MatchAndNegativeLookaroundInReadDirection(
+        compiler, lead_surrogates, trail_surrogates, on_success, false,
+        default_flags);
+  }
+  result->AddAlternative(GuardedAlternative(match));
+}
+
+// Adds an alternative matching trail surrogates that do NOT form a surrogate
+// pair, i.e. are not immediately preceded by a lead surrogate. No-op when
+// |splitter| holds no trail-surrogate ranges.
+void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
+                            RegExpNode* on_success,
+                            UnicodeRangeSplitter* splitter) {
+  JSRegExp::Flags default_flags = JSRegExp::Flags();
+  ZoneList<CharacterRange>* trail_surrogates =
+      ToCanonicalZoneList(splitter->trail_surrogates(), compiler->zone());
+  if (trail_surrogates == nullptr) return;
+  Zone* zone = compiler->zone();
+  // E.g. \udc01 becomes (?<![\ud800-\udbff])\udc01
+  ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
+      zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+
+  RegExpNode* match;
+  if (compiler->read_backward()) {
+    // Reading backward. Backward match the trail surrogate and assert that no
+    // lead surrogate precedes it.
+    match = MatchAndNegativeLookaroundInReadDirection(
+        compiler, trail_surrogates, lead_surrogates, on_success, true,
+        default_flags);
+  } else {
+    // Reading forward. Assert that reading backward, there is no lead
+    // surrogate, and then forward match the trail surrogate.
+    match = NegativeLookaroundAgainstReadDirectionAndMatch(
+        compiler, lead_surrogates, trail_surrogates, on_success, false,
+        default_flags);
+  }
+  result->AddAlternative(GuardedAlternative(match));
+}
+
+// Builds a node that consumes exactly one UTF-16 code unit, implementing
+// ES2015 21.2.5.2.3 AdvanceStringIndex. Only used when reading forward.
+RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
+                              RegExpNode* on_success) {
+  DCHECK(!compiler->read_backward());
+  Zone* zone = compiler->zone();
+  JSRegExp::Flags default_flags = JSRegExp::Flags();
+  // Consume any single code unit. Should we land in the middle of a
+  // surrogate pair, nothing can match from that position, so the matcher
+  // simply advances once more and consumes the trail surrogate as well.
+  ZoneList<CharacterRange>* any_code_unit = CharacterRange::List(
+      zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
+  return TextNode::CreateForCharacterRanges(zone, any_code_unit, false,
+                                            on_success, default_flags);
+}
+
+// Extends |ranges| in place with the Unicode case-fold closure of its
+// contents (via ICU's UnicodeSet::closeOver). Requires |ranges| to be
+// canonical on entry; leaves it canonical on return. No-op without
+// V8_INTL_SUPPORT.
+void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges, Zone* zone) {
+#ifdef V8_INTL_SUPPORT
+  DCHECK(CharacterRange::IsCanonical(ranges));
+
+  // Micro-optimization to avoid passing large ranges to UnicodeSet::closeOver.
+  // See also https://crbug.com/v8/6727.
+  // TODO(jgruber): This only covers the special case of the {0,0x10FFFF} range,
+  // which we use frequently internally. But large ranges can also easily be
+  // created by the user. We might want to have a more general caching mechanism
+  // for such ranges.
+  if (ranges->length() == 1 && ranges->at(0).IsEverything(kNonBmpEnd)) return;
+
+  // Use ICU to compute the case fold closure over the ranges.
+  icu::UnicodeSet set;
+  for (int i = 0; i < ranges->length(); i++) {
+    set.add(ranges->at(i).from(), ranges->at(i).to());
+  }
+  ranges->Clear();
+  set.closeOver(USET_CASE_INSENSITIVE);
+  // Full case mappings map single characters to multiple characters.
+  // Those are represented as strings in the set. Remove them so that
+  // we end up with only simple and common case mappings.
+  set.removeAllStrings();
+  for (int i = 0; i < set.getRangeCount(); i++) {
+    ranges->Add(CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
+                zone);
+  }
+  // No errors and everything we collected have been ranges.
+  CharacterRange::Canonicalize(ranges);
+#endif  // V8_INTL_SUPPORT
+}
+
+} // namespace
+
+// Compiles this character class into a matcher node. In unicode mode (and
+// when not restricted to one-byte subjects), the ranges are split into BMP,
+// surrogate-pair, and lone-surrogate pieces, each added as an alternative of
+// a ChoiceNode; otherwise a single TextNode suffices.
+RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
+                                         RegExpNode* on_success) {
+  set_.Canonicalize();
+  Zone* zone = compiler->zone();
+  ZoneList<CharacterRange>* ranges = this->ranges(zone);
+  if (NeedsUnicodeCaseEquivalents(flags_)) {
+    AddUnicodeCaseEquivalents(ranges, zone);
+  }
+  if (IsUnicode(flags_) && !compiler->one_byte() &&
+      !contains_split_surrogate()) {
+    if (is_negated()) {
+      // Compile the complement of the ranges instead.
+      ZoneList<CharacterRange>* negated =
+          new (zone) ZoneList<CharacterRange>(2, zone);
+      CharacterRange::Negate(ranges, negated, zone);
+      ranges = negated;
+    }
+    if (ranges->length() == 0) {
+      // Empty (non-negated) set: compile to a node that can never match.
+      JSRegExp::Flags default_flags;
+      RegExpCharacterClass* fail =
+          new (zone) RegExpCharacterClass(zone, ranges, default_flags);
+      return new (zone) TextNode(fail, compiler->read_backward(), on_success);
+    }
+    if (standard_type() == '*') {
+      // Matches everything: equivalent to advancing by one code point.
+      return UnanchoredAdvance(compiler, on_success);
+    } else {
+      ChoiceNode* result = new (zone) ChoiceNode(2, zone);
+      UnicodeRangeSplitter splitter(ranges);
+      AddBmpCharacters(compiler, result, on_success, &splitter);
+      AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter);
+      AddLoneLeadSurrogates(compiler, result, on_success, &splitter);
+      AddLoneTrailSurrogates(compiler, result, on_success, &splitter);
+      return result;
+    }
+  } else {
+    return new (zone) TextNode(this, compiler->read_backward(), on_success);
+  }
+}
+
+// Three-way comparison of the first characters of two atom alternatives.
+// Used to stable-sort atom runs so common prefixes become adjacent.
+int CompareFirstChar(RegExpTree* const* a, RegExpTree* const* b) {
+  uc16 first_a = (*a)->AsAtom()->data().at(0);
+  uc16 first_b = (*b)->AsAtom()->data().at(0);
+  if (first_a == first_b) return 0;
+  return first_a < first_b ? -1 : 1;
+}
+
+#ifdef V8_INTL_SUPPORT
+
+// Case-insensitive comparison of the first characters of two atoms, using
+// ICU case folding. (NOTE(review): the identifier's misspelling of
+// "Insensitive" is preserved because callers reference this exact name.)
+int CompareFirstCharCaseInsensitve(RegExpTree* const* a, RegExpTree* const* b) {
+  RegExpAtom* atom1 = (*a)->AsAtom();
+  RegExpAtom* atom2 = (*b)->AsAtom();
+  icu::UnicodeString character1(atom1->data().at(0));
+  return character1.caseCompare(atom2->data().at(0), U_FOLD_CASE_DEFAULT);
+}
+
+#else
+
+// Returns the ECMA-262 canonical form of |c| (the character itself when the
+// canonicalization table yields no mapping).
+static unibrow::uchar Canonical(
+    unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+    unibrow::uchar c) {
+  unibrow::uchar chars[unibrow::Ecma262Canonicalize::kMaxWidth];
+  int length = canonicalize->get(c, '\0', chars);
+  DCHECK_LE(length, 1);
+  unibrow::uchar canonical = c;
+  if (length == 1) canonical = chars[0];
+  return canonical;
+}
+
+// Case-independent three-way comparison of the first characters of two
+// atoms, canonicalizing via the ECMA-262 table when ICU is unavailable.
+int CompareFirstCharCaseIndependent(
+    unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize,
+    RegExpTree* const* a, RegExpTree* const* b) {
+  RegExpAtom* atom1 = (*a)->AsAtom();
+  RegExpAtom* atom2 = (*b)->AsAtom();
+  unibrow::uchar character1 = atom1->data().at(0);
+  unibrow::uchar character2 = atom2->data().at(0);
+  if (character1 == character2) return 0;
+  // Fast path: code points below 'a' are assumed to canonicalize to
+  // themselves — TODO(review) confirm against the Ecma262Canonicalize table.
+  if (character1 >= 'a' || character2 >= 'a') {
+    character1 = Canonical(canonicalize, character1);
+    character2 = Canonical(canonicalize, character2);
+  }
+  return static_cast<int>(character1) - static_cast<int>(character2);
+}
+#endif  // V8_INTL_SUPPORT
+
+// We can stable sort runs of atoms, since the order does not matter if they
+// start with different characters.
+// Returns true if any consecutive atoms were found (i.e. any run of length
+// greater than one), which signals that prefix factoring may be worthwhile.
+bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
+  ZoneList<RegExpTree*>* alternatives = this->alternatives();
+  int length = alternatives->length();
+  bool found_consecutive_atoms = false;
+  for (int i = 0; i < length; i++) {
+    // Skip non-atom alternatives to find the start of the next atom run.
+    while (i < length) {
+      RegExpTree* alternative = alternatives->at(i);
+      if (alternative->IsAtom()) break;
+      i++;
+    }
+    // i is length or it is the index of an atom.
+    if (i == length) break;
+    int first_atom = i;
+    JSRegExp::Flags flags = alternatives->at(i)->AsAtom()->flags();
+    i++;
+    // Extend the run while atoms carry identical flags.
+    while (i < length) {
+      RegExpTree* alternative = alternatives->at(i);
+      if (!alternative->IsAtom()) break;
+      if (alternative->AsAtom()->flags() != flags) break;
+      i++;
+    }
+    // Sort atoms to get ones with common prefixes together.
+    // This step is more tricky if we are in a case-independent regexp,
+    // because it would change /is|I/ to /I|is/, and order matters when
+    // the regexp parts don't match only disjoint starting points. To fix
+    // this we have a version of CompareFirstChar that uses case-
+    // independent character classes for comparison.
+    DCHECK_LT(first_atom, alternatives->length());
+    DCHECK_LE(i, alternatives->length());
+    DCHECK_LE(first_atom, i);
+    if (IgnoreCase(flags)) {
+#ifdef V8_INTL_SUPPORT
+      alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
+                               i - first_atom);
+#else
+      unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+          compiler->isolate()->regexp_macro_assembler_canonicalize();
+      auto compare_closure = [canonicalize](RegExpTree* const* a,
+                                            RegExpTree* const* b) {
+        return CompareFirstCharCaseIndependent(canonicalize, a, b);
+      };
+      alternatives->StableSort(compare_closure, first_atom, i - first_atom);
+#endif  // V8_INTL_SUPPORT
+    } else {
+      alternatives->StableSort(CompareFirstChar, first_atom, i - first_atom);
+    }
+    if (i - first_atom > 1) found_consecutive_atoms = true;
+  }
+  return found_consecutive_atoms;
+}
+
+// Optimizes ab|ac|az to a(?:b|c|z).
+void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
+  Zone* zone = compiler->zone();
+  ZoneList<RegExpTree*>* alternatives = this->alternatives();
+  int length = alternatives->length();
+
+  // Alternatives are rewritten in place; |write_posn| tracks the end of the
+  // compacted prefix of the list.
+  int write_posn = 0;
+  int i = 0;
+  while (i < length) {
+    RegExpTree* alternative = alternatives->at(i);
+    if (!alternative->IsAtom()) {
+      alternatives->at(write_posn++) = alternatives->at(i);
+      i++;
+      continue;
+    }
+    RegExpAtom* const atom = alternative->AsAtom();
+    JSRegExp::Flags flags = atom->flags();
+#ifdef V8_INTL_SUPPORT
+    icu::UnicodeString common_prefix(atom->data().at(0));
+#else
+    unibrow::uchar common_prefix = atom->data().at(0);
+#endif  // V8_INTL_SUPPORT
+    int first_with_prefix = i;
+    int prefix_length = atom->length();
+    i++;
+    // Extend the run of atoms whose first character matches |common_prefix|
+    // (modulo case folding when the 'i' flag is set) under identical flags.
+    while (i < length) {
+      alternative = alternatives->at(i);
+      if (!alternative->IsAtom()) break;
+      RegExpAtom* const atom = alternative->AsAtom();
+      if (atom->flags() != flags) break;
+#ifdef V8_INTL_SUPPORT
+      icu::UnicodeString new_prefix(atom->data().at(0));
+      if (new_prefix != common_prefix) {
+        if (!IgnoreCase(flags)) break;
+        if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
+          break;
+      }
+#else
+      unibrow::uchar new_prefix = atom->data().at(0);
+      if (new_prefix != common_prefix) {
+        if (!IgnoreCase(flags)) break;
+        unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+            compiler->isolate()->regexp_macro_assembler_canonicalize();
+        new_prefix = Canonical(canonicalize, new_prefix);
+        common_prefix = Canonical(canonicalize, common_prefix);
+        if (new_prefix != common_prefix) break;
+      }
+#endif  // V8_INTL_SUPPORT
+      prefix_length = Min(prefix_length, atom->length());
+      i++;
+    }
+    if (i > first_with_prefix + 2) {
+      // Found worthwhile run of alternatives with common prefix of at least one
+      // character. The sorting function above did not sort on more than one
+      // character for reasons of correctness, but there may still be a longer
+      // common prefix if the terms were similar or presorted in the input.
+      // Find out how long the common prefix is.
+      int run_length = i - first_with_prefix;
+      RegExpAtom* const atom = alternatives->at(first_with_prefix)->AsAtom();
+      for (int j = 1; j < run_length && prefix_length > 1; j++) {
+        RegExpAtom* old_atom =
+            alternatives->at(j + first_with_prefix)->AsAtom();
+        for (int k = 1; k < prefix_length; k++) {
+          if (atom->data().at(k) != old_atom->data().at(k)) {
+            prefix_length = k;
+            break;
+          }
+        }
+      }
+      // Rewrite the run as prefix + disjunction-of-suffixes.
+      RegExpAtom* prefix = new (zone)
+          RegExpAtom(atom->data().SubVector(0, prefix_length), flags);
+      ZoneList<RegExpTree*>* pair = new (zone) ZoneList<RegExpTree*>(2, zone);
+      pair->Add(prefix, zone);
+      ZoneList<RegExpTree*>* suffixes =
+          new (zone) ZoneList<RegExpTree*>(run_length, zone);
+      for (int j = 0; j < run_length; j++) {
+        RegExpAtom* old_atom =
+            alternatives->at(j + first_with_prefix)->AsAtom();
+        int len = old_atom->length();
+        if (len == prefix_length) {
+          suffixes->Add(new (zone) RegExpEmpty(), zone);
+        } else {
+          RegExpTree* suffix = new (zone) RegExpAtom(
+              old_atom->data().SubVector(prefix_length, old_atom->length()),
+              flags);
+          suffixes->Add(suffix, zone);
+        }
+      }
+      pair->Add(new (zone) RegExpDisjunction(suffixes), zone);
+      alternatives->at(write_posn++) = new (zone) RegExpAlternative(pair);
+    } else {
+      // Just copy any non-worthwhile alternatives.
+      for (int j = first_with_prefix; j < i; j++) {
+        alternatives->at(write_posn++) = alternatives->at(j);
+      }
+    }
+  }
+  alternatives->Rewind(write_posn);  // Trim end of array.
+}
+
+// Optimizes b|c|z to [bcz].
+void RegExpDisjunction::FixSingleCharacterDisjunctions(
+    RegExpCompiler* compiler) {
+  Zone* zone = compiler->zone();
+  ZoneList<RegExpTree*>* alternatives = this->alternatives();
+  int length = alternatives->length();
+
+  // Alternatives are rewritten in place; |write_posn| tracks the end of the
+  // compacted prefix of the list.
+  int write_posn = 0;
+  int i = 0;
+  while (i < length) {
+    RegExpTree* alternative = alternatives->at(i);
+    if (!alternative->IsAtom()) {
+      alternatives->at(write_posn++) = alternatives->at(i);
+      i++;
+      continue;
+    }
+    RegExpAtom* const atom = alternative->AsAtom();
+    if (atom->length() != 1) {
+      alternatives->at(write_posn++) = alternatives->at(i);
+      i++;
+      continue;
+    }
+    JSRegExp::Flags flags = atom->flags();
+    DCHECK_IMPLIES(IsUnicode(flags),
+                   !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
+    bool contains_trail_surrogate =
+        unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
+    int first_in_run = i;
+    i++;
+    // Find a run of single-character atom alternatives that have identical
+    // flags (case independence and unicode-ness).
+    while (i < length) {
+      alternative = alternatives->at(i);
+      if (!alternative->IsAtom()) break;
+      RegExpAtom* const atom = alternative->AsAtom();
+      if (atom->length() != 1) break;
+      if (atom->flags() != flags) break;
+      DCHECK_IMPLIES(IsUnicode(flags),
+                     !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
+      contains_trail_surrogate |=
+          unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
+      i++;
+    }
+    if (i > first_in_run + 1) {
+      // Found non-trivial run of single-character alternatives.
+      int run_length = i - first_in_run;
+      ZoneList<CharacterRange>* ranges =
+          new (zone) ZoneList<CharacterRange>(2, zone);
+      for (int j = 0; j < run_length; j++) {
+        RegExpAtom* old_atom = alternatives->at(j + first_in_run)->AsAtom();
+        DCHECK_EQ(old_atom->length(), 1);
+        ranges->Add(CharacterRange::Singleton(old_atom->data().at(0)), zone);
+      }
+      // Mark the class when it contains a lone trail surrogate, so that
+      // RegExpCharacterClass::ToNode takes the non-splitting path (see
+      // contains_split_surrogate()).
+      RegExpCharacterClass::CharacterClassFlags character_class_flags;
+      if (IsUnicode(flags) && contains_trail_surrogate) {
+        character_class_flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
+      }
+      alternatives->at(write_posn++) = new (zone)
+          RegExpCharacterClass(zone, ranges, flags, character_class_flags);
+    } else {
+      // Just copy any trivial alternatives.
+      for (int j = first_in_run; j < i; j++) {
+        alternatives->at(write_posn++) = alternatives->at(j);
+      }
+    }
+  }
+  alternatives->Rewind(write_posn);  // Trim end of array.
+}
+
+// Compiles a disjunction into a ChoiceNode over its alternatives. When there
+// are more than two alternatives, the sorting / prefix-factoring /
+// character-class optimizations above are attempted first.
+RegExpNode* RegExpDisjunction::ToNode(RegExpCompiler* compiler,
+                                      RegExpNode* on_success) {
+  ZoneList<RegExpTree*>* alternatives = this->alternatives();
+
+  if (alternatives->length() > 2) {
+    bool found_consecutive_atoms = SortConsecutiveAtoms(compiler);
+    if (found_consecutive_atoms) RationalizeConsecutiveAtoms(compiler);
+    FixSingleCharacterDisjunctions(compiler);
+    // The optimizations may have collapsed everything into one alternative.
+    if (alternatives->length() == 1) {
+      return alternatives->at(0)->ToNode(compiler, on_success);
+    }
+  }
+
+  int length = alternatives->length();
+
+  ChoiceNode* result =
+      new (compiler->zone()) ChoiceNode(length, compiler->zone());
+  for (int i = 0; i < length; i++) {
+    GuardedAlternative alternative(
+        alternatives->at(i)->ToNode(compiler, on_success));
+    result->AddAlternative(alternative);
+  }
+  return result;
+}
+
+// Delegates to the static ToNode overload with this quantifier's bounds,
+// greediness, and body.
+RegExpNode* RegExpQuantifier::ToNode(RegExpCompiler* compiler,
+                                     RegExpNode* on_success) {
+  return ToNode(min(), max(), is_greedy(), body(), compiler, on_success);
+}
+
+namespace {
+// Desugar \b to (?<=\w)(?=\W)|(?<=\W)(?=\w) and
+//         \B to (?<=\w)(?=\w)|(?<=\W)(?=\W).
+// Only used when unicode case equivalents are required, since then \w cannot
+// be handled by the plain boundary assertion nodes.
+RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
+                                          RegExpNode* on_success,
+                                          RegExpAssertion::AssertionType type,
+                                          JSRegExp::Flags flags) {
+  DCHECK(NeedsUnicodeCaseEquivalents(flags));
+  Zone* zone = compiler->zone();
+  ZoneList<CharacterRange>* word_range =
+      new (zone) ZoneList<CharacterRange>(2, zone);
+  CharacterRange::AddClassEscape('w', word_range, true, zone);
+  int stack_register = compiler->UnicodeLookaroundStackRegister();
+  int position_register = compiler->UnicodeLookaroundPositionRegister();
+  ChoiceNode* result = new (zone) ChoiceNode(2, zone);
+  // Add two choices. The (non-)boundary could start with a word or
+  // a non-word-character.
+  for (int i = 0; i < 2; i++) {
+    bool lookbehind_for_word = i == 0;
+    // For \b the two sides must differ in word-ness; for \B they must agree.
+    bool lookahead_for_word =
+        (type == RegExpAssertion::BOUNDARY) ^ lookbehind_for_word;
+    // Look to the left.
+    RegExpLookaround::Builder lookbehind(lookbehind_for_word, on_success,
+                                         stack_register, position_register);
+    RegExpNode* backward = TextNode::CreateForCharacterRanges(
+        zone, word_range, true, lookbehind.on_match_success(), flags);
+    // Look to the right.
+    RegExpLookaround::Builder lookahead(lookahead_for_word,
+                                        lookbehind.ForMatch(backward),
+                                        stack_register, position_register);
+    RegExpNode* forward = TextNode::CreateForCharacterRanges(
+        zone, word_range, false, lookahead.on_match_success(), flags);
+    result->AddAlternative(GuardedAlternative(lookahead.ForMatch(forward)));
+  }
+  return result;
+}
+} // anonymous namespace
+
+// Compiles a zero-width assertion (^, $, \b, \B, start/end of input) into
+// the corresponding AssertionNode, or a lookaround-based desugaring when
+// unicode case equivalents are needed for \b/\B.
+RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
+                                    RegExpNode* on_success) {
+  // (The unused local `NodeInfo info;` has been removed.)
+  Zone* zone = compiler->zone();
+
+  switch (assertion_type()) {
+    case START_OF_LINE:
+      return AssertionNode::AfterNewline(on_success);
+    case START_OF_INPUT:
+      return AssertionNode::AtStart(on_success);
+    case BOUNDARY:
+      return NeedsUnicodeCaseEquivalents(flags_)
+                 ? BoundaryAssertionAsLookaround(compiler, on_success, BOUNDARY,
+                                                flags_)
+                 : AssertionNode::AtBoundary(on_success);
+    case NON_BOUNDARY:
+      return NeedsUnicodeCaseEquivalents(flags_)
+                 ? BoundaryAssertionAsLookaround(compiler, on_success,
+                                                NON_BOUNDARY, flags_)
+                 : AssertionNode::AtNonBoundary(on_success);
+    case END_OF_INPUT:
+      return AssertionNode::AtEnd(on_success);
+    case END_OF_LINE: {
+      // Compile $ in multiline regexps as an alternation with a positive
+      // lookahead in one side and an end-of-input on the other side.
+      // We need two registers for the lookahead.
+      int stack_pointer_register = compiler->AllocateRegister();
+      int position_register = compiler->AllocateRegister();
+      // The ChoiceNode to distinguish between a newline and end-of-input.
+      ChoiceNode* result = new (zone) ChoiceNode(2, zone);
+      // Create a newline atom.
+      ZoneList<CharacterRange>* newline_ranges =
+          new (zone) ZoneList<CharacterRange>(3, zone);
+      CharacterRange::AddClassEscape('n', newline_ranges, false, zone);
+      JSRegExp::Flags default_flags = JSRegExp::Flags();
+      RegExpCharacterClass* newline_atom =
+          new (zone) RegExpCharacterClass('n', default_flags);
+      TextNode* newline_matcher =
+          new (zone) TextNode(newline_atom, false,
+                              ActionNode::PositiveSubmatchSuccess(
+                                  stack_pointer_register, position_register,
+                                  0,   // No captures inside.
+                                  -1,  // Ignored if no captures.
+                                  on_success));
+      // Create an end-of-input matcher.
+      RegExpNode* end_of_line = ActionNode::BeginSubmatch(
+          stack_pointer_register, position_register, newline_matcher);
+      // Add the two alternatives to the ChoiceNode.
+      GuardedAlternative eol_alternative(end_of_line);
+      result->AddAlternative(eol_alternative);
+      GuardedAlternative end_alternative(AssertionNode::AtEnd(on_success));
+      result->AddAlternative(end_alternative);
+      return result;
+    }
+    default:
+      UNREACHABLE();
+  }
+  return on_success;
+}
+
+// Compiles a backreference to capture group |index()| into a
+// BackReferenceNode reading the group's start/end registers.
+RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
+                                        RegExpNode* on_success) {
+  return new (compiler->zone())
+      BackReferenceNode(RegExpCapture::StartRegister(index()),
+                        RegExpCapture::EndRegister(index()), flags_,
+                        compiler->read_backward(), on_success);
+}
+
+// An empty term matches the empty string: compile to the continuation itself.
+RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
+                                RegExpNode* on_success) {
+  return on_success;
+}
+
+// Sets up the success continuation for a lookaround body: a positive
+// lookaround uses PositiveSubmatchSuccess (restores position/registers and
+// proceeds to |on_success|), a negative one uses NegativeSubmatchSuccess.
+RegExpLookaround::Builder::Builder(bool is_positive, RegExpNode* on_success,
+                                   int stack_pointer_register,
+                                   int position_register,
+                                   int capture_register_count,
+                                   int capture_register_start)
+    : is_positive_(is_positive),
+      on_success_(on_success),
+      stack_pointer_register_(stack_pointer_register),
+      position_register_(position_register) {
+  if (is_positive_) {
+    on_match_success_ = ActionNode::PositiveSubmatchSuccess(
+        stack_pointer_register, position_register, capture_register_count,
+        capture_register_start, on_success_);
+  } else {
+    Zone* zone = on_success_->zone();
+    on_match_success_ = new (zone) NegativeSubmatchSuccess(
+        stack_pointer_register, position_register, capture_register_count,
+        capture_register_start, zone);
+  }
+}
+
+// Wraps the compiled lookaround body |match| in the begin-submatch action
+// (and, for negative lookarounds, the special choice node described below).
+RegExpNode* RegExpLookaround::Builder::ForMatch(RegExpNode* match) {
+  if (is_positive_) {
+    return ActionNode::BeginSubmatch(stack_pointer_register_,
+                                     position_register_, match);
+  } else {
+    Zone* zone = on_success_->zone();
+    // We use a ChoiceNode to represent the negative lookaround. The first
+    // alternative is the negative match. On success, the end node backtracks.
+    // On failure, the second alternative is tried and leads to success.
+    // NegativeLookaheadChoiceNode is a special ChoiceNode that ignores the
+    // first exit when calculating quick checks.
+    ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
+        GuardedAlternative(match), GuardedAlternative(on_success_), zone);
+    return ActionNode::BeginSubmatch(stack_pointer_register_,
+                                     position_register_, choice_node);
+  }
+}
+
+// Compiles a (?=...) / (?!...) / (?<=...) / (?<!...) group. The body of a
+// lookbehind is compiled with the read direction reversed.
+RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
+                                     RegExpNode* on_success) {
+  int stack_pointer_register = compiler->AllocateRegister();
+  int position_register = compiler->AllocateRegister();
+
+  // Capture registers come in start/end pairs; the first two registers are
+  // reserved ahead of the captures.
+  const int registers_per_capture = 2;
+  const int register_of_first_capture = 2;
+  const int register_count = capture_count_ * registers_per_capture;
+  const int register_start =
+      register_of_first_capture + capture_from_ * registers_per_capture;
+
+  // Flip the read direction for lookbehinds, restoring it afterwards.
+  const bool was_reading_backward = compiler->read_backward();
+  compiler->set_read_backward(type() == LOOKBEHIND);
+  Builder builder(is_positive(), on_success, stack_pointer_register,
+                  position_register, register_count, register_start);
+  RegExpNode* body_node = body_->ToNode(compiler, builder.on_match_success());
+  RegExpNode* result = builder.ForMatch(body_node);
+  compiler->set_read_backward(was_reading_backward);
+  return result;
+}
+
+// Compiles this capture group by delegating to the static overload below.
+RegExpNode* RegExpCapture::ToNode(RegExpCompiler* compiler,
+                                  RegExpNode* on_success) {
+  return ToNode(body(), index(), compiler, on_success);
+}
+
+// Wraps |body| in StorePosition actions recording capture group |index|'s
+// start and end registers. When reading backward the registers are swapped,
+// because the body is traversed in reverse.
+RegExpNode* RegExpCapture::ToNode(RegExpTree* body, int index,
+                                  RegExpCompiler* compiler,
+                                  RegExpNode* on_success) {
+  DCHECK_NOT_NULL(body);
+  int start_reg = RegExpCapture::StartRegister(index);
+  int end_reg = RegExpCapture::EndRegister(index);
+  if (compiler->read_backward()) std::swap(start_reg, end_reg);
+  RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
+  RegExpNode* body_node = body->ToNode(compiler, store_end);
+  return ActionNode::StorePosition(start_reg, true, body_node);
+}
+
+namespace {
+
+// Folds and simplifies runs of consecutive zero-width assertions inside a
+// term list: duplicates are replaced with RegExpEmpty, and combinations that
+// can never hold (e.g. \b\B) collapse the whole run into an always-failing
+// node. See MaybeRewrite for the entry point.
+class AssertionSequenceRewriter final {
+ public:
+  // TODO(jgruber): Consider moving this to a separate AST tree rewriter pass
+  // instead of sprinkling rewrites into the AST->Node conversion process.
+  static void MaybeRewrite(ZoneList<RegExpTree*>* terms, Zone* zone) {
+    AssertionSequenceRewriter rewriter(terms, zone);
+
+    static constexpr int kNoIndex = -1;
+    int from = kNoIndex;
+
+    for (int i = 0; i < terms->length(); i++) {
+      RegExpTree* t = terms->at(i);
+      if (from == kNoIndex && t->IsAssertion()) {
+        from = i;  // Start a sequence.
+      } else if (from != kNoIndex && !t->IsAssertion()) {
+        // Terminate and process the sequence.
+        if (i - from > 1) rewriter.Rewrite(from, i);
+        from = kNoIndex;
+      }
+    }
+
+    // Handle a sequence that runs to the end of the term list.
+    if (from != kNoIndex && terms->length() - from > 1) {
+      rewriter.Rewrite(from, terms->length());
+    }
+  }
+
+  // All assertions are zero width. A consecutive sequence of assertions is
+  // order-independent. There's two ways we can optimize here:
+  // 1. fold all identical assertions.
+  // 2. if any assertion combinations are known to fail (e.g. \b\B), the entire
+  //    sequence fails.
+  void Rewrite(int from, int to) {
+    DCHECK_GT(to, from + 1);
+
+    // Bitfield of all seen assertions.
+    uint32_t seen_assertions = 0;
+    STATIC_ASSERT(RegExpAssertion::LAST_TYPE < kUInt32Size * kBitsPerByte);
+
+    // Flags must match for folding. Once a mismatch is seen, folding is
+    // conservatively disabled for the rest of the sequence.
+    JSRegExp::Flags flags = terms_->at(from)->AsAssertion()->flags();
+    bool saw_mismatched_flags = false;
+
+    for (int i = from; i < to; i++) {
+      RegExpAssertion* t = terms_->at(i)->AsAssertion();
+      if (t->flags() != flags) saw_mismatched_flags = true;
+      const uint32_t bit = 1 << t->assertion_type();
+
+      if ((seen_assertions & bit) && !saw_mismatched_flags) {
+        // Fold duplicates.
+        terms_->Set(i, new (zone_) RegExpEmpty());
+      }
+
+      seen_assertions |= bit;
+    }
+
+    // Collapse failures.
+    const uint32_t always_fails_mask =
+        1 << RegExpAssertion::BOUNDARY | 1 << RegExpAssertion::NON_BOUNDARY;
+    if ((seen_assertions & always_fails_mask) == always_fails_mask) {
+      ReplaceSequenceWithFailure(from, to);
+    }
+  }
+
+  void ReplaceSequenceWithFailure(int from, int to) {
+    // Replace the entire sequence with a single node that always fails.
+    // TODO(jgruber): Consider adding an explicit Fail kind. Until then, the
+    // negated '*' (everything) range serves the purpose.
+    ZoneList<CharacterRange>* ranges =
+        new (zone_) ZoneList<CharacterRange>(0, zone_);
+    RegExpCharacterClass* cc =
+        new (zone_) RegExpCharacterClass(zone_, ranges, JSRegExp::Flags());
+    terms_->Set(from, cc);
+
+    // Zero out the rest.
+    RegExpEmpty* empty = new (zone_) RegExpEmpty();
+    for (int i = from + 1; i < to; i++) terms_->Set(i, empty);
+  }
+
+ private:
+  AssertionSequenceRewriter(ZoneList<RegExpTree*>* terms, Zone* zone)
+      : zone_(zone), terms_(terms) {}
+
+  Zone* zone_;
+  ZoneList<RegExpTree*>* terms_;
+};
+
+} // namespace
+
+// Compiles a sequence of terms by chaining each term's node to the node of
+// the term that executes after it. Since nodes are built continuation-first,
+// the chain is constructed in reverse execution order: back-to-front when
+// reading forward, front-to-back when reading backward.
+RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
+                                      RegExpNode* on_success) {
+  ZoneList<RegExpTree*>* children = nodes();
+
+  AssertionSequenceRewriter::MaybeRewrite(children, compiler->zone());
+
+  const int count = children->length();
+  const bool backward = compiler->read_backward();
+  RegExpNode* current = on_success;
+  for (int step = 0; step < count; step++) {
+    const int index = backward ? step : count - 1 - step;
+    current = children->at(index)->ToNode(compiler, current);
+  }
+  return current;
+}
+
+// Appends the [start, limit) pairs from the class table |elmv| to |ranges|.
+// The table is terminated by kRangeEndMarker, which is not part of the data;
+// CharacterRange is inclusive, hence the "- 1" on the upper bound.
+static void AddClass(const int* elmv, int elmc,
+                     ZoneList<CharacterRange>* ranges, Zone* zone) {
+  const int data_length = elmc - 1;  // Drop the terminator.
+  DCHECK_EQ(kRangeEndMarker, elmv[data_length]);
+  for (int i = 0; i < data_length; i += 2) {
+    DCHECK(elmv[i] < elmv[i + 1]);
+    ranges->Add(CharacterRange::Range(elmv[i], elmv[i + 1] - 1), zone);
+  }
+}
+
+// Appends the complement (over [0, String::kMaxCodePoint]) of the class
+// table |elmv| to |ranges|. The table must neither start at 0 nor end at the
+// maximum code point, and is terminated by kRangeEndMarker.
+static void AddClassNegated(const int* elmv, int elmc,
+                            ZoneList<CharacterRange>* ranges, Zone* zone) {
+  elmc--;
+  DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
+  DCHECK_NE(0x0000, elmv[0]);
+  DCHECK_NE(String::kMaxCodePoint, elmv[elmc - 1]);
+  uc16 last = 0x0000;
+  // Emit the gap before each table range, then the final tail range.
+  for (int i = 0; i < elmc; i += 2) {
+    DCHECK(last <= elmv[i] - 1);
+    DCHECK(elmv[i] < elmv[i + 1]);
+    ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
+    last = elmv[i + 1];
+  }
+  ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
+}
+
+// Adds the ranges for class escape |type| to |ranges|. For \w and \W with
+// |add_unicode_case_equivalents| set, the unicode case-fold closure is
+// computed before any negation; otherwise this delegates to the plain
+// table-driven overload below.
+void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
+                                    bool add_unicode_case_equivalents,
+                                    Zone* zone) {
+  if (add_unicode_case_equivalents && (type == 'w' || type == 'W')) {
+    // See #sec-runtime-semantics-wordcharacters-abstract-operation
+    // In case of unicode and ignore_case, we need to create the closure over
+    // case equivalent characters before negating.
+    ZoneList<CharacterRange>* new_ranges =
+        new (zone) ZoneList<CharacterRange>(2, zone);
+    AddClass(kWordRanges, kWordRangeCount, new_ranges, zone);
+    AddUnicodeCaseEquivalents(new_ranges, zone);
+    if (type == 'W') {
+      ZoneList<CharacterRange>* negated =
+          new (zone) ZoneList<CharacterRange>(2, zone);
+      CharacterRange::Negate(new_ranges, negated, zone);
+      new_ranges = negated;
+    }
+    ranges->AddAll(*new_ranges, zone);
+    return;
+  }
+  AddClassEscape(type, ranges, zone);
+}
+
+// Table-driven expansion of a class escape character ('s', 'S', 'w', 'W',
+// 'd', 'D', '.', '*', 'n') into character ranges appended to |ranges|.
+void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
+                                    Zone* zone) {
+  switch (type) {
+    case 's':
+      AddClass(kSpaceRanges, kSpaceRangeCount, ranges, zone);
+      break;
+    case 'S':
+      AddClassNegated(kSpaceRanges, kSpaceRangeCount, ranges, zone);
+      break;
+    case 'w':
+      AddClass(kWordRanges, kWordRangeCount, ranges, zone);
+      break;
+    case 'W':
+      AddClassNegated(kWordRanges, kWordRangeCount, ranges, zone);
+      break;
+    case 'd':
+      AddClass(kDigitRanges, kDigitRangeCount, ranges, zone);
+      break;
+    case 'D':
+      AddClassNegated(kDigitRanges, kDigitRangeCount, ranges, zone);
+      break;
+    case '.':
+      AddClassNegated(kLineTerminatorRanges, kLineTerminatorRangeCount, ranges,
+                      zone);
+      break;
+    // This is not a character range as defined by the spec but a
+    // convenient shorthand for a character class that matches any
+    // character.
+    case '*':
+      ranges->Add(CharacterRange::Everything(), zone);
+      break;
+    // This is the set of characters matched by the $ and ^ symbols
+    // in multiline mode.
+    case 'n':
+      AddClass(kLineTerminatorRanges, kLineTerminatorRangeCount, ranges, zone);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+// Returns the \w boundary table (kWordRanges) without its trailing
+// kRangeEndMarker.
+Vector<const int> CharacterRange::GetWordBounds() {
+  return Vector<const int>(kWordRanges, kWordRangeCount - 1);
+}
+
+#ifdef V8_INTL_SUPPORT
+// Holder structs so the ICU sets below can be built once and shared
+// process-wide via base::LazyInstance.
+struct IgnoreSet {
+  IgnoreSet() : set(BuildIgnoreSet()) {}
+  // Characters excluded from case closure (removed from the candidate set
+  // before closeOver in AddCaseEquivalents below).
+  const icu::UnicodeSet set;
+};
+
+struct SpecialAddSet {
+  SpecialAddSet() : set(BuildSpecialAddSet()) {}
+  // Characters that need the per-character expansion in AddCaseEquivalents
+  // below instead of a plain closeOver.
+  const icu::UnicodeSet set;
+};
+
+// Builds the frozen set of the 52 ASCII letters [a-zA-Z].
+icu::UnicodeSet BuildAsciiAToZSet() {
+  icu::UnicodeSet set('a', 'z');
+  set.add('A', 'Z');
+  set.freeze();  // Frozen sets are immutable and cheap to query.
+  return set;
+}
+
+struct AsciiAToZSet {
+  AsciiAToZSet() : set(BuildAsciiAToZSet()) {}
+  // The 52 ASCII letters [a-zA-Z].
+  const icu::UnicodeSet set;
+};
+
+static base::LazyInstance<IgnoreSet>::type ignore_set =
+    LAZY_INSTANCE_INITIALIZER;
+
+static base::LazyInstance<SpecialAddSet>::type special_add_set =
+    LAZY_INSTANCE_INITIALIZER;
+
+static base::LazyInstance<AsciiAToZSet>::type ascii_a_to_z_set =
+    LAZY_INSTANCE_INITIALIZER;
+#endif  // V8_INTL_SUPPORT
+
+// static
+// Extends |ranges| with all characters that are case equivalent (under
+// ECMAScript "i" semantics) to some character already in |ranges|. With INTL
+// support the closure comes from ICU's closeOver(USET_CASE_INSENSITIVE),
+// corrected for ASCII letters, a "special add" set and an ignore set;
+// without INTL support, unibrow's uncanonicalization tables are used. When
+// |is_one_byte| is true, ranges above Latin-1 with no Latin-1 equivalents
+// are skipped.
+void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
+                                        ZoneList<CharacterRange>* ranges,
+                                        bool is_one_byte) {
+  CharacterRange::Canonicalize(ranges);
+  int range_count = ranges->length();
+#ifdef V8_INTL_SUPPORT
+  icu::UnicodeSet others;
+  for (int i = 0; i < range_count; i++) {
+    CharacterRange range = ranges->at(i);
+    uc32 from = range.from();
+    if (from > String::kMaxUtf16CodeUnit) continue;
+    uc32 to = Min(range.to(), String::kMaxUtf16CodeUnit);
+    // Nothing to be done for surrogates.
+    if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
+    if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
+      if (from > String::kMaxOneByteCharCode) continue;
+      if (to > String::kMaxOneByteCharCode) to = String::kMaxOneByteCharCode;
+    }
+    others.add(from, to);
+  }
+
+  // Set of characters already added to ranges that do not need to be added
+  // again.
+  icu::UnicodeSet already_added(others);
+
+  // Set of characters in ranges that are in the 52 ASCII characters [a-zA-Z].
+  icu::UnicodeSet in_ascii_a_to_z(others);
+  in_ascii_a_to_z.retainAll(ascii_a_to_z_set.Pointer()->set);
+
+  // Remove all chars in [a-zA-Z] from others.
+  others.removeAll(in_ascii_a_to_z);
+
+  // Set of characters in ranges that are overlapping with special add set.
+  icu::UnicodeSet in_special_add(others);
+  in_special_add.retainAll(special_add_set.Pointer()->set);
+
+  others.removeAll(in_special_add);
+
+  // Ignore all chars in ignore set.
+  others.removeAll(ignore_set.Pointer()->set);
+
+  // For most of the chars in ranges that are still in others, find the case
+  // equivalent set by calling closeOver(USET_CASE_INSENSITIVE).
+  others.closeOver(USET_CASE_INSENSITIVE);
+
+  // Because closeOver(USET_CASE_INSENSITIVE) may add ASCII [a-zA-Z] to others,
+  // but ECMA262 "i" mode won't consider that, remove them from others.
+  // Ex: U+017F add 'S' and 's' to others.
+  others.removeAll(ascii_a_to_z_set.Pointer()->set);
+
+  // Special handling for in_ascii_a_to_z: add the opposite-case letter for
+  // each retained range. The 0x20 bit distinguishes ASCII case: it is set
+  // for lowercase letters ('a' = 0x61) and clear for uppercase ('A' = 0x41).
+  for (int32_t i = 0; i < in_ascii_a_to_z.getRangeCount(); i++) {
+    UChar32 start = in_ascii_a_to_z.getRangeStart(i);
+    UChar32 end = in_ascii_a_to_z.getRangeEnd(i);
+    if (start & 0x0020) {
+      // Lowercase range: clearing the 0x20 bit yields the uppercase letters.
+      others.add(start & 0x005F, end & 0x005F);
+    } else {
+      // Uppercase range: setting the 0x20 bit yields the lowercase letters.
+      others.add(start | 0x0020, end | 0x0020);
+    }
+  }
+
+  // Special handling for chars in "Special Add" set.
+  for (int32_t i = 0; i < in_special_add.getRangeCount(); i++) {
+    UChar32 end = in_special_add.getRangeEnd(i);
+    for (UChar32 ch = in_special_add.getRangeStart(i); ch <= end; ch++) {
+      // Add the uppercase of this character if itself is not an uppercase
+      // character.
+      // Note: The if condition cannot be u_islower(ch) because ch could be
+      // neither uppercase nor lowercase but Mn.
+      if (!u_isupper(ch)) {
+        others.add(u_toupper(ch));
+      }
+      icu::UnicodeSet candidates(ch, ch);
+      candidates.closeOver(USET_CASE_INSENSITIVE);
+      for (int32_t j = 0; j < candidates.getRangeCount(); j++) {
+        UChar32 end2 = candidates.getRangeEnd(j);
+        for (UChar32 ch2 = candidates.getRangeStart(j); ch2 <= end2; ch2++) {
+          // Add character that is not uppercase to others.
+          if (!u_isupper(ch2)) {
+            others.add(ch2);
+          }
+        }
+      }
+    }
+  }
+
+  // Remove all characters which are already in the ranges.
+  others.removeAll(already_added);
+
+  // Add others to the ranges
+  for (int32_t i = 0; i < others.getRangeCount(); i++) {
+    UChar32 from = others.getRangeStart(i);
+    UChar32 to = others.getRangeEnd(i);
+    if (from == to) {
+      ranges->Add(CharacterRange::Singleton(from), zone);
+    } else {
+      ranges->Add(CharacterRange::Range(from, to), zone);
+    }
+  }
+#else
+  for (int i = 0; i < range_count; i++) {
+    CharacterRange range = ranges->at(i);
+    uc32 bottom = range.from();
+    if (bottom > String::kMaxUtf16CodeUnit) continue;
+    uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
+    // Nothing to be done for surrogates.
+    if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
+    if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
+      if (bottom > String::kMaxOneByteCharCode) continue;
+      if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
+    }
+    unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+    if (top == bottom) {
+      // If this is a singleton we just expand the one character.
+      int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
+      for (int i = 0; i < length; i++) {
+        uc32 chr = chars[i];
+        if (chr != bottom) {
+          ranges->Add(CharacterRange::Singleton(chars[i]), zone);
+        }
+      }
+    } else {
+      // If this is a range we expand the characters block by block, expanding
+      // contiguous subranges (blocks) one at a time. The approach is as
+      // follows. For a given start character we look up the remainder of the
+      // block that contains it (represented by the end point), for instance we
+      // find 'z' if the character is 'c'. A block is characterized by the
+      // property that all characters uncanonicalize in the same way, except
+      // that each entry in the result is incremented by the distance from the
+      // first element. So a-z is a block because 'a' uncanonicalizes to ['a',
+      // 'A'] and the k'th letter uncanonicalizes to ['a' + k, 'A' + k]. Once
+      // we've found the end point we look up its uncanonicalization and
+      // produce a range for each element. For instance for [c-f] we look up
+      // ['z', 'Z'] and produce [c-f] and [C-F]. We then only add a range if
+      // it is not already contained in the input, so [c-f] will be skipped but
+      // [C-F] will be added. If this range is not completely contained in a
+      // block we do this for all the blocks covered by the range (handling
+      // characters that are not in a block as a "singleton block").
+      unibrow::uchar equivalents[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+      int pos = bottom;
+      while (pos <= top) {
+        int length =
+            isolate->jsregexp_canonrange()->get(pos, '\0', equivalents);
+        uc32 block_end;
+        if (length == 0) {
+          block_end = pos;
+        } else {
+          DCHECK_EQ(1, length);
+          block_end = equivalents[0];
+        }
+        int end = (block_end > top) ? top : block_end;
+        length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0',
+                                                         equivalents);
+        for (int i = 0; i < length; i++) {
+          uc32 c = equivalents[i];
+          uc32 range_from = c - (block_end - pos);
+          uc32 range_to = c - (block_end - end);
+          if (!(bottom <= range_from && range_to <= top)) {
+            ranges->Add(CharacterRange::Range(range_from, range_to), zone);
+          }
+        }
+        pos = end + 1;
+      }
+    }
+  }
+#endif  // V8_INTL_SUPPORT
+}
+
+// Returns true if |ranges| is canonical: sorted by from(), non-overlapping
+// and non-adjacent (adjacent ranges would be merged by Canonicalize).
+bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
+  DCHECK_NOT_NULL(ranges);
+  int n = ranges->length();
+  if (n <= 1) return true;
+  int max = ranges->at(0).to();
+  for (int i = 1; i < n; i++) {
+    CharacterRange next_range = ranges->at(i);
+    // from() <= max + 1 means this range overlaps or touches its predecessor.
+    if (next_range.from() <= max + 1) return false;
+    max = next_range.to();
+  }
+  return true;
+}
+
+// Lazily materializes the range list for a standard (built-in) character
+// class; no unicode case closure is applied here
+// (add_unicode_case_equivalents == false).
+ZoneList<CharacterRange>* CharacterSet::ranges(Zone* zone) {
+  if (ranges_ == nullptr) {
+    ranges_ = new (zone) ZoneList<CharacterRange>(2, zone);
+    CharacterRange::AddClassEscape(standard_set_type_, ranges_, false, zone);
+  }
+  return ranges_;
+}
+
+// Move a number of elements in a zonelist to another position
+// in the same list. Handles overlapping source and target areas.
+static void MoveRanges(ZoneList<CharacterRange>* list, int from, int to,
+                       int count) {
+  // Ranges are potentially overlapping. When moving towards higher indices,
+  // copy backwards so each source is read before it is overwritten.
+  if (from < to) {
+    for (int i = count - 1; i >= 0; i--) {
+      list->at(to + i) = list->at(from + i);
+    }
+  } else {
+    for (int i = 0; i < count; i++) {
+      list->at(to + i) = list->at(from + i);
+    }
+  }
+}
+
+// Inserts |insert| into the canonical prefix list[0..count[ and returns the
+// resulting number of canonical ranges (merging can shrink the list, so the
+// result is anywhere from 1 to count + 1).
+static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list, int count,
+                                      CharacterRange insert) {
+  // Inserts a range into list[0..count[, which must be sorted
+  // by from value and non-overlapping and non-adjacent, using at most
+  // list[0..count] for the result. Returns the number of resulting
+  // canonicalized ranges. Inserting a range may collapse existing ranges into
+  // fewer ranges, so the return value can be anything in the range 1..count+1.
+  uc32 from = insert.from();
+  uc32 to = insert.to();
+  // Scan backwards to find the window [start_pos..end_pos[ of ranges that
+  // overlap or touch the inserted range.
+  int start_pos = 0;
+  int end_pos = count;
+  for (int i = count - 1; i >= 0; i--) {
+    CharacterRange current = list->at(i);
+    if (current.from() > to + 1) {
+      end_pos = i;
+    } else if (current.to() + 1 < from) {
+      start_pos = i + 1;
+      break;
+    }
+  }
+
+  // Inserted range overlaps, or is adjacent to, ranges at positions
+  // [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
+  // not affected by the insertion.
+  // If start_pos == end_pos, the range must be inserted before start_pos.
+  // if start_pos < end_pos, the entire range from start_pos to end_pos
+  // must be merged with the insert range.
+
+  if (start_pos == end_pos) {
+    // Insert between existing ranges at position start_pos.
+    if (start_pos < count) {
+      MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
+    }
+    list->at(start_pos) = insert;
+    return count + 1;
+  }
+  if (start_pos + 1 == end_pos) {
+    // Replace single existing range at position start_pos.
+    CharacterRange to_replace = list->at(start_pos);
+    int new_from = Min(to_replace.from(), from);
+    int new_to = Max(to_replace.to(), to);
+    list->at(start_pos) = CharacterRange::Range(new_from, new_to);
+    return count;
+  }
+  // Replace a number of existing ranges from start_pos to end_pos - 1.
+  // Move the remaining ranges down.
+
+  int new_from = Min(list->at(start_pos).from(), from);
+  int new_to = Max(list->at(end_pos - 1).to(), to);
+  if (end_pos < count) {
+    MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
+  }
+  list->at(start_pos) = CharacterRange::Range(new_from, new_to);
+  return count - (end_pos - start_pos) + 1;
+}
+
+// Canonicalizes this set's ranges in place, if they have been materialized.
+void CharacterSet::Canonicalize() {
+  // Special/default classes are always considered canonical. The result
+  // of calling ranges() will be sorted.
+  if (ranges_ == nullptr) return;
+  CharacterRange::Canonicalize(ranges_);
+}
+
+// Canonicalizes |character_ranges| in place so that afterwards the list is
+// sorted, non-overlapping and non-adjacent (see IsCanonical).
+void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
+  if (character_ranges->length() <= 1) return;
+  // Check whether ranges are already canonical (increasing, non-overlapping,
+  // non-adjacent).
+  int n = character_ranges->length();
+  int max = character_ranges->at(0).to();
+  int i = 1;
+  while (i < n) {
+    CharacterRange current = character_ranges->at(i);
+    if (current.from() <= max + 1) {
+      break;
+    }
+    max = current.to();
+    i++;
+  }
+  // Canonical until the i'th range. If that's all of them, we are done.
+  if (i == n) return;
+
+  // The ranges at index i and forward are not canonicalized. Make them so by
+  // doing the equivalent of insertion sort (inserting each into the previous
+  // list, in order).
+  // Notice that inserting a range can reduce the number of ranges in the
+  // result due to combining of adjacent and overlapping ranges.
+  int read = i;           // Range to insert.
+  int num_canonical = i;  // Length of canonicalized part of list.
+  do {
+    num_canonical = InsertRangeInCanonicalList(character_ranges, num_canonical,
+                                               character_ranges->at(read));
+    read++;
+  } while (read < n);
+  character_ranges->Rewind(num_canonical);
+
+  DCHECK(CharacterRange::IsCanonical(character_ranges));
+}
+
+// Writes the complement of the canonical |ranges| (over
+// [0, String::kMaxCodePoint]) into the empty list |negated_ranges|.
+void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
+                            ZoneList<CharacterRange>* negated_ranges,
+                            Zone* zone) {
+  DCHECK(CharacterRange::IsCanonical(ranges));
+  DCHECK_EQ(0, negated_ranges->length());
+  int range_count = ranges->length();
+  uc32 from = 0;
+  int i = 0;
+  // Skip an initial range starting at 0: the complement begins after it.
+  if (range_count > 0 && ranges->at(0).from() == 0) {
+    from = ranges->at(0).to() + 1;
+    i = 1;
+  }
+  while (i < range_count) {
+    CharacterRange range = ranges->at(i);
+    // Emit the gap between the previous range (or start) and this one.
+    negated_ranges->Add(CharacterRange::Range(from, range.from() - 1), zone);
+    from = range.to() + 1;
+    i++;
+  }
+  // NOTE(review): if the last input range ends at kMaxCodePoint - 1, |from|
+  // equals kMaxCodePoint and the final singleton [kMaxCodePoint] is skipped
+  // here — confirm whether this should be from <= String::kMaxCodePoint.
+  if (from < String::kMaxCodePoint) {
+    negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
+                        zone);
+  }
+}
+
+// Scoped object to keep track of how much we unroll quantifier loops in the
+// regexp graph generator.
+class RegExpExpansionLimiter {
+ public:
+  static const int kMaxExpansionFactor = 6;
+  // Multiplies the compiler's current expansion factor by |factor| for the
+  // lifetime of this scope; ok_to_expand() reports whether the accumulated
+  // factor is still within kMaxExpansionFactor.
+  RegExpExpansionLimiter(RegExpCompiler* compiler, int factor)
+      : compiler_(compiler),
+        saved_expansion_factor_(compiler->current_expansion_factor()),
+        ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
+    DCHECK_LT(0, factor);
+    if (ok_to_expand_) {
+      if (factor > kMaxExpansionFactor) {
+        // Avoid integer overflow of the current expansion factor.
+        ok_to_expand_ = false;
+        compiler->set_current_expansion_factor(kMaxExpansionFactor + 1);
+      } else {
+        int new_factor = saved_expansion_factor_ * factor;
+        ok_to_expand_ = (new_factor <= kMaxExpansionFactor);
+        compiler->set_current_expansion_factor(new_factor);
+      }
+    }
+  }
+
+  // Restores the expansion factor that was current when the scope opened.
+  ~RegExpExpansionLimiter() {
+    compiler_->set_current_expansion_factor(saved_expansion_factor_);
+  }
+
+  bool ok_to_expand() { return ok_to_expand_; }
+
+ private:
+  RegExpCompiler* compiler_;
+  int saved_expansion_factor_;
+  bool ok_to_expand_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpExpansionLimiter);
+};
+
+// Builds the node graph for the quantifier x{min,max} applied to |body|.
+// Small fixed repetition counts may be unrolled (bounded by
+// RegExpExpansionLimiter); otherwise a counted LoopChoiceNode is emitted,
+// with extra bookkeeping when the body can match the empty string or
+// contains capture groups.
+RegExpNode* RegExpQuantifier::ToNode(int min, int max, bool is_greedy,
+                                     RegExpTree* body, RegExpCompiler* compiler,
+                                     RegExpNode* on_success,
+                                     bool not_at_start) {
+  // x{f, t} becomes this:
+  //
+  //             (r++)<-.
+  //               |     `
+  //               |     (x)
+  //               v     ^
+  //             (r=0)-->(?)---/ [if r < t]
+  //               |
+  //   [if r >= f] \----> ...
+  //
+
+  // 15.10.2.5 RepeatMatcher algorithm.
+  // The parser has already eliminated the case where max is 0. In the case
+  // where max_match is zero the parser has removed the quantifier if min was
+  // > 0 and removed the atom if min was 0. See AddQuantifierToAtom.
+
+  // If we know that we cannot match zero length then things are a little
+  // simpler since we don't need to make the special zero length match check
+  // from step 2.1. If the min and max are small we can unroll a little in
+  // this case.
+  static const int kMaxUnrolledMinMatches = 3;  // Unroll (foo)+ and (foo){3,}
+  static const int kMaxUnrolledMaxMatches = 3;  // Unroll (foo)? and (foo){x,3}
+  if (max == 0) return on_success;  // This can happen due to recursion.
+  bool body_can_be_empty = (body->min_match() == 0);
+  int body_start_reg = RegExpCompiler::kNoRegister;
+  Interval capture_registers = body->CaptureRegisters();
+  bool needs_capture_clearing = !capture_registers.is_empty();
+  Zone* zone = compiler->zone();
+
+  if (body_can_be_empty) {
+    // Register that will hold the body's start position for the empty-match
+    // check below.
+    body_start_reg = compiler->AllocateRegister();
+  } else if (compiler->optimize() && !needs_capture_clearing) {
+    // Only unroll if there are no captures and the body can't be
+    // empty.
+    {
+      RegExpExpansionLimiter limiter(compiler, min + ((max != min) ? 1 : 0));
+      if (min > 0 && min <= kMaxUnrolledMinMatches && limiter.ok_to_expand()) {
+        int new_max = (max == kInfinity) ? max : max - min;
+        // Recurse once to get the loop or optional matches after the fixed
+        // ones.
+        RegExpNode* answer =
+            ToNode(0, new_max, is_greedy, body, compiler, on_success, true);
+        // Unroll the forced matches from 0 to min. This can cause chains of
+        // TextNodes (which the parser does not generate). These should be
+        // combined if it turns out they hinder good code generation.
+        for (int i = 0; i < min; i++) {
+          answer = body->ToNode(compiler, answer);
+        }
+        return answer;
+      }
+    }
+    if (max <= kMaxUnrolledMaxMatches && min == 0) {
+      DCHECK_LT(0, max);  // Due to the 'if' above.
+      RegExpExpansionLimiter limiter(compiler, max);
+      if (limiter.ok_to_expand()) {
+        // Unroll the optional matches up to max.
+        RegExpNode* answer = on_success;
+        for (int i = 0; i < max; i++) {
+          ChoiceNode* alternation = new (zone) ChoiceNode(2, zone);
+          // Greedy: try the body first; lazy: try the continuation first.
+          if (is_greedy) {
+            alternation->AddAlternative(
+                GuardedAlternative(body->ToNode(compiler, answer)));
+            alternation->AddAlternative(GuardedAlternative(on_success));
+          } else {
+            alternation->AddAlternative(GuardedAlternative(on_success));
+            alternation->AddAlternative(
+                GuardedAlternative(body->ToNode(compiler, answer)));
+          }
+          answer = alternation;
+          if (not_at_start && !compiler->read_backward()) {
+            alternation->set_not_at_start();
+          }
+        }
+        return answer;
+      }
+    }
+  }
+  // General case: a counted loop around the body.
+  bool has_min = min > 0;
+  bool has_max = max < RegExpTree::kInfinity;
+  bool needs_counter = has_min || has_max;
+  int reg_ctr = needs_counter ? compiler->AllocateRegister()
+                              : RegExpCompiler::kNoRegister;
+  LoopChoiceNode* center = new (zone)
+      LoopChoiceNode(body->min_match() == 0, compiler->read_backward(), zone);
+  if (not_at_start && !compiler->read_backward()) center->set_not_at_start();
+  RegExpNode* loop_return =
+      needs_counter ? static_cast<RegExpNode*>(
+                          ActionNode::IncrementRegister(reg_ctr, center))
+                    : static_cast<RegExpNode*>(center);
+  if (body_can_be_empty) {
+    // If the body can be empty we need to check if it was and then
+    // backtrack.
+    loop_return =
+        ActionNode::EmptyMatchCheck(body_start_reg, reg_ctr, min, loop_return);
+  }
+  RegExpNode* body_node = body->ToNode(compiler, loop_return);
+  if (body_can_be_empty) {
+    // If the body can be empty we need to store the start position
+    // so we can bail out if it was empty.
+    body_node = ActionNode::StorePosition(body_start_reg, false, body_node);
+  }
+  if (needs_capture_clearing) {
+    // Before entering the body of this loop we need to clear captures.
+    body_node = ActionNode::ClearCaptures(capture_registers, body_node);
+  }
+  GuardedAlternative body_alt(body_node);
+  if (has_max) {
+    // Only loop again while the counter is below the maximum.
+    Guard* body_guard = new (zone) Guard(reg_ctr, Guard::LT, max);
+    body_alt.AddGuard(body_guard, zone);
+  }
+  GuardedAlternative rest_alt(on_success);
+  if (has_min) {
+    // Only continue past the loop once the minimum count is reached.
+    Guard* rest_guard = new (compiler->zone()) Guard(reg_ctr, Guard::GEQ, min);
+    rest_alt.AddGuard(rest_guard, zone);
+  }
+  // Loop-first for greedy quantifiers, continue-first for lazy ones.
+  if (is_greedy) {
+    center->AddLoopAlternative(body_alt);
+    center->AddContinueAlternative(rest_alt);
+  } else {
+    center->AddContinueAlternative(rest_alt);
+    center->AddLoopAlternative(body_alt);
+  }
+  if (needs_counter) {
+    // Initialize the counter register to zero on entry.
+    return ActionNode::SetRegister(reg_ctr, 0, center);
+  } else {
+    return center;
+  }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
new file mode 100644
index 0000000000..c643f988c0
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -0,0 +1,3551 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-compiler.h"
+
+#include "src/diagnostics/code-tracer.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-macro-assembler-arch.h"
+#include "src/regexp/regexp-macro-assembler-tracer.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/zone/zone-list-inl.h"
+
+#ifdef V8_INTL_SUPPORT
+#include "unicode/locid.h"
+#include "unicode/uniset.h"
+#include "unicode/utypes.h"
+#endif // V8_INTL_SUPPORT
+
+namespace v8 {
+namespace internal {
+
+using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+
+// -------------------------------------------------------------------
+// Implementation of the Irregexp regular expression engine.
+//
+// The Irregexp regular expression engine is intended to be a complete
+// implementation of ECMAScript regular expressions. It generates either
+// bytecodes or native code.
+
+// The Irregexp regexp engine is structured in three steps.
+// 1) The parser generates an abstract syntax tree. See ast.cc.
+// 2) From the AST a node network is created. The nodes are all
+// subclasses of RegExpNode. The nodes represent states when
+// executing a regular expression. Several optimizations are
+// performed on the node network.
+// 3) From the nodes we generate either byte codes or native code
+// that can actually execute the regular expression (perform
+// the search). The code generation step is described in more
+// detail below.
+
+// Code generation.
+//
+// The nodes are divided into four main categories.
+// * Choice nodes
+// These represent places where the regular expression can
+// match in more than one way. For example on entry to an
+// alternation (foo|bar) or a repetition (*, +, ? or {}).
+// * Action nodes
+// These represent places where some action should be
+// performed. Examples include recording the current position
+// in the input string to a register (in order to implement
+// captures) or other actions on register for example in order
+// to implement the counters needed for {} repetitions.
+// * Matching nodes
+// These attempt to match some element part of the input string.
+// Examples of elements include character classes, plain strings
+// or back references.
+// * End nodes
+// These are used to implement the actions required on finding
+// a successful match or failing to find a match.
+//
+// The code generated (whether as byte codes or native code) maintains
+// some state as it runs. This consists of the following elements:
+//
+// * The capture registers. Used for string captures.
+// * Other registers. Used for counters etc.
+// * The current position.
+// * The stack of backtracking information. Used when a matching node
+// fails to find a match and needs to try an alternative.
+//
+// Conceptual regular expression execution model:
+//
+// There is a simple conceptual model of regular expression execution
+// which will be presented first. The actual code generated is a more
+// efficient simulation of the simple conceptual model:
+//
+// * Choice nodes are implemented as follows:
+// For each choice except the last {
+// push current position
+// push backtrack code location
+// <generate code to test for choice>
+// backtrack code location:
+// pop current position
+// }
+// <generate code to test for last choice>
+//
+// * Actions nodes are generated as follows
+// <push affected registers on backtrack stack>
+// <generate code to perform action>
+// push backtrack code location
+// <generate code to test for following nodes>
+// backtrack code location:
+// <pop affected registers to restore their state>
+// <pop backtrack location from stack and go to it>
+//
+// * Matching nodes are generated as follows:
+// if input string matches at current position
+// update current position
+// <generate code to test for following nodes>
+// else
+// <pop backtrack location from stack and go to it>
+//
+// Thus it can be seen that the current position is saved and restored
+// by the choice nodes, whereas the registers are saved and restored by
+// the action nodes that manipulate them.
+//
+// The other interesting aspect of this model is that nodes are generated
+// at the point where they are needed by a recursive call to Emit(). If
+// the node has already been code generated then the Emit() call will
+// generate a jump to the previously generated code instead. In order to
+// limit recursion it is possible for the Emit() function to put the node
+// on a work list for later generation and instead generate a jump. The
+// destination of the jump is resolved later when the code is generated.
+//
+// Actual regular expression code generation.
+//
+// Code generation is actually more complicated than the above. In order
+// to improve the efficiency of the generated code some optimizations are
+// performed
+//
+// * Choice nodes have 1-character lookahead.
+// A choice node looks at the following character and eliminates some of
+// the choices immediately based on that character. This is not yet
+// implemented.
+// * Simple greedy loops store reduced backtracking information.
+// A quantifier like /.*foo/m will greedily match the whole input. It will
+// then need to backtrack to a point where it can match "foo". The naive
+// implementation of this would push each character position onto the
+// backtracking stack, then pop them off one by one. This would use space
+// proportional to the length of the input string. However since the "."
+// can only match in one way and always has a constant length (in this case
+// of 1) it suffices to store the current position on the top of the stack
+// once. Matching now becomes merely incrementing the current position and
+// backtracking becomes decrementing the current position and checking the
+// result against the stored current position. This is faster and saves
+// space.
+// * The current state is virtualized.
+// This is used to defer expensive operations until it is clear that they
+// are needed and to generate code for a node more than once, allowing
+// specialized and efficient versions of the code to be created. This is
+// explained in the section below.
+//
+// Execution state virtualization.
+//
+// Instead of emitting code, nodes that manipulate the state can record their
+// manipulation in an object called the Trace. The Trace object can record a
+// current position offset, an optional backtrack code location on the top of
+// the virtualized backtrack stack and some register changes. When a node is
+// to be emitted it can flush the Trace or update it. Flushing the Trace
+// will emit code to bring the actual state into line with the virtual state.
+// Avoiding flushing the state can postpone some work (e.g. updates of capture
+// registers). Postponing work can save time when executing the regular
+// expression since it may be found that the work never has to be done as a
+// failure to match can occur. In addition it is much faster to jump to a
+// known backtrack code location than it is to pop an unknown backtrack
+// location from the stack and jump there.
+//
+// The virtual state found in the Trace affects code generation. For example
+// the virtual state contains the difference between the actual current
+// position and the virtual current position, and matching code needs to use
+// this offset to attempt a match in the correct location of the input
+// string. Therefore code generated for a non-trivial trace is specialized
+// to that trace. The code generator therefore has the ability to generate
+// code for each node several times. In order to limit the size of the
+// generated code there is an arbitrary limit on how many specialized sets of
+// code may be generated for a given node. If the limit is reached, the
+// trace is flushed and a generic version of the code for a node is emitted.
+// This is subsequently used for that node. The code emitted for non-generic
+// trace is not recorded in the node and so it cannot currently be reused in
+// the event that code generation is requested for an identical trace.
+
+// Base-class fallback: only the atom, character-class and text subclasses
+// below can appear inside a text node, so this must never be reached.
+void RegExpTree::AppendToText(RegExpText* text, Zone* zone) { UNREACHABLE(); }
+
+// Appends this atom to |text| as a single ATOM text element.
+void RegExpAtom::AppendToText(RegExpText* text, Zone* zone) {
+  text->AddElement(TextElement::Atom(this), zone);
+}
+
+// Appends this class to |text| as a single CHAR_CLASS text element.
+void RegExpCharacterClass::AppendToText(RegExpText* text, Zone* zone) {
+  text->AddElement(TextElement::CharClass(this), zone);
+}
+
+// Flattens this text node into |text| by appending each of its elements.
+void RegExpText::AppendToText(RegExpText* text, Zone* zone) {
+  for (int i = 0; i < elements()->length(); i++)
+    text->AddElement(elements()->at(i), zone);
+}
+
+// Wraps |atom| in a TextElement of type ATOM.
+TextElement TextElement::Atom(RegExpAtom* atom) {
+  return TextElement(ATOM, atom);
+}
+
+// Wraps |char_class| in a TextElement of type CHAR_CLASS.
+TextElement TextElement::CharClass(RegExpCharacterClass* char_class) {
+  return TextElement(CHAR_CLASS, char_class);
+}
+
+// Returns the number of characters this element consumes: the atom's string
+// length for ATOM, always 1 for CHAR_CLASS.
+int TextElement::length() const {
+  switch (text_type()) {
+    case ATOM:
+      return atom()->length();
+
+    case CHAR_CLASS:
+      return 1;
+  }
+  UNREACHABLE();
+}
+
+// Scope guard that increments the compiler's recursion depth on construction
+// and decrements it again on destruction.
+class RecursionCheck {
+ public:
+  explicit RecursionCheck(RegExpCompiler* compiler) : compiler_(compiler) {
+    compiler->IncrementRecursionDepth();
+  }
+  ~RecursionCheck() { compiler_->DecrementRecursionDepth(); }
+
+ private:
+  RegExpCompiler* compiler_;
+};
+
+// Constructs a compiler for a pattern with |capture_count| capture groups.
+// Registers 0 .. 2 * (capture_count + 1) - 1 are reserved for capture
+// bookkeeping; further registers are handed out by AllocateRegister().
+// (The old comment here — "returns a fixed array or a null handle" —
+// described a compile entry point, not this constructor; see Assemble below
+// for code generation.)
+RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
+                               bool one_byte)
+    : next_register_(2 * (capture_count + 1)),
+      unicode_lookaround_stack_register_(kNoRegister),
+      unicode_lookaround_position_register_(kNoRegister),
+      work_list_(nullptr),
+      recursion_depth_(0),
+      one_byte_(one_byte),
+      reg_exp_too_big_(false),
+      limiting_recursion_(false),
+      optimize_(FLAG_regexp_optimization),
+      read_backward_(false),
+      current_expansion_factor_(1),
+      frequency_collator_(),
+      isolate_(isolate),
+      zone_(zone) {
+  accept_ = new (zone) EndNode(EndNode::ACCEPT, zone);
+  DCHECK_GE(RegExpMacroAssembler::kMaxRegister, next_register_ - 1);
+}
+
+// Drives code generation for the node graph rooted at |start|: emits the
+// entry trace, drains the work list of nodes whose emission was deferred,
+// and packages the generated code. Returns RegExpTooBig if the graph
+// exceeded the emission limits.
+RegExpCompiler::CompilationResult RegExpCompiler::Assemble(
+    Isolate* isolate, RegExpMacroAssembler* macro_assembler, RegExpNode* start,
+    int capture_count, Handle<String> pattern) {
+#ifdef DEBUG
+  if (FLAG_trace_regexp_assembler)
+    macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler);
+  else
+#endif
+    macro_assembler_ = macro_assembler;
+
+  std::vector<RegExpNode*> work_list;
+  work_list_ = &work_list;
+  Label fail;
+  macro_assembler_->PushBacktrack(&fail);
+  Trace new_trace;
+  start->Emit(this, &new_trace);
+  macro_assembler_->Bind(&fail);
+  macro_assembler_->Fail();
+  // Emit deferred nodes until none remain unbound.
+  while (!work_list.empty()) {
+    RegExpNode* node = work_list.back();
+    work_list.pop_back();
+    node->set_on_work_list(false);
+    if (!node->label()->is_bound()) node->Emit(this, &new_trace);
+  }
+  if (reg_exp_too_big_) {
+    // NOTE(review): in debug builds with --trace-regexp-assembler, the tracer
+    // allocated above is not deleted on this early-return path — confirm
+    // whether that (debug-only) leak is acceptable.
+    macro_assembler_->AbortedCodeGeneration();
+    return CompilationResult::RegExpTooBig();
+  }
+
+  Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
+  isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
+  work_list_ = nullptr;
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_print_code && !FLAG_regexp_interpret_all) {
+    CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+    OFStream os(trace_scope.file());
+    Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
+  }
+#endif
+#ifdef DEBUG
+  if (FLAG_trace_regexp_assembler) {
+    delete macro_assembler_;
+  }
+#endif
+  return {*code, next_register_};
+}
+
+// Returns whether this deferred action affects register |that|. A
+// CLEAR_CAPTURES action mentions every register in its interval; all other
+// actions mention exactly their own register.
+bool Trace::DeferredAction::Mentions(int that) {
+  if (action_type() == ActionNode::CLEAR_CAPTURES) {
+    Interval range = static_cast<DeferredClearCaptures*>(this)->range();
+    return range.Contains(that);
+  } else {
+    return reg() == that;
+  }
+}
+
+// Returns true if any deferred action in this trace mentions register |reg|.
+bool Trace::mentions_reg(int reg) {
+  for (DeferredAction* action = actions_; action != nullptr;
+       action = action->next()) {
+    if (action->Mentions(reg)) return true;
+  }
+  return false;
+}
+
+// Finds the newest deferred action mentioning |reg|. If it is a deferred
+// position store, writes its character-position offset into |cp_offset| and
+// returns true; otherwise returns false with |cp_offset| left at zero.
+bool Trace::GetStoredPosition(int reg, int* cp_offset) {
+  DCHECK_EQ(0, *cp_offset);
+  for (DeferredAction* current = actions_; current != nullptr;
+       current = current->next()) {
+    if (!current->Mentions(reg)) continue;
+    // Only the most recent mention matters; anything other than a stored
+    // position means the register's value is not a plain capture offset.
+    if (current->action_type() != ActionNode::STORE_POSITION) return false;
+    *cp_offset = static_cast<DeferredCapture*>(current)->cp_offset();
+    return true;
+  }
+  return false;
+}
+
+// A (dynamically-sized) set of unsigned integers that behaves especially well
+// on small integers (< kFirstLimit). May do zone-allocation.
+class DynamicBitSet : public ZoneObject {
+ public:
+  // Membership test. Small values are answered from the inline bit mask;
+  // larger values consult the lazily-allocated overflow list.
+  V8_EXPORT_PRIVATE bool Get(unsigned value) const {
+    if (value < kFirstLimit) {
+      // Shift an unsigned one: the signed literal form (1 << 31) shifts into
+      // the sign bit, which is undefined behavior before C++20.
+      return (first_ & (uint32_t{1} << value)) != 0;
+    } else if (remaining_ == nullptr) {
+      return false;
+    } else {
+      return remaining_->Contains(value);
+    }
+  }
+
+  // Destructively set a value in this set.
+  void Set(unsigned value, Zone* zone) {
+    if (value < kFirstLimit) {
+      first_ |= (uint32_t{1} << value);
+    } else {
+      // Lazily allocate the overflow list and avoid duplicate entries.
+      if (remaining_ == nullptr)
+        remaining_ = new (zone) ZoneList<unsigned>(1, zone);
+      if (remaining_->is_empty() || !remaining_->Contains(value))
+        remaining_->Add(value, zone);
+    }
+  }
+
+ private:
+  // Values below this limit live as bits of first_.
+  static constexpr unsigned kFirstLimit = 32;
+
+  uint32_t first_ = 0;                       // Bit mask for values 0..31.
+  ZoneList<unsigned>* remaining_ = nullptr;  // Lazily-allocated overflow.
+};
+
+// Marks in |affected_registers| every register touched by this trace's
+// deferred actions and returns the largest register index seen, or
+// RegExpCompiler::kNoRegister when there are no actions.
+int Trace::FindAffectedRegisters(DynamicBitSet* affected_registers,
+                                 Zone* zone) {
+  int max_register = RegExpCompiler::kNoRegister;
+  for (DeferredAction* current = actions_; current != nullptr;
+       current = current->next()) {
+    if (current->action_type() == ActionNode::CLEAR_CAPTURES) {
+      // A clear-captures action covers a whole interval of registers.
+      Interval range = static_cast<DeferredClearCaptures*>(current)->range();
+      for (int reg = range.from(); reg <= range.to(); reg++) {
+        affected_registers->Set(reg, zone);
+      }
+      if (max_register < range.to()) max_register = range.to();
+    } else {
+      affected_registers->Set(current->reg(), zone);
+      if (max_register < current->reg()) max_register = current->reg();
+    }
+  }
+  return max_register;
+}
+
+// Emits backtrack-path code that undoes deferred register updates: registers
+// in |registers_to_pop| are restored from the backtrack stack, and maximal
+// runs of registers in |registers_to_clear| are cleared with a single call.
+void Trace::RestoreAffectedRegisters(RegExpMacroAssembler* assembler,
+ int max_register,
+ const DynamicBitSet& registers_to_pop,
+ const DynamicBitSet& registers_to_clear) {
+ // Walk downwards so pops mirror the order of the pushes emitted in
+ // PerformDeferredActions (which walks upwards).
+ for (int reg = max_register; reg >= 0; reg--) {
+ if (registers_to_pop.Get(reg)) {
+ assembler->PopRegister(reg);
+ } else if (registers_to_clear.Get(reg)) {
+ int clear_to = reg;
+ // Extend the run downwards; |reg| is deliberately decremented so the
+ // outer loop skips registers already covered by this ClearRegisters.
+ while (reg > 0 && registers_to_clear.Get(reg - 1)) {
+ reg--;
+ }
+ assembler->ClearRegisters(reg, clear_to);
+ }
+ }
+}
+
+// Realizes the deferred register updates accumulated in this trace. For each
+// affected register we scan the action list (newest first) to compute the
+// net effect, emit the single store/clear/set/advance that produces it, and
+// record in |registers_to_pop| / |registers_to_clear| how the backtrack path
+// must undo it (see RestoreAffectedRegisters).
+void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
+ int max_register,
+ const DynamicBitSet& affected_registers,
+ DynamicBitSet* registers_to_pop,
+ DynamicBitSet* registers_to_clear,
+ Zone* zone) {
+ // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
+ const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
+
+ // Count pushes performed to force a stack limit check occasionally.
+ int pushes = 0;
+
+ for (int reg = 0; reg <= max_register; reg++) {
+ if (!affected_registers.Get(reg)) {
+ continue;
+ }
+
+ // The chronologically first deferred action in the trace
+ // is used to infer the action needed to restore a register
+ // to its previous state (or not, if it's safe to ignore it).
+ enum DeferredActionUndoType { IGNORE, RESTORE, CLEAR };
+ DeferredActionUndoType undo_action = IGNORE;
+
+ int value = 0;
+ bool absolute = false;
+ bool clear = false;
+ static const int kNoStore = kMinInt;
+ int store_position = kNoStore;
+ // This is a little tricky because we are scanning the actions in reverse
+ // historical order (newest first).
+ for (DeferredAction* action = actions_; action != nullptr;
+ action = action->next()) {
+ if (action->Mentions(reg)) {
+ switch (action->action_type()) {
+ case ActionNode::SET_REGISTER: {
+ Trace::DeferredSetRegister* psr =
+ static_cast<Trace::DeferredSetRegister*>(action);
+ // Only the newest (first-seen) absolute value wins; older sets
+ // and increments are superseded by it.
+ if (!absolute) {
+ value += psr->value();
+ absolute = true;
+ }
+ // SET_REGISTER is currently only used for newly introduced loop
+ // counters. They can have a significant previous value if they
+ // occur in a loop. TODO(lrn): Propagate this information, so
+ // we can set undo_action to IGNORE if we know there is no value to
+ // restore.
+ undo_action = RESTORE;
+ DCHECK_EQ(store_position, kNoStore);
+ DCHECK(!clear);
+ break;
+ }
+ case ActionNode::INCREMENT_REGISTER:
+ // Increments accumulate until an absolute value is seen.
+ if (!absolute) {
+ value++;
+ }
+ DCHECK_EQ(store_position, kNoStore);
+ DCHECK(!clear);
+ undo_action = RESTORE;
+ break;
+ case ActionNode::STORE_POSITION: {
+ Trace::DeferredCapture* pc =
+ static_cast<Trace::DeferredCapture*>(action);
+ if (!clear && store_position == kNoStore) {
+ store_position = pc->cp_offset();
+ }
+
+ // For captures we know that stores and clears alternate.
+ // Other register, are never cleared, and if the occur
+ // inside a loop, they might be assigned more than once.
+ if (reg <= 1) {
+ // Registers zero and one, aka "capture zero", is
+ // always set correctly if we succeed. There is no
+ // need to undo a setting on backtrack, because we
+ // will set it again or fail.
+ undo_action = IGNORE;
+ } else {
+ undo_action = pc->is_capture() ? CLEAR : RESTORE;
+ }
+ DCHECK(!absolute);
+ DCHECK_EQ(value, 0);
+ break;
+ }
+ case ActionNode::CLEAR_CAPTURES: {
+ // Since we're scanning in reverse order, if we've already
+ // set the position we have to ignore historically earlier
+ // clearing operations.
+ if (store_position == kNoStore) {
+ clear = true;
+ }
+ undo_action = RESTORE;
+ DCHECK(!absolute);
+ DCHECK_EQ(value, 0);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ // Prepare for the undo-action (e.g., push if it's going to be popped).
+ if (undo_action == RESTORE) {
+ pushes++;
+ RegExpMacroAssembler::StackCheckFlag stack_check =
+ RegExpMacroAssembler::kNoStackLimitCheck;
+ // Request a stack limit check every push_limit pushes so a long run of
+ // pushes cannot silently overflow the backtrack stack.
+ if (pushes == push_limit) {
+ stack_check = RegExpMacroAssembler::kCheckStackLimit;
+ pushes = 0;
+ }
+
+ assembler->PushRegister(reg, stack_check);
+ registers_to_pop->Set(reg, zone);
+ } else if (undo_action == CLEAR) {
+ registers_to_clear->Set(reg, zone);
+ }
+ // Perform the chronologically last action (or accumulated increment)
+ // for the register.
+ if (store_position != kNoStore) {
+ assembler->WriteCurrentPositionToRegister(reg, store_position);
+ } else if (clear) {
+ assembler->ClearRegisters(reg, reg);
+ } else if (absolute) {
+ assembler->SetRegister(reg, value);
+ } else if (value != 0) {
+ assembler->AdvanceRegister(reg, value);
+ }
+ }
+}
+
+// This is called as we come into a loop choice node and some other tricky
+// nodes. It normalizes the state of the code generator to ensure we can
+// generate generic code: all deferred actions are emitted (with undo code on
+// the backtrack path) and |successor| is then emitted with a trivial trace.
+void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+
+ DCHECK(!is_trivial());
+
+ if (actions_ == nullptr && backtrack() == nullptr) {
+ // Here we just have some deferred cp advances to fix and we are back to
+ // a normal situation. We may also have to forget some information gained
+ // through a quick check that was already performed.
+ if (cp_offset_ != 0) assembler->AdvanceCurrentPosition(cp_offset_);
+ // Create a new trivial state and generate the node with that.
+ Trace new_state;
+ successor->Emit(compiler, &new_state);
+ return;
+ }
+
+ // Generate deferred actions here along with code to undo them again.
+ DynamicBitSet affected_registers;
+
+ if (backtrack() != nullptr) {
+ // Here we have a concrete backtrack location. These are set up by choice
+ // nodes and so they indicate that we have a deferred save of the current
+ // position which we may need to emit here.
+ assembler->PushCurrentPosition();
+ }
+
+ int max_register =
+ FindAffectedRegisters(&affected_registers, compiler->zone());
+ DynamicBitSet registers_to_pop;
+ DynamicBitSet registers_to_clear;
+ PerformDeferredActions(assembler, max_register, affected_registers,
+ &registers_to_pop, &registers_to_clear,
+ compiler->zone());
+ if (cp_offset_ != 0) {
+ assembler->AdvanceCurrentPosition(cp_offset_);
+ }
+
+ // Create a new trivial state and generate the node with that.
+ Label undo;
+ assembler->PushBacktrack(&undo);
+ // Emit the successor inline only while recursion depth permits; otherwise
+ // queue it for a generic version and jump to its label.
+ if (successor->KeepRecursing(compiler)) {
+ Trace new_state;
+ successor->Emit(compiler, &new_state);
+ } else {
+ compiler->AddWork(successor);
+ assembler->GoTo(successor->label());
+ }
+
+ // On backtrack we need to restore state.
+ assembler->Bind(&undo);
+ RestoreAffectedRegisters(assembler, max_register, registers_to_pop,
+ registers_to_clear);
+ if (backtrack() == nullptr) {
+ assembler->Backtrack();
+ } else {
+ assembler->PopCurrentPosition();
+ assembler->GoTo(backtrack());
+ }
+}
+
+// Emits the code reached when the body of a negative look-ahead DID match,
+// meaning the surrounding negative submatch must fail: unwind the submatch's
+// backtrack-stack frame, restore the input position, clear its captures and
+// backtrack.
+void NegativeSubmatchSuccess::Emit(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+
+ // Omit flushing the trace. We discard the entire stack frame anyway.
+
+ if (!label()->is_bound()) {
+ // We are completely independent of the trace, since we ignore it,
+ // so this code can be used as the generic version.
+ assembler->Bind(label());
+ }
+
+ // Throw away everything on the backtrack stack since the start
+ // of the negative submatch and restore the character position.
+ assembler->ReadCurrentPositionFromRegister(current_position_register_);
+ assembler->ReadStackPointerFromRegister(stack_pointer_register_);
+ if (clear_capture_count_ > 0) {
+ // Clear any captures that might have been performed during the success
+ // of the body of the negative look-ahead.
+ int clear_capture_end = clear_capture_start_ + clear_capture_count_ - 1;
+ assembler->ClearRegisters(clear_capture_start_, clear_capture_end);
+ }
+ // Now that we have unwound the stack we find at the top of the stack the
+ // backtrack that the BeginSubmatch node got.
+ assembler->Backtrack();
+}
+
+// Emits terminal code for the automaton. A non-trivial trace is flushed
+// first (Flush re-enters this Emit with a trivial trace). ACCEPT succeeds
+// the match; BACKTRACK jumps to the trace's backtrack target.
+void EndNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ return;
+ }
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ if (!label()->is_bound()) {
+ assembler->Bind(label());
+ }
+ switch (action_) {
+ case ACCEPT:
+ assembler->Succeed();
+ return;
+ case BACKTRACK:
+ assembler->GoTo(trace->backtrack());
+ return;
+ case NEGATIVE_SUBMATCH_SUCCESS:
+ // This case is handled in a different virtual method.
+ UNREACHABLE();
+ }
+ UNIMPLEMENTED();
+}
+
+// Appends |guard| to this alternative, lazily allocating the guard list on
+// first use.
+void GuardedAlternative::AddGuard(Guard* guard, Zone* zone) {
+  if (guards_ == nullptr) {
+    guards_ = new (zone) ZoneList<Guard*>(1, zone);
+  }
+  guards_->Add(guard, zone);
+}
+
+// Creates an action node that assigns |val| to register |reg| before
+// continuing with |on_success|.
+ActionNode* ActionNode::SetRegister(int reg, int val, RegExpNode* on_success) {
+  Zone* zone = on_success->zone();
+  ActionNode* node = new (zone) ActionNode(SET_REGISTER, on_success);
+  node->data_.u_store_register.reg = reg;
+  node->data_.u_store_register.value = val;
+  return node;
+}
+
+// Creates an action node that increments register |reg| by one before
+// continuing with |on_success|.
+ActionNode* ActionNode::IncrementRegister(int reg, RegExpNode* on_success) {
+  Zone* zone = on_success->zone();
+  ActionNode* node = new (zone) ActionNode(INCREMENT_REGISTER, on_success);
+  node->data_.u_increment_register.reg = reg;
+  return node;
+}
+
+// Creates an action node that records the current input position in register
+// |reg|; |is_capture| marks whether the register is a capture register.
+ActionNode* ActionNode::StorePosition(int reg, bool is_capture,
+                                      RegExpNode* on_success) {
+  Zone* zone = on_success->zone();
+  ActionNode* node = new (zone) ActionNode(STORE_POSITION, on_success);
+  node->data_.u_position_register.reg = reg;
+  node->data_.u_position_register.is_capture = is_capture;
+  return node;
+}
+
+// Creates an action node that clears the capture registers in |range|.
+ActionNode* ActionNode::ClearCaptures(Interval range, RegExpNode* on_success) {
+  Zone* zone = on_success->zone();
+  ActionNode* node = new (zone) ActionNode(CLEAR_CAPTURES, on_success);
+  node->data_.u_clear_captures.range_from = range.from();
+  node->data_.u_clear_captures.range_to = range.to();
+  return node;
+}
+
+// Creates an action node that, on submatch entry, records the backtrack
+// stack pointer and the current position in the given registers.
+ActionNode* ActionNode::BeginSubmatch(int stack_reg, int position_reg,
+                                      RegExpNode* on_success) {
+  Zone* zone = on_success->zone();
+  ActionNode* node = new (zone) ActionNode(BEGIN_SUBMATCH, on_success);
+  node->data_.u_submatch.stack_pointer_register = stack_reg;
+  node->data_.u_submatch.current_position_register = position_reg;
+  return node;
+}
+
+// Creates the action node run when a positive submatch body has matched:
+// it carries the registers used to restore stack/position state plus the
+// range of registers to clear (|clear_register_count| starting at
+// |clear_register_from|).
+ActionNode* ActionNode::PositiveSubmatchSuccess(int stack_reg, int position_reg,
+                                                int clear_register_count,
+                                                int clear_register_from,
+                                                RegExpNode* on_success) {
+  Zone* zone = on_success->zone();
+  ActionNode* node =
+      new (zone) ActionNode(POSITIVE_SUBMATCH_SUCCESS, on_success);
+  node->data_.u_submatch.stack_pointer_register = stack_reg;
+  node->data_.u_submatch.current_position_register = position_reg;
+  node->data_.u_submatch.clear_register_count = clear_register_count;
+  node->data_.u_submatch.clear_register_from = clear_register_from;
+  return node;
+}
+
+// Creates an action node used inside quantifier loops to detect repetitions
+// that matched the empty string, parameterized by the registers recorded
+// here and the repetition limit.
+ActionNode* ActionNode::EmptyMatchCheck(int start_register,
+                                        int repetition_register,
+                                        int repetition_limit,
+                                        RegExpNode* on_success) {
+  Zone* zone = on_success->zone();
+  ActionNode* node = new (zone) ActionNode(EMPTY_MATCH_CHECK, on_success);
+  node->data_.u_empty_match_check.start_register = start_register;
+  node->data_.u_empty_match_check.repetition_register = repetition_register;
+  node->data_.u_empty_match_check.repetition_limit = repetition_limit;
+  return node;
+}
+
+// Generates the boilerplate Accept() override for every node type listed in
+// FOR_EACH_NODE_TYPE: each simply double-dispatches to the corresponding
+// NodeVisitor::Visit* method.
+#define DEFINE_ACCEPT(Type) \
+ void Type##Node::Accept(NodeVisitor* visitor) { visitor->Visit##Type(this); }
+FOR_EACH_NODE_TYPE(DEFINE_ACCEPT)
+#undef DEFINE_ACCEPT
+
+// LoopChoiceNode overrides the macro-generated choice Accept with its own
+// dedicated visitor hook.
+void LoopChoiceNode::Accept(NodeVisitor* visitor) {
+ visitor->VisitLoopChoice(this);
+}
+
+// -------------------------------------------------------------------
+// Emit code.
+
+// Emits the test for a loop guard. Note the emitted condition is the
+// negation of the guard: a LT guard branches to the backtrack label when the
+// register is >= the value, and a GEQ guard when it is < the value. The
+// DCHECKs ensure the register's value is not still deferred in the trace.
+void ChoiceNode::GenerateGuard(RegExpMacroAssembler* macro_assembler,
+ Guard* guard, Trace* trace) {
+ switch (guard->op()) {
+ case Guard::LT:
+ DCHECK(!trace->mentions_reg(guard->reg()));
+ macro_assembler->IfRegisterGE(guard->reg(), guard->value(),
+ trace->backtrack());
+ break;
+ case Guard::GEQ:
+ DCHECK(!trace->mentions_reg(guard->reg()));
+ macro_assembler->IfRegisterLT(guard->reg(), guard->value(),
+ trace->backtrack());
+ break;
+ }
+}
+
+// Returns the number of characters in the equivalence class, omitting those
+// that cannot occur in the source string because it is Latin1. The class
+// members are written to |letters| (capacity |letter_length|).
+static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
+ bool one_byte_subject,
+ unibrow::uchar* letters,
+ int letter_length) {
+#ifdef V8_INTL_SUPPORT
+ // With ICU available, take the full case-insensitive closure of the
+ // character and enumerate its members range by range.
+ icu::UnicodeSet set;
+ set.add(character);
+ set = set.closeOver(USET_CASE_INSENSITIVE);
+ int32_t range_count = set.getRangeCount();
+ int items = 0;
+ for (int32_t i = 0; i < range_count; i++) {
+ UChar32 start = set.getRangeStart(i);
+ UChar32 end = set.getRangeEnd(i);
+ // Guard the caller-supplied output buffer.
+ CHECK(end - start + items <= letter_length);
+ while (start <= end) {
+ if (one_byte_subject && start > String::kMaxOneByteCharCode) break;
+ letters[items++] = (unibrow::uchar)(start);
+ start++;
+ }
+ }
+ return items;
+#else
+ // Without ICU, fall back to unibrow's uncanonicalize table.
+ int length =
+ isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
+ // Unibrow returns 0 or 1 for characters where case independence is
+ // trivial.
+ if (length == 0) {
+ letters[0] = character;
+ length = 1;
+ }
+
+ // Filter out class members that cannot appear in a one-byte subject.
+ if (one_byte_subject) {
+ int new_length = 0;
+ for (int i = 0; i < length; i++) {
+ if (letters[i] <= String::kMaxOneByteCharCode) {
+ letters[new_length++] = letters[i];
+ }
+ }
+ length = new_length;
+ }
+
+ return length;
+#endif // V8_INTL_SUPPORT
+}
+
+// Emits a check that the current character equals |c| exactly (no case
+// folding) and returns whether the emitted code bounds-checked the input.
+// |isolate| is unused here; the parameter exists so the signature matches
+// EmitCharacterFunction.
+static inline bool EmitSimpleCharacter(Isolate* isolate,
+ RegExpCompiler* compiler, uc16 c,
+ Label* on_failure, int cp_offset,
+ bool check, bool preloaded) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ bool bound_checked = false;
+ if (!preloaded) {
+ assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
+ // NOTE(review): this reports true even when |check| is false, whereas
+ // EmitAtomNonLetter reports |check| in the same situation -- confirm
+ // the asymmetry is intended.
+ bound_checked = true;
+ }
+ assembler->CheckNotCharacter(c, on_failure);
+ return bound_checked;
+}
+
+// Only emits non-letters (things that don't have case). Only used for case
+// independent matches. Returns whether the emitted code bounds-checked the
+// input position (false also when nothing was emitted at all).
+static inline bool EmitAtomNonLetter(Isolate* isolate, RegExpCompiler* compiler,
+ uc16 c, Label* on_failure, int cp_offset,
+ bool check, bool preloaded) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ bool one_byte = compiler->one_byte();
+ unibrow::uchar chars[4];
+ int length = GetCaseIndependentLetters(isolate, c, one_byte, chars, 4);
+ if (length < 1) {
+ // This can't match. Must be an one-byte subject and a non-one-byte
+ // character. We do not need to do anything since the one-byte pass
+ // already handled this.
+ return false; // Bounds not checked.
+ }
+ bool checked = false;
+ // We handle the length > 1 case in a later pass.
+ if (length == 1) {
+ if (one_byte && c > String::kMaxOneByteCharCodeU) {
+ // Can't match - see above.
+ return false; // Bounds not checked.
+ }
+ if (!preloaded) {
+ macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
+ checked = check;
+ }
+ macro_assembler->CheckNotCharacter(c, on_failure);
+ }
+ return checked;
+}
+
+// Tries to match the two case-variants c1 < c2 with a single masked compare
+// instead of two separate character checks. Returns true when the shortcut
+// was emitted, false when the caller must fall back to two comparisons.
+static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
+                                      bool one_byte, uc16 c1, uc16 c2,
+                                      Label* on_failure) {
+  uc16 char_mask =
+      one_byte ? String::kMaxOneByteCharCode : String::kMaxUtf16CodeUnit;
+  uc16 differing_bits = c1 ^ c2;
+  // A power-of-two xor means c1 and c2 differ in exactly one bit: mask that
+  // bit away and compare against c1.
+  if (((differing_bits - 1) & differing_bits) == 0) {
+    // Ecma262UnCanonicalize always gives the highest number last.
+    DCHECK(c2 > c1);
+    uc16 mask = char_mask ^ differing_bits;
+    macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure);
+    return true;
+  }
+  DCHECK(c2 > c1);
+  uc16 diff = c2 - c1;
+  if (((diff - 1) & diff) == 0 && c1 >= diff) {
+    // The characters differ by 2^n but not by a single bit: subtract the
+    // difference from the loaded character, then apply the mask trick. The
+    // c1 >= diff condition avoids negative intermediate values, which keeps
+    // code generation simple.
+    uc16 mask = char_mask ^ diff;
+    macro_assembler->CheckNotCharacterAfterMinusAnd(c1 - diff, diff, mask,
+                                                    on_failure);
+    return true;
+  }
+  return false;
+}
+
+// Common signature shared by EmitSimpleCharacter, EmitAtomNonLetter and
+// EmitAtomLetter so callers can select the emitter as a function pointer.
+using EmitCharacterFunction = bool(Isolate* isolate, RegExpCompiler* compiler,
+ uc16 c, Label* on_failure, int cp_offset,
+ bool check, bool preloaded);
+
+// Only emits letters (things that have case). Only used for case independent
+// matches. Emits a check that the current character is one of the (2 to 4)
+// case-equivalents of |c|; returns false when |c| has no non-trivial
+// equivalence class (nothing emitted), true otherwise.
+static inline bool EmitAtomLetter(Isolate* isolate, RegExpCompiler* compiler,
+ uc16 c, Label* on_failure, int cp_offset,
+ bool check, bool preloaded) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ bool one_byte = compiler->one_byte();
+ unibrow::uchar chars[4];
+ int length = GetCaseIndependentLetters(isolate, c, one_byte, chars, 4);
+ if (length <= 1) return false;
+ // We may not need to check against the end of the input string
+ // if this character lies before a character that matched.
+ if (!preloaded) {
+ macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
+ }
+ Label ok;
+ switch (length) {
+ case 2: {
+ // Prefer the single masked compare; otherwise accept chars[0] or
+ // require chars[1].
+ if (ShortCutEmitCharacterPair(macro_assembler, one_byte, chars[0],
+ chars[1], on_failure)) {
+ } else {
+ macro_assembler->CheckCharacter(chars[0], &ok);
+ macro_assembler->CheckNotCharacter(chars[1], on_failure);
+ macro_assembler->Bind(&ok);
+ }
+ break;
+ }
+ case 4:
+ // Deliberate fallthrough: check the fourth variant, then the common
+ // three-variant sequence below.
+ macro_assembler->CheckCharacter(chars[3], &ok);
+ V8_FALLTHROUGH;
+ case 3:
+ macro_assembler->CheckCharacter(chars[0], &ok);
+ macro_assembler->CheckCharacter(chars[1], &ok);
+ macro_assembler->CheckNotCharacter(chars[2], on_failure);
+ macro_assembler->Bind(&ok);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return true;
+}
+
+// Emits a test against a single boundary: characters < |border| go to
+// |below|, characters >= |border| go to |above_or_equal|. Whichever label
+// equals |fall_through| needs no branch, so only the other side is emitted.
+static void EmitBoundaryTest(RegExpMacroAssembler* masm, int border,
+ Label* fall_through, Label* above_or_equal,
+ Label* below) {
+ if (below != fall_through) {
+ masm->CheckCharacterLT(border, below);
+ if (above_or_equal != fall_through) masm->GoTo(above_or_equal);
+ } else {
+ masm->CheckCharacterGT(border - 1, above_or_equal);
+ }
+}
+
+// Emits a test for membership in the inclusive range [first, last]:
+// characters inside go to |in_range|, others to |out_of_range|. The label
+// equal to |fall_through| needs no branch; single-character ranges use the
+// cheaper equality check.
+static void EmitDoubleBoundaryTest(RegExpMacroAssembler* masm, int first,
+ int last, Label* fall_through,
+ Label* in_range, Label* out_of_range) {
+ if (in_range == fall_through) {
+ if (first == last) {
+ masm->CheckNotCharacter(first, out_of_range);
+ } else {
+ masm->CheckCharacterNotInRange(first, last, out_of_range);
+ }
+ } else {
+ if (first == last) {
+ masm->CheckCharacter(first, in_range);
+ } else {
+ masm->CheckCharacterInRange(first, last, in_range);
+ }
+ if (out_of_range != fall_through) masm->GoTo(out_of_range);
+ }
+}
+
+// even_label is for ranges[i] to ranges[i + 1] where i - start_index is even.
+// odd_label is for ranges[i] to ranges[i + 1] where i - start_index is odd.
+// Builds a kTableSize-entry byte table over the page containing all of the
+// boundaries and emits a single table lookup plus at most one jump.
+static void EmitUseLookupTable(RegExpMacroAssembler* masm,
+ ZoneList<int>* ranges, int start_index,
+ int end_index, int min_char, Label* fall_through,
+ Label* even_label, Label* odd_label) {
+ static const int kSize = RegExpMacroAssembler::kTableSize;
+ static const int kMask = RegExpMacroAssembler::kTableMask;
+
+ int base = (min_char & ~kMask);
+ USE(base);
+
+ // Assert that everything is on one kTableSize page.
+ for (int i = start_index; i <= end_index; i++) {
+ DCHECK_EQ(ranges->at(i) & ~kMask, base);
+ }
+ DCHECK(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
+
+ char templ[kSize];
+ Label* on_bit_set;
+ Label* on_bit_clear;
+ int bit;
+ // Choose bit polarity so that the label equal to fall_through ends up on
+ // the "bit clear" side (which needs no extra GoTo below).
+ if (even_label == fall_through) {
+ on_bit_set = odd_label;
+ on_bit_clear = even_label;
+ bit = 1;
+ } else {
+ on_bit_set = even_label;
+ on_bit_clear = odd_label;
+ bit = 0;
+ }
+ // Fill the table: entries before the first boundary, then alternate the
+ // bit value at every boundary, then pad out the tail.
+ for (int i = 0; i < (ranges->at(start_index) & kMask) && i < kSize; i++) {
+ templ[i] = bit;
+ }
+ int j = 0;
+ bit ^= 1;
+ for (int i = start_index; i < end_index; i++) {
+ for (j = (ranges->at(i) & kMask); j < (ranges->at(i + 1) & kMask); j++) {
+ templ[j] = bit;
+ }
+ bit ^= 1;
+ }
+ for (int i = j; i < kSize; i++) {
+ templ[i] = bit;
+ }
+ Factory* factory = masm->isolate()->factory();
+ // TODO(erikcorry): Cache these.
+ Handle<ByteArray> ba = factory->NewByteArray(kSize, AllocationType::kOld);
+ for (int i = 0; i < kSize; i++) {
+ ba->set(i, templ[i]);
+ }
+ masm->CheckBitInTable(ba, on_bit_set);
+ if (on_bit_clear != fall_through) masm->GoTo(on_bit_clear);
+}
+
+// Emits a direct test for the single range starting at |cut_index| (jumping
+// to the parity-appropriate label when the character is inside it) and then
+// removes that range from the boundary array so callers can recurse on the
+// remainder. Out-of-range falls through (the dummy label is never jumped to,
+// as the DCHECK verifies).
+static void CutOutRange(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
+ int start_index, int end_index, int cut_index,
+ Label* even_label, Label* odd_label) {
+ bool odd = (((cut_index - start_index) & 1) == 1);
+ Label* in_range_label = odd ? odd_label : even_label;
+ Label dummy;
+ EmitDoubleBoundaryTest(masm, ranges->at(cut_index),
+ ranges->at(cut_index + 1) - 1, &dummy, in_range_label,
+ &dummy);
+ DCHECK(!dummy.is_linked());
+ // Cut out the single range by rewriting the array. This creates a new
+ // range that is a merger of the two ranges on either side of the one we
+ // are cutting out. The oddity of the labels is preserved.
+ for (int j = cut_index; j > start_index; j--) {
+ ranges->at(j) = ranges->at(j - 1);
+ }
+ for (int j = cut_index + 1; j < end_index; j++) {
+ ranges->at(j) = ranges->at(j + 1);
+ }
+}
+
+// Unicode case. Split the search space into kSize spaces that are handled
+// with recursion. On return, boundaries up to *new_end_index lie below
+// *border and boundaries from *new_start_index onward lie at or above it,
+// so the caller can recurse on the two halves independently.
+static void SplitSearchSpace(ZoneList<int>* ranges, int start_index,
+ int end_index, int* new_start_index,
+ int* new_end_index, int* border) {
+ static const int kSize = RegExpMacroAssembler::kTableSize;
+ static const int kMask = RegExpMacroAssembler::kTableMask;
+
+ int first = ranges->at(start_index);
+ int last = ranges->at(end_index) - 1;
+
+ // Default split: the end of the kSize-aligned page containing |first|.
+ *new_start_index = start_index;
+ *border = (ranges->at(start_index) & ~kMask) + kSize;
+ while (*new_start_index < end_index) {
+ if (ranges->at(*new_start_index) > *border) break;
+ (*new_start_index)++;
+ }
+ // new_start_index is the index of the first edge that is beyond the
+ // current kSize space.
+
+ // For very large search spaces we do a binary chop search of the non-Latin1
+ // space instead of just going to the end of the current kSize space. The
+ // heuristics are complicated a little by the fact that any 128-character
+ // encoding space can be quickly tested with a table lookup, so we don't
+ // wish to do binary chop search at a smaller granularity than that. A
+ // 128-character space can take up a lot of space in the ranges array if,
+ // for example, we only want to match every second character (eg. the lower
+ // case characters on some Unicode pages).
+ int binary_chop_index = (end_index + start_index) / 2;
+ // The first test ensures that we get to the code that handles the Latin1
+ // range with a single not-taken branch, speeding up this important
+ // character range (even non-Latin1 charset-based text has spaces and
+ // punctuation).
+ if (*border - 1 > String::kMaxOneByteCharCode && // Latin1 case.
+ end_index - start_index > (*new_start_index - start_index) * 2 &&
+ last - first > kSize * 2 && binary_chop_index > *new_start_index &&
+ ranges->at(binary_chop_index) >= first + 2 * kSize) {
+ int scan_forward_for_section_border = binary_chop_index;
+ int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
+
+ // Move the border forward to the next page boundary that actually
+ // separates two boundary entries.
+ while (scan_forward_for_section_border < end_index) {
+ if (ranges->at(scan_forward_for_section_border) > new_border) {
+ *new_start_index = scan_forward_for_section_border;
+ *border = new_border;
+ break;
+ }
+ scan_forward_for_section_border++;
+ }
+ }
+
+ DCHECK(*new_start_index > start_index);
+ *new_end_index = *new_start_index - 1;
+ if (ranges->at(*new_end_index) == *border) {
+ (*new_end_index)--;
+ }
+ // If nothing lies above the border, clamp it so the upper half is empty.
+ if (*border >= ranges->at(end_index)) {
+ *border = ranges->at(end_index);
+ *new_start_index = end_index; // Won't be used.
+ *new_end_index = end_index - 1;
+ }
+}
+
+// Gets a series of segment boundaries representing a character class. If the
+// character is in the range between an even and an odd boundary (counting from
+// start_index) then go to even_label, otherwise go to odd_label. We already
+// know that the character is in the range of min_char to max_char inclusive.
+// Either label can be nullptr indicating backtracking. Either label can also
+// be equal to the fall_through label.
+// Strategy: trivial one/two-boundary cases first, then direct tests for few
+// intervals, then a table lookup when all boundaries share a page, and
+// finally a recursive split of the search space.
+static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
+ int start_index, int end_index, uc32 min_char,
+ uc32 max_char, Label* fall_through,
+ Label* even_label, Label* odd_label) {
+ DCHECK_LE(min_char, String::kMaxUtf16CodeUnit);
+ DCHECK_LE(max_char, String::kMaxUtf16CodeUnit);
+
+ int first = ranges->at(start_index);
+ int last = ranges->at(end_index) - 1;
+
+ DCHECK_LT(min_char, first);
+
+ // Just need to test if the character is before or on-or-after
+ // a particular character.
+ if (start_index == end_index) {
+ EmitBoundaryTest(masm, first, fall_through, even_label, odd_label);
+ return;
+ }
+
+ // Another almost trivial case: There is one interval in the middle that is
+ // different from the end intervals.
+ if (start_index + 1 == end_index) {
+ EmitDoubleBoundaryTest(masm, first, last, fall_through, even_label,
+ odd_label);
+ return;
+ }
+
+ // It's not worth using table lookup if there are very few intervals in the
+ // character class.
+ if (end_index - start_index <= 6) {
+ // It is faster to test for individual characters, so we look for those
+ // first, then try arbitrary ranges in the second round.
+ static int kNoCutIndex = -1;
+ int cut = kNoCutIndex;
+ for (int i = start_index; i < end_index; i++) {
+ if (ranges->at(i) == ranges->at(i + 1) - 1) {
+ cut = i;
+ break;
+ }
+ }
+ if (cut == kNoCutIndex) cut = start_index;
+ CutOutRange(masm, ranges, start_index, end_index, cut, even_label,
+ odd_label);
+ DCHECK_GE(end_index - start_index, 2);
+ // CutOutRange removed one interval; recurse on the shortened array.
+ GenerateBranches(masm, ranges, start_index + 1, end_index - 1, min_char,
+ max_char, fall_through, even_label, odd_label);
+ return;
+ }
+
+ // If there are a lot of intervals in the regexp, then we will use tables to
+ // determine whether the character is inside or outside the character class.
+ static const int kBits = RegExpMacroAssembler::kTableSizeBits;
+
+ if ((max_char >> kBits) == (min_char >> kBits)) {
+ EmitUseLookupTable(masm, ranges, start_index, end_index, min_char,
+ fall_through, even_label, odd_label);
+ return;
+ }
+
+ // Peel off the portion below the first boundary so the recursion starts on
+ // the same table page as |first|. Note the label parity flips.
+ if ((min_char >> kBits) != (first >> kBits)) {
+ masm->CheckCharacterLT(first, odd_label);
+ GenerateBranches(masm, ranges, start_index + 1, end_index, first, max_char,
+ fall_through, odd_label, even_label);
+ return;
+ }
+
+ int new_start_index = 0;
+ int new_end_index = 0;
+ int border = 0;
+
+ SplitSearchSpace(ranges, start_index, end_index, &new_start_index,
+ &new_end_index, &border);
+
+ Label handle_rest;
+ Label* above = &handle_rest;
+ if (border == last + 1) {
+ // We didn't find any section that started after the limit, so everything
+ // above the border is one of the terminal labels.
+ above = (end_index & 1) != (start_index & 1) ? odd_label : even_label;
+ DCHECK(new_end_index == end_index - 1);
+ }
+
+ DCHECK_LE(start_index, new_end_index);
+ DCHECK_LE(new_start_index, end_index);
+ DCHECK_LT(start_index, new_start_index);
+ DCHECK_LT(new_end_index, end_index);
+ DCHECK(new_end_index + 1 == new_start_index ||
+ (new_end_index + 2 == new_start_index &&
+ border == ranges->at(new_end_index + 1)));
+ DCHECK_LT(min_char, border - 1);
+ DCHECK_LT(border, max_char);
+ DCHECK_LT(ranges->at(new_end_index), border);
+ DCHECK(border < ranges->at(new_start_index) ||
+ (border == ranges->at(new_start_index) &&
+ new_start_index == end_index && new_end_index == end_index - 1 &&
+ border == last + 1));
+ DCHECK(new_start_index == 0 || border >= ranges->at(new_start_index - 1));
+
+ // Recurse on the lower half inline; the upper half (if any code jumped to
+ // handle_rest) follows, with label parity adjusted for the split point.
+ masm->CheckCharacterGT(border - 1, above);
+ Label dummy;
+ GenerateBranches(masm, ranges, start_index, new_end_index, min_char,
+ border - 1, &dummy, even_label, odd_label);
+ if (handle_rest.is_linked()) {
+ masm->Bind(&handle_rest);
+ bool flip = (new_start_index & 1) != (start_index & 1);
+ GenerateBranches(masm, ranges, new_start_index, end_index, border, max_char,
+ &dummy, flip ? odd_label : even_label,
+ flip ? even_label : odd_label);
+ }
+}
+
+// Emits code that checks whether the current character belongs to the
+// character class |cc|, branching to |on_failure| if not. Degenerate classes
+// (matching nothing or everything) are handled specially, then standard
+// classes via the assembler's built-in checks, and finally the general case
+// via a boundary list dispatched by GenerateBranches.
+static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
+ RegExpCharacterClass* cc, bool one_byte,
+ Label* on_failure, int cp_offset, bool check_offset,
+ bool preloaded, Zone* zone) {
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone);
+ CharacterRange::Canonicalize(ranges);
+
+ int max_char;
+ if (one_byte) {
+ max_char = String::kMaxOneByteCharCode;
+ } else {
+ max_char = String::kMaxUtf16CodeUnit;
+ }
+
+ int range_count = ranges->length();
+
+ // Drop trailing ranges that lie entirely above the subject's character
+ // range (they can never match).
+ int last_valid_range = range_count - 1;
+ while (last_valid_range >= 0) {
+ CharacterRange& range = ranges->at(last_valid_range);
+ if (range.from() <= max_char) {
+ break;
+ }
+ last_valid_range--;
+ }
+
+ if (last_valid_range < 0) {
+ // No representable ranges: a positive class always fails, a negated one
+ // always succeeds (modulo the position check).
+ if (!cc->is_negated()) {
+ macro_assembler->GoTo(on_failure);
+ }
+ if (check_offset) {
+ macro_assembler->CheckPosition(cp_offset, on_failure);
+ }
+ return;
+ }
+
+ if (last_valid_range == 0 && ranges->at(0).IsEverything(max_char)) {
+ if (cc->is_negated()) {
+ macro_assembler->GoTo(on_failure);
+ } else {
+ // This is a common case hit by non-anchored expressions.
+ if (check_offset) {
+ macro_assembler->CheckPosition(cp_offset, on_failure);
+ }
+ }
+ return;
+ }
+
+ if (!preloaded) {
+ macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
+ }
+
+ if (cc->is_standard(zone) && macro_assembler->CheckSpecialCharacterClass(
+ cc->standard_type(), on_failure)) {
+ return;
+ }
+
+ // A new list with ascending entries. Each entry is a code unit
+ // where there is a boundary between code units that are part of
+ // the class and code units that are not. Normally we insert an
+ // entry at zero which goes to the failure label, but if there
+ // was already one there we fall through for success on that entry.
+ // Subsequent entries have alternating meaning (success/failure).
+ ZoneList<int>* range_boundaries =
+ new (zone) ZoneList<int>(last_valid_range, zone);
+
+ bool zeroth_entry_is_failure = !cc->is_negated();
+
+ for (int i = 0; i <= last_valid_range; i++) {
+ CharacterRange& range = ranges->at(i);
+ if (range.from() == 0) {
+ DCHECK_EQ(i, 0);
+ zeroth_entry_is_failure = !zeroth_entry_is_failure;
+ } else {
+ range_boundaries->Add(range.from(), zone);
+ }
+ range_boundaries->Add(range.to() + 1, zone);
+ }
+ int end_index = range_boundaries->length() - 1;
+ // A final boundary above max_char is redundant for this subject width.
+ if (range_boundaries->at(end_index) > max_char) {
+ end_index--;
+ }
+
+ Label fall_through;
+ GenerateBranches(macro_assembler, range_boundaries,
+ 0, // start_index.
+ end_index,
+ 0, // min_char.
+ max_char, &fall_through,
+ zeroth_entry_is_failure ? &fall_through : on_failure,
+ zeroth_entry_is_failure ? on_failure : &fall_through);
+ macro_assembler->Bind(&fall_through);
+}
+
// Out-of-line defaulted destructor; anchors RegExpNode's destruction in this
// translation unit. NOTE(review): presumably declared virtual in the header
// since RegExpNode is a polymorphic base — confirm there.
RegExpNode::~RegExpNode() = default;
+
// Decides whether to emit another trace-specialized copy of this node's code
// (CONTINUE) or to stop and use/queue a single generic version (DONE). This
// caps both code-size blowup (kMaxCopiesCodeGenerated per node) and compiler
// recursion depth.
RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
                                                  Trace* trace) {
  // If we are generating a greedy loop then don't stop and don't reuse code.
  if (trace->stop_node() != nullptr) {
    return CONTINUE;
  }

  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
  if (trace->is_trivial()) {
    if (label_.is_bound() || on_work_list() || !KeepRecursing(compiler)) {
      // If a generic version is already scheduled to be generated or we have
      // recursed too deeply then just generate a jump to that code.
      macro_assembler->GoTo(&label_);
      // This will queue it up for generation of a generic version if it hasn't
      // already been queued.
      compiler->AddWork(this);
      return DONE;
    }
    // Generate generic version of the node and bind the label for later use.
    macro_assembler->Bind(&label_);
    return CONTINUE;
  }

  // We are being asked to make a non-generic version. Keep track of how many
  // non-generic versions we generate so as not to overdo it.
  trace_count_++;
  if (KeepRecursing(compiler) && compiler->optimize() &&
      trace_count_ < kMaxCopiesCodeGenerated) {
    return CONTINUE;
  }

  // If we get here code has been generated for this node too many times or
  // recursion is too deep. Time to switch to a generic version. The code for
  // generic versions above can handle deep recursion properly.
  bool was_limiting = compiler->limiting_recursion();
  compiler->set_limiting_recursion(true);
  // Flushing the trace emits the deferred actions and then re-enters this
  // node with a trivial trace, which takes the generic path above.
  trace->Flush(compiler, this);
  compiler->set_limiting_recursion(was_limiting);
  return DONE;
}
+
+bool RegExpNode::KeepRecursing(RegExpCompiler* compiler) {
+ return !compiler->limiting_recursion() &&
+ compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion;
+}
+
+int ActionNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
+ if (budget <= 0) return 0;
+ if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
+ return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
+}
+
// Fills in Boyer-Moore lookahead info for this action node by delegating to
// the successor, except for positive submatch success which rewinds input.
void ActionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
                              BoyerMooreLookahead* bm, bool not_at_start) {
  if (action_type_ == POSITIVE_SUBMATCH_SUCCESS) {
    // Anything may follow a positive submatch success, thus we need to accept
    // all characters from this position onwards.
    bm->SetRest(offset);
  } else {
    on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
  }
  SaveBMInfo(bm, not_at_start, offset);
}
+
// Minimum number of characters guaranteed to be consumed from this assertion
// onwards, bounded by the remaining recursion budget.
int AssertionNode::EatsAtLeast(int still_to_find, int budget,
                               bool not_at_start) {
  if (budget <= 0) return 0;
  // If we know we are not at the start and we are asked "how many characters
  // will you match if you succeed?" then we can answer anything since false
  // implies false. So lets just return the max answer (still_to_find) since
  // that won't prevent us from preloading a lot of characters for the other
  // branches in the node graph.
  if (assertion_type() == AT_START && not_at_start) return still_to_find;
  return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
}
+
// Fills in Boyer-Moore lookahead info; mirrors the short-circuit in
// EatsAtLeast for an AT_START assertion that cannot match.
void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
                                 BoyerMooreLookahead* bm, bool not_at_start) {
  // Match the behaviour of EatsAtLeast on this node.
  if (assertion_type() == AT_START && not_at_start) return;
  on_success()->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
  SaveBMInfo(bm, not_at_start, offset);
}
+
+int BackReferenceNode::EatsAtLeast(int still_to_find, int budget,
+ bool not_at_start) {
+ if (read_backward()) return 0;
+ if (budget <= 0) return 0;
+ return on_success()->EatsAtLeast(still_to_find, budget - 1, not_at_start);
+}
+
// A text node eats exactly its own length, plus whatever its successor
// guarantees (capped once we already cover still_to_find).
int TextNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
  if (read_backward()) return 0;
  int answer = Length();
  if (answer >= still_to_find) return answer;
  if (budget <= 0) return answer;
  // We are not at start after this node so we set the last argument to 'true'.
  return answer +
         on_success()->EatsAtLeast(still_to_find - answer, budget - 1, true);
}
+
+int NegativeLookaroundChoiceNode::EatsAtLeast(int still_to_find, int budget,
+ bool not_at_start) {
+ if (budget <= 0) return 0;
+ // Alternative 0 is the negative lookahead, alternative 1 is what comes
+ // afterwards.
+ RegExpNode* node = alternatives_->at(1).node();
+ return node->EatsAtLeast(still_to_find, budget - 1, not_at_start);
+}
+
+void NegativeLookaroundChoiceNode::GetQuickCheckDetails(
+ QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in,
+ bool not_at_start) {
+ // Alternative 0 is the negative lookahead, alternative 1 is what comes
+ // afterwards.
+ RegExpNode* node = alternatives_->at(1).node();
+ return node->GetQuickCheckDetails(details, compiler, filled_in, not_at_start);
+}
+
// Minimum over all alternatives (optionally skipping ignore_this_node, used
// by LoopChoiceNode to exclude the loop body) of the characters each one
// guarantees to consume. The budget is split evenly across alternatives.
int ChoiceNode::EatsAtLeastHelper(int still_to_find, int budget,
                                  RegExpNode* ignore_this_node,
                                  bool not_at_start) {
  if (budget <= 0) return 0;
  // NOTE(review): 100 is an arbitrary upper bound on the answer; it only
  // matters if every alternative is skipped — confirm callers never rely on
  // a larger value.
  int min = 100;
  int choice_count = alternatives_->length();
  budget = (budget - 1) / choice_count;
  for (int i = 0; i < choice_count; i++) {
    RegExpNode* node = alternatives_->at(i).node();
    if (node == ignore_this_node) continue;
    int node_eats_at_least =
        node->EatsAtLeast(still_to_find, budget, not_at_start);
    if (node_eats_at_least < min) min = node_eats_at_least;
    // Cannot get lower than zero; stop early.
    if (min == 0) return 0;
  }
  return min;
}
+
// For a loop, the guaranteed consumption excludes the loop body (which may
// iterate zero times) — only the continuation counts.
int LoopChoiceNode::EatsAtLeast(int still_to_find, int budget,
                                bool not_at_start) {
  return EatsAtLeastHelper(still_to_find, budget - 1, loop_node_, not_at_start);
}
+
// Minimum guaranteed consumption across all alternatives.
int ChoiceNode::EatsAtLeast(int still_to_find, int budget, bool not_at_start) {
  return EatsAtLeastHelper(still_to_find, budget, nullptr, not_at_start);
}
+
// Propagates the most significant set bit into every lower bit position,
// e.g. 0x00100000 -> 0x001FFFFF (the classic O(log n) bit-smear idiom).
static inline uint32_t SmearBitsRight(uint32_t v) {
  for (int shift = 1; shift <= 16; shift <<= 1) {
    v |= v >> shift;
  }
  return v;
}
+
// Packs the per-position masks/values into the single machine-word mask_ and
// value_ used by the quick check (8 bits per character for one-byte subjects,
// 16 otherwise). Returns whether any position contributes a useful check.
bool QuickCheckDetails::Rationalize(bool asc) {
  bool found_useful_op = false;
  uint32_t char_mask;
  if (asc) {
    char_mask = String::kMaxOneByteCharCode;
  } else {
    char_mask = String::kMaxUtf16CodeUnit;
  }
  mask_ = 0;
  value_ = 0;
  int char_shift = 0;
  for (int i = 0; i < characters_; i++) {
    Position* pos = &positions_[i];
    // NOTE(review): a position counts as useful only if its mask has bits in
    // the low byte, even for two-byte subjects — confirm this is intended.
    if ((pos->mask & String::kMaxOneByteCharCode) != 0) {
      found_useful_op = true;
    }
    mask_ |= (pos->mask & char_mask) << char_shift;
    value_ |= (pos->value & char_mask) << char_shift;
    char_shift += asc ? 8 : 16;
  }
  return found_useful_op;
}
+
// Emits a fast mask-and-compare pre-check over the next few characters so a
// non-matching position can be rejected without running the full node code.
// Returns false if no useful quick check could be emitted. If
// fall_through_on_failure is set, failure falls through (caller handles it);
// otherwise failure jumps to the trace's backtrack label.
bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
                                Trace* bounds_check_trace, Trace* trace,
                                bool preload_has_checked_bounds,
                                Label* on_possible_success,
                                QuickCheckDetails* details,
                                bool fall_through_on_failure) {
  if (details->characters() == 0) return false;
  GetQuickCheckDetails(details, compiler, 0,
                       trace->at_start() == Trace::FALSE_VALUE);
  if (details->cannot_match()) return false;
  if (!details->Rationalize(compiler->one_byte())) return false;
  DCHECK(details->characters() == 1 ||
         compiler->macro_assembler()->CanReadUnaligned());
  uint32_t mask = details->mask();
  uint32_t value = details->value();

  RegExpMacroAssembler* assembler = compiler->macro_assembler();

  if (trace->characters_preloaded() != details->characters()) {
    DCHECK(trace->cp_offset() == bounds_check_trace->cp_offset());
    // We are attempting to preload the minimum number of characters
    // any choice would eat, so if the bounds check fails, then none of the
    // choices can succeed, so we can just immediately backtrack, rather
    // than go to the next choice.
    assembler->LoadCurrentCharacter(
        trace->cp_offset(), bounds_check_trace->backtrack(),
        !preload_has_checked_bounds, details->characters());
  }

  bool need_mask = true;

  if (details->characters() == 1) {
    // If number of characters preloaded is 1 then we used a byte or 16 bit
    // load so the value is already masked down.
    uint32_t char_mask;
    if (compiler->one_byte()) {
      char_mask = String::kMaxOneByteCharCode;
    } else {
      char_mask = String::kMaxUtf16CodeUnit;
    }
    if ((mask & char_mask) == char_mask) need_mask = false;
    mask &= char_mask;
  } else {
    // For 2-character preloads in one-byte mode or 1-character preloads in
    // two-byte mode we also use a 16 bit load with zero extend.
    static const uint32_t kTwoByteMask = 0xFFFF;
    static const uint32_t kFourByteMask = 0xFFFFFFFF;
    if (details->characters() == 2 && compiler->one_byte()) {
      if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
    } else if (details->characters() == 1 && !compiler->one_byte()) {
      if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
    } else {
      if (mask == kFourByteMask) need_mask = false;
    }
  }

  // Emit the comparison; the AfterAnd variants apply the mask first.
  if (fall_through_on_failure) {
    if (need_mask) {
      assembler->CheckCharacterAfterAnd(value, mask, on_possible_success);
    } else {
      assembler->CheckCharacter(value, on_possible_success);
    }
  } else {
    if (need_mask) {
      assembler->CheckNotCharacterAfterAnd(value, mask, trace->backtrack());
    } else {
      assembler->CheckNotCharacter(value, trace->backtrack());
    }
  }
  return true;
}
+
// Here is the meat of GetQuickCheckDetails (see also the comment on the
// super-class in the .h file).
//
// We iterate along the text object, building up for each character a
// mask and value that can be used to test for a quick failure to match.
// The masks and values for the positions will be combined into a single
// machine word for the current character width in order to be used in
// generating a quick check.
void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
                                    RegExpCompiler* compiler,
                                    int characters_filled_in,
                                    bool not_at_start) {
  // Do not collect any quick check details if the text node reads backward,
  // since it reads in the opposite direction than we use for quick checks.
  if (read_backward()) return;
  Isolate* isolate = compiler->macro_assembler()->isolate();
  DCHECK(characters_filled_in < details->characters());
  int characters = details->characters();
  int char_mask;
  if (compiler->one_byte()) {
    char_mask = String::kMaxOneByteCharCode;
  } else {
    char_mask = String::kMaxUtf16CodeUnit;
  }
  for (int k = 0; k < elements()->length(); k++) {
    TextElement elm = elements()->at(k);
    if (elm.text_type() == TextElement::ATOM) {
      // Literal atom: one mask/value pair per literal character.
      Vector<const uc16> quarks = elm.atom()->data();
      for (int i = 0; i < characters && i < quarks.length(); i++) {
        QuickCheckDetails::Position* pos =
            details->positions(characters_filled_in);
        uc16 c = quarks[i];
        if (elm.atom()->ignore_case()) {
          unibrow::uchar chars[4];
          int length = GetCaseIndependentLetters(
              isolate, c, compiler->one_byte(), chars, 4);
          if (length == 0) {
            // This can happen because all case variants are non-Latin1, but we
            // know the input is Latin1.
            details->set_cannot_match();
            pos->determines_perfectly = false;
            return;
          }
          if (length == 1) {
            // This letter has no case equivalents, so it's nice and simple
            // and the mask-compare will determine definitely whether we have
            // a match at this character position.
            pos->mask = char_mask;
            pos->value = c;
            pos->determines_perfectly = true;
          } else {
            // Several case variants: keep only the bits common to all of
            // them in the mask, so the compare accepts any variant.
            uint32_t common_bits = char_mask;
            uint32_t bits = chars[0];
            for (int j = 1; j < length; j++) {
              uint32_t differing_bits = ((chars[j] & common_bits) ^ bits);
              common_bits ^= differing_bits;
              bits &= common_bits;
            }
            // If length is 2 and common bits has only one zero in it then
            // our mask and compare instruction will determine definitely
            // whether we have a match at this character position. Otherwise
            // it can only be an approximate check.
            uint32_t one_zero = (common_bits | ~char_mask);
            if (length == 2 && ((~one_zero) & ((~one_zero) - 1)) == 0) {
              pos->determines_perfectly = true;
            }
            pos->mask = common_bits;
            pos->value = bits;
          }
        } else {
          // Don't ignore case. Nice simple case where the mask-compare will
          // determine definitely whether we have a match at this character
          // position.
          if (c > char_mask) {
            details->set_cannot_match();
            pos->determines_perfectly = false;
            return;
          }
          pos->mask = char_mask;
          pos->value = c;
          pos->determines_perfectly = true;
        }
        characters_filled_in++;
        DCHECK(characters_filled_in <= details->characters());
        if (characters_filled_in == details->characters()) {
          return;
        }
      }
    } else {
      // Character class: derive an approximate mask/value from its ranges.
      QuickCheckDetails::Position* pos =
          details->positions(characters_filled_in);
      RegExpCharacterClass* tree = elm.char_class();
      ZoneList<CharacterRange>* ranges = tree->ranges(zone());
      DCHECK(!ranges->is_empty());
      if (tree->is_negated()) {
        // A quick check uses multi-character mask and compare. There is no
        // useful way to incorporate a negative char class into this scheme
        // so we just conservatively create a mask and value that will always
        // succeed.
        pos->mask = 0;
        pos->value = 0;
      } else {
        // Skip leading ranges entirely above the subject's character range.
        int first_range = 0;
        while (ranges->at(first_range).from() > char_mask) {
          first_range++;
          if (first_range == ranges->length()) {
            details->set_cannot_match();
            pos->determines_perfectly = false;
            return;
          }
        }
        CharacterRange range = ranges->at(first_range);
        uc16 from = range.from();
        uc16 to = range.to();
        if (to > char_mask) {
          to = char_mask;
        }
        uint32_t differing_bits = (from ^ to);
        // A mask and compare is only perfect if the differing bits form a
        // number like 00011111 with one single block of trailing 1s.
        if ((differing_bits & (differing_bits + 1)) == 0 &&
            from + differing_bits == to) {
          pos->determines_perfectly = true;
        }
        uint32_t common_bits = ~SmearBitsRight(differing_bits);
        uint32_t bits = (from & common_bits);
        for (int i = first_range + 1; i < ranges->length(); i++) {
          CharacterRange range = ranges->at(i);
          uc16 from = range.from();
          uc16 to = range.to();
          if (from > char_mask) continue;
          if (to > char_mask) to = char_mask;
          // Here we are combining more ranges into the mask and compare
          // value. With each new range the mask becomes more sparse and
          // so the chances of a false positive rise. A character class
          // with multiple ranges is assumed never to be equivalent to a
          // mask and compare operation.
          pos->determines_perfectly = false;
          uint32_t new_common_bits = (from ^ to);
          new_common_bits = ~SmearBitsRight(new_common_bits);
          common_bits &= new_common_bits;
          bits &= new_common_bits;
          // This inner differing_bits intentionally shadows the outer one.
          uint32_t differing_bits = (from & common_bits) ^ bits;
          common_bits ^= differing_bits;
          bits &= common_bits;
        }
        pos->mask = common_bits;
        pos->value = bits;
      }
      characters_filled_in++;
      DCHECK(characters_filled_in <= details->characters());
      if (characters_filled_in == details->characters()) {
        return;
      }
    }
  }
  DCHECK(characters_filled_in != details->characters());
  // Text exhausted before the requested number of positions: continue into
  // the successor node.
  if (!details->cannot_match()) {
    on_success()->GetQuickCheckDetails(details, compiler, characters_filled_in,
                                       true);
  }
}
+
+void QuickCheckDetails::Clear() {
+ for (int i = 0; i < characters_; i++) {
+ positions_[i].mask = 0;
+ positions_[i].value = 0;
+ positions_[i].determines_perfectly = false;
+ }
+ characters_ = 0;
+}
+
// Shifts the recorded positions left by |by| characters (dropping the first
// |by| entries) after the cursor has advanced; clears entirely if we advance
// past everything recorded.
void QuickCheckDetails::Advance(int by, bool one_byte) {
  if (by >= characters_ || by < 0) {
    DCHECK_IMPLIES(by < 0, characters_ == 0);
    Clear();
    return;
  }
  DCHECK_LE(characters_ - by, 4);
  DCHECK_LE(characters_, 4);
  for (int i = 0; i < characters_ - by; i++) {
    positions_[i] = positions_[by + i];
  }
  // Zero out the vacated tail positions.
  for (int i = characters_ - by; i < characters_; i++) {
    positions_[i].mask = 0;
    positions_[i].value = 0;
    positions_[i].determines_perfectly = false;
  }
  characters_ -= by;
  // We could change mask_ and value_ here but we would never advance unless
  // they had already been used in a check and they won't be used again because
  // it would gain us nothing. So there's no point.
}
+
// Merges the quick-check details of an alternation branch into this one:
// the result keeps only the mask bits on which both branches agree, so the
// combined check accepts anything either branch would accept.
void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
  DCHECK(characters_ == other->characters_);
  if (other->cannot_match_) {
    return;
  }
  if (cannot_match_) {
    *this = *other;
    return;
  }
  for (int i = from_index; i < characters_; i++) {
    QuickCheckDetails::Position* pos = positions(i);
    QuickCheckDetails::Position* other_pos = other->positions(i);
    if (pos->mask != other_pos->mask || pos->value != other_pos->value ||
        !other_pos->determines_perfectly) {
      // Our mask-compare operation will be approximate unless we have the
      // exact same operation on both sides of the alternation.
      pos->determines_perfectly = false;
    }
    // Intersect the masks, then drop any bits where the two values disagree.
    pos->mask &= other_pos->mask;
    pos->value &= pos->mask;
    other_pos->value &= pos->mask;
    uc16 differing_bits = (pos->value ^ other_pos->value);
    pos->mask &= ~differing_bits;
    pos->value &= pos->mask;
  }
}
+
// RAII guard that marks a NodeInfo as visited for the lifetime of a
// recursive traversal, so cycles in the node graph terminate.
class VisitMarker {
 public:
  explicit VisitMarker(NodeInfo* info) : info_(info) {
    DCHECK(!info->visited);
    info->visited = true;
  }
  // Clears the flag on scope exit, even on early return.
  ~VisitMarker() { info_->visited = false; }

 private:
  NodeInfo* info_;
};
+
// Removes parts of the node graph that cannot match a one-byte subject.
// Returns the (possibly cached) replacement node, or nullptr if this node
// can never match one-byte input.
RegExpNode* SeqRegExpNode::FilterOneByte(int depth) {
  if (info()->replacement_calculated) return replacement();
  // Depth exhausted: conservatively keep the node unchanged.
  if (depth < 0) return this;
  DCHECK(!info()->visited);
  VisitMarker marker(info());
  return FilterSuccessor(depth - 1);
}
+
// Filters the successor; if it is dead for one-byte input, so is this node.
RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
  RegExpNode* next = on_success_->FilterOneByte(depth - 1);
  if (next == nullptr) return set_replacement(nullptr);
  on_success_ = next;
  return set_replacement(this);
}
+
+// We need to check for the following characters: 0x39C 0x3BC 0x178.
+bool RangeContainsLatin1Equivalents(CharacterRange range) {
+ // TODO(dcarney): this could be a lot more efficient.
+ return range.Contains(0x039C) || range.Contains(0x03BC) ||
+ range.Contains(0x0178);
+}
+
+static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
+ for (int i = 0; i < ranges->length(); i++) {
+ // TODO(dcarney): this could be a lot more efficient.
+ if (RangeContainsLatin1Equivalents(ranges->at(i))) return true;
+ }
+ return false;
+}
+
// Returns nullptr if this text node can never match a one-byte subject
// (a required character or character class lies entirely above Latin-1),
// otherwise keeps the node, narrowing case-folded atoms to Latin-1 in place.
RegExpNode* TextNode::FilterOneByte(int depth) {
  if (info()->replacement_calculated) return replacement();
  if (depth < 0) return this;
  DCHECK(!info()->visited);
  VisitMarker marker(info());
  int element_count = elements()->length();
  for (int i = 0; i < element_count; i++) {
    TextElement elm = elements()->at(i);
    if (elm.text_type() == TextElement::ATOM) {
      Vector<const uc16> quarks = elm.atom()->data();
      for (int j = 0; j < quarks.length(); j++) {
        uint16_t c = quarks[j];
        if (elm.atom()->ignore_case()) {
          c = unibrow::Latin1::TryConvertToLatin1(c);
        }
        if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
        // Replace quark in case we converted to Latin-1.
        // NOTE(review): deliberately mutates the atom's backing store through
        // const_cast — the atom is updated in place for later passes.
        uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.begin());
        writable_quarks[j] = c;
      }
    } else {
      DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
      RegExpCharacterClass* cc = elm.char_class();
      ZoneList<CharacterRange>* ranges = cc->ranges(zone());
      CharacterRange::Canonicalize(ranges);
      // Now they are in order so we only need to look at the first.
      int range_count = ranges->length();
      if (cc->is_negated()) {
        // A negated class excluding all of Latin-1 cannot match one-byte.
        if (range_count != 0 && ranges->at(0).from() == 0 &&
            ranges->at(0).to() >= String::kMaxOneByteCharCode) {
          // This will be handled in a later filter.
          if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
            continue;
          return set_replacement(nullptr);
        }
      } else {
        // A class whose lowest range starts above Latin-1 cannot match.
        if (range_count == 0 ||
            ranges->at(0).from() > String::kMaxOneByteCharCode) {
          // This will be handled in a later filter.
          if (IgnoreCase(cc->flags()) && RangesContainLatin1Equivalents(ranges))
            continue;
          return set_replacement(nullptr);
        }
      }
    }
  }
  return FilterSuccessor(depth - 1);
}
+
// A loop is only worth keeping for one-byte subjects if its continuation
// survives filtering; then the usual choice-node filtering applies.
RegExpNode* LoopChoiceNode::FilterOneByte(int depth) {
  if (info()->replacement_calculated) return replacement();
  if (depth < 0) return this;
  if (info()->visited) return this;
  {
    // Scoped so the visited flag is cleared before re-entering via the
    // ChoiceNode path below.
    VisitMarker marker(info());

    RegExpNode* continue_replacement = continue_node_->FilterOneByte(depth - 1);
    // If we can't continue after the loop then there is no sense in doing the
    // loop.
    if (continue_replacement == nullptr) return set_replacement(nullptr);
  }

  return ChoiceNode::FilterOneByte(depth - 1);
}
+
// Filters each alternative for one-byte subjects. Dead alternatives are
// dropped; if none survive the whole choice dies, and if exactly one
// survives the choice collapses to it. Guarded choices are left untouched.
RegExpNode* ChoiceNode::FilterOneByte(int depth) {
  if (info()->replacement_calculated) return replacement();
  if (depth < 0) return this;
  if (info()->visited) return this;
  VisitMarker marker(info());
  int choice_count = alternatives_->length();

  // Guards encode submatch logic we cannot safely prune around.
  for (int i = 0; i < choice_count; i++) {
    GuardedAlternative alternative = alternatives_->at(i);
    if (alternative.guards() != nullptr &&
        alternative.guards()->length() != 0) {
      set_replacement(this);
      return this;
    }
  }

  int surviving = 0;
  RegExpNode* survivor = nullptr;
  for (int i = 0; i < choice_count; i++) {
    GuardedAlternative alternative = alternatives_->at(i);
    RegExpNode* replacement = alternative.node()->FilterOneByte(depth - 1);
    DCHECK(replacement != this);  // No missing EMPTY_MATCH_CHECK.
    if (replacement != nullptr) {
      alternatives_->at(i).set_node(replacement);
      surviving++;
      survivor = replacement;
    }
  }
  // 0 survivors -> nullptr; 1 survivor -> collapse to it.
  if (surviving < 2) return set_replacement(survivor);

  set_replacement(this);
  if (surviving == choice_count) {
    return this;
  }
  // Only some of the nodes survived the filtering. We need to rebuild the
  // alternatives list.
  // (The recursive calls below hit the replacement_calculated cache, so this
  // second pass is cheap.)
  ZoneList<GuardedAlternative>* new_alternatives =
      new (zone()) ZoneList<GuardedAlternative>(surviving, zone());
  for (int i = 0; i < choice_count; i++) {
    RegExpNode* replacement =
        alternatives_->at(i).node()->FilterOneByte(depth - 1);
    if (replacement != nullptr) {
      alternatives_->at(i).set_node(replacement);
      new_alternatives->Add(alternatives_->at(i), zone());
    }
  }
  alternatives_ = new_alternatives;
  return this;
}
+
// Filters both branches: if the continuation dies the whole node dies; if
// the negative lookaround dies (can never match) the lookaround is a no-op
// and the node collapses to the continuation.
RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth) {
  if (info()->replacement_calculated) return replacement();
  if (depth < 0) return this;
  if (info()->visited) return this;
  VisitMarker marker(info());
  // Alternative 0 is the negative lookahead, alternative 1 is what comes
  // afterwards.
  RegExpNode* node = alternatives_->at(1).node();
  RegExpNode* replacement = node->FilterOneByte(depth - 1);
  if (replacement == nullptr) return set_replacement(nullptr);
  alternatives_->at(1).set_node(replacement);

  RegExpNode* neg_node = alternatives_->at(0).node();
  RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1);
  // If the negative lookahead is always going to fail then
  // we don't need to check it.
  if (neg_replacement == nullptr) return set_replacement(replacement);
  alternatives_->at(0).set_node(neg_replacement);
  return set_replacement(this);
}
+
// Quick-check details for a loop: skipped when the body can match the empty
// string (the loop may contribute nothing) or we are already inside this
// node's traversal (cycle).
void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
                                          RegExpCompiler* compiler,
                                          int characters_filled_in,
                                          bool not_at_start) {
  if (body_can_be_zero_length_ || info()->visited) return;
  VisitMarker marker(info());
  return ChoiceNode::GetQuickCheckDetails(details, compiler,
                                          characters_filled_in, not_at_start);
}
+
// Boyer-Moore info for a loop: with a possibly-empty body or no budget left
// we cannot say anything useful, so accept everything from here on.
void LoopChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
                                  BoyerMooreLookahead* bm, bool not_at_start) {
  if (body_can_be_zero_length_ || budget <= 0) {
    bm->SetRest(offset);
    SaveBMInfo(bm, not_at_start, offset);
    return;
  }
  ChoiceNode::FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
  SaveBMInfo(bm, not_at_start, offset);
}
+
// Quick-check details for an alternation: compute details for the first
// alternative, then merge in each remaining one so the combined check
// accepts anything any alternative would accept.
void ChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
                                      RegExpCompiler* compiler,
                                      int characters_filled_in,
                                      bool not_at_start) {
  not_at_start = (not_at_start || not_at_start_);
  int choice_count = alternatives_->length();
  DCHECK_LT(0, choice_count);
  alternatives_->at(0).node()->GetQuickCheckDetails(
      details, compiler, characters_filled_in, not_at_start);
  for (int i = 1; i < choice_count; i++) {
    QuickCheckDetails new_details(details->characters());
    RegExpNode* node = alternatives_->at(i).node();
    node->GetQuickCheckDetails(&new_details, compiler, characters_filled_in,
                               not_at_start);
    // Here we merge the quick match details of the two branches.
    details->Merge(&new_details, characters_filled_in);
  }
}
+
// Check for [0-9A-Z_a-z].
// Emits a comparison ladder that classifies the current character as word or
// non-word, jumping to |word| / |non_word| accordingly; one of the two
// outcomes falls through per |fall_through_on_word|.
static void EmitWordCheck(RegExpMacroAssembler* assembler, Label* word,
                          Label* non_word, bool fall_through_on_word) {
  if (assembler->CheckSpecialCharacterClass(
          fall_through_on_word ? 'w' : 'W',
          fall_through_on_word ? non_word : word)) {
    // Optimized implementation available.
    return;
  }
  // Ladder: > 'z' or < '0' -> non-word; 'a'..'z' -> word; '0'..'9' -> word;
  // < 'A' -> non-word; 'A'..'Z' -> word; only '_' remains undecided.
  assembler->CheckCharacterGT('z', non_word);
  assembler->CheckCharacterLT('0', non_word);
  assembler->CheckCharacterGT('a' - 1, word);
  assembler->CheckCharacterLT('9' + 1, word);
  assembler->CheckCharacterLT('A', non_word);
  assembler->CheckCharacterLT('Z' + 1, word);
  if (fall_through_on_word) {
    assembler->CheckNotCharacter('_', non_word);
  } else {
    assembler->CheckCharacter('_', word);
  }
}
+
// Emit the code to check for a ^ in multiline mode (1-character lookbehind
// that matches newline or the start of input).
static void EmitHat(RegExpCompiler* compiler, RegExpNode* on_success,
                    Trace* trace) {
  RegExpMacroAssembler* assembler = compiler->macro_assembler();
  // We will be loading the previous character into the current character
  // register.
  Trace new_trace(*trace);
  new_trace.InvalidateCurrentCharacter();

  Label ok;
  if (new_trace.cp_offset() == 0) {
    // The start of input counts as a newline in this context, so skip to
    // ok if we are at the start.
    assembler->CheckAtStart(&ok);
  }
  // We already checked that we are not at the start of input so it must be
  // OK to load the previous character.
  assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
                                  new_trace.backtrack(), false);
  if (!assembler->CheckSpecialCharacterClass('n', new_trace.backtrack())) {
    // Newline means \n, \r, 0x2028 or 0x2029.
    // The mask 0xFFFE makes one compare accept both 0x2028 and 0x2029.
    if (!compiler->one_byte()) {
      assembler->CheckCharacterAfterAnd(0x2028, 0xFFFE, &ok);
    }
    assembler->CheckCharacter('\n', &ok);
    assembler->CheckNotCharacter('\r', new_trace.backtrack());
  }
  assembler->Bind(&ok);
  on_success->Emit(compiler, &new_trace);
}
+
// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
// Uses Boyer-Moore lookahead, when available, to statically determine
// whether the NEXT character is a word character; only the undetermined case
// needs a runtime check of both sides of the boundary.
void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
  RegExpMacroAssembler* assembler = compiler->macro_assembler();
  Isolate* isolate = assembler->isolate();
  Trace::TriBool next_is_word_character = Trace::UNKNOWN;
  bool not_at_start = (trace->at_start() == Trace::FALSE_VALUE);
  BoyerMooreLookahead* lookahead = bm_info(not_at_start);
  if (lookahead == nullptr) {
    // No precomputed lookahead: build one on the fly if this node is
    // guaranteed to consume at least one character.
    int eats_at_least = Min(kMaxLookaheadForBoyerMoore,
                            EatsAtLeast(kMaxLookaheadForBoyerMoore,
                                        kRecursionBudget, not_at_start));
    if (eats_at_least >= 1) {
      BoyerMooreLookahead* bm =
          new (zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
      FillInBMInfo(isolate, 0, kRecursionBudget, bm, not_at_start);
      if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE_VALUE;
      if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
    }
  } else {
    if (lookahead->at(0)->is_non_word())
      next_is_word_character = Trace::FALSE_VALUE;
    if (lookahead->at(0)->is_word()) next_is_word_character = Trace::TRUE_VALUE;
  }
  bool at_boundary = (assertion_type_ == AssertionNode::AT_BOUNDARY);
  if (next_is_word_character == Trace::UNKNOWN) {
    // Must check the next character at runtime, then check the previous
    // character against the opposite class on each path.
    Label before_non_word;
    Label before_word;
    if (trace->characters_preloaded() != 1) {
      assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
    }
    // Fall through on non-word.
    EmitWordCheck(assembler, &before_word, &before_non_word, false);
    // Next character is not a word character.
    assembler->Bind(&before_non_word);
    Label ok;
    BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
    assembler->GoTo(&ok);

    assembler->Bind(&before_word);
    BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
    assembler->Bind(&ok);
  } else if (next_is_word_character == Trace::TRUE_VALUE) {
    BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
  } else {
    DCHECK(next_is_word_character == Trace::FALSE_VALUE);
    BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
  }
}
+
// Emits code that backtracks if the PREVIOUS character is (or is not) a word
// character, per |backtrack_if_previous|; otherwise falls through to the
// success continuation.
void AssertionNode::BacktrackIfPrevious(
    RegExpCompiler* compiler, Trace* trace,
    AssertionNode::IfPrevious backtrack_if_previous) {
  RegExpMacroAssembler* assembler = compiler->macro_assembler();
  // Loading the previous character invalidates the current-character
  // register for the continuation.
  Trace new_trace(*trace);
  new_trace.InvalidateCurrentCharacter();

  Label fall_through, dummy;

  // Route the word/non-word outcomes: one goes to backtrack, the other
  // falls through.
  Label* non_word = backtrack_if_previous == kIsNonWord ? new_trace.backtrack()
                                                        : &fall_through;
  Label* word = backtrack_if_previous == kIsNonWord ? &fall_through
                                                    : new_trace.backtrack();

  if (new_trace.cp_offset() == 0) {
    // The start of input counts as a non-word character, so the question is
    // decided if we are at the start.
    assembler->CheckAtStart(non_word);
  }
  // We already checked that we are not at the start of input so it must be
  // OK to load the previous character.
  assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1, &dummy, false);
  EmitWordCheck(assembler, word, non_word, backtrack_if_previous == kIsNonWord);

  assembler->Bind(&fall_through);
  on_success()->Emit(compiler, &new_trace);
}
+
+void AssertionNode::GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler,
+ int filled_in, bool not_at_start) {
+ if (assertion_type_ == AT_START && not_at_start) {
+ details->set_cannot_match();
+ return;
+ }
+ return on_success()->GetQuickCheckDetails(details, compiler, filled_in,
+ not_at_start);
+}
+
// Emits the runtime check for this assertion ($, ^, \b, \B, multiline ^),
// then (where not already done by a helper) the success continuation.
void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
  RegExpMacroAssembler* assembler = compiler->macro_assembler();
  switch (assertion_type_) {
    case AT_END: {
      // Backtrack unless we are at the end of input.
      Label ok;
      assembler->CheckPosition(trace->cp_offset(), &ok);
      assembler->GoTo(trace->backtrack());
      assembler->Bind(&ok);
      break;
    }
    case AT_START: {
      if (trace->at_start() == Trace::FALSE_VALUE) {
        assembler->GoTo(trace->backtrack());
        return;
      }
      if (trace->at_start() == Trace::UNKNOWN) {
        assembler->CheckNotAtStart(trace->cp_offset(), trace->backtrack());
        // Once past the check we KNOW we are at the start; record that in
        // the trace for downstream nodes.
        Trace at_start_trace = *trace;
        at_start_trace.set_at_start(Trace::TRUE_VALUE);
        on_success()->Emit(compiler, &at_start_trace);
        return;
      }
    } break;
    case AFTER_NEWLINE:
      EmitHat(compiler, on_success(), trace);
      return;
    case AT_BOUNDARY:
    case AT_NON_BOUNDARY: {
      EmitBoundaryCheck(compiler, trace);
      return;
    }
  }
  on_success()->Emit(compiler, trace);
}
+
+static bool DeterminedAlready(QuickCheckDetails* quick_check, int offset) {
+ if (quick_check == nullptr) return false;
+ if (offset >= quick_check->characters()) return false;
+ return quick_check->positions(offset)->determines_perfectly;
+}
+
// Records that positions up to |index| have passed a bounds check, keeping
// the running maximum in |*checked_up_to|.
static void UpdateBoundsCheck(int index, int* checked_up_to) {
  if (*checked_up_to < index) *checked_up_to = index;
}
+
+// We call this repeatedly to generate code for each pass over the text node.
+// The passes are in increasing order of difficulty because we hope one
+// of the first passes will fail in which case we are saved the work of the
+// later passes. for example for the case independent regexp /%[asdfghjkl]a/
+// we will check the '%' in the first pass, the case independent 'a' in the
+// second pass and the character class in the last pass.
+//
+// The passes are done from right to left, so for example to test for /bar/
+// we will first test for an 'r' with offset 2, then an 'a' with offset 1
+// and then a 'b' with offset 0. This means we can avoid the end-of-input
+// bounds check most of the time. In the example we only need to check for
+// end-of-input when loading the putative 'r'.
+//
+// A slight complication involves the fact that the first character may already
+// be fetched into a register by the previous node. In this case we want to
+// do the test for that character first. We do this in separate passes. The
+// 'preloaded' argument indicates that we are doing such a 'pass'. If such a
+// pass has been performed then subsequent passes will have true in
+// first_element_checked to indicate that that character does not need to be
+// checked again.
+//
+// In addition to all this we are passed a Trace, which can
+// contain an AlternativeGeneration object. In this AlternativeGeneration
+// object we can see details of any quick check that was already passed in
+// order to get to the code we are now generating. The quick check can involve
+// loading characters, which means we do not need to recheck the bounds
+// up to the limit the quick check already checked. In addition the quick
+// check can have involved a mask and compare operation which may simplify
+// or obviate the need for further checks at some character positions.
// Emits one pass of checks over this text node's elements (see the long
// comment above for the multi-pass strategy).
// pass                  - which category of checks to emit this time.
// preloaded             - the first character is already in the current
//                         character register; only position 0 is checked.
// first_element_checked - position 0 was handled by an earlier preloaded
//                         pass and is skipped here.
// checked_up_to         - highest offset already bounds-checked; updated as
//                         new checks are emitted.
void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
                            bool preloaded, Trace* trace,
                            bool first_element_checked, int* checked_up_to) {
  RegExpMacroAssembler* assembler = compiler->macro_assembler();
  Isolate* isolate = assembler->isolate();
  bool one_byte = compiler->one_byte();
  Label* backtrack = trace->backtrack();
  QuickCheckDetails* quick_check = trace->quick_check_performed();
  int element_count = elements()->length();
  // When reading backward, offsets are measured back from the current
  // position by the full length of this node.
  int backward_offset = read_backward() ? -Length() : 0;
  // Iterate right-to-left so the rightmost bounds check covers the rest.
  for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
    TextElement elm = elements()->at(i);
    int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
    if (elm.text_type() == TextElement::ATOM) {
      if (SkipPass(pass, elm.atom()->ignore_case())) continue;
      Vector<const uc16> quarks = elm.atom()->data();
      for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
        if (first_element_checked && i == 0 && j == 0) continue;
        // Skip positions the quick check already matched perfectly.
        if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
        EmitCharacterFunction* emit_function = nullptr;
        uc16 quark = quarks[j];
        if (elm.atom()->ignore_case()) {
          // Everywhere else we assume that a non-Latin-1 character cannot
          // match a Latin-1 character. Avoid the cases where this assumption
          // is invalid by using the Latin1 equivalent instead.
          quark = unibrow::Latin1::TryConvertToLatin1(quark);
        }
        switch (pass) {
          case NON_LATIN1_MATCH:
            // In a one-byte subject a code unit above the one-byte range can
            // never match, so the whole node fails immediately.
            DCHECK(one_byte);
            if (quark > String::kMaxOneByteCharCode) {
              assembler->GoTo(backtrack);
              return;
            }
            break;
          case NON_LETTER_CHARACTER_MATCH:
            emit_function = &EmitAtomNonLetter;
            break;
          case SIMPLE_CHARACTER_MATCH:
            emit_function = &EmitSimpleCharacter;
            break;
          case CASE_CHARACTER_MATCH:
            emit_function = &EmitAtomLetter;
            break;
          default:
            break;
        }
        if (emit_function != nullptr) {
          // Backward reads never reuse earlier bounds checks.
          bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
          bool bound_checked =
              emit_function(isolate, compiler, quark, backtrack, cp_offset + j,
                            bounds_check, preloaded);
          if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
        }
      }
    } else {
      DCHECK_EQ(TextElement::CHAR_CLASS, elm.text_type());
      if (pass == CHARACTER_CLASS_MATCH) {
        if (first_element_checked && i == 0) continue;
        if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
        RegExpCharacterClass* cc = elm.char_class();
        bool bounds_check = *checked_up_to < cp_offset || read_backward();
        EmitCharClass(assembler, cc, one_byte, backtrack, cp_offset,
                      bounds_check, preloaded, zone());
        UpdateBoundsCheck(cp_offset, checked_up_to);
      }
    }
  }
}
+
+int TextNode::Length() {
+ TextElement elm = elements()->last();
+ DCHECK_LE(0, elm.cp_offset());
+ return elm.cp_offset() + elm.length();
+}
+
+bool TextNode::SkipPass(TextEmitPassType pass, bool ignore_case) {
+ if (ignore_case) {
+ return pass == SIMPLE_CHARACTER_MATCH;
+ } else {
+ return pass == NON_LETTER_CHARACTER_MATCH || pass == CASE_CHARACTER_MATCH;
+ }
+}
+
// Creates a text node consisting of a single character class built from
// |ranges|, with the given successor.
TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
                                             ZoneList<CharacterRange>* ranges,
                                             bool read_backward,
                                             RegExpNode* on_success,
                                             JSRegExp::Flags flags) {
  DCHECK_NOT_NULL(ranges);
  ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
  elms->Add(TextElement::CharClass(
                new (zone) RegExpCharacterClass(zone, ranges, flags)),
            zone);
  return new (zone) TextNode(elms, read_backward, on_success);
}
+
// Creates a text node matching two consecutive character classes: one code
// unit from |lead| followed by one from |trail| (a surrogate pair).
TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
                                           CharacterRange trail,
                                           bool read_backward,
                                           RegExpNode* on_success,
                                           JSRegExp::Flags flags) {
  ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
  ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
  ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
  elms->Add(TextElement::CharClass(
                new (zone) RegExpCharacterClass(zone, lead_ranges, flags)),
            zone);
  elms->Add(TextElement::CharClass(
                new (zone) RegExpCharacterClass(zone, trail_ranges, flags)),
            zone);
  return new (zone) TextNode(elms, read_backward, on_success);
}
+
+// This generates the code to match a text node. A text node can contain
+// straight character sequences (possibly to be matched in a case-independent
+// way) and character classes. For efficiency we do not do this in a single
+// pass from left to right. Instead we pass over the text node several times,
+// emitting code for some character positions every time. See the comment on
+// TextEmitPass for details.
void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
  LimitResult limit_result = LimitVersions(compiler, trace);
  if (limit_result == DONE) return;
  DCHECK(limit_result == CONTINUE);

  // Give up if the end of this node would exceed the macro assembler's
  // addressable offset range.
  if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) {
    compiler->SetRegExpTooBig();
    return;
  }

  // For one-byte subjects, first reject atom characters that can never occur
  // in the input at all.
  if (compiler->one_byte()) {
    int dummy = 0;
    TextEmitPass(compiler, NON_LATIN1_MATCH, false, trace, false, &dummy);
  }

  bool first_elt_done = false;
  int bound_checked_to = trace->cp_offset() - 1;
  bound_checked_to += trace->bound_checked_up_to();

  // If a character is preloaded into the current character register then
  // check that now.
  if (trace->characters_preloaded() == 1) {
    for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
      TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), true, trace,
                   false, &bound_checked_to);
    }
    first_elt_done = true;
  }

  // Run the remaining passes over all (other) positions.
  for (int pass = kFirstRealPass; pass <= kLastPass; pass++) {
    TextEmitPass(compiler, static_cast<TextEmitPassType>(pass), false, trace,
                 first_elt_done, &bound_checked_to);
  }

  Trace successor_trace(*trace);
  // If we advance backward, we may end up at the start.
  successor_trace.AdvanceCurrentPositionInTrace(
      read_backward() ? -Length() : Length(), compiler);
  successor_trace.set_at_start(read_backward() ? Trace::UNKNOWN
                                               : Trace::FALSE_VALUE);
  RecursionCheck rc(compiler);
  on_success()->Emit(compiler, &successor_trace);
}
+
// Forgets that any characters are preloaded into the current character
// register, forcing a reload before the next use.
void Trace::InvalidateCurrentCharacter() { characters_preloaded_ = 0; }
+
// Advances this trace's compile-time position bookkeeping by |by| code units
// (negative when matching backward).
void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
  // We don't have an instruction for shifting the current character register
  // down or for using a shifted value for anything so lets just forget that
  // we preloaded any characters into it.
  characters_preloaded_ = 0;
  // Adjust the offsets of the quick check performed information. This
  // information is used to find out what we already determined about the
  // characters by means of mask and compare.
  quick_check_performed_.Advance(by, compiler->one_byte());
  cp_offset_ += by;
  // Offsets beyond the addressable range make the regexp uncompilable; flag
  // that and clamp the offset.
  if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
    compiler->SetRegExpTooBig();
    cp_offset_ = 0;
  }
  bound_checked_up_to_ = Max(0, bound_checked_up_to_ - by);
}
+
// Widens the character classes of this node for case-independent matching by
// adding the case equivalents of each range.  (Atoms are handled at emit
// time via the case-aware passes instead.)
void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
  int element_count = elements()->length();
  for (int i = 0; i < element_count; i++) {
    TextElement elm = elements()->at(i);
    if (elm.text_type() == TextElement::CHAR_CLASS) {
      RegExpCharacterClass* cc = elm.char_class();
#ifdef V8_INTL_SUPPORT
      // Classes flagged as needing unicode case equivalents already had them
      // added elsewhere when ICU is available.
      bool case_equivalents_already_added =
          NeedsUnicodeCaseEquivalents(cc->flags());
#else
      bool case_equivalents_already_added = false;
#endif
      if (IgnoreCase(cc->flags()) && !case_equivalents_already_added) {
        // None of the standard character classes is different in the case
        // independent case and it slows us down if we don't know that.
        if (cc->is_standard(zone())) continue;
        ZoneList<CharacterRange>* ranges = cc->ranges(zone());
        CharacterRange::AddCaseEquivalents(isolate, zone(), ranges,
                                           is_one_byte);
      }
    }
  }
}
+
// A text node's contribution to a greedy loop is its fixed length.
int TextNode::GreedyLoopTextLength() { return Length(); }
+
// If this node matches exactly one completely arbitrary character (a
// character class that covers every possible code unit), returns its
// successor; otherwise returns nullptr.
RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
    RegExpCompiler* compiler) {
  if (read_backward()) return nullptr;
  if (elements()->length() != 1) return nullptr;
  TextElement elm = elements()->at(0);
  if (elm.text_type() != TextElement::CHAR_CLASS) return nullptr;
  RegExpCharacterClass* node = elm.char_class();
  ZoneList<CharacterRange>* ranges = node->ranges(zone());
  CharacterRange::Canonicalize(ranges);
  if (node->is_negated()) {
    // A negated empty class matches everything.
    return ranges->length() == 0 ? on_success() : nullptr;
  }
  if (ranges->length() != 1) return nullptr;
  uint32_t max_char;
  if (compiler->one_byte()) {
    max_char = String::kMaxOneByteCharCode;
  } else {
    max_char = String::kMaxUtf16CodeUnit;
  }
  // A single range covering the whole alphabet matches everything.
  return ranges->at(0).IsEverything(max_char) ? on_success() : nullptr;
}
+
+// Finds the fixed match length of a sequence of nodes that goes from
+// this alternative and back to this choice node. If there are variable
+// length nodes or other complications in the way then return a sentinel
+// value indicating that a greedy loop cannot be constructed.
+int ChoiceNode::GreedyLoopTextLengthForAlternative(
+ GuardedAlternative* alternative) {
+ int length = 0;
+ RegExpNode* node = alternative->node();
+ // Later we will generate code for all these text nodes using recursion
+ // so we have to limit the max number.
+ int recursion_depth = 0;
+ while (node != this) {
+ if (recursion_depth++ > RegExpCompiler::kMaxRecursion) {
+ return kNodeIsTooComplexForGreedyLoops;
+ }
+ int node_length = node->GreedyLoopTextLength();
+ if (node_length == kNodeIsTooComplexForGreedyLoops) {
+ return kNodeIsTooComplexForGreedyLoops;
+ }
+ length += node_length;
+ SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
+ node = seq_node->on_success();
+ }
+ return read_backward() ? -length : length;
+}
+
// Registers |alt| as the loop (back-edge) alternative; may be set only once.
void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
  DCHECK_NULL(loop_node_);
  AddAlternative(alt);
  loop_node_ = alt.node();
}
+
// Registers |alt| as the continue (loop-exit) alternative; may be set only
// once.
void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
  DCHECK_NULL(continue_node_);
  AddAlternative(alt);
  continue_node_ = alt.node();
}
+
// Emits code for a loop choice node.  Handles the greedy-loop back edge
// specially; otherwise flushes any pending trace actions and defers to the
// generic choice-node emitter.
void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
  if (trace->stop_node() == this) {
    // Back edge of greedy optimized loop node graph.
    int text_length =
        GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
    DCHECK_NE(kNodeIsTooComplexForGreedyLoops, text_length);
    // Update the counter-based backtracking info on the stack. This is an
    // optimization for greedy loops (see below).
    DCHECK(trace->cp_offset() == text_length);
    macro_assembler->AdvanceCurrentPosition(text_length);
    macro_assembler->GoTo(trace->loop_label());
    return;
  }
  DCHECK_NULL(trace->stop_node());
  // Loop entry requires a trivial trace: flush pending actions first.
  if (!trace->is_trivial()) {
    trace->Flush(compiler, this);
    return;
  }
  ChoiceNode::Emit(compiler, trace);
}
+
+int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
+ int eats_at_least) {
+ int preload_characters = Min(4, eats_at_least);
+ DCHECK_LE(preload_characters, 4);
+ if (compiler->macro_assembler()->CanReadUnaligned()) {
+ bool one_byte = compiler->one_byte();
+ if (one_byte) {
+ // We can't preload 3 characters because there is no machine instruction
+ // to do that. We can't just load 4 because we could be reading
+ // beyond the end of the string, which could cause a memory fault.
+ if (preload_characters == 3) preload_characters = 2;
+ } else {
+ if (preload_characters > 2) preload_characters = 2;
+ }
+ } else {
+ if (preload_characters > 1) preload_characters = 1;
+ }
+ return preload_characters;
+}
+
+// This class is used when generating the alternatives in a choice node. It
+// records the way the alternative is being code generated.
+class AlternativeGeneration : public Malloced {
+ public:
+ AlternativeGeneration()
+ : possible_success(),
+ expects_preload(false),
+ after(),
+ quick_check_details() {}
+ Label possible_success;
+ bool expects_preload;
+ Label after;
+ QuickCheckDetails quick_check_details;
+};
+
+// Creates a list of AlternativeGenerations. If the list has a reasonable
+// size then it is on the stack, otherwise the excess is on the heap.
+class AlternativeGenerationList {
+ public:
+ AlternativeGenerationList(int count, Zone* zone) : alt_gens_(count, zone) {
+ for (int i = 0; i < count && i < kAFew; i++) {
+ alt_gens_.Add(a_few_alt_gens_ + i, zone);
+ }
+ for (int i = kAFew; i < count; i++) {
+ alt_gens_.Add(new AlternativeGeneration(), zone);
+ }
+ }
+ ~AlternativeGenerationList() {
+ for (int i = kAFew; i < alt_gens_.length(); i++) {
+ delete alt_gens_[i];
+ alt_gens_[i] = nullptr;
+ }
+ }
+
+ AlternativeGeneration* at(int i) { return alt_gens_[i]; }
+
+ private:
+ static const int kAFew = 10;
+ ZoneList<AlternativeGeneration*> alt_gens_;
+ AlternativeGeneration a_few_alt_gens_[kAFew];
+};
+
// Records that the single character |character| can occur at this position.
void BoyerMoorePositionInfo::Set(int character) {
  SetInterval(Interval(character, character));
}
+
+namespace {
+
+ContainedInLattice AddRange(ContainedInLattice containment, const int* ranges,
+ int ranges_length, Interval new_range) {
+ DCHECK_EQ(1, ranges_length & 1);
+ DCHECK_EQ(String::kMaxCodePoint + 1, ranges[ranges_length - 1]);
+ if (containment == kLatticeUnknown) return containment;
+ bool inside = false;
+ int last = 0;
+ for (int i = 0; i < ranges_length; inside = !inside, last = ranges[i], i++) {
+ // Consider the range from last to ranges[i].
+ // We haven't got to the new range yet.
+ if (ranges[i] <= new_range.from()) continue;
+ // New range is wholly inside last-ranges[i]. Note that new_range.to() is
+ // inclusive, but the values in ranges are not.
+ if (last <= new_range.from() && new_range.to() < ranges[i]) {
+ return Combine(containment, inside ? kLatticeIn : kLatticeOut);
+ }
+ return kLatticeUnknown;
+ }
+ return containment;
+}
+
+int BitsetFirstSetBit(BoyerMoorePositionInfo::Bitset bitset) {
+ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
+ 2 * kInt64Size * kBitsPerByte);
+
+ // Slight fiddling is needed here, since the bitset is of length 128 while
+ // CountTrailingZeros requires an integral type and std::bitset can only
+ // convert to unsigned long long. So we handle the most- and least-significant
+ // bits separately.
+
+ {
+ static constexpr BoyerMoorePositionInfo::Bitset mask(~uint64_t{0});
+ BoyerMoorePositionInfo::Bitset masked_bitset = bitset & mask;
+ STATIC_ASSERT(kInt64Size >= sizeof(decltype(masked_bitset.to_ullong())));
+ uint64_t lsb = masked_bitset.to_ullong();
+ if (lsb != 0) return base::bits::CountTrailingZeros(lsb);
+ }
+
+ {
+ BoyerMoorePositionInfo::Bitset masked_bitset = bitset >> 64;
+ uint64_t msb = masked_bitset.to_ullong();
+ if (msb != 0) return 64 + base::bits::CountTrailingZeros(msb);
+ }
+
+ return -1;
+}
+
+} // namespace
+
// Records that every character in |interval| can occur at this position:
// folds the interval into the word-character lattice w_ and sets the
// corresponding bits (modulo kMapSize) in the map.
void BoyerMoorePositionInfo::SetInterval(const Interval& interval) {
  w_ = AddRange(w_, kWordRanges, kWordRangeCount, interval);

  // An interval at least as wide as the map saturates it immediately.
  if (interval.size() >= kMapSize) {
    map_count_ = kMapSize;
    map_.set();
    return;
  }

  for (int i = interval.from(); i <= interval.to(); i++) {
    // Characters are folded into the map modulo kMapSize.
    int mod_character = (i & kMask);
    if (!map_[mod_character]) {
      map_count_++;
      map_.set(mod_character);
    }
    // Stop early once every bit is set.
    if (map_count_ == kMapSize) return;
  }
}
+
// Marks this position as able to contain any character at all.
void BoyerMoorePositionInfo::SetAll() {
  w_ = kLatticeUnknown;
  if (map_count_ != kMapSize) {
    map_count_ = kMapSize;
    map_.set();
  }
}
+
+BoyerMooreLookahead::BoyerMooreLookahead(int length, RegExpCompiler* compiler,
+ Zone* zone)
+ : length_(length), compiler_(compiler) {
+ if (compiler->one_byte()) {
+ max_char_ = String::kMaxOneByteCharCode;
+ } else {
+ max_char_ = String::kMaxUtf16CodeUnit;
+ }
+ bitmaps_ = new (zone) ZoneList<BoyerMoorePositionInfo*>(length, zone);
+ for (int i = 0; i < length; i++) {
+ bitmaps_->Add(new (zone) BoyerMoorePositionInfo(), zone);
+ }
+}
+
+// Find the longest range of lookahead that has the fewest number of different
+// characters that can occur at a given position. Since we are optimizing two
+// different parameters at once this is a tradeoff.
+bool BoyerMooreLookahead::FindWorthwhileInterval(int* from, int* to) {
+ int biggest_points = 0;
+ // If more than 32 characters out of 128 can occur it is unlikely that we can
+ // be lucky enough to step forwards much of the time.
+ const int kMaxMax = 32;
+ for (int max_number_of_chars = 4; max_number_of_chars < kMaxMax;
+ max_number_of_chars *= 2) {
+ biggest_points =
+ FindBestInterval(max_number_of_chars, biggest_points, from, to);
+ }
+ if (biggest_points == 0) return false;
+ return true;
+}
+
// Find the highest-points range between 0 and length_ where the character
// information is not too vague. 'Too vague' means that there are more than
// max_number_of_chars that can occur at this position. Calculates the number
// of points as the product of width-of-the-range and
// probability-of-finding-one-of-the-characters, where the probability is
// calculated using the frequency distribution of the sample subject string.
int BoyerMooreLookahead::FindBestInterval(int max_number_of_chars,
                                          int old_biggest_points, int* from,
                                          int* to) {
  int biggest_points = old_biggest_points;
  static const int kSize = RegExpMacroAssembler::kTableSize;
  for (int i = 0; i < length_;) {
    // Skip positions that are too vague to be useful.
    while (i < length_ && Count(i) > max_number_of_chars) i++;
    if (i == length_) break;
    int remembered_from = i;

    // Union the character info over the usable run starting here.
    BoyerMoorePositionInfo::Bitset union_bitset;
    for (; i < length_ && Count(i) <= max_number_of_chars; i++) {
      union_bitset |= bitmaps_->at(i)->raw_bitset();
    }

    int frequency = 0;

    // Iterate only over set bits.
    int j;
    while ((j = BitsetFirstSetBit(union_bitset)) != -1) {
      DCHECK(union_bitset[j]);  // Sanity check.
      // Add 1 to the frequency to give a small per-character boost for
      // the cases where our sampling is not good enough and many
      // characters have a frequency of zero. This means the frequency
      // can theoretically be up to 2*kSize though we treat it mostly as
      // a fraction of kSize.
      frequency += compiler_->frequency_collator()->Frequency(j) + 1;
      union_bitset.reset(j);
    }

    // We use the probability of skipping times the distance we are skipping to
    // judge the effectiveness of this. Actually we have a cut-off: By
    // dividing by 2 we switch off the skipping if the probability of skipping
    // is less than 50%. This is because the multibyte mask-and-compare
    // skipping in quickcheck is more likely to do well on this case.
    bool in_quickcheck_range =
        ((i - remembered_from < 4) ||
         (compiler_->one_byte() ? remembered_from <= 4 : remembered_from <= 2));
    // Called 'probability' but it is only a rough estimate and can actually
    // be outside the 0-kSize range.
    int probability = (in_quickcheck_range ? kSize / 2 : kSize) - frequency;
    int points = (i - remembered_from) * probability;
    if (points > biggest_points) {
      *from = remembered_from;
      *to = i - 1;
      biggest_points = points;
    }
  }
  return biggest_points;
}
+
// Take all the characters that will not prevent a successful match if they
// occur in the subject string in the range between min_lookahead and
// max_lookahead (inclusive) measured from the current position. If the
// character at max_lookahead offset is not one of these characters, then we
// can safely skip forwards by the number of characters in the range.
// Returns that skip distance; the table is filled in via the handle.
int BoyerMooreLookahead::GetSkipTable(int min_lookahead, int max_lookahead,
                                      Handle<ByteArray> boolean_skip_table) {
  const int kSkipArrayEntry = 0;
  const int kDontSkipArrayEntry = 1;

  // Start with every entry allowing a skip.
  std::memset(boolean_skip_table->GetDataStartAddress(), kSkipArrayEntry,
              boolean_skip_table->length());

  // Mark every character that can occur anywhere in the window as
  // non-skippable.
  for (int i = max_lookahead; i >= min_lookahead; i--) {
    BoyerMoorePositionInfo::Bitset bitset = bitmaps_->at(i)->raw_bitset();

    // Iterate only over set bits.
    int j;
    while ((j = BitsetFirstSetBit(bitset)) != -1) {
      DCHECK(bitset[j]);  // Sanity check.
      boolean_skip_table->set(j, kDontSkipArrayEntry);
      bitset.reset(j);
    }
  }

  const int skip = max_lookahead + 1 - min_lookahead;
  return skip;
}
+
// See comment above on the implementation of GetSkipTable.  Emits the actual
// skip-forward loop: either a fast single-character comparison loop or a
// table-driven one.
void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
  const int kSize = RegExpMacroAssembler::kTableSize;

  int min_lookahead = 0;
  int max_lookahead = 0;

  if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return;

  // Check if we only have a single non-empty position info, and that info
  // contains precisely one character.
  bool found_single_character = false;
  int single_character = 0;
  for (int i = max_lookahead; i >= min_lookahead; i--) {
    BoyerMoorePositionInfo* map = bitmaps_->at(i);
    if (map->map_count() == 0) continue;

    // A second non-empty position, or more than one character at this one,
    // disqualifies the single-character fast path.
    if (found_single_character || map->map_count() > 1) {
      found_single_character = false;
      break;
    }

    DCHECK(!found_single_character);
    DCHECK_EQ(map->map_count(), 1);

    found_single_character = true;
    single_character = BitsetFirstSetBit(map->raw_bitset());

    DCHECK_NE(single_character, -1);
  }

  int lookahead_width = max_lookahead + 1 - min_lookahead;

  if (found_single_character && lookahead_width == 1 && max_lookahead < 3) {
    // The mask-compare can probably handle this better.
    return;
  }

  if (found_single_character) {
    // Fast path: loop forward while the character at max_lookahead is not
    // the single admissible character.
    Label cont, again;
    masm->Bind(&again);
    masm->LoadCurrentCharacter(max_lookahead, &cont, true);
    if (max_char_ > kSize) {
      // Characters above the table size are folded in modulo the table size.
      masm->CheckCharacterAfterAnd(single_character,
                                   RegExpMacroAssembler::kTableMask, &cont);
    } else {
      masm->CheckCharacter(single_character, &cont);
    }
    masm->AdvanceCurrentPosition(lookahead_width);
    masm->GoTo(&again);
    masm->Bind(&cont);
    return;
  }

  // General path: loop forward while the character at max_lookahead is
  // marked skippable in the table.
  Factory* factory = masm->isolate()->factory();
  Handle<ByteArray> boolean_skip_table =
      factory->NewByteArray(kSize, AllocationType::kOld);
  int skip_distance =
      GetSkipTable(min_lookahead, max_lookahead, boolean_skip_table);
  DCHECK_NE(0, skip_distance);

  Label cont, again;
  masm->Bind(&again);
  masm->LoadCurrentCharacter(max_lookahead, &cont, true);
  masm->CheckBitInTable(boolean_skip_table, &cont);
  masm->AdvanceCurrentPosition(skip_distance);
  masm->GoTo(&again);
  masm->Bind(&cont);
}
+
+/* Code generation for choice nodes.
+ *
+ * We generate quick checks that do a mask and compare to eliminate a
+ * choice. If the quick check succeeds then it jumps to the continuation to
+ * do slow checks and check subsequent nodes. If it fails (the common case)
+ * it falls through to the next choice.
+ *
+ * Here is the desired flow graph. Nodes directly below each other imply
+ * fallthrough. Alternatives 1 and 2 have quick checks. Alternative
+ * 3 doesn't have a quick check so we have to call the slow check.
+ * Nodes are marked Qn for quick checks and Sn for slow checks. The entire
+ * regexp continuation is generated directly after the Sn node, up to the
+ * next GoTo if we decide to reuse some already generated code. Some
+ * nodes expect preload_characters to be preloaded into the current
+ * character register. R nodes do this preloading. Vertices are marked
+ * F for failures and S for success (possible success in the case of quick
+ * nodes). L, V, < and > are used as arrow heads.
+ *
+ * ----------> R
+ * |
+ * V
+ * Q1 -----> S1
+ * | S /
+ * F| /
+ * | F/
+ * | /
+ * | R
+ * | /
+ * V L
+ * Q2 -----> S2
+ * | S /
+ * F| /
+ * | F/
+ * | /
+ * | R
+ * | /
+ * V L
+ * S3
+ * |
+ * F|
+ * |
+ * R
+ * |
+ * backtrack V
+ * <----------Q4
+ * \ F |
+ * \ |S
+ * \ F V
+ * \-----S4
+ *
+ * For greedy loops we push the current position, then generate the code that
+ * eats the input specially in EmitGreedyLoop. The other choice (the
+ * continuation) is generated by the normal code in EmitChoices, and steps back
+ * in the input to the starting position when it fails to match. The loop code
+ * looks like this (U is the unwind code that steps back in the greedy loop).
+ *
+ * _____
+ * / \
+ * V |
+ * ----------> S1 |
+ * /| |
+ * / |S |
+ * F/ \_____/
+ * /
+ * |<-----
+ * | \
+ * V |S
+ * Q2 ---> U----->backtrack
+ * | F /
+ * S| /
+ * V F /
+ * S2--/
+ */
+
// Sets up the trace used for counter-based backtracking inside a greedy
// loop: backtracking jumps to label_, and the at-start flag is seeded from
// the surrounding context.
GreedyLoopState::GreedyLoopState(bool not_at_start) {
  counter_backtrack_trace_.set_backtrack(&label_);
  if (not_at_start) counter_backtrack_trace_.set_at_start(Trace::FALSE_VALUE);
}
+
// Debug-only check: no guard of a non-last alternative refers to a register
// that the incoming trace mentions.
void ChoiceNode::AssertGuardsMentionRegisters(Trace* trace) {
#ifdef DEBUG
  int choice_count = alternatives_->length();
  for (int i = 0; i < choice_count - 1; i++) {
    GuardedAlternative alternative = alternatives_->at(i);
    ZoneList<Guard*>* guards = alternative.guards();
    int guard_count = (guards == nullptr) ? 0 : guards->length();
    for (int j = 0; j < guard_count; j++) {
      DCHECK(!trace->mentions_reg(guards->at(j)->reg()));
    }
  }
#endif
}
+
// Computes how many characters to preload for this choice node and records
// in |state| whether the current trace already has that many preloaded (and
// bounds-checked).
void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler, Trace* current_trace,
                              PreloadState* state) {
  if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
    // Save some time by looking at most one machine word ahead.
    state->eats_at_least_ =
        EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
                    current_trace->at_start() == Trace::FALSE_VALUE);
  }
  state->preload_characters_ =
      CalculatePreloadCharacters(compiler, state->eats_at_least_);

  state->preload_is_current_ =
      (current_trace->characters_preloaded() == state->preload_characters_);
  state->preload_has_checked_bounds_ = state->preload_is_current_;
}
+
// Emits code for this choice node: either the optimized greedy-loop form or
// the general sequence of quick-checked alternatives, followed by the
// out-of-line slow-path continuations (see the flow-graph comment above).
void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
  int choice_count = alternatives_->length();

  // A single unguarded alternative needs no choice machinery at all.
  if (choice_count == 1 && alternatives_->at(0).guards() == nullptr) {
    alternatives_->at(0).node()->Emit(compiler, trace);
    return;
  }

  AssertGuardsMentionRegisters(trace);

  LimitResult limit_result = LimitVersions(compiler, trace);
  if (limit_result == DONE) return;
  DCHECK(limit_result == CONTINUE);

  // For loop nodes we already flushed (see LoopChoiceNode::Emit), but for
  // other choice nodes we only flush if we are out of code size budget.
  if (trace->flush_budget() == 0 && trace->actions() != nullptr) {
    trace->Flush(compiler, this);
    return;
  }

  RecursionCheck rc(compiler);

  PreloadState preload;
  preload.init();
  GreedyLoopState greedy_loop_state(not_at_start());

  int text_length = GreedyLoopTextLengthForAlternative(&alternatives_->at(0));
  AlternativeGenerationList alt_gens(choice_count, zone());

  if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
    trace = EmitGreedyLoop(compiler, trace, &alt_gens, &preload,
                           &greedy_loop_state, text_length);
  } else {
    // TODO(erikcorry): Delete this. We don't need this label, but it makes us
    // match the traces produced pre-cleanup.
    Label second_choice;
    compiler->macro_assembler()->Bind(&second_choice);

    preload.eats_at_least_ = EmitOptimizedUnanchoredSearch(compiler, trace);

    EmitChoices(compiler, &alt_gens, 0, trace, &preload);
  }

  // At this point we need to generate slow checks for the alternatives where
  // the quick check was inlined. We can recognize these because the associated
  // label was bound.
  int new_flush_budget = trace->flush_budget() / choice_count;
  for (int i = 0; i < choice_count; i++) {
    AlternativeGeneration* alt_gen = alt_gens.at(i);
    Trace new_trace(*trace);
    // If there are actions to be flushed we have to limit how many times
    // they are flushed. Take the budget of the parent trace and distribute
    // it fairly amongst the children.
    if (new_trace.actions() != nullptr) {
      new_trace.set_flush_budget(new_flush_budget);
    }
    bool next_expects_preload =
        i == choice_count - 1 ? false : alt_gens.at(i + 1)->expects_preload;
    EmitOutOfLineContinuation(compiler, &new_trace, alternatives_->at(i),
                              alt_gen, preload.preload_characters_,
                              next_expects_preload);
  }
}
+
// Emits the optimized greedy-loop form of this choice node (see the diagram
// above EmitChoices).  Returns the trace to be used for emitting the
// remaining (continuation) alternatives.
Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler, Trace* trace,
                                  AlternativeGenerationList* alt_gens,
                                  PreloadState* preload,
                                  GreedyLoopState* greedy_loop_state,
                                  int text_length) {
  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
  // Here we have special handling for greedy loops containing only text nodes
  // and other simple nodes. These are handled by pushing the current
  // position on the stack and then incrementing the current position each
  // time around the switch. On backtrack we decrement the current position
  // and check it against the pushed value. This avoids pushing backtrack
  // information for each iteration of the loop, which could take up a lot of
  // space.
  DCHECK(trace->stop_node() == nullptr);
  macro_assembler->PushCurrentPosition();
  Label greedy_match_failed;
  Trace greedy_match_trace;
  if (not_at_start()) greedy_match_trace.set_at_start(Trace::FALSE_VALUE);
  greedy_match_trace.set_backtrack(&greedy_match_failed);
  Label loop_label;
  macro_assembler->Bind(&loop_label);
  // Stop the recursive emission when it comes back around to this node; the
  // back edge is handled in LoopChoiceNode::Emit.
  greedy_match_trace.set_stop_node(this);
  greedy_match_trace.set_loop_label(&loop_label);
  alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
  macro_assembler->Bind(&greedy_match_failed);

  Label second_choice;  // For use in greedy matches.
  macro_assembler->Bind(&second_choice);

  Trace* new_trace = greedy_loop_state->counter_backtrack_trace();

  EmitChoices(compiler, alt_gens, 1, new_trace, preload);

  macro_assembler->Bind(greedy_loop_state->label());
  // If we have unwound to the bottom then backtrack.
  macro_assembler->CheckGreedyLoop(trace->backtrack());
  // Otherwise try the second priority at an earlier position.
  macro_assembler->AdvanceCurrentPosition(-text_length);
  macro_assembler->GoTo(&second_choice);
  return new_trace;
}
+
// If this choice node is the entry of a .-style loop (its second alternative
// consumes any single character and loops back here), emits Boyer-Moore skip
// instructions that quickly advance past positions which cannot start a
// match.  Returns the eats-at-least value computed along the way, or
// kEatsAtLeastNotYetInitialized if the pattern does not apply.
int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
                                              Trace* trace) {
  int eats_at_least = PreloadState::kEatsAtLeastNotYetInitialized;
  if (alternatives_->length() != 2) return eats_at_least;

  // The second alternative must be unguarded and consume exactly one
  // arbitrary character before looping back to this node.
  GuardedAlternative alt1 = alternatives_->at(1);
  if (alt1.guards() != nullptr && alt1.guards()->length() != 0) {
    return eats_at_least;
  }
  RegExpNode* eats_anything_node = alt1.node();
  if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) != this) {
    return eats_at_least;
  }

  // Really we should be creating a new trace when we execute this function,
  // but there is no need, because the code it generates cannot backtrack, and
  // we always arrive here with a trivial trace (since it's the entry to a
  // loop. That also implies that there are no preloaded characters, which is
  // good, because it means we won't be violating any assumptions by
  // overwriting those characters with new load instructions.
  DCHECK(trace->is_trivial());

  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
  Isolate* isolate = macro_assembler->isolate();
  // At this point we know that we are at a non-greedy loop that will eat
  // any character one at a time. Any non-anchored regexp has such a
  // loop prepended to it in order to find where it starts. We look for
  // a pattern of the form ...abc... where we can look 6 characters ahead
  // and step forwards 3 if the character is not one of abc. Abc need
  // not be atoms, they can be any reasonably limited character class or
  // small alternation.
  BoyerMooreLookahead* bm = bm_info(false);
  if (bm == nullptr) {
    eats_at_least =
        Min(kMaxLookaheadForBoyerMoore,
            EatsAtLeast(kMaxLookaheadForBoyerMoore, kRecursionBudget, false));
    if (eats_at_least >= 1) {
      bm = new (zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
      GuardedAlternative alt0 = alternatives_->at(0);
      alt0.node()->FillInBMInfo(isolate, 0, kRecursionBudget, bm, false);
    }
  }
  if (bm != nullptr) {
    bm->EmitSkipInstructions(macro_assembler);
  }
  return eats_at_least;
}
+
+void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
+ AlternativeGenerationList* alt_gens,
+ int first_choice, Trace* trace,
+ PreloadState* preload) {
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ SetUpPreLoad(compiler, trace, preload);
+
+ // For now we just call all choices one after the other. The idea ultimately
+ // is to use the Dispatch table to try only the relevant ones.
+ int choice_count = alternatives_->length();
+
+ int new_flush_budget = trace->flush_budget() / choice_count;
+
+ for (int i = first_choice; i < choice_count; i++) {
+ bool is_last = i == choice_count - 1;
+ bool fall_through_on_failure = !is_last;
+ GuardedAlternative alternative = alternatives_->at(i);
+ AlternativeGeneration* alt_gen = alt_gens->at(i);
+ alt_gen->quick_check_details.set_characters(preload->preload_characters_);
+ ZoneList<Guard*>* guards = alternative.guards();
+ int guard_count = (guards == nullptr) ? 0 : guards->length();
+ Trace new_trace(*trace);
+ new_trace.set_characters_preloaded(
+ preload->preload_is_current_ ? preload->preload_characters_ : 0);
+ if (preload->preload_has_checked_bounds_) {
+ new_trace.set_bound_checked_up_to(preload->preload_characters_);
+ }
+ new_trace.quick_check_performed()->Clear();
+ if (not_at_start_) new_trace.set_at_start(Trace::FALSE_VALUE);
+ if (!is_last) {
+ new_trace.set_backtrack(&alt_gen->after);
+ }
+ alt_gen->expects_preload = preload->preload_is_current_;
+ bool generate_full_check_inline = false;
+ if (compiler->optimize() &&
+ try_to_emit_quick_check_for_alternative(i == 0) &&
+ alternative.node()->EmitQuickCheck(
+ compiler, trace, &new_trace, preload->preload_has_checked_bounds_,
+ &alt_gen->possible_success, &alt_gen->quick_check_details,
+ fall_through_on_failure)) {
+ // Quick check was generated for this choice.
+ preload->preload_is_current_ = true;
+ preload->preload_has_checked_bounds_ = true;
+ // If we generated the quick check to fall through on possible success,
+ // we now need to generate the full check inline.
+ if (!fall_through_on_failure) {
+ macro_assembler->Bind(&alt_gen->possible_success);
+ new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
+ new_trace.set_characters_preloaded(preload->preload_characters_);
+ new_trace.set_bound_checked_up_to(preload->preload_characters_);
+ generate_full_check_inline = true;
+ }
+ } else if (alt_gen->quick_check_details.cannot_match()) {
+ if (!fall_through_on_failure) {
+ macro_assembler->GoTo(trace->backtrack());
+ }
+ continue;
+ } else {
+ // No quick check was generated. Put the full code here.
+ // If this is not the first choice then there could be slow checks from
+ // previous cases that go here when they fail. There's no reason to
+ // insist that they preload characters since the slow check we are about
+ // to generate probably can't use it.
+ if (i != first_choice) {
+ alt_gen->expects_preload = false;
+ new_trace.InvalidateCurrentCharacter();
+ }
+ generate_full_check_inline = true;
+ }
+ if (generate_full_check_inline) {
+ if (new_trace.actions() != nullptr) {
+ new_trace.set_flush_budget(new_flush_budget);
+ }
+ for (int j = 0; j < guard_count; j++) {
+ GenerateGuard(macro_assembler, guards->at(j), &new_trace);
+ }
+ alternative.node()->Emit(compiler, &new_trace);
+ preload->preload_is_current_ = false;
+ }
+ macro_assembler->Bind(&alt_gen->after);
+ }
+}
+
+void ChoiceNode::EmitOutOfLineContinuation(RegExpCompiler* compiler,
+ Trace* trace,
+ GuardedAlternative alternative,
+ AlternativeGeneration* alt_gen,
+ int preload_characters,
+ bool next_expects_preload) {
+ if (!alt_gen->possible_success.is_linked()) return;
+
+ RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+ macro_assembler->Bind(&alt_gen->possible_success);
+ Trace out_of_line_trace(*trace);
+ out_of_line_trace.set_characters_preloaded(preload_characters);
+ out_of_line_trace.set_quick_check_performed(&alt_gen->quick_check_details);
+ if (not_at_start_) out_of_line_trace.set_at_start(Trace::FALSE_VALUE);
+ ZoneList<Guard*>* guards = alternative.guards();
+ int guard_count = (guards == nullptr) ? 0 : guards->length();
+ if (next_expects_preload) {
+ Label reload_current_char;
+ out_of_line_trace.set_backtrack(&reload_current_char);
+ for (int j = 0; j < guard_count; j++) {
+ GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
+ }
+ alternative.node()->Emit(compiler, &out_of_line_trace);
+ macro_assembler->Bind(&reload_current_char);
+ // Reload the current character, since the next quick check expects that.
+ // We don't need to check bounds here because we only get into this
+ // code through a quick check which already did the checked load.
+ macro_assembler->LoadCurrentCharacter(trace->cp_offset(), nullptr, false,
+ preload_characters);
+ macro_assembler->GoTo(&(alt_gen->after));
+ } else {
+ out_of_line_trace.set_backtrack(&(alt_gen->after));
+ for (int j = 0; j < guard_count; j++) {
+ GenerateGuard(macro_assembler, guards->at(j), &out_of_line_trace);
+ }
+ alternative.node()->Emit(compiler, &out_of_line_trace);
+ }
+}
+
+void ActionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ LimitResult limit_result = LimitVersions(compiler, trace);
+ if (limit_result == DONE) return;
+ DCHECK(limit_result == CONTINUE);
+
+ RecursionCheck rc(compiler);
+
+ switch (action_type_) {
+ case STORE_POSITION: {
+ Trace::DeferredCapture new_capture(data_.u_position_register.reg,
+ data_.u_position_register.is_capture,
+ trace);
+ Trace new_trace = *trace;
+ new_trace.add_action(&new_capture);
+ on_success()->Emit(compiler, &new_trace);
+ break;
+ }
+ case INCREMENT_REGISTER: {
+ Trace::DeferredIncrementRegister new_increment(
+ data_.u_increment_register.reg);
+ Trace new_trace = *trace;
+ new_trace.add_action(&new_increment);
+ on_success()->Emit(compiler, &new_trace);
+ break;
+ }
+ case SET_REGISTER: {
+ Trace::DeferredSetRegister new_set(data_.u_store_register.reg,
+ data_.u_store_register.value);
+ Trace new_trace = *trace;
+ new_trace.add_action(&new_set);
+ on_success()->Emit(compiler, &new_trace);
+ break;
+ }
+ case CLEAR_CAPTURES: {
+ Trace::DeferredClearCaptures new_capture(Interval(
+ data_.u_clear_captures.range_from, data_.u_clear_captures.range_to));
+ Trace new_trace = *trace;
+ new_trace.add_action(&new_capture);
+ on_success()->Emit(compiler, &new_trace);
+ break;
+ }
+ case BEGIN_SUBMATCH:
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ } else {
+ assembler->WriteCurrentPositionToRegister(
+ data_.u_submatch.current_position_register, 0);
+ assembler->WriteStackPointerToRegister(
+ data_.u_submatch.stack_pointer_register);
+ on_success()->Emit(compiler, trace);
+ }
+ break;
+ case EMPTY_MATCH_CHECK: {
+ int start_pos_reg = data_.u_empty_match_check.start_register;
+ int stored_pos = 0;
+ int rep_reg = data_.u_empty_match_check.repetition_register;
+ bool has_minimum = (rep_reg != RegExpCompiler::kNoRegister);
+ bool know_dist = trace->GetStoredPosition(start_pos_reg, &stored_pos);
+ if (know_dist && !has_minimum && stored_pos == trace->cp_offset()) {
+ // If we know we haven't advanced and there is no minimum we
+ // can just backtrack immediately.
+ assembler->GoTo(trace->backtrack());
+ } else if (know_dist && stored_pos < trace->cp_offset()) {
+ // If we know we've advanced we can generate the continuation
+ // immediately.
+ on_success()->Emit(compiler, trace);
+ } else if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ } else {
+ Label skip_empty_check;
+ // If we have a minimum number of repetitions we check the current
+ // number first and skip the empty check if it's not enough.
+ if (has_minimum) {
+ int limit = data_.u_empty_match_check.repetition_limit;
+ assembler->IfRegisterLT(rep_reg, limit, &skip_empty_check);
+ }
+ // If the match is empty we bail out, otherwise we fall through
+ // to the on-success continuation.
+ assembler->IfRegisterEqPos(data_.u_empty_match_check.start_register,
+ trace->backtrack());
+ assembler->Bind(&skip_empty_check);
+ on_success()->Emit(compiler, trace);
+ }
+ break;
+ }
+ case POSITIVE_SUBMATCH_SUCCESS: {
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ return;
+ }
+ assembler->ReadCurrentPositionFromRegister(
+ data_.u_submatch.current_position_register);
+ assembler->ReadStackPointerFromRegister(
+ data_.u_submatch.stack_pointer_register);
+ int clear_register_count = data_.u_submatch.clear_register_count;
+ if (clear_register_count == 0) {
+ on_success()->Emit(compiler, trace);
+ return;
+ }
+ int clear_registers_from = data_.u_submatch.clear_register_from;
+ Label clear_registers_backtrack;
+ Trace new_trace = *trace;
+ new_trace.set_backtrack(&clear_registers_backtrack);
+ on_success()->Emit(compiler, &new_trace);
+
+ assembler->Bind(&clear_registers_backtrack);
+ int clear_registers_to = clear_registers_from + clear_register_count - 1;
+ assembler->ClearRegisters(clear_registers_from, clear_registers_to);
+
+ DCHECK(trace->backtrack() == nullptr);
+ assembler->Backtrack();
+ return;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ if (!trace->is_trivial()) {
+ trace->Flush(compiler, this);
+ return;
+ }
+
+ LimitResult limit_result = LimitVersions(compiler, trace);
+ if (limit_result == DONE) return;
+ DCHECK(limit_result == CONTINUE);
+
+ RecursionCheck rc(compiler);
+
+ DCHECK_EQ(start_reg_ + 1, end_reg_);
+ if (IgnoreCase(flags_)) {
+ assembler->CheckNotBackReferenceIgnoreCase(
+ start_reg_, read_backward(), IsUnicode(flags_), trace->backtrack());
+ } else {
+ assembler->CheckNotBackReference(start_reg_, read_backward(),
+ trace->backtrack());
+ }
+ // We are going to advance backward, so we may end up at the start.
+ if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
+
+ // Check that the back reference does not end inside a surrogate pair.
+ if (IsUnicode(flags_) && !compiler->one_byte()) {
+ assembler->CheckNotInSurrogatePair(trace->cp_offset(), trace->backtrack());
+ }
+ on_success()->Emit(compiler, trace);
+}
+
+// -------------------------------------------------------------------
+// Analysis
+
+void Analysis::EnsureAnalyzed(RegExpNode* that) {
+ StackLimitCheck check(isolate());
+ if (check.HasOverflowed()) {
+ fail("Stack overflow");
+ return;
+ }
+ if (that->info()->been_analyzed || that->info()->being_analyzed) return;
+ that->info()->being_analyzed = true;
+ that->Accept(this);
+ that->info()->being_analyzed = false;
+ that->info()->been_analyzed = true;
+}
+
+void Analysis::VisitEnd(EndNode* that) {
+ // nothing to do
+}
+
+void TextNode::CalculateOffsets() {
+ int element_count = elements()->length();
+ // Set up the offsets of the elements relative to the start. This is a fixed
+ // quantity since a TextNode can only contain fixed-width things.
+ int cp_offset = 0;
+ for (int i = 0; i < element_count; i++) {
+ TextElement& elm = elements()->at(i);
+ elm.set_cp_offset(cp_offset);
+ cp_offset += elm.length();
+ }
+}
+
+void Analysis::VisitText(TextNode* that) {
+ that->MakeCaseIndependent(isolate(), is_one_byte_);
+ EnsureAnalyzed(that->on_success());
+ if (!has_failed()) {
+ that->CalculateOffsets();
+ }
+}
+
+void Analysis::VisitAction(ActionNode* that) {
+ RegExpNode* target = that->on_success();
+ EnsureAnalyzed(target);
+ if (!has_failed()) {
+ // If the next node is interested in what it follows then this node
+ // has to be interested too so it can pass the information on.
+ that->info()->AddFromFollowing(target->info());
+ }
+}
+
+void Analysis::VisitChoice(ChoiceNode* that) {
+ NodeInfo* info = that->info();
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ RegExpNode* node = that->alternatives()->at(i).node();
+ EnsureAnalyzed(node);
+ if (has_failed()) return;
+ // Anything the following nodes need to know has to be known by
+ // this node also, so it can pass it on.
+ info->AddFromFollowing(node->info());
+ }
+}
+
+void Analysis::VisitLoopChoice(LoopChoiceNode* that) {
+ NodeInfo* info = that->info();
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ RegExpNode* node = that->alternatives()->at(i).node();
+ if (node != that->loop_node()) {
+ EnsureAnalyzed(node);
+ if (has_failed()) return;
+ info->AddFromFollowing(node->info());
+ }
+ }
+ // Check the loop last since it may need the value of this node
+ // to get a correct result.
+ EnsureAnalyzed(that->loop_node());
+ if (!has_failed()) {
+ info->AddFromFollowing(that->loop_node()->info());
+ }
+}
+
+void Analysis::VisitBackReference(BackReferenceNode* that) {
+ EnsureAnalyzed(that->on_success());
+}
+
+void Analysis::VisitAssertion(AssertionNode* that) {
+ EnsureAnalyzed(that->on_success());
+}
+
+void BackReferenceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm,
+ bool not_at_start) {
+ // Working out the set of characters that a backreference can match is too
+ // hard, so we just say that any character can match.
+ bm->SetRest(offset);
+ SaveBMInfo(bm, not_at_start, offset);
+}
+
+STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
+ RegExpMacroAssembler::kTableSize);
+
+void ChoiceNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ ZoneList<GuardedAlternative>* alts = alternatives();
+ budget = (budget - 1) / alts->length();
+ for (int i = 0; i < alts->length(); i++) {
+ GuardedAlternative& alt = alts->at(i);
+ if (alt.guards() != nullptr && alt.guards()->length() != 0) {
+ bm->SetRest(offset); // Give up trying to fill in info.
+ SaveBMInfo(bm, not_at_start, offset);
+ return;
+ }
+ alt.node()->FillInBMInfo(isolate, offset, budget, bm, not_at_start);
+ }
+ SaveBMInfo(bm, not_at_start, offset);
+}
+
+void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ if (initial_offset >= bm->length()) return;
+ int offset = initial_offset;
+ int max_char = bm->max_char();
+ for (int i = 0; i < elements()->length(); i++) {
+ if (offset >= bm->length()) {
+ if (initial_offset == 0) set_bm_info(not_at_start, bm);
+ return;
+ }
+ TextElement text = elements()->at(i);
+ if (text.text_type() == TextElement::ATOM) {
+ RegExpAtom* atom = text.atom();
+ for (int j = 0; j < atom->length(); j++, offset++) {
+ if (offset >= bm->length()) {
+ if (initial_offset == 0) set_bm_info(not_at_start, bm);
+ return;
+ }
+ uc16 character = atom->data()[j];
+ if (IgnoreCase(atom->flags())) {
+ unibrow::uchar chars[4];
+ int length = GetCaseIndependentLetters(
+ isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
+ chars, 4);
+ for (int j = 0; j < length; j++) {
+ bm->Set(offset, chars[j]);
+ }
+ } else {
+ if (character <= max_char) bm->Set(offset, character);
+ }
+ }
+ } else {
+ DCHECK_EQ(TextElement::CHAR_CLASS, text.text_type());
+ RegExpCharacterClass* char_class = text.char_class();
+ ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
+ if (char_class->is_negated()) {
+ bm->SetAll(offset);
+ } else {
+ for (int k = 0; k < ranges->length(); k++) {
+ CharacterRange& range = ranges->at(k);
+ if (range.from() > max_char) continue;
+ int to = Min(max_char, static_cast<int>(range.to()));
+ bm->SetInterval(offset, Interval(range.from(), to));
+ }
+ }
+ offset++;
+ }
+ }
+ if (offset >= bm->length()) {
+ if (initial_offset == 0) set_bm_info(not_at_start, bm);
+ return;
+ }
+ on_success()->FillInBMInfo(isolate, offset, budget - 1, bm,
+ true); // Not at start after a text node.
+ if (initial_offset == 0) set_bm_info(not_at_start, bm);
+}
+
+// static
+RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
+ RegExpCompiler* compiler, RegExpNode* on_success, JSRegExp::Flags flags) {
+ DCHECK(!compiler->read_backward());
+ Zone* zone = compiler->zone();
+ ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+ ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+
+ ChoiceNode* optional_step_back = new (zone) ChoiceNode(2, zone);
+
+ int stack_register = compiler->UnicodeLookaroundStackRegister();
+ int position_register = compiler->UnicodeLookaroundPositionRegister();
+ RegExpNode* step_back = TextNode::CreateForCharacterRanges(
+ zone, lead_surrogates, true, on_success, flags);
+ RegExpLookaround::Builder builder(true, step_back, stack_register,
+ position_register);
+ RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
+ zone, trail_surrogates, false, builder.on_match_success(), flags);
+
+ optional_step_back->AddAlternative(
+ GuardedAlternative(builder.ForMatch(match_trail)));
+ optional_step_back->AddAlternative(GuardedAlternative(on_success));
+
+ return optional_step_back;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
new file mode 100644
index 0000000000..1b70abfd98
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -0,0 +1,657 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_COMPILER_H_
+#define V8_REGEXP_REGEXP_COMPILER_H_
+
+#include <bitset>
+
+#include "src/base/small-vector.h"
+#include "src/regexp/regexp-nodes.h"
+
+namespace v8 {
+namespace internal {
+
+class DynamicBitSet;
+class Isolate;
+
+namespace regexp_compiler_constants {
+
+// The '2' variant is has inclusive from and exclusive to.
+// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
+// which include WhiteSpace (7.2) or LineTerminator (7.3) values.
+constexpr uc32 kRangeEndMarker = 0x110000;
+constexpr int kSpaceRanges[] = {
+ '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0, 0x00A1, 0x1680,
+ 0x1681, 0x2000, 0x200B, 0x2028, 0x202A, 0x202F, 0x2030,
+ 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, kRangeEndMarker};
+constexpr int kSpaceRangeCount = arraysize(kSpaceRanges);
+
+constexpr int kWordRanges[] = {'0', '9' + 1, 'A', 'Z' + 1, '_',
+ '_' + 1, 'a', 'z' + 1, kRangeEndMarker};
+constexpr int kWordRangeCount = arraysize(kWordRanges);
+constexpr int kDigitRanges[] = {'0', '9' + 1, kRangeEndMarker};
+constexpr int kDigitRangeCount = arraysize(kDigitRanges);
+constexpr int kSurrogateRanges[] = {kLeadSurrogateStart,
+ kLeadSurrogateStart + 1, kRangeEndMarker};
+constexpr int kSurrogateRangeCount = arraysize(kSurrogateRanges);
+constexpr int kLineTerminatorRanges[] = {0x000A, 0x000B, 0x000D, 0x000E,
+ 0x2028, 0x202A, kRangeEndMarker};
+constexpr int kLineTerminatorRangeCount = arraysize(kLineTerminatorRanges);
+
+// More makes code generation slower, less makes V8 benchmark score lower.
+constexpr int kMaxLookaheadForBoyerMoore = 8;
+// In a 3-character pattern you can maximally step forwards 3 characters
+// at a time, which is not always enough to pay for the extra logic.
+constexpr int kPatternTooShortForBoyerMoore = 2;
+
+} // namespace regexp_compiler_constants
+
+inline bool IgnoreCase(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kIgnoreCase) != 0;
+}
+
+inline bool IsUnicode(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kUnicode) != 0;
+}
+
+inline bool IsSticky(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kSticky) != 0;
+}
+
+inline bool IsGlobal(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kGlobal) != 0;
+}
+
+inline bool DotAll(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kDotAll) != 0;
+}
+
+inline bool Multiline(JSRegExp::Flags flags) {
+ return (flags & JSRegExp::kMultiline) != 0;
+}
+
+inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
+ // Both unicode and ignore_case flags are set. We need to use ICU to find
+ // the closure over case equivalents.
+ return IsUnicode(flags) && IgnoreCase(flags);
+}
+
+// Details of a quick mask-compare check that can look ahead in the
+// input stream.
+class QuickCheckDetails {
+ public:
+ QuickCheckDetails()
+ : characters_(0), mask_(0), value_(0), cannot_match_(false) {}
+ explicit QuickCheckDetails(int characters)
+ : characters_(characters), mask_(0), value_(0), cannot_match_(false) {}
+ bool Rationalize(bool one_byte);
+ // Merge in the information from another branch of an alternation.
+ void Merge(QuickCheckDetails* other, int from_index);
+ // Advance the current position by some amount.
+ void Advance(int by, bool one_byte);
+ void Clear();
+ bool cannot_match() { return cannot_match_; }
+ void set_cannot_match() { cannot_match_ = true; }
+ struct Position {
+ Position() : mask(0), value(0), determines_perfectly(false) {}
+ uc16 mask;
+ uc16 value;
+ bool determines_perfectly;
+ };
+ int characters() { return characters_; }
+ void set_characters(int characters) { characters_ = characters; }
+ Position* positions(int index) {
+ DCHECK_LE(0, index);
+ DCHECK_GT(characters_, index);
+ return positions_ + index;
+ }
+ uint32_t mask() { return mask_; }
+ uint32_t value() { return value_; }
+
+ private:
+ // How many characters do we have quick check information from. This is
+ // the same for all branches of a choice node.
+ int characters_;
+ Position positions_[4];
+ // These values are the condensate of the above array after Rationalize().
+ uint32_t mask_;
+ uint32_t value_;
+ // If set to true, there is no way this quick check can match at all.
+ // E.g., if it requires to be at the start of the input, and isn't.
+ bool cannot_match_;
+};
+
+// Improve the speed that we scan for an initial point where a non-anchored
+// regexp can match by using a Boyer-Moore-like table. This is done by
+// identifying non-greedy non-capturing loops in the nodes that eat any
+// character one at a time. For example in the middle of the regexp
+// /foo[\s\S]*?bar/ we find such a loop. There is also such a loop implicitly
+// inserted at the start of any non-anchored regexp.
+//
+// When we have found such a loop we look ahead in the nodes to find the set of
+// characters that can come at given distances. For example for the regexp
+// /.?foo/ we know that there are at least 3 characters ahead of us, and the
+// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
+// the lookahead info where the set of characters is reasonably constrained. In
+// our example this is from index 1 to 2 (0 is not constrained). We can now
+// look 3 characters ahead and if we don't find one of [f, o] (the union of
+// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
+//
+// For Unicode input strings we do the same, but modulo 128.
+//
+// We also look at the first string fed to the regexp and use that to get a hint
+// of the character frequencies in the inputs. This affects the assessment of
+// whether the set of characters is 'reasonably constrained'.
+//
+// We also have another lookahead mechanism (called quick check in the code),
+// which uses a wide load of multiple characters followed by a mask and compare
+// to determine whether a match is possible at this point.
+enum ContainedInLattice {
+ kNotYet = 0,
+ kLatticeIn = 1,
+ kLatticeOut = 2,
+ kLatticeUnknown = 3 // Can also mean both in and out.
+};
+
+inline ContainedInLattice Combine(ContainedInLattice a, ContainedInLattice b) {
+ return static_cast<ContainedInLattice>(a | b);
+}
+
+class BoyerMoorePositionInfo : public ZoneObject {
+ public:
+ bool at(int i) const { return map_[i]; }
+
+ static constexpr int kMapSize = 128;
+ static constexpr int kMask = kMapSize - 1;
+
+ int map_count() const { return map_count_; }
+
+ void Set(int character);
+ void SetInterval(const Interval& interval);
+ void SetAll();
+
+ bool is_non_word() { return w_ == kLatticeOut; }
+ bool is_word() { return w_ == kLatticeIn; }
+
+ using Bitset = std::bitset<kMapSize>;
+ Bitset raw_bitset() const { return map_; }
+
+ private:
+ Bitset map_;
+ int map_count_ = 0; // Number of set bits in the map.
+ ContainedInLattice w_ = kNotYet; // The \w character class.
+};
+
+class BoyerMooreLookahead : public ZoneObject {
+ public:
+ BoyerMooreLookahead(int length, RegExpCompiler* compiler, Zone* zone);
+
+ int length() { return length_; }
+ int max_char() { return max_char_; }
+ RegExpCompiler* compiler() { return compiler_; }
+
+ int Count(int map_number) { return bitmaps_->at(map_number)->map_count(); }
+
+ BoyerMoorePositionInfo* at(int i) { return bitmaps_->at(i); }
+
+ void Set(int map_number, int character) {
+ if (character > max_char_) return;
+ BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
+ info->Set(character);
+ }
+
+ void SetInterval(int map_number, const Interval& interval) {
+ if (interval.from() > max_char_) return;
+ BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
+ if (interval.to() > max_char_) {
+ info->SetInterval(Interval(interval.from(), max_char_));
+ } else {
+ info->SetInterval(interval);
+ }
+ }
+
+ void SetAll(int map_number) { bitmaps_->at(map_number)->SetAll(); }
+
+ void SetRest(int from_map) {
+ for (int i = from_map; i < length_; i++) SetAll(i);
+ }
+ void EmitSkipInstructions(RegExpMacroAssembler* masm);
+
+ private:
+ // This is the value obtained by EatsAtLeast. If we do not have at least this
+ // many characters left in the sample string then the match is bound to fail.
+ // Therefore it is OK to read a character this far ahead of the current match
+ // point.
+ int length_;
+ RegExpCompiler* compiler_;
+ // 0xff for Latin1, 0xffff for UTF-16.
+ int max_char_;
+ ZoneList<BoyerMoorePositionInfo*>* bitmaps_;
+
+ int GetSkipTable(int min_lookahead, int max_lookahead,
+ Handle<ByteArray> boolean_skip_table);
+ bool FindWorthwhileInterval(int* from, int* to);
+ int FindBestInterval(int max_number_of_chars, int old_biggest_points,
+ int* from, int* to);
+};
+
+// There are many ways to generate code for a node. This class encapsulates
+// the current way we should be generating. In other words it encapsulates
+// the current state of the code generator. The effect of this is that we
+// generate code for paths that the matcher can take through the regular
+// expression. A given node in the regexp can be code-generated several times
+// as it can be part of several traces. For example for the regexp:
+// /foo(bar|ip)baz/ the code to match baz will be generated twice, once as part
+// of the foo-bar-baz trace and once as part of the foo-ip-baz trace. The code
+// to match foo is generated only once (the traces have a common prefix). The
+// code to store the capture is deferred and generated (twice) after the places
+// where baz has been matched.
+class Trace {
+ public:
+ // A value for a property that is either known to be true, know to be false,
+ // or not known.
+ enum TriBool { UNKNOWN = -1, FALSE_VALUE = 0, TRUE_VALUE = 1 };
+
+ class DeferredAction {
+ public:
+ DeferredAction(ActionNode::ActionType action_type, int reg)
+ : action_type_(action_type), reg_(reg), next_(nullptr) {}
+ DeferredAction* next() { return next_; }
+ bool Mentions(int reg);
+ int reg() { return reg_; }
+ ActionNode::ActionType action_type() { return action_type_; }
+
+ private:
+ ActionNode::ActionType action_type_;
+ int reg_;
+ DeferredAction* next_;
+ friend class Trace;
+ };
+
+ class DeferredCapture : public DeferredAction {
+ public:
+ DeferredCapture(int reg, bool is_capture, Trace* trace)
+ : DeferredAction(ActionNode::STORE_POSITION, reg),
+ cp_offset_(trace->cp_offset()),
+ is_capture_(is_capture) {}
+ int cp_offset() { return cp_offset_; }
+ bool is_capture() { return is_capture_; }
+
+ private:
+ int cp_offset_;
+ bool is_capture_;
+ void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
+ };
+
+ class DeferredSetRegister : public DeferredAction {
+ public:
+ DeferredSetRegister(int reg, int value)
+ : DeferredAction(ActionNode::SET_REGISTER, reg), value_(value) {}
+ int value() { return value_; }
+
+ private:
+ int value_;
+ };
+
+ class DeferredClearCaptures : public DeferredAction {
+ public:
+ explicit DeferredClearCaptures(Interval range)
+ : DeferredAction(ActionNode::CLEAR_CAPTURES, -1), range_(range) {}
+ Interval range() { return range_; }
+
+ private:
+ Interval range_;
+ };
+
+ class DeferredIncrementRegister : public DeferredAction {
+ public:
+ explicit DeferredIncrementRegister(int reg)
+ : DeferredAction(ActionNode::INCREMENT_REGISTER, reg) {}
+ };
+
+ Trace()
+ : cp_offset_(0),
+ actions_(nullptr),
+ backtrack_(nullptr),
+ stop_node_(nullptr),
+ loop_label_(nullptr),
+ characters_preloaded_(0),
+ bound_checked_up_to_(0),
+ flush_budget_(100),
+ at_start_(UNKNOWN) {}
+
+ // End the trace. This involves flushing the deferred actions in the trace
+ // and pushing a backtrack location onto the backtrack stack. Once this is
+ // done we can start a new trace or go to one that has already been
+ // generated.
+ void Flush(RegExpCompiler* compiler, RegExpNode* successor);
+ int cp_offset() { return cp_offset_; }
+ DeferredAction* actions() { return actions_; }
+ // A trivial trace is one that has no deferred actions or other state that
+ // affects the assumptions used when generating code. There is no recorded
+ // backtrack location in a trivial trace, so with a trivial trace we will
+ // generate code that, on a failure to match, gets the backtrack location
+ // from the backtrack stack rather than using a direct jump instruction. We
+ // always start code generation with a trivial trace and non-trivial traces
+ // are created as we emit code for nodes or add to the list of deferred
+ // actions in the trace. The location of the code generated for a node using
+ // a trivial trace is recorded in a label in the node so that gotos can be
+ // generated to that code.
+ bool is_trivial() {
+ return backtrack_ == nullptr && actions_ == nullptr && cp_offset_ == 0 &&
+ characters_preloaded_ == 0 && bound_checked_up_to_ == 0 &&
+ quick_check_performed_.characters() == 0 && at_start_ == UNKNOWN;
+ }
+ TriBool at_start() { return at_start_; }
+ void set_at_start(TriBool at_start) { at_start_ = at_start; }
+ Label* backtrack() { return backtrack_; }
+ Label* loop_label() { return loop_label_; }
+ RegExpNode* stop_node() { return stop_node_; }
+ int characters_preloaded() { return characters_preloaded_; }
+ int bound_checked_up_to() { return bound_checked_up_to_; }
+ int flush_budget() { return flush_budget_; }
+ QuickCheckDetails* quick_check_performed() { return &quick_check_performed_; }
+ bool mentions_reg(int reg);
+ // Returns true if a deferred position store exists to the specified
+ // register and stores the offset in the out-parameter. Otherwise
+ // returns false.
+ bool GetStoredPosition(int reg, int* cp_offset);
+ // These set methods and AdvanceCurrentPositionInTrace should be used only on
+ // new traces - the intention is that traces are immutable after creation.
+ void add_action(DeferredAction* new_action) {
+ DCHECK(new_action->next_ == nullptr);
+ new_action->next_ = actions_;
+ actions_ = new_action;
+ }
+ void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
+ void set_stop_node(RegExpNode* node) { stop_node_ = node; }
+ void set_loop_label(Label* label) { loop_label_ = label; }
+ void set_characters_preloaded(int count) { characters_preloaded_ = count; }
+ void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
+ void set_flush_budget(int to) { flush_budget_ = to; }
+ void set_quick_check_performed(QuickCheckDetails* d) {
+ quick_check_performed_ = *d;
+ }
+ void InvalidateCurrentCharacter();
+ void AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler);
+
+ private:
+ int FindAffectedRegisters(DynamicBitSet* affected_registers, Zone* zone);
+ void PerformDeferredActions(RegExpMacroAssembler* macro, int max_register,
+ const DynamicBitSet& affected_registers,
+ DynamicBitSet* registers_to_pop,
+ DynamicBitSet* registers_to_clear, Zone* zone);
+ void RestoreAffectedRegisters(RegExpMacroAssembler* macro, int max_register,
+ const DynamicBitSet& registers_to_pop,
+ const DynamicBitSet& registers_to_clear);
+ int cp_offset_;
+ DeferredAction* actions_;
+ Label* backtrack_;
+ RegExpNode* stop_node_;
+ Label* loop_label_;
+ int characters_preloaded_;
+ int bound_checked_up_to_;
+ QuickCheckDetails quick_check_performed_;
+ int flush_budget_;
+ TriBool at_start_;
+};
+
+class GreedyLoopState {
+ public:
+ explicit GreedyLoopState(bool not_at_start);
+
+ Label* label() { return &label_; }
+ Trace* counter_backtrack_trace() { return &counter_backtrack_trace_; }
+
+ private:
+ Label label_;
+ Trace counter_backtrack_trace_;
+};
+
+struct PreloadState {
+ static const int kEatsAtLeastNotYetInitialized = -1;
+ bool preload_is_current_;
+ bool preload_has_checked_bounds_;
+ int preload_characters_;
+ int eats_at_least_;
+ void init() { eats_at_least_ = kEatsAtLeastNotYetInitialized; }
+};
+
+// Assertion propagation moves information about assertions such as
+// \b to the affected nodes. For instance, in /.\b./ information must
+// be propagated to the first '.' that whatever follows needs to know
+// if it matched a word or a non-word, and to the second '.' that it
+// has to check if it succeeds a word or non-word. In this case the
+// result will be something like:
+//
+// +-------+ +------------+
+// | . | | . |
+// +-------+ ---> +------------+
+// | word? | | check word |
+// +-------+ +------------+
+class Analysis : public NodeVisitor {
+ public:
+ Analysis(Isolate* isolate, bool is_one_byte)
+ : isolate_(isolate), is_one_byte_(is_one_byte), error_message_(nullptr) {}
+ void EnsureAnalyzed(RegExpNode* node);
+
+#define DECLARE_VISIT(Type) void Visit##Type(Type##Node* that) override;
+ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+ void VisitLoopChoice(LoopChoiceNode* that) override;
+
+ bool has_failed() { return error_message_ != nullptr; }
+ const char* error_message() {
+ DCHECK(error_message_ != nullptr);
+ return error_message_;
+ }
+ void fail(const char* error_message) { error_message_ = error_message; }
+
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ Isolate* isolate_;
+ bool is_one_byte_;
+ const char* error_message_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
+};
+
+class FrequencyCollator {
+ public:
+ FrequencyCollator() : total_samples_(0) {
+ for (int i = 0; i < RegExpMacroAssembler::kTableSize; i++) {
+ frequencies_[i] = CharacterFrequency(i);
+ }
+ }
+
+ void CountCharacter(int character) {
+ int index = (character & RegExpMacroAssembler::kTableMask);
+ frequencies_[index].Increment();
+ total_samples_++;
+ }
+
+ // Does not measure in percent, but rather per-128 (the table size from the
+ // regexp macro assembler).
+ int Frequency(int in_character) {
+ DCHECK((in_character & RegExpMacroAssembler::kTableMask) == in_character);
+ if (total_samples_ < 1) return 1; // Division by zero.
+ int freq_in_per128 =
+ (frequencies_[in_character].counter() * 128) / total_samples_;
+ return freq_in_per128;
+ }
+
+ private:
+ class CharacterFrequency {
+ public:
+ CharacterFrequency() : counter_(0), character_(-1) {}
+ explicit CharacterFrequency(int character)
+ : counter_(0), character_(character) {}
+
+ void Increment() { counter_++; }
+ int counter() { return counter_; }
+ int character() { return character_; }
+
+ private:
+ int counter_;
+ int character_;
+ };
+
+ private:
+ CharacterFrequency frequencies_[RegExpMacroAssembler::kTableSize];
+ int total_samples_;
+};
+
+class RegExpCompiler {
+ public:
+ RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
+ bool is_one_byte);
+
+ int AllocateRegister() {
+ if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
+ reg_exp_too_big_ = true;
+ return next_register_;
+ }
+ return next_register_++;
+ }
+
+ // Lookarounds to match lone surrogates for unicode character class matches
+ // are never nested. We can therefore reuse registers.
+ int UnicodeLookaroundStackRegister() {
+ if (unicode_lookaround_stack_register_ == kNoRegister) {
+ unicode_lookaround_stack_register_ = AllocateRegister();
+ }
+ return unicode_lookaround_stack_register_;
+ }
+
+ int UnicodeLookaroundPositionRegister() {
+ if (unicode_lookaround_position_register_ == kNoRegister) {
+ unicode_lookaround_position_register_ = AllocateRegister();
+ }
+ return unicode_lookaround_position_register_;
+ }
+
+ struct CompilationResult final {
+ explicit CompilationResult(const char* error_message)
+ : error_message(error_message) {}
+ CompilationResult(Object code, int registers)
+ : code(code), num_registers(registers) {}
+
+ static CompilationResult RegExpTooBig() {
+ return CompilationResult("RegExp too big");
+ }
+
+ bool Succeeded() const { return error_message == nullptr; }
+
+ const char* const error_message = nullptr;
+ Object code;
+ int num_registers = 0;
+ };
+
+ CompilationResult Assemble(Isolate* isolate, RegExpMacroAssembler* assembler,
+ RegExpNode* start, int capture_count,
+ Handle<String> pattern);
+
+ // If the regexp matching starts within a surrogate pair, step back to the
+ // lead surrogate and start matching from there.
+ static RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
+ RegExpNode* on_success,
+ JSRegExp::Flags flags);
+
+ inline void AddWork(RegExpNode* node) {
+ if (!node->on_work_list() && !node->label()->is_bound()) {
+ node->set_on_work_list(true);
+ work_list_->push_back(node);
+ }
+ }
+
+ static const int kImplementationOffset = 0;
+ static const int kNumberOfRegistersOffset = 0;
+ static const int kCodeOffset = 1;
+
+ RegExpMacroAssembler* macro_assembler() { return macro_assembler_; }
+ EndNode* accept() { return accept_; }
+
+ static const int kMaxRecursion = 100;
+ inline int recursion_depth() { return recursion_depth_; }
+ inline void IncrementRecursionDepth() { recursion_depth_++; }
+ inline void DecrementRecursionDepth() { recursion_depth_--; }
+
+ void SetRegExpTooBig() { reg_exp_too_big_ = true; }
+
+ inline bool one_byte() { return one_byte_; }
+ inline bool optimize() { return optimize_; }
+ inline void set_optimize(bool value) { optimize_ = value; }
+ inline bool limiting_recursion() { return limiting_recursion_; }
+ inline void set_limiting_recursion(bool value) {
+ limiting_recursion_ = value;
+ }
+ bool read_backward() { return read_backward_; }
+ void set_read_backward(bool value) { read_backward_ = value; }
+ FrequencyCollator* frequency_collator() { return &frequency_collator_; }
+
+ int current_expansion_factor() { return current_expansion_factor_; }
+ void set_current_expansion_factor(int value) {
+ current_expansion_factor_ = value;
+ }
+
+ Isolate* isolate() const { return isolate_; }
+ Zone* zone() const { return zone_; }
+
+ static const int kNoRegister = -1;
+
+ private:
+ EndNode* accept_;
+ int next_register_;
+ int unicode_lookaround_stack_register_;
+ int unicode_lookaround_position_register_;
+ std::vector<RegExpNode*>* work_list_;
+ int recursion_depth_;
+ RegExpMacroAssembler* macro_assembler_;
+ bool one_byte_;
+ bool reg_exp_too_big_;
+ bool limiting_recursion_;
+ bool optimize_;
+ bool read_backward_;
+ int current_expansion_factor_;
+ FrequencyCollator frequency_collator_;
+ Isolate* isolate_;
+ Zone* zone_;
+};
+
+// Categorizes character ranges into BMP, non-BMP, lead, and trail surrogates.
+class UnicodeRangeSplitter {
+ public:
+ V8_EXPORT_PRIVATE UnicodeRangeSplitter(ZoneList<CharacterRange>* base);
+
+ static constexpr int kInitialSize = 8;
+ using CharacterRangeVector = base::SmallVector<CharacterRange, kInitialSize>;
+
+ const CharacterRangeVector* bmp() const { return &bmp_; }
+ const CharacterRangeVector* lead_surrogates() const {
+ return &lead_surrogates_;
+ }
+ const CharacterRangeVector* trail_surrogates() const {
+ return &trail_surrogates_;
+ }
+ const CharacterRangeVector* non_bmp() const { return &non_bmp_; }
+
+ private:
+ void AddRange(CharacterRange range);
+
+ CharacterRangeVector bmp_;
+ CharacterRangeVector lead_surrogates_;
+ CharacterRangeVector trail_surrogates_;
+ CharacterRangeVector non_bmp_;
+};
+
+// We need to check for the following characters: 0x39C 0x3BC 0x178.
+// TODO(jgruber): Move to CharacterRange.
+bool RangeContainsLatin1Equivalents(CharacterRange range);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_COMPILER_H_
diff --git a/deps/v8/src/regexp/regexp-dotprinter.cc b/deps/v8/src/regexp/regexp-dotprinter.cc
new file mode 100644
index 0000000000..a6d72aaf5b
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-dotprinter.cc
@@ -0,0 +1,244 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-dotprinter.h"
+
+#include "src/regexp/regexp-compiler.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------
+// Dot/dotty output
+
+#ifdef DEBUG
+
+class DotPrinterImpl : public NodeVisitor {
+ public:
+ explicit DotPrinterImpl(std::ostream& os) : os_(os) {}
+ void PrintNode(const char* label, RegExpNode* node);
+ void Visit(RegExpNode* node);
+ void PrintAttributes(RegExpNode* from);
+ void PrintOnFailure(RegExpNode* from, RegExpNode* to);
+#define DECLARE_VISIT(Type) virtual void Visit##Type(Type##Node* that);
+ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+ private:
+ std::ostream& os_;
+};
+
+void DotPrinterImpl::PrintNode(const char* label, RegExpNode* node) {
+ os_ << "digraph G {\n graph [label=\"";
+ for (int i = 0; label[i]; i++) {
+ switch (label[i]) {
+ case '\\':
+ os_ << "\\\\";
+ break;
+ case '"':
+ os_ << "\"";
+ break;
+ default:
+ os_ << label[i];
+ break;
+ }
+ }
+ os_ << "\"];\n";
+ Visit(node);
+ os_ << "}" << std::endl;
+}
+
+void DotPrinterImpl::Visit(RegExpNode* node) {
+ if (node->info()->visited) return;
+ node->info()->visited = true;
+ node->Accept(this);
+}
+
+void DotPrinterImpl::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
+ os_ << " n" << from << " -> n" << on_failure << " [style=dotted];\n";
+ Visit(on_failure);
+}
+
+class AttributePrinter {
+ public:
+ explicit AttributePrinter(std::ostream& os) // NOLINT
+ : os_(os), first_(true) {}
+ void PrintSeparator() {
+ if (first_) {
+ first_ = false;
+ } else {
+ os_ << "|";
+ }
+ }
+ void PrintBit(const char* name, bool value) {
+ if (!value) return;
+ PrintSeparator();
+ os_ << "{" << name << "}";
+ }
+ void PrintPositive(const char* name, int value) {
+ if (value < 0) return;
+ PrintSeparator();
+ os_ << "{" << name << "|" << value << "}";
+ }
+
+ private:
+ std::ostream& os_;
+ bool first_;
+};
+
+void DotPrinterImpl::PrintAttributes(RegExpNode* that) {
+ os_ << " a" << that << " [shape=Mrecord, color=grey, fontcolor=grey, "
+ << "margin=0.1, fontsize=10, label=\"{";
+ AttributePrinter printer(os_);
+ NodeInfo* info = that->info();
+ printer.PrintBit("NI", info->follows_newline_interest);
+ printer.PrintBit("WI", info->follows_word_interest);
+ printer.PrintBit("SI", info->follows_start_interest);
+ Label* label = that->label();
+ if (label->is_bound()) printer.PrintPositive("@", label->pos());
+ os_ << "}\"];\n"
+ << " a" << that << " -> n" << that
+ << " [style=dashed, color=grey, arrowhead=none];\n";
+}
+
+void DotPrinterImpl::VisitChoice(ChoiceNode* that) {
+ os_ << " n" << that << " [shape=Mrecord, label=\"?\"];\n";
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ GuardedAlternative alt = that->alternatives()->at(i);
+ os_ << " n" << that << " -> n" << alt.node();
+ }
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ GuardedAlternative alt = that->alternatives()->at(i);
+ alt.node()->Accept(this);
+ }
+}
+
+void DotPrinterImpl::VisitText(TextNode* that) {
+ Zone* zone = that->zone();
+ os_ << " n" << that << " [label=\"";
+ for (int i = 0; i < that->elements()->length(); i++) {
+ if (i > 0) os_ << " ";
+ TextElement elm = that->elements()->at(i);
+ switch (elm.text_type()) {
+ case TextElement::ATOM: {
+ Vector<const uc16> data = elm.atom()->data();
+ for (int i = 0; i < data.length(); i++) {
+ os_ << static_cast<char>(data[i]);
+ }
+ break;
+ }
+ case TextElement::CHAR_CLASS: {
+ RegExpCharacterClass* node = elm.char_class();
+ os_ << "[";
+ if (node->is_negated()) os_ << "^";
+ for (int j = 0; j < node->ranges(zone)->length(); j++) {
+ CharacterRange range = node->ranges(zone)->at(j);
+ os_ << AsUC16(range.from()) << "-" << AsUC16(range.to());
+ }
+ os_ << "]";
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ os_ << "\", shape=box, peripheries=2];\n";
+ PrintAttributes(that);
+ os_ << " n" << that << " -> n" << that->on_success() << ";\n";
+ Visit(that->on_success());
+}
+
+void DotPrinterImpl::VisitBackReference(BackReferenceNode* that) {
+ os_ << " n" << that << " [label=\"$" << that->start_register() << "..$"
+ << that->end_register() << "\", shape=doubleoctagon];\n";
+ PrintAttributes(that);
+ os_ << " n" << that << " -> n" << that->on_success() << ";\n";
+ Visit(that->on_success());
+}
+
+void DotPrinterImpl::VisitEnd(EndNode* that) {
+ os_ << " n" << that << " [style=bold, shape=point];\n";
+ PrintAttributes(that);
+}
+
+void DotPrinterImpl::VisitAssertion(AssertionNode* that) {
+ os_ << " n" << that << " [";
+ switch (that->assertion_type()) {
+ case AssertionNode::AT_END:
+ os_ << "label=\"$\", shape=septagon";
+ break;
+ case AssertionNode::AT_START:
+ os_ << "label=\"^\", shape=septagon";
+ break;
+ case AssertionNode::AT_BOUNDARY:
+ os_ << "label=\"\\b\", shape=septagon";
+ break;
+ case AssertionNode::AT_NON_BOUNDARY:
+ os_ << "label=\"\\B\", shape=septagon";
+ break;
+ case AssertionNode::AFTER_NEWLINE:
+ os_ << "label=\"(?<=\\n)\", shape=septagon";
+ break;
+ }
+ os_ << "];\n";
+ PrintAttributes(that);
+ RegExpNode* successor = that->on_success();
+ os_ << " n" << that << " -> n" << successor << ";\n";
+ Visit(successor);
+}
+
+void DotPrinterImpl::VisitAction(ActionNode* that) {
+ os_ << " n" << that << " [";
+ switch (that->action_type_) {
+ case ActionNode::SET_REGISTER:
+ os_ << "label=\"$" << that->data_.u_store_register.reg
+ << ":=" << that->data_.u_store_register.value << "\", shape=octagon";
+ break;
+ case ActionNode::INCREMENT_REGISTER:
+ os_ << "label=\"$" << that->data_.u_increment_register.reg
+ << "++\", shape=octagon";
+ break;
+ case ActionNode::STORE_POSITION:
+ os_ << "label=\"$" << that->data_.u_position_register.reg
+ << ":=$pos\", shape=octagon";
+ break;
+ case ActionNode::BEGIN_SUBMATCH:
+ os_ << "label=\"$" << that->data_.u_submatch.current_position_register
+ << ":=$pos,begin\", shape=septagon";
+ break;
+ case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
+ os_ << "label=\"escape\", shape=septagon";
+ break;
+ case ActionNode::EMPTY_MATCH_CHECK:
+ os_ << "label=\"$" << that->data_.u_empty_match_check.start_register
+ << "=$pos?,$" << that->data_.u_empty_match_check.repetition_register
+ << "<" << that->data_.u_empty_match_check.repetition_limit
+ << "?\", shape=septagon";
+ break;
+ case ActionNode::CLEAR_CAPTURES: {
+ os_ << "label=\"clear $" << that->data_.u_clear_captures.range_from
+ << " to $" << that->data_.u_clear_captures.range_to
+ << "\", shape=septagon";
+ break;
+ }
+ }
+ os_ << "];\n";
+ PrintAttributes(that);
+ RegExpNode* successor = that->on_success();
+ os_ << " n" << that << " -> n" << successor << ";\n";
+ Visit(successor);
+}
+
+#endif // DEBUG
+
+void DotPrinter::DotPrint(const char* label, RegExpNode* node) {
+#ifdef DEBUG
+ StdoutStream os;
+ DotPrinterImpl printer(os);
+ printer.PrintNode(label, node);
+#endif // DEBUG
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-dotprinter.h b/deps/v8/src/regexp/regexp-dotprinter.h
new file mode 100644
index 0000000000..d9c75fc1f2
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-dotprinter.h
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_DOTPRINTER_H_
+#define V8_REGEXP_REGEXP_DOTPRINTER_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class RegExpNode;
+
+class DotPrinter final : public AllStatic {
+ public:
+ static void DotPrint(const char* label, RegExpNode* node);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_DOTPRINTER_H_
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index 04bb63ee7a..881758861c 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -4,13 +4,14 @@
// A simple interpreter for the Irregexp byte code.
-#include "src/regexp/interpreter-irregexp.h"
+#include "src/regexp/regexp-interpreter.h"
#include "src/ast/ast.h"
+#include "src/base/small-vector.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/bytecodes-irregexp.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp.h"
#include "src/strings/unicode.h"
#include "src/utils/utils.h"
@@ -33,7 +34,6 @@ static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
offset_a, offset_b, length, unicode ? nullptr : isolate) == 1;
}
-
static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
int len, Vector<const uint8_t> subject,
bool unicode) {
@@ -55,28 +55,19 @@ static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
return true;
}
-
#ifdef DEBUG
-static void TraceInterpreter(const byte* code_base,
- const byte* pc,
- int stack_depth,
- int current_position,
- uint32_t current_char,
- int bytecode_length,
+static void TraceInterpreter(const byte* code_base, const byte* pc,
+ int stack_depth, int current_position,
+ uint32_t current_char, int bytecode_length,
const char* bytecode_name) {
if (FLAG_trace_regexp_bytecodes) {
bool printable = (current_char < 127 && current_char >= 32);
const char* format =
- printable ?
- "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = %s" :
- "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = %s";
- PrintF(format,
- pc - code_base,
- stack_depth,
- current_position,
- current_char,
- printable ? current_char : '.',
- bytecode_name);
+ printable
+ ? "pc = %02x, sp = %d, curpos = %d, curchar = %08x (%c), bc = %s"
+ : "pc = %02x, sp = %d, curpos = %d, curchar = %08x .%c., bc = %s";
+ PrintF(format, pc - code_base, stack_depth, current_position, current_char,
+ printable ? current_char : '.', bytecode_name);
for (int i = 0; i < bytecode_length; i++) {
printf(", %02x", pc[i]);
}
@@ -93,54 +84,57 @@ static void TraceInterpreter(const byte* code_base,
}
}
-
-#define BYTECODE(name) \
- case BC_##name: \
- TraceInterpreter(code_base, \
- pc, \
- static_cast<int>(backtrack_sp - backtrack_stack_base), \
- current, \
- current_char, \
- BC_##name##_LENGTH, \
- #name);
+#define BYTECODE(name) \
+ case BC_##name: \
+ TraceInterpreter(code_base, pc, backtrack_stack.sp(), current, \
+ current_char, BC_##name##_LENGTH, #name);
#else
-#define BYTECODE(name) \
- case BC_##name:
+#define BYTECODE(name) case BC_##name:
#endif
-
static int32_t Load32Aligned(const byte* pc) {
DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 3);
- return *reinterpret_cast<const int32_t *>(pc);
+ return *reinterpret_cast<const int32_t*>(pc);
}
-
static int32_t Load16Aligned(const byte* pc) {
DCHECK_EQ(0, reinterpret_cast<intptr_t>(pc) & 1);
- return *reinterpret_cast<const uint16_t *>(pc);
+ return *reinterpret_cast<const uint16_t*>(pc);
}
-
// A simple abstraction over the backtracking stack used by the interpreter.
-// This backtracking stack does not grow automatically, but it ensures that the
-// the memory held by the stack is released or remembered in a cache if the
-// matching terminates.
+//
+// Despite the name 'backtracking' stack, it's actually used as a generic stack
+// that stores both program counters (= offsets into the bytecode) and generic
+// integer values.
class BacktrackStack {
public:
- BacktrackStack() { data_ = NewArray<int>(kBacktrackStackSize); }
+ BacktrackStack() = default;
- ~BacktrackStack() {
- DeleteArray(data_);
+ void push(int v) { data_.emplace_back(v); }
+ int peek() const {
+ DCHECK(!data_.empty());
+ return data_.back();
+ }
+ int pop() {
+ int v = peek();
+ data_.pop_back();
+ return v;
}
- int* data() const { return data_; }
-
- int max_size() const { return kBacktrackStackSize; }
+ // The 'sp' is the index of the first empty element in the stack.
+ int sp() const { return static_cast<int>(data_.size()); }
+ void set_sp(int new_sp) {
+ DCHECK_LE(new_sp, sp());
+ data_.resize_no_init(new_sp);
+ }
private:
- static const int kBacktrackStackSize = 10000;
+ // Semi-arbitrary. Should be large enough for common cases to remain in the
+ // static stack-allocated backing store, but small enough not to waste space.
+ static constexpr int kStaticCapacity = 64;
- int* data_;
+ base::SmallVector<int, kStaticCapacity> data_;
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
@@ -163,28 +157,30 @@ IrregexpInterpreter::Result HandleInterrupts(Isolate* isolate,
StackLimitCheck check(isolate);
if (check.JsHasOverflowed()) {
- // A real stack overflow.
- return StackOverflow(isolate);
+ return StackOverflow(isolate); // A real stack overflow.
}
- const bool was_one_byte =
- String::IsOneByteRepresentationUnderneath(*subject_string);
+ // Handle interrupts if any exist.
+ if (check.InterruptRequested()) {
+ const bool was_one_byte =
+ String::IsOneByteRepresentationUnderneath(*subject_string);
- Object result;
- {
- AllowHeapAllocation yes_gc;
- result = isolate->stack_guard()->HandleInterrupts();
- }
+ Object result;
+ {
+ AllowHeapAllocation yes_gc;
+ result = isolate->stack_guard()->HandleInterrupts();
+ }
- if (result.IsException(isolate)) {
- return IrregexpInterpreter::EXCEPTION;
- }
+ if (result.IsException(isolate)) {
+ return IrregexpInterpreter::EXCEPTION;
+ }
- // If we changed between a LATIN1 and a UC16 string, we need to restart
- // regexp matching with the appropriate template instantiation of RawMatch.
- if (String::IsOneByteRepresentationUnderneath(*subject_string) !=
- was_one_byte) {
- return IrregexpInterpreter::RETRY;
+ // If we changed between a LATIN1 and a UC16 string, we need to restart
+ // regexp matching with the appropriate template instantiation of RawMatch.
+ if (String::IsOneByteRepresentationUnderneath(*subject_string) !=
+ was_one_byte) {
+ return IrregexpInterpreter::RETRY;
+ }
}
return IrregexpInterpreter::SUCCESS;
@@ -221,121 +217,108 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
const byte* pc = code_array->GetDataStartAddress();
const byte* code_base = pc;
- // BacktrackStack ensures that the memory allocated for the backtracking stack
- // is returned to the system or cached if there is no stack being cached at
- // the moment.
BacktrackStack backtrack_stack;
- int* backtrack_stack_base = backtrack_stack.data();
- int* backtrack_sp = backtrack_stack_base;
- int backtrack_stack_space = backtrack_stack.max_size();
+
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
PrintF("\n\nStart bytecode interpreter\n\n");
}
#endif
while (true) {
- int32_t insn = Load32Aligned(pc);
+ const int32_t insn = Load32Aligned(pc);
switch (insn & BYTECODE_MASK) {
- BYTECODE(BREAK)
- UNREACHABLE();
- BYTECODE(PUSH_CP)
- if (--backtrack_stack_space < 0) {
- return StackOverflow(isolate);
- }
- *backtrack_sp++ = current;
+ BYTECODE(BREAK) { UNREACHABLE(); }
+ BYTECODE(PUSH_CP) {
+ backtrack_stack.push(current);
pc += BC_PUSH_CP_LENGTH;
break;
- BYTECODE(PUSH_BT)
- if (--backtrack_stack_space < 0) {
- return StackOverflow(isolate);
- }
- *backtrack_sp++ = Load32Aligned(pc + 4);
+ }
+ BYTECODE(PUSH_BT) {
+ backtrack_stack.push(Load32Aligned(pc + 4));
pc += BC_PUSH_BT_LENGTH;
break;
- BYTECODE(PUSH_REGISTER)
- if (--backtrack_stack_space < 0) {
- return StackOverflow(isolate);
- }
- *backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
+ }
+ BYTECODE(PUSH_REGISTER) {
+ backtrack_stack.push(registers[insn >> BYTECODE_SHIFT]);
pc += BC_PUSH_REGISTER_LENGTH;
break;
- BYTECODE(SET_REGISTER)
+ }
+ BYTECODE(SET_REGISTER) {
registers[insn >> BYTECODE_SHIFT] = Load32Aligned(pc + 4);
pc += BC_SET_REGISTER_LENGTH;
break;
- BYTECODE(ADVANCE_REGISTER)
+ }
+ BYTECODE(ADVANCE_REGISTER) {
registers[insn >> BYTECODE_SHIFT] += Load32Aligned(pc + 4);
pc += BC_ADVANCE_REGISTER_LENGTH;
break;
- BYTECODE(SET_REGISTER_TO_CP)
+ }
+ BYTECODE(SET_REGISTER_TO_CP) {
registers[insn >> BYTECODE_SHIFT] = current + Load32Aligned(pc + 4);
pc += BC_SET_REGISTER_TO_CP_LENGTH;
break;
- BYTECODE(SET_CP_TO_REGISTER)
+ }
+ BYTECODE(SET_CP_TO_REGISTER) {
current = registers[insn >> BYTECODE_SHIFT];
pc += BC_SET_CP_TO_REGISTER_LENGTH;
break;
- BYTECODE(SET_REGISTER_TO_SP)
- registers[insn >> BYTECODE_SHIFT] =
- static_cast<int>(backtrack_sp - backtrack_stack_base);
+ }
+ BYTECODE(SET_REGISTER_TO_SP) {
+ registers[insn >> BYTECODE_SHIFT] = backtrack_stack.sp();
pc += BC_SET_REGISTER_TO_SP_LENGTH;
break;
- BYTECODE(SET_SP_TO_REGISTER)
- backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
- backtrack_stack_space = backtrack_stack.max_size() -
- static_cast<int>(backtrack_sp - backtrack_stack_base);
+ }
+ BYTECODE(SET_SP_TO_REGISTER) {
+ backtrack_stack.set_sp(registers[insn >> BYTECODE_SHIFT]);
pc += BC_SET_SP_TO_REGISTER_LENGTH;
break;
- BYTECODE(POP_CP)
- backtrack_stack_space++;
- --backtrack_sp;
- current = *backtrack_sp;
+ }
+ BYTECODE(POP_CP) {
+ current = backtrack_stack.pop();
pc += BC_POP_CP_LENGTH;
break;
- // clang-format off
+ }
BYTECODE(POP_BT) {
- IrregexpInterpreter::Result return_code = HandleInterrupts(
- isolate, subject_string);
+ IrregexpInterpreter::Result return_code =
+ HandleInterrupts(isolate, subject_string);
if (return_code != IrregexpInterpreter::SUCCESS) return return_code;
UpdateCodeAndSubjectReferences(isolate, code_array, subject_string,
- &code_base, &pc, &subject);
+ &code_base, &pc, &subject);
- backtrack_stack_space++;
- --backtrack_sp;
- pc = code_base + *backtrack_sp;
+ pc = code_base + backtrack_stack.pop();
break;
}
- BYTECODE(POP_REGISTER) // clang-format on
- backtrack_stack_space++;
- --backtrack_sp;
- registers[insn >> BYTECODE_SHIFT] = *backtrack_sp;
+ BYTECODE(POP_REGISTER) {
+ registers[insn >> BYTECODE_SHIFT] = backtrack_stack.pop();
pc += BC_POP_REGISTER_LENGTH;
break;
- BYTECODE(FAIL)
- return IrregexpInterpreter::FAILURE;
- BYTECODE(SUCCEED)
- return IrregexpInterpreter::SUCCESS;
- BYTECODE(ADVANCE_CP)
+ }
+ BYTECODE(FAIL) { return IrregexpInterpreter::FAILURE; }
+ BYTECODE(SUCCEED) { return IrregexpInterpreter::SUCCESS; }
+ BYTECODE(ADVANCE_CP) {
current += insn >> BYTECODE_SHIFT;
pc += BC_ADVANCE_CP_LENGTH;
break;
- BYTECODE(GOTO)
+ }
+ BYTECODE(GOTO) {
pc = code_base + Load32Aligned(pc + 4);
break;
- BYTECODE(ADVANCE_CP_AND_GOTO)
+ }
+ BYTECODE(ADVANCE_CP_AND_GOTO) {
current += insn >> BYTECODE_SHIFT;
pc = code_base + Load32Aligned(pc + 4);
break;
- BYTECODE(CHECK_GREEDY)
- if (current == backtrack_sp[-1]) {
- backtrack_sp--;
- backtrack_stack_space++;
+ }
+ BYTECODE(CHECK_GREEDY) {
+ if (current == backtrack_stack.peek()) {
+ backtrack_stack.pop();
pc = code_base + Load32Aligned(pc + 4);
} else {
pc += BC_CHECK_GREEDY_LENGTH;
}
break;
+ }
BYTECODE(LOAD_CURRENT_CHAR) {
int pos = current + (insn >> BYTECODE_SHIFT);
if (pos >= subject.length() || pos < 0) {
@@ -380,10 +363,8 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
Char next1 = subject[pos + 1];
Char next2 = subject[pos + 2];
Char next3 = subject[pos + 3];
- current_char = (subject[pos] |
- (next1 << 8) |
- (next2 << 16) |
- (next3 << 24));
+ current_char =
+ (subject[pos] | (next1 << 8) | (next2 << 16) | (next3 << 24));
pc += BC_LOAD_4_CURRENT_CHARS_LENGTH;
}
break;
@@ -394,10 +375,8 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
Char next1 = subject[pos + 1];
Char next2 = subject[pos + 2];
Char next3 = subject[pos + 3];
- current_char = (subject[pos] |
- (next1 << 8) |
- (next2 << 16) |
- (next3 << 24));
+ current_char =
+ (subject[pos] | (next1 << 8) | (next2 << 16) | (next3 << 24));
pc += BC_LOAD_4_CURRENT_CHARS_UNCHECKED_LENGTH;
break;
}
@@ -533,28 +512,31 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
}
break;
}
- BYTECODE(CHECK_REGISTER_LT)
+ BYTECODE(CHECK_REGISTER_LT) {
if (registers[insn >> BYTECODE_SHIFT] < Load32Aligned(pc + 4)) {
pc = code_base + Load32Aligned(pc + 8);
} else {
pc += BC_CHECK_REGISTER_LT_LENGTH;
}
break;
- BYTECODE(CHECK_REGISTER_GE)
+ }
+ BYTECODE(CHECK_REGISTER_GE) {
if (registers[insn >> BYTECODE_SHIFT] >= Load32Aligned(pc + 4)) {
pc = code_base + Load32Aligned(pc + 8);
} else {
pc += BC_CHECK_REGISTER_GE_LENGTH;
}
break;
- BYTECODE(CHECK_REGISTER_EQ_POS)
+ }
+ BYTECODE(CHECK_REGISTER_EQ_POS) {
if (registers[insn >> BYTECODE_SHIFT] == current) {
pc = code_base + Load32Aligned(pc + 4);
} else {
pc += BC_CHECK_REGISTER_EQ_POS_LENGTH;
}
break;
- BYTECODE(CHECK_NOT_REGS_EQUAL)
+ }
+ BYTECODE(CHECK_NOT_REGS_EQUAL) {
if (registers[insn >> BYTECODE_SHIFT] ==
registers[Load32Aligned(pc + 4)]) {
pc += BC_CHECK_NOT_REGS_EQUAL_LENGTH;
@@ -562,6 +544,7 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
pc = code_base + Load32Aligned(pc + 8);
}
break;
+ }
BYTECODE(CHECK_NOT_BACK_REF) {
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
@@ -628,20 +611,22 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
pc += BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD_LENGTH;
break;
}
- BYTECODE(CHECK_AT_START)
+ BYTECODE(CHECK_AT_START) {
if (current == 0) {
pc = code_base + Load32Aligned(pc + 4);
} else {
pc += BC_CHECK_AT_START_LENGTH;
}
break;
- BYTECODE(CHECK_NOT_AT_START)
+ }
+ BYTECODE(CHECK_NOT_AT_START) {
if (current + (insn >> BYTECODE_SHIFT) == 0) {
pc += BC_CHECK_NOT_AT_START_LENGTH;
} else {
pc = code_base + Load32Aligned(pc + 4);
}
break;
+ }
BYTECODE(SET_CURRENT_POSITION_FROM_END) {
int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
if (subject.length() - current > by) {
@@ -658,6 +643,8 @@ IrregexpInterpreter::Result RawMatch(Isolate* isolate,
}
}
+#undef BYTECODE
+
} // namespace
// static
diff --git a/deps/v8/src/regexp/interpreter-irregexp.h b/deps/v8/src/regexp/regexp-interpreter.h
index a57d40854e..ad27dcd296 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -4,20 +4,22 @@
// A simple interpreter for the Irregexp byte code.
-#ifndef V8_REGEXP_INTERPRETER_IRREGEXP_H_
-#define V8_REGEXP_INTERPRETER_IRREGEXP_H_
+#ifndef V8_REGEXP_REGEXP_INTERPRETER_H_
+#define V8_REGEXP_REGEXP_INTERPRETER_H_
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
class V8_EXPORT_PRIVATE IrregexpInterpreter {
public:
- enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
- STATIC_ASSERT(EXCEPTION == static_cast<int>(RegExpImpl::RE_EXCEPTION));
- STATIC_ASSERT(FAILURE == static_cast<int>(RegExpImpl::RE_FAILURE));
- STATIC_ASSERT(SUCCESS == static_cast<int>(RegExpImpl::RE_SUCCESS));
+ enum Result {
+ FAILURE = RegExp::kInternalRegExpFailure,
+ SUCCESS = RegExp::kInternalRegExpSuccess,
+ EXCEPTION = RegExp::kInternalRegExpException,
+ RETRY = RegExp::kInternalRegExpRetry,
+ };
// The caller is responsible for initializing registers before each call.
static Result Match(Isolate* isolate, Handle<ByteArray> code_array,
@@ -28,4 +30,4 @@ class V8_EXPORT_PRIVATE IrregexpInterpreter {
} // namespace internal
} // namespace v8
-#endif // V8_REGEXP_INTERPRETER_IRREGEXP_H_
+#endif // V8_REGEXP_REGEXP_INTERPRETER_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
new file mode 100644
index 0000000000..2dc6739e42
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_ARCH_H_
+#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_ARCH_H_
+
+#include "src/regexp/regexp-macro-assembler.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/regexp/x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/regexp/arm/regexp-macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/regexp/mips/regexp-macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/regexp/s390/regexp-macro-assembler-s390.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_ARCH_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index cfe827ef4e..68fa16db61 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -133,7 +133,8 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
Isolate* isolate, int start_index, bool is_direct_call,
Address* return_address, Code re_code, Address* subject,
const byte** input_start, const byte** input_end) {
- AllowHeapAllocation allow_allocation;
+ DisallowHeapAllocation no_gc;
+
DCHECK(re_code.raw_instruction_start() <= *return_address);
DCHECK(*return_address <= re_code.raw_instruction_end());
int return_value = 0;
@@ -154,15 +155,15 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
// forcing the call through the runtime system.
return_value = js_has_overflowed ? EXCEPTION : RETRY;
} else if (js_has_overflowed) {
+ AllowHeapAllocation yes_gc;
isolate->StackOverflow();
return_value = EXCEPTION;
- } else {
+ } else if (check.InterruptRequested()) {
+ AllowHeapAllocation yes_gc;
Object result = isolate->stack_guard()->HandleInterrupts();
if (result.IsException(isolate)) return_value = EXCEPTION;
}
- DisallowHeapAllocation no_gc;
-
if (*code_handle != re_code) { // Return address no longer valid
intptr_t delta = code_handle->address() - re_code.address();
// Overwrite the return address on the stack.
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 8626d1a19e..b55ac13590 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -7,6 +7,7 @@
#include "src/codegen/label.h"
#include "src/regexp/regexp-ast.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
@@ -206,7 +207,12 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
// FAILURE: Matching failed.
// SUCCESS: Matching succeeded, and the output array has been filled with
// capture positions.
- enum Result { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };
+ enum Result {
+ FAILURE = RegExp::kInternalRegExpFailure,
+ SUCCESS = RegExp::kInternalRegExpSuccess,
+ EXCEPTION = RegExp::kInternalRegExpException,
+ RETRY = RegExp::kInternalRegExpRetry,
+ };
NativeRegExpMacroAssembler(Isolate* isolate, Zone* zone);
~NativeRegExpMacroAssembler() override;
diff --git a/deps/v8/src/regexp/regexp-nodes.h b/deps/v8/src/regexp/regexp-nodes.h
new file mode 100644
index 0000000000..4c13b74926
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-nodes.h
@@ -0,0 +1,675 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_NODES_H_
+#define V8_REGEXP_REGEXP_NODES_H_
+
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class AlternativeGenerationList;
+class BoyerMooreLookahead;
+class GreedyLoopState;
+class Label;
+class NodeVisitor;
+class QuickCheckDetails;
+class RegExpCompiler;
+class Trace;
+struct PreloadState;
+
+#define FOR_EACH_NODE_TYPE(VISIT) \
+ VISIT(End) \
+ VISIT(Action) \
+ VISIT(Choice) \
+ VISIT(BackReference) \
+ VISIT(Assertion) \
+ VISIT(Text)
+
+struct NodeInfo final {
+ NodeInfo()
+ : being_analyzed(false),
+ been_analyzed(false),
+ follows_word_interest(false),
+ follows_newline_interest(false),
+ follows_start_interest(false),
+ at_end(false),
+ visited(false),
+ replacement_calculated(false) {}
+
+ // Returns true if the interests and assumptions of this node
+ // matches the given one.
+ bool Matches(NodeInfo* that) {
+ return (at_end == that->at_end) &&
+ (follows_word_interest == that->follows_word_interest) &&
+ (follows_newline_interest == that->follows_newline_interest) &&
+ (follows_start_interest == that->follows_start_interest);
+ }
+
+ // Updates the interests of this node given the interests of the
+ // node preceding it.
+ void AddFromPreceding(NodeInfo* that) {
+ at_end |= that->at_end;
+ follows_word_interest |= that->follows_word_interest;
+ follows_newline_interest |= that->follows_newline_interest;
+ follows_start_interest |= that->follows_start_interest;
+ }
+
+ bool HasLookbehind() {
+ return follows_word_interest || follows_newline_interest ||
+ follows_start_interest;
+ }
+
+ // Sets the interests of this node to include the interests of the
+ // following node.
+ void AddFromFollowing(NodeInfo* that) {
+ follows_word_interest |= that->follows_word_interest;
+ follows_newline_interest |= that->follows_newline_interest;
+ follows_start_interest |= that->follows_start_interest;
+ }
+
+ void ResetCompilationState() {
+ being_analyzed = false;
+ been_analyzed = false;
+ }
+
+ bool being_analyzed : 1;
+ bool been_analyzed : 1;
+
+ // These bits are set of this node has to know what the preceding
+ // character was.
+ bool follows_word_interest : 1;
+ bool follows_newline_interest : 1;
+ bool follows_start_interest : 1;
+
+ bool at_end : 1;
+ bool visited : 1;
+ bool replacement_calculated : 1;
+};
+
+class RegExpNode : public ZoneObject {
+ public:
+ explicit RegExpNode(Zone* zone)
+ : replacement_(nullptr),
+ on_work_list_(false),
+ trace_count_(0),
+ zone_(zone) {
+ bm_info_[0] = bm_info_[1] = nullptr;
+ }
+ virtual ~RegExpNode();
+ virtual void Accept(NodeVisitor* visitor) = 0;
+ // Generates a goto to this node or actually generates the code at this point.
+ virtual void Emit(RegExpCompiler* compiler, Trace* trace) = 0;
+ // How many characters must this node consume at a minimum in order to
+ // succeed. If we have found at least 'still_to_find' characters that
+ // must be consumed there is no need to ask any following nodes whether
+ // they are sure to eat any more characters. The not_at_start argument is
+ // used to indicate that we know we are not at the start of the input. In
+ // this case anchored branches will always fail and can be ignored when
+ // determining how many characters are consumed on success.
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0;
+ // Emits some quick code that checks whether the preloaded characters match.
+ // Falls through on certain failure, jumps to the label on possible success.
+ // If the node cannot make a quick check it does nothing and returns false.
+ bool EmitQuickCheck(RegExpCompiler* compiler, Trace* bounds_check_trace,
+ Trace* trace, bool preload_has_checked_bounds,
+ Label* on_possible_success,
+ QuickCheckDetails* details_return,
+ bool fall_through_on_failure);
+ // For a given number of characters this returns a mask and a value. The
+ // next n characters are anded with the mask and compared with the value.
+ // A comparison failure indicates the node cannot match the next n characters.
+ // A comparison success indicates the node may match.
+ virtual void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler,
+ int characters_filled_in,
+ bool not_at_start) = 0;
+ static const int kNodeIsTooComplexForGreedyLoops = kMinInt;
+ virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
+ // Only returns the successor for a text node of length 1 that matches any
+ // character and that has no guards on it.
+ virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler) {
+ return nullptr;
+ }
+
+ // Collects information on the possible code units (mod 128) that can match if
+ // we look forward. This is used for a Boyer-Moore-like string searching
+ // implementation. TODO(erikcorry): This should share more code with
+ // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
+ // the number of nodes we are willing to look at in order to create this data.
+ static const int kRecursionBudget = 200;
+ bool KeepRecursing(RegExpCompiler* compiler);
+ virtual void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) {
+ UNREACHABLE();
+ }
+
+ // If we know that the input is one-byte then there are some nodes that can
+ // never match. This method returns a node that can be substituted for
+ // itself, or nullptr if the node can never match.
+ virtual RegExpNode* FilterOneByte(int depth) { return this; }
+ // Helper for FilterOneByte.
+ RegExpNode* replacement() {
+ DCHECK(info()->replacement_calculated);
+ return replacement_;
+ }
+ RegExpNode* set_replacement(RegExpNode* replacement) {
+ info()->replacement_calculated = true;
+ replacement_ = replacement;
+ return replacement; // For convenience.
+ }
+
+ // We want to avoid recalculating the lookahead info, so we store it on the
+ // node. Only info that is for this node is stored. We can tell that the
+ // info is for this node when offset == 0, so the information is calculated
+ // relative to this node.
+ void SaveBMInfo(BoyerMooreLookahead* bm, bool not_at_start, int offset) {
+ if (offset == 0) set_bm_info(not_at_start, bm);
+ }
+
+ Label* label() { return &label_; }
+ // If non-generic code is generated for a node (i.e. the node is not at the
+ // start of the trace) then it cannot be reused. This variable sets a limit
+ // on how often we allow that to happen before we insist on starting a new
+ // trace and generating generic code for a node that can be reused by flushing
+ // the deferred actions in the current trace and generating a goto.
+ static const int kMaxCopiesCodeGenerated = 10;
+
+ bool on_work_list() { return on_work_list_; }
+ void set_on_work_list(bool value) { on_work_list_ = value; }
+
+ NodeInfo* info() { return &info_; }
+
+ BoyerMooreLookahead* bm_info(bool not_at_start) {
+ return bm_info_[not_at_start ? 1 : 0];
+ }
+
+ Zone* zone() const { return zone_; }
+
+ protected:
+ enum LimitResult { DONE, CONTINUE };
+ RegExpNode* replacement_;
+
+ LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
+
+ void set_bm_info(bool not_at_start, BoyerMooreLookahead* bm) {
+ bm_info_[not_at_start ? 1 : 0] = bm;
+ }
+
+ private:
+ static const int kFirstCharBudget = 10;
+ Label label_;
+ bool on_work_list_;
+ NodeInfo info_;
+ // This variable keeps track of how many times code has been generated for
+ // this node (in different traces). We don't keep track of where the
+ // generated code is located unless the code is generated at the start of
+ // a trace, in which case it is generic and can be reused by flushing the
+ // deferred operations in the current trace and generating a goto.
+ int trace_count_;
+ BoyerMooreLookahead* bm_info_[2];
+
+ Zone* zone_;
+};
+
+class SeqRegExpNode : public RegExpNode {
+ public:
+ explicit SeqRegExpNode(RegExpNode* on_success)
+ : RegExpNode(on_success->zone()), on_success_(on_success) {}
+ RegExpNode* on_success() { return on_success_; }
+ void set_on_success(RegExpNode* node) { on_success_ = node; }
+ RegExpNode* FilterOneByte(int depth) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
+ on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
+ if (offset == 0) set_bm_info(not_at_start, bm);
+ }
+
+ protected:
+ RegExpNode* FilterSuccessor(int depth);
+
+ private:
+ RegExpNode* on_success_;
+};
+
+class ActionNode : public SeqRegExpNode {
+ public:
+ enum ActionType {
+ SET_REGISTER,
+ INCREMENT_REGISTER,
+ STORE_POSITION,
+ BEGIN_SUBMATCH,
+ POSITIVE_SUBMATCH_SUCCESS,
+ EMPTY_MATCH_CHECK,
+ CLEAR_CAPTURES
+ };
+ static ActionNode* SetRegister(int reg, int val, RegExpNode* on_success);
+ static ActionNode* IncrementRegister(int reg, RegExpNode* on_success);
+ static ActionNode* StorePosition(int reg, bool is_capture,
+ RegExpNode* on_success);
+ static ActionNode* ClearCaptures(Interval range, RegExpNode* on_success);
+ static ActionNode* BeginSubmatch(int stack_pointer_reg, int position_reg,
+ RegExpNode* on_success);
+ static ActionNode* PositiveSubmatchSuccess(int stack_pointer_reg,
+ int restore_reg,
+ int clear_capture_count,
+ int clear_capture_from,
+ RegExpNode* on_success);
+ static ActionNode* EmptyMatchCheck(int start_register,
+ int repetition_register,
+ int repetition_limit,
+ RegExpNode* on_success);
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int filled_in,
+ bool not_at_start) override {
+ return on_success()->GetQuickCheckDetails(details, compiler, filled_in,
+ not_at_start);
+ }
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+ ActionType action_type() { return action_type_; }
+ // TODO(erikcorry): We should allow some action nodes in greedy loops.
+ int GreedyLoopTextLength() override {
+ return kNodeIsTooComplexForGreedyLoops;
+ }
+
+ private:
+ union {
+ struct {
+ int reg;
+ int value;
+ } u_store_register;
+ struct {
+ int reg;
+ } u_increment_register;
+ struct {
+ int reg;
+ bool is_capture;
+ } u_position_register;
+ struct {
+ int stack_pointer_register;
+ int current_position_register;
+ int clear_register_count;
+ int clear_register_from;
+ } u_submatch;
+ struct {
+ int start_register;
+ int repetition_register;
+ int repetition_limit;
+ } u_empty_match_check;
+ struct {
+ int range_from;
+ int range_to;
+ } u_clear_captures;
+ } data_;
+ ActionNode(ActionType action_type, RegExpNode* on_success)
+ : SeqRegExpNode(on_success), action_type_(action_type) {}
+ ActionType action_type_;
+ friend class DotPrinterImpl;
+};
+
+class TextNode : public SeqRegExpNode {
+ public:
+ TextNode(ZoneList<TextElement>* elms, bool read_backward,
+ RegExpNode* on_success)
+ : SeqRegExpNode(on_success), elms_(elms), read_backward_(read_backward) {}
+ TextNode(RegExpCharacterClass* that, bool read_backward,
+ RegExpNode* on_success)
+ : SeqRegExpNode(on_success),
+ elms_(new (zone()) ZoneList<TextElement>(1, zone())),
+ read_backward_(read_backward) {
+ elms_->Add(TextElement::CharClass(that), zone());
+ }
+ // Create TextNode for a single character class for the given ranges.
+ static TextNode* CreateForCharacterRanges(Zone* zone,
+ ZoneList<CharacterRange>* ranges,
+ bool read_backward,
+ RegExpNode* on_success,
+ JSRegExp::Flags flags);
+ // Create TextNode for a surrogate pair with a range given for the
+ // lead and the trail surrogate each.
+ static TextNode* CreateForSurrogatePair(Zone* zone, CharacterRange lead,
+ CharacterRange trail,
+ bool read_backward,
+ RegExpNode* on_success,
+ JSRegExp::Flags flags);
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ ZoneList<TextElement>* elements() { return elms_; }
+ bool read_backward() { return read_backward_; }
+ void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
+ int GreedyLoopTextLength() override;
+ RegExpNode* GetSuccessorOfOmnivorousTextNode(
+ RegExpCompiler* compiler) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+ void CalculateOffsets();
+ RegExpNode* FilterOneByte(int depth) override;
+
+ private:
+ enum TextEmitPassType {
+ NON_LATIN1_MATCH, // Check for characters that can't match.
+ SIMPLE_CHARACTER_MATCH, // Case-dependent single character check.
+ NON_LETTER_CHARACTER_MATCH, // Check characters that have no case equivs.
+ CASE_CHARACTER_MATCH, // Case-independent single character check.
+ CHARACTER_CLASS_MATCH // Character class.
+ };
+ static bool SkipPass(TextEmitPassType pass, bool ignore_case);
+ static const int kFirstRealPass = SIMPLE_CHARACTER_MATCH;
+ static const int kLastPass = CHARACTER_CLASS_MATCH;
+ void TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
+ bool preloaded, Trace* trace, bool first_element_checked,
+ int* checked_up_to);
+ int Length();
+ ZoneList<TextElement>* elms_;
+ bool read_backward_;
+};
+
+class AssertionNode : public SeqRegExpNode {
+ public:
+ enum AssertionType {
+ AT_END,
+ AT_START,
+ AT_BOUNDARY,
+ AT_NON_BOUNDARY,
+ AFTER_NEWLINE
+ };
+ static AssertionNode* AtEnd(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AT_END, on_success);
+ }
+ static AssertionNode* AtStart(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AT_START, on_success);
+ }
+ static AssertionNode* AtBoundary(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AT_BOUNDARY, on_success);
+ }
+ static AssertionNode* AtNonBoundary(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AT_NON_BOUNDARY, on_success);
+ }
+ static AssertionNode* AfterNewline(RegExpNode* on_success) {
+ return new (on_success->zone()) AssertionNode(AFTER_NEWLINE, on_success);
+ }
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+ AssertionType assertion_type() { return assertion_type_; }
+
+ private:
+ void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
+ enum IfPrevious { kIsNonWord, kIsWord };
+ void BacktrackIfPrevious(RegExpCompiler* compiler, Trace* trace,
+ IfPrevious backtrack_if_previous);
+ AssertionNode(AssertionType t, RegExpNode* on_success)
+ : SeqRegExpNode(on_success), assertion_type_(t) {}
+ AssertionType assertion_type_;
+};
+
+class BackReferenceNode : public SeqRegExpNode {
+ public:
+ BackReferenceNode(int start_reg, int end_reg, JSRegExp::Flags flags,
+ bool read_backward, RegExpNode* on_success)
+ : SeqRegExpNode(on_success),
+ start_reg_(start_reg),
+ end_reg_(end_reg),
+ flags_(flags),
+ read_backward_(read_backward) {}
+ void Accept(NodeVisitor* visitor) override;
+ int start_register() { return start_reg_; }
+ int end_register() { return end_reg_; }
+ bool read_backward() { return read_backward_; }
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int recursion_depth,
+ bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override {
+ return;
+ }
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+
+ private:
+ int start_reg_;
+ int end_reg_;
+ JSRegExp::Flags flags_;
+ bool read_backward_;
+};
+
+class EndNode : public RegExpNode {
+ public:
+ enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
+ EndNode(Action action, Zone* zone) : RegExpNode(zone), action_(action) {}
+ void Accept(NodeVisitor* visitor) override;
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int recursion_depth,
+ bool not_at_start) override {
+ return 0;
+ }
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override {
+ // Returning 0 from EatsAtLeast should ensure we never get here.
+ UNREACHABLE();
+ }
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
+ // Returning 0 from EatsAtLeast should ensure we never get here.
+ UNREACHABLE();
+ }
+
+ private:
+ Action action_;
+};
+
+class NegativeSubmatchSuccess : public EndNode {
+ public:
+ NegativeSubmatchSuccess(int stack_pointer_reg, int position_reg,
+ int clear_capture_count, int clear_capture_start,
+ Zone* zone)
+ : EndNode(NEGATIVE_SUBMATCH_SUCCESS, zone),
+ stack_pointer_register_(stack_pointer_reg),
+ current_position_register_(position_reg),
+ clear_capture_count_(clear_capture_count),
+ clear_capture_start_(clear_capture_start) {}
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+
+ private:
+ int stack_pointer_register_;
+ int current_position_register_;
+ int clear_capture_count_;
+ int clear_capture_start_;
+};
+
+class Guard : public ZoneObject {
+ public:
+ enum Relation { LT, GEQ };
+ Guard(int reg, Relation op, int value) : reg_(reg), op_(op), value_(value) {}
+ int reg() { return reg_; }
+ Relation op() { return op_; }
+ int value() { return value_; }
+
+ private:
+ int reg_;
+ Relation op_;
+ int value_;
+};
+
+class GuardedAlternative {
+ public:
+ explicit GuardedAlternative(RegExpNode* node)
+ : node_(node), guards_(nullptr) {}
+ void AddGuard(Guard* guard, Zone* zone);
+ RegExpNode* node() { return node_; }
+ void set_node(RegExpNode* node) { node_ = node; }
+ ZoneList<Guard*>* guards() { return guards_; }
+
+ private:
+ RegExpNode* node_;
+ ZoneList<Guard*>* guards_;
+};
+
+class AlternativeGeneration;
+
+class ChoiceNode : public RegExpNode {
+ public:
+ explicit ChoiceNode(int expected_size, Zone* zone)
+ : RegExpNode(zone),
+ alternatives_(new (zone)
+ ZoneList<GuardedAlternative>(expected_size, zone)),
+ not_at_start_(false),
+ being_calculated_(false) {}
+ void Accept(NodeVisitor* visitor) override;
+ void AddAlternative(GuardedAlternative node) {
+ alternatives()->Add(node, zone());
+ }
+ ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ int EatsAtLeastHelper(int still_to_find, int budget,
+ RegExpNode* ignore_this_node, bool not_at_start);
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+
+ bool being_calculated() { return being_calculated_; }
+ bool not_at_start() { return not_at_start_; }
+ void set_not_at_start() { not_at_start_ = true; }
+ void set_being_calculated(bool b) { being_calculated_ = b; }
+ virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
+ return true;
+ }
+ RegExpNode* FilterOneByte(int depth) override;
+ virtual bool read_backward() { return false; }
+
+ protected:
+ int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
+ ZoneList<GuardedAlternative>* alternatives_;
+
+ private:
+ friend class Analysis;
+
+ void GenerateGuard(RegExpMacroAssembler* macro_assembler, Guard* guard,
+ Trace* trace);
+ int CalculatePreloadCharacters(RegExpCompiler* compiler, int eats_at_least);
+ void EmitOutOfLineContinuation(RegExpCompiler* compiler, Trace* trace,
+ GuardedAlternative alternative,
+ AlternativeGeneration* alt_gen,
+ int preload_characters,
+ bool next_expects_preload);
+ void SetUpPreLoad(RegExpCompiler* compiler, Trace* current_trace,
+ PreloadState* preloads);
+ void AssertGuardsMentionRegisters(Trace* trace);
+ int EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler, Trace* trace);
+ Trace* EmitGreedyLoop(RegExpCompiler* compiler, Trace* trace,
+ AlternativeGenerationList* alt_gens,
+ PreloadState* preloads,
+ GreedyLoopState* greedy_loop_state, int text_length);
+ void EmitChoices(RegExpCompiler* compiler,
+ AlternativeGenerationList* alt_gens, int first_choice,
+ Trace* trace, PreloadState* preloads);
+
+ // If true, this node is never checked at the start of the input.
+ // Allows a new trace to start with at_start() set to false.
+ bool not_at_start_;
+ bool being_calculated_;
+};
+
+class NegativeLookaroundChoiceNode : public ChoiceNode {
+ public:
+ explicit NegativeLookaroundChoiceNode(GuardedAlternative this_must_fail,
+ GuardedAlternative then_do_this,
+ Zone* zone)
+ : ChoiceNode(2, zone) {
+ AddAlternative(this_must_fail);
+ AddAlternative(then_do_this);
+ }
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override {
+ alternatives_->at(1).node()->FillInBMInfo(isolate, offset, budget - 1, bm,
+ not_at_start);
+ if (offset == 0) set_bm_info(not_at_start, bm);
+ }
+ // For a negative lookahead we don't emit the quick check for the
+ // alternative that is expected to fail. This is because quick check code
+ // starts by loading enough characters for the alternative that takes fewest
+ // characters, but on a negative lookahead the negative branch did not take
+ // part in that calculation (EatsAtLeast) so the assumptions don't hold.
+ bool try_to_emit_quick_check_for_alternative(bool is_first) override {
+ return !is_first;
+ }
+ RegExpNode* FilterOneByte(int depth) override;
+};
+
+class LoopChoiceNode : public ChoiceNode {
+ public:
+ LoopChoiceNode(bool body_can_be_zero_length, bool read_backward, Zone* zone)
+ : ChoiceNode(2, zone),
+ loop_node_(nullptr),
+ continue_node_(nullptr),
+ body_can_be_zero_length_(body_can_be_zero_length),
+ read_backward_(read_backward) {}
+ void AddLoopAlternative(GuardedAlternative alt);
+ void AddContinueAlternative(GuardedAlternative alt);
+ void Emit(RegExpCompiler* compiler, Trace* trace) override;
+ int EatsAtLeast(int still_to_find, int budget, bool not_at_start) override;
+ void GetQuickCheckDetails(QuickCheckDetails* details,
+ RegExpCompiler* compiler, int characters_filled_in,
+ bool not_at_start) override;
+ void FillInBMInfo(Isolate* isolate, int offset, int budget,
+ BoyerMooreLookahead* bm, bool not_at_start) override;
+ RegExpNode* loop_node() { return loop_node_; }
+ RegExpNode* continue_node() { return continue_node_; }
+ bool body_can_be_zero_length() { return body_can_be_zero_length_; }
+ bool read_backward() override { return read_backward_; }
+ void Accept(NodeVisitor* visitor) override;
+ RegExpNode* FilterOneByte(int depth) override;
+
+ private:
+ // AddAlternative is made private for loop nodes because alternatives
+ // should not be added freely, we need to keep track of which node
+ // goes back to the node itself.
+ void AddAlternative(GuardedAlternative node) {
+ ChoiceNode::AddAlternative(node);
+ }
+
+ RegExpNode* loop_node_;
+ RegExpNode* continue_node_;
+ bool body_can_be_zero_length_;
+ bool read_backward_;
+};
+
+class NodeVisitor {
+ public:
+ virtual ~NodeVisitor() = default;
+#define DECLARE_VISIT(Type) virtual void Visit##Type(Type##Node* that) = 0;
+ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
+#undef DECLARE_VISIT
+ virtual void VisitLoopChoice(LoopChoiceNode* that) { VisitChoice(that); }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_NODES_H_
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 7cae456f56..3647680969 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -9,8 +9,9 @@
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/jsregexp.h"
#include "src/regexp/property-sequences.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp.h"
#include "src/strings/char-predicates-inl.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
@@ -879,24 +880,25 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name,
DCHECK(0 < index && index <= captures_started_);
DCHECK_NOT_NULL(name);
+ RegExpCapture* capture = GetCapture(index);
+ DCHECK_NULL(capture->name());
+
+ capture->set_name(name);
+
if (named_captures_ == nullptr) {
- named_captures_ = new (zone()) ZoneList<RegExpCapture*>(1, zone());
+ named_captures_ = new (zone_->New(sizeof(*named_captures_)))
+ ZoneSet<RegExpCapture*, RegExpCaptureNameLess>(zone());
} else {
// Check for duplicates and bail if we find any.
- // TODO(jgruber): O(n^2).
- for (const auto& named_capture : *named_captures_) {
- if (*named_capture->name() == *name) {
- ReportError(CStrVector("Duplicate capture group name"));
- return false;
- }
+
+ const auto& named_capture_it = named_captures_->find(capture);
+ if (named_capture_it != named_captures_->end()) {
+ ReportError(CStrVector("Duplicate capture group name"));
+ return false;
}
}
- RegExpCapture* capture = GetCapture(index);
- DCHECK_NULL(capture->name());
-
- capture->set_name(name);
- named_captures_->Add(capture, zone());
+ named_captures_->emplace(capture);
return true;
}
@@ -943,20 +945,22 @@ void RegExpParser::PatchNamedBackReferences() {
}
// Look up and patch the actual capture for each named back reference.
- // TODO(jgruber): O(n^2), optimize if necessary.
for (int i = 0; i < named_back_references_->length(); i++) {
RegExpBackReference* ref = named_back_references_->at(i);
- int index = -1;
- for (const auto& capture : *named_captures_) {
- if (*capture->name() == *ref->name()) {
- index = capture->index();
- break;
- }
- }
+ // Capture used to search the named_captures_ by name, index of the
+ // capture is never used.
+ static const int kInvalidIndex = 0;
+ RegExpCapture* search_capture = new (zone()) RegExpCapture(kInvalidIndex);
+ DCHECK_NULL(search_capture->name());
+ search_capture->set_name(ref->name());
- if (index == -1) {
+ int index = -1;
+ const auto& capture_it = named_captures_->find(search_capture);
+ if (capture_it != named_captures_->end()) {
+ index = (*capture_it)->index();
+ } else {
ReportError(CStrVector("Invalid named capture referenced"));
return;
}
@@ -981,16 +985,17 @@ RegExpCapture* RegExpParser::GetCapture(int index) {
}
Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
- if (named_captures_ == nullptr || named_captures_->is_empty())
+ if (named_captures_ == nullptr || named_captures_->empty()) {
return Handle<FixedArray>();
+ }
Factory* factory = isolate()->factory();
- int len = named_captures_->length() * 2;
+ int len = static_cast<int>(named_captures_->size()) * 2;
Handle<FixedArray> array = factory->NewFixedArray(len);
- for (int i = 0; i < named_captures_->length(); i++) {
- RegExpCapture* capture = named_captures_->at(i);
+ int i = 0;
+ for (const auto& capture : *named_captures_) {
Vector<const uc16> capture_name(capture->name()->data(),
capture->name()->size());
// CSA code in ConstructNewResultFromMatchInfo requires these strings to be
@@ -998,7 +1003,10 @@ Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
Handle<String> name = factory->InternalizeString(capture_name);
array->set(i * 2, *name);
array->set(i * 2 + 1, Smi::FromInt(capture->index()));
+
+ i++;
}
+ DCHECK_EQ(i * 2, len);
return array;
}
@@ -1963,12 +1971,6 @@ void RegExpBuilder::AddTerm(RegExpTree* term) {
void RegExpBuilder::AddAssertion(RegExpTree* assert) {
FlushText();
- if (terms_.length() > 0 && terms_.last()->IsAssertion()) {
- // Omit repeated assertions of the same type.
- RegExpAssertion* last = terms_.last()->AsAssertion();
- RegExpAssertion* next = assert->AsAssertion();
- if (last->assertion_type() == next->assertion_type()) return;
- }
terms_.Add(assert, zone());
LAST(ADD_ASSERT);
}
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index bf9e62ed71..36cec7e984 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -326,11 +326,19 @@ class V8_EXPORT_PRIVATE RegExpParser {
FlatStringReader* in() { return in_; }
void ScanForCaptures();
+ struct RegExpCaptureNameLess {
+ bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
+ DCHECK_NOT_NULL(lhs);
+ DCHECK_NOT_NULL(rhs);
+ return *lhs->name() < *rhs->name();
+ }
+ };
+
Isolate* isolate_;
Zone* zone_;
Handle<String>* error_;
ZoneList<RegExpCapture*>* captures_;
- ZoneList<RegExpCapture*>* named_captures_;
+ ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
ZoneList<RegExpBackReference*>* named_back_references_;
FlatStringReader* in_;
uc32 current_;
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 49f9d4476b..ad50270fdc 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -8,7 +8,7 @@
#include "src/heap/factory.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
@@ -179,7 +179,9 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
return false;
}
- if (!isolate->IsRegExpSpeciesLookupChainIntact()) return false;
+ if (!isolate->IsRegExpSpeciesLookupChainIntact(isolate->native_context())) {
+ return false;
+ }
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
new file mode 100644
index 0000000000..15b0321c46
--- /dev/null
+++ b/deps/v8/src/regexp/regexp.cc
@@ -0,0 +1,1018 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp.h"
+
+#include "src/codegen/compilation-cache.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/js-regexp-inl.h"
+#include "src/regexp/regexp-bytecode-generator.h"
+#include "src/regexp/regexp-compiler.h"
+#include "src/regexp/regexp-dotprinter.h"
+#include "src/regexp/regexp-interpreter.h"
+#include "src/regexp/regexp-macro-assembler-arch.h"
+#include "src/regexp/regexp-parser.h"
+#include "src/strings/string-search.h"
+
+namespace v8 {
+namespace internal {
+
+using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+
+class RegExpImpl final : public AllStatic {
+ public:
+ // Returns a string representation of a regular expression.
+ // Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
+ // This function calls the garbage collector if necessary.
+ static Handle<String> ToString(Handle<Object> value);
+
+ // Prepares a JSRegExp object with Irregexp-specific data.
+ static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
+ int capture_register_count);
+
+ static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> match_pattern);
+
+ static int AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
+ int output_size);
+
+ static Handle<Object> AtomExec(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ Handle<RegExpMatchInfo> last_match_info);
+
+ // Execute a regular expression on the subject, starting from index.
+ // If matching succeeds, return the number of matches. This can be larger
+ // than one in the case of global regular expressions.
+ // The captures and subcaptures are stored into the registers vector.
+ // If matching fails, returns RE_FAILURE.
+ // If execution fails, sets a pending exception and returns RE_EXCEPTION.
+ static int IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
+ int output_size);
+
+ // Execute an Irregexp bytecode pattern.
+ // On a successful match, the result is a JSArray containing
+ // captured positions. On a failure, the result is the null value.
+ // Returns an empty handle in case of an exception.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> IrregexpExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info);
+
+ static bool CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> sample_subject, bool is_one_byte);
+ static inline bool EnsureCompiledIrregexp(Isolate* isolate,
+ Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_one_byte);
+
+ // Returns true on success, false on failure.
+ static bool Compile(Isolate* isolate, Zone* zone, RegExpCompileData* input,
+ JSRegExp::Flags flags, Handle<String> pattern,
+ Handle<String> sample_subject, bool is_one_byte);
+
+ // For acting on the JSRegExp data FixedArray.
+ static int IrregexpMaxRegisterCount(FixedArray re);
+ static void SetIrregexpMaxRegisterCount(FixedArray re, int value);
+ static void SetIrregexpCaptureNameMap(FixedArray re,
+ Handle<FixedArray> value);
+ static int IrregexpNumberOfCaptures(FixedArray re);
+ static int IrregexpNumberOfRegisters(FixedArray re);
+ static ByteArray IrregexpByteCode(FixedArray re, bool is_one_byte);
+ static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
+};
+
+V8_WARN_UNUSED_RESULT
+static inline MaybeHandle<Object> ThrowRegExpException(
+ Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
+ Handle<String> error_text) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewSyntaxError(MessageTemplate::kMalformedRegExp, pattern, error_text),
+ Object);
+}
+
+inline void ThrowRegExpException(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> error_text) {
+ USE(ThrowRegExpException(isolate, re, Handle<String>(re->Pattern(), isolate),
+ error_text));
+}
+
+// Identifies the sort of regexps where the regexp engine is faster
+// than the code used for atom matches.
+static bool HasFewDifferentCharacters(Handle<String> pattern) {
+ int length = Min(kMaxLookaheadForBoyerMoore, pattern->length());
+ if (length <= kPatternTooShortForBoyerMoore) return false;
+ const int kMod = 128;
+ bool character_found[kMod];
+ int different = 0;
+ memset(&character_found[0], 0, sizeof(character_found));
+ for (int i = 0; i < length; i++) {
+ int ch = (pattern->Get(i) & (kMod - 1));
+ if (!character_found[ch]) {
+ character_found[ch] = true;
+ different++;
+ // We declare a regexp low-alphabet if it has at least 3 times as many
+ // characters as it has different characters.
+ if (different * 3 > length) return false;
+ }
+ }
+ return true;
+}
+
+// Generic RegExp methods. Dispatches to implementation specific methods.
+
+// static
+MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern,
+ JSRegExp::Flags flags) {
+ DCHECK(pattern->IsFlat());
+
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ CompilationCache* compilation_cache = isolate->compilation_cache();
+ MaybeHandle<FixedArray> maybe_cached =
+ compilation_cache->LookupRegExp(pattern, flags);
+ Handle<FixedArray> cached;
+ if (maybe_cached.ToHandle(&cached)) {
+ re->set_data(*cached);
+ return re;
+ }
+
+ PostponeInterruptsScope postpone(isolate);
+ RegExpCompileData parse_result;
+ FlatStringReader reader(isolate, pattern);
+ DCHECK(!isolate->has_pending_exception());
+ if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+ &parse_result)) {
+ // Throw an exception if we fail to parse the pattern.
+ return ThrowRegExpException(isolate, re, pattern, parse_result.error);
+ }
+
+ bool has_been_compiled = false;
+
+ if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
+ !HasFewDifferentCharacters(pattern)) {
+ // Parse-tree is a single atom that is equal to the pattern.
+ RegExpImpl::AtomCompile(isolate, re, pattern, flags, pattern);
+ has_been_compiled = true;
+ } else if (parse_result.tree->IsAtom() && !IsSticky(flags) &&
+ parse_result.capture_count == 0) {
+ RegExpAtom* atom = parse_result.tree->AsAtom();
+ Vector<const uc16> atom_pattern = atom->data();
+ Handle<String> atom_string;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, atom_string,
+ isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
+ if (!IgnoreCase(atom->flags()) && !HasFewDifferentCharacters(atom_string)) {
+ RegExpImpl::AtomCompile(isolate, re, pattern, flags, atom_string);
+ has_been_compiled = true;
+ }
+ }
+ if (!has_been_compiled) {
+ RegExpImpl::IrregexpInitialize(isolate, re, pattern, flags,
+ parse_result.capture_count);
+ }
+ DCHECK(re->data().IsFixedArray());
+ // Compilation succeeded so the data is set on the regexp
+ // and we can store it in the cache.
+ Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
+ compilation_cache->PutRegExp(pattern, flags, data);
+
+ return re;
+}
+
+// static
+MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ Handle<RegExpMatchInfo> last_match_info) {
+ switch (regexp->TypeTag()) {
+ case JSRegExp::ATOM:
+ return RegExpImpl::AtomExec(isolate, regexp, subject, index,
+ last_match_info);
+ case JSRegExp::IRREGEXP: {
+ return RegExpImpl::IrregexpExec(isolate, regexp, subject, index,
+ last_match_info);
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+// RegExp Atom implementation: Simple string search using indexOf.
+
+void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> match_pattern) {
+ isolate->factory()->SetRegExpAtomData(re, JSRegExp::ATOM, pattern, flags,
+ match_pattern);
+}
+
+static void SetAtomLastCapture(Isolate* isolate,
+ Handle<RegExpMatchInfo> last_match_info,
+ String subject, int from, int to) {
+ SealHandleScope shs(isolate);
+ last_match_info->SetNumberOfCaptureRegisters(2);
+ last_match_info->SetLastSubject(subject);
+ last_match_info->SetLastInput(subject);
+ last_match_info->SetCapture(0, from);
+ last_match_info->SetCapture(1, to);
+}
+
+int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index, int32_t* output,
+ int output_size) {
+ DCHECK_LE(0, index);
+ DCHECK_LE(index, subject->length());
+
+ subject = String::Flatten(isolate, subject);
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+
+ String needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
+ int needle_len = needle.length();
+ DCHECK(needle.IsFlat());
+ DCHECK_LT(0, needle_len);
+
+ if (index + needle_len > subject->length()) {
+ return RegExp::RE_FAILURE;
+ }
+
+ for (int i = 0; i < output_size; i += 2) {
+ String::FlatContent needle_content = needle.GetFlatContent(no_gc);
+ String::FlatContent subject_content = subject->GetFlatContent(no_gc);
+ DCHECK(needle_content.IsFlat());
+ DCHECK(subject_content.IsFlat());
+ // dispatch on type of strings
+ index =
+ (needle_content.IsOneByte()
+ ? (subject_content.IsOneByte()
+ ? SearchString(isolate, subject_content.ToOneByteVector(),
+ needle_content.ToOneByteVector(), index)
+ : SearchString(isolate, subject_content.ToUC16Vector(),
+ needle_content.ToOneByteVector(), index))
+ : (subject_content.IsOneByte()
+ ? SearchString(isolate, subject_content.ToOneByteVector(),
+ needle_content.ToUC16Vector(), index)
+ : SearchString(isolate, subject_content.ToUC16Vector(),
+ needle_content.ToUC16Vector(), index)));
+ if (index == -1) {
+ return i / 2; // Return number of matches.
+ } else {
+ output[i] = index;
+ output[i + 1] = index + needle_len;
+ index += needle_len;
+ }
+ }
+ return output_size / 2;
+}
+
+Handle<Object> RegExpImpl::AtomExec(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> subject, int index,
+ Handle<RegExpMatchInfo> last_match_info) {
+ static const int kNumRegisters = 2;
+ STATIC_ASSERT(kNumRegisters <= Isolate::kJSRegexpStaticOffsetsVectorSize);
+ int32_t* output_registers = isolate->jsregexp_static_offsets_vector();
+
+ int res =
+ AtomExecRaw(isolate, re, subject, index, output_registers, kNumRegisters);
+
+ if (res == RegExp::RE_FAILURE) return isolate->factory()->null_value();
+
+ DCHECK_EQ(res, RegExp::RE_SUCCESS);
+ SealHandleScope shs(isolate);
+ SetAtomLastCapture(isolate, last_match_info, *subject, output_registers[0],
+ output_registers[1]);
+ return last_match_info;
+}
+
+// Irregexp implementation.
+
+// Ensures that the regexp object contains a compiled version of the
+// source for either one-byte or two-byte subject strings.
+// If the compiled version doesn't already exist, it is compiled
+// from the source pattern.
+// If compilation fails, an exception is thrown and this function
+// returns false.
+bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_one_byte) {
+ Object compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
+ if (compiled_code != Smi::FromInt(JSRegExp::kUninitializedValue)) {
+ DCHECK(FLAG_regexp_interpret_all ? compiled_code.IsByteArray()
+ : compiled_code.IsCode());
+ return true;
+ }
+ return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
+}
+
+bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> sample_subject,
+ bool is_one_byte) {
+ // Compile the RegExp.
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ PostponeInterruptsScope postpone(isolate);
+#ifdef DEBUG
+ Object entry = re->DataAt(JSRegExp::code_index(is_one_byte));
+ // When arriving here entry can only be a smi representing an uncompiled
+ // regexp.
+ DCHECK(entry.IsSmi());
+ int entry_value = Smi::ToInt(entry);
+ DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
+#endif
+
+ JSRegExp::Flags flags = re->GetFlags();
+
+ Handle<String> pattern(re->Pattern(), isolate);
+ pattern = String::Flatten(isolate, pattern);
+ RegExpCompileData compile_data;
+ FlatStringReader reader(isolate, pattern);
+ if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+ &compile_data)) {
+ // Throw an exception if we fail to parse the pattern.
+ // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
+ USE(ThrowRegExpException(isolate, re, pattern, compile_data.error));
+ return false;
+ }
+ const bool compilation_succeeded =
+ Compile(isolate, &zone, &compile_data, flags, pattern, sample_subject,
+ is_one_byte);
+ if (!compilation_succeeded) {
+ DCHECK(!compile_data.error.is_null());
+ ThrowRegExpException(isolate, re, compile_data.error);
+ return false;
+ }
+
+ Handle<FixedArray> data =
+ Handle<FixedArray>(FixedArray::cast(re->data()), isolate);
+ data->set(JSRegExp::code_index(is_one_byte), compile_data.code);
+ SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map);
+ int register_max = IrregexpMaxRegisterCount(*data);
+ if (compile_data.register_count > register_max) {
+ SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
+ }
+
+ return true;
+}
+
+int RegExpImpl::IrregexpMaxRegisterCount(FixedArray re) {
+ return Smi::cast(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex)).value();
+}
+
+void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray re, int value) {
+ re.set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
+}
+
+void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray re,
+ Handle<FixedArray> value) {
+ if (value.is_null()) {
+ re.set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
+ } else {
+ re.set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
+ }
+}
+
+int RegExpImpl::IrregexpNumberOfCaptures(FixedArray re) {
+ return Smi::ToInt(re.get(JSRegExp::kIrregexpCaptureCountIndex));
+}
+
+int RegExpImpl::IrregexpNumberOfRegisters(FixedArray re) {
+ return Smi::ToInt(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex));
+}
+
+ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
+ return ByteArray::cast(re.get(JSRegExp::code_index(is_one_byte)));
+}
+
+Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
+ return Code::cast(re.get(JSRegExp::code_index(is_one_byte)));
+}
+
+void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
+ Handle<String> pattern,
+ JSRegExp::Flags flags, int capture_count) {
+ // Initialize compiled code entries to null.
+ isolate->factory()->SetRegExpIrregexpData(re, JSRegExp::IRREGEXP, pattern,
+ flags, capture_count);
+}
+
+// static
+int RegExp::IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject) {
+ DCHECK(subject->IsFlat());
+
+ // Check representation of the underlying storage.
+ bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+ if (!RegExpImpl::EnsureCompiledIrregexp(isolate, regexp, subject,
+ is_one_byte)) {
+ return -1;
+ }
+
+ DisallowHeapAllocation no_gc;
+ FixedArray data = FixedArray::cast(regexp->data());
+ if (FLAG_regexp_interpret_all) {
+ // Byte-code regexp needs space allocated for all its registers.
+ // The result captures are copied to the start of the registers array
+ // if the match succeeds. This way those registers are not clobbered
+ // when we set the last match info from last successful match.
+ return RegExpImpl::IrregexpNumberOfRegisters(data) +
+ (RegExpImpl::IrregexpNumberOfCaptures(data) + 1) * 2;
+ } else {
+ // Native regexp only needs room to output captures. Registers are handled
+ // internally.
+ return (RegExpImpl::IrregexpNumberOfCaptures(data) + 1) * 2;
+ }
+}
+
+int RegExpImpl::IrregexpExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject, int index,
+ int32_t* output, int output_size) {
+ Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
+
+ DCHECK_LE(0, index);
+ DCHECK_LE(index, subject->length());
+ DCHECK(subject->IsFlat());
+
+ bool is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+
+ if (!FLAG_regexp_interpret_all) {
+ DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+ do {
+ EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
+ Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
+ // The stack is used to allocate registers for the compiled regexp code.
+ // This means that in case of failure, the output registers array is left
+ // untouched and contains the capture results from the previous successful
+ // match. We can use that to set the last match info lazily.
+ int res = NativeRegExpMacroAssembler::Match(code, subject, output,
+ output_size, index, isolate);
+ if (res != NativeRegExpMacroAssembler::RETRY) {
+ DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION ||
+ isolate->has_pending_exception());
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) ==
+ RegExp::RE_SUCCESS);
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::FAILURE) ==
+ RegExp::RE_FAILURE);
+ STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION) ==
+ RegExp::RE_EXCEPTION);
+ return res;
+ }
+ // If result is RETRY, the string has changed representation, and we
+ // must restart from scratch.
+ // In this case, it means we must make sure we are prepared to handle
+ // the, potentially, different subject (the string can switch between
+ // being internal and external, and even between being Latin1 and UC16,
+ // but the characters are always the same).
+ RegExp::IrregexpPrepare(isolate, regexp, subject);
+ is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+ } while (true);
+ UNREACHABLE();
+ } else {
+ DCHECK(FLAG_regexp_interpret_all);
+ DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
+ // We must have done EnsureCompiledIrregexp, so we can get the number of
+ // registers.
+ int number_of_capture_registers =
+ (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
+ int32_t* raw_output = &output[number_of_capture_registers];
+
+ do {
+ // We do not touch the actual capture result registers until we know there
+ // has been a match so that we can use those capture results to set the
+ // last match info.
+ for (int i = number_of_capture_registers - 1; i >= 0; i--) {
+ raw_output[i] = -1;
+ }
+ Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
+ isolate);
+
+ IrregexpInterpreter::Result result = IrregexpInterpreter::Match(
+ isolate, byte_codes, subject, raw_output, index);
+ DCHECK_IMPLIES(result == IrregexpInterpreter::EXCEPTION,
+ isolate->has_pending_exception());
+
+ switch (result) {
+ case IrregexpInterpreter::SUCCESS:
+ // Copy capture results to the start of the registers array.
+ MemCopy(output, raw_output,
+ number_of_capture_registers * sizeof(int32_t));
+ return result;
+ case IrregexpInterpreter::EXCEPTION:
+ case IrregexpInterpreter::FAILURE:
+ return result;
+ case IrregexpInterpreter::RETRY:
+ // The string has changed representation, and we must restart the
+ // match.
+ is_one_byte = String::IsOneByteRepresentationUnderneath(*subject);
+ EnsureCompiledIrregexp(isolate, regexp, subject, is_one_byte);
+ break;
+ }
+ } while (true);
+ UNREACHABLE();
+ }
+}
+
+MaybeHandle<Object> RegExpImpl::IrregexpExec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int previous_index, Handle<RegExpMatchInfo> last_match_info) {
+ DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+
+ subject = String::Flatten(isolate, subject);
+
+ // Prepare space for the return values.
+#ifdef DEBUG
+ if (FLAG_regexp_interpret_all && FLAG_trace_regexp_bytecodes) {
+ String pattern = regexp->Pattern();
+ PrintF("\n\nRegexp match: /%s/\n\n", pattern.ToCString().get());
+ PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
+ }
+#endif
+ int required_registers = RegExp::IrregexpPrepare(isolate, regexp, subject);
+ if (required_registers < 0) {
+ // Compiling failed with an exception.
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
+ }
+
+ int32_t* output_registers = nullptr;
+ if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ output_registers = NewArray<int32_t>(required_registers);
+ }
+ std::unique_ptr<int32_t[]> auto_release(output_registers);
+ if (output_registers == nullptr) {
+ output_registers = isolate->jsregexp_static_offsets_vector();
+ }
+
+ int res =
+ RegExpImpl::IrregexpExecRaw(isolate, regexp, subject, previous_index,
+ output_registers, required_registers);
+ if (res == RegExp::RE_SUCCESS) {
+ int capture_count =
+ IrregexpNumberOfCaptures(FixedArray::cast(regexp->data()));
+ return RegExp::SetLastMatchInfo(isolate, last_match_info, subject,
+ capture_count, output_registers);
+ }
+ if (res == RegExp::RE_EXCEPTION) {
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
+ }
+ DCHECK(res == RegExp::RE_FAILURE);
+ return isolate->factory()->null_value();
+}
+
+// static
+Handle<RegExpMatchInfo> RegExp::SetLastMatchInfo(
+ Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
+ Handle<String> subject, int capture_count, int32_t* match) {
+ // This is the only place where match infos can grow. If, after executing the
+ // regexp, RegExpExecStub finds that the match info is too small, it restarts
+ // execution in RegExpImpl::Exec, which finally grows the match info right
+ // here.
+
+ int capture_register_count = (capture_count + 1) * 2;
+ Handle<RegExpMatchInfo> result = RegExpMatchInfo::ReserveCaptures(
+ isolate, last_match_info, capture_register_count);
+ result->SetNumberOfCaptureRegisters(capture_register_count);
+
+ if (*result != *last_match_info) {
+ if (*last_match_info == *isolate->regexp_last_match_info()) {
+ // This inner condition is only needed for special situations like the
+ // regexp fuzzer, where we pass our own custom RegExpMatchInfo to
+ // RegExpImpl::Exec; there actually want to bypass the Isolate's match
+ // info and execute the regexp without side effects.
+ isolate->native_context()->set_regexp_last_match_info(*result);
+ }
+ }
+
+ DisallowHeapAllocation no_allocation;
+ if (match != nullptr) {
+ for (int i = 0; i < capture_register_count; i += 2) {
+ result->SetCapture(i, match[i]);
+ result->SetCapture(i + 1, match[i + 1]);
+ }
+ }
+ result->SetLastSubject(*subject);
+ result->SetLastInput(*subject);
+ return result;
+}
+
+// static
+void RegExp::DotPrintForTesting(const char* label, RegExpNode* node) {
+ DotPrinter::DotPrint(label, node);
+}
+
+namespace {
+
+// Returns true if we've either generated too much irregex code within this
+// isolate, or the pattern string is too long.
+bool TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
+ // Limit the space regexps take up on the heap. In order to limit this we
+ // would like to keep track of the amount of regexp code on the heap. This
+ // is not tracked, however. As a conservative approximation we track the
+ // total regexp code compiled including code that has subsequently been freed
+ // and the total executable memory at any point.
+ static constexpr size_t kRegExpExecutableMemoryLimit = 16 * MB;
+ static constexpr size_t kRegExpCompiledLimit = 1 * MB;
+
+ Heap* heap = isolate->heap();
+ if (pattern->length() > RegExp::kRegExpTooLargeToOptimize) return true;
+ return (isolate->total_regexp_code_generated() > kRegExpCompiledLimit &&
+ heap->CommittedMemoryExecutable() > kRegExpExecutableMemoryLimit);
+}
+
+} // namespace
+
+// static
+bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
+ RegExpCompileData* data, JSRegExp::Flags flags,
+ Handle<String> pattern,
+ Handle<String> sample_subject,
+ bool is_one_byte) {
+ return RegExpImpl::Compile(isolate, zone, data, flags, pattern,
+ sample_subject, is_one_byte);
+}
+
+bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
+ JSRegExp::Flags flags, Handle<String> pattern,
+ Handle<String> sample_subject, bool is_one_byte) {
+ if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
+ data->error =
+ isolate->factory()->NewStringFromAsciiChecked("RegExp too big");
+ return false;
+ }
+
+ bool is_sticky = IsSticky(flags);
+ bool is_global = IsGlobal(flags);
+ bool is_unicode = IsUnicode(flags);
+ RegExpCompiler compiler(isolate, zone, data->capture_count, is_one_byte);
+
+ if (compiler.optimize()) {
+ compiler.set_optimize(!TooMuchRegExpCode(isolate, pattern));
+ }
+
+ // Sample some characters from the middle of the string.
+ static const int kSampleSize = 128;
+
+ sample_subject = String::Flatten(isolate, sample_subject);
+ int chars_sampled = 0;
+ int half_way = (sample_subject->length() - kSampleSize) / 2;
+ for (int i = Max(0, half_way);
+ i < sample_subject->length() && chars_sampled < kSampleSize;
+ i++, chars_sampled++) {
+ compiler.frequency_collator()->CountCharacter(sample_subject->Get(i));
+ }
+
+ // Wrap the body of the regexp in capture #0.
+ RegExpNode* captured_body =
+ RegExpCapture::ToNode(data->tree, 0, &compiler, compiler.accept());
+ RegExpNode* node = captured_body;
+ bool is_end_anchored = data->tree->IsAnchoredAtEnd();
+ bool is_start_anchored = data->tree->IsAnchoredAtStart();
+ int max_length = data->tree->max_match();
+ if (!is_start_anchored && !is_sticky) {
+ // Add a .*? at the beginning, outside the body capture, unless
+ // this expression is anchored at the beginning or sticky.
+ JSRegExp::Flags default_flags = JSRegExp::Flags();
+ RegExpNode* loop_node = RegExpQuantifier::ToNode(
+ 0, RegExpTree::kInfinity, false,
+ new (zone) RegExpCharacterClass('*', default_flags), &compiler,
+ captured_body, data->contains_anchor);
+
+ if (data->contains_anchor) {
+ // Unroll loop once, to take care of the case that might start
+ // at the start of input.
+ ChoiceNode* first_step_node = new (zone) ChoiceNode(2, zone);
+ first_step_node->AddAlternative(GuardedAlternative(captured_body));
+ first_step_node->AddAlternative(GuardedAlternative(new (zone) TextNode(
+ new (zone) RegExpCharacterClass('*', default_flags), false,
+ loop_node)));
+ node = first_step_node;
+ } else {
+ node = loop_node;
+ }
+ }
+ if (is_one_byte) {
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
+ // Do it again to propagate the new nodes to places where they were not
+ // put because they had not been calculated yet.
+ if (node != nullptr) {
+ node = node->FilterOneByte(RegExpCompiler::kMaxRecursion);
+ }
+ } else if (is_unicode && (is_global || is_sticky)) {
+ node = RegExpCompiler::OptionallyStepBackToLeadSurrogate(&compiler, node,
+ flags);
+ }
+
+ if (node == nullptr) node = new (zone) EndNode(EndNode::BACKTRACK, zone);
+ data->node = node;
+ Analysis analysis(isolate, is_one_byte);
+ analysis.EnsureAnalyzed(node);
+ if (analysis.has_failed()) {
+ data->error =
+ isolate->factory()->NewStringFromAsciiChecked(analysis.error_message());
+ return false;
+ }
+
+ // Create the correct assembler for the architecture.
+ std::unique_ptr<RegExpMacroAssembler> macro_assembler;
+ if (!FLAG_regexp_interpret_all) {
+ // Native regexp implementation.
+ DCHECK(!FLAG_jitless);
+
+ NativeRegExpMacroAssembler::Mode mode =
+ is_one_byte ? NativeRegExpMacroAssembler::LATIN1
+ : NativeRegExpMacroAssembler::UC16;
+
+#if V8_TARGET_ARCH_IA32
+ macro_assembler.reset(new RegExpMacroAssemblerIA32(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_X64
+ macro_assembler.reset(new RegExpMacroAssemblerX64(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_ARM
+ macro_assembler.reset(new RegExpMacroAssemblerARM(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_ARM64
+ macro_assembler.reset(new RegExpMacroAssemblerARM64(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_S390
+ macro_assembler.reset(new RegExpMacroAssemblerS390(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_PPC
+ macro_assembler.reset(new RegExpMacroAssemblerPPC(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_MIPS
+ macro_assembler.reset(new RegExpMacroAssemblerMIPS(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#elif V8_TARGET_ARCH_MIPS64
+ macro_assembler.reset(new RegExpMacroAssemblerMIPS(
+ isolate, zone, mode, (data->capture_count + 1) * 2));
+#else
+#error "Unsupported architecture"
+#endif
+ } else {
+ DCHECK(FLAG_regexp_interpret_all);
+
+ // Interpreted regexp implementation.
+ macro_assembler.reset(new RegExpBytecodeGenerator(isolate, zone));
+ }
+
+ macro_assembler->set_slow_safe(TooMuchRegExpCode(isolate, pattern));
+
+ // Inserted here, instead of in Assembler, because it depends on information
+ // in the AST that isn't replicated in the Node structure.
+ static const int kMaxBacksearchLimit = 1024;
+ if (is_end_anchored && !is_start_anchored && !is_sticky &&
+ max_length < kMaxBacksearchLimit) {
+ macro_assembler->SetCurrentPositionFromEnd(max_length);
+ }
+
+ if (is_global) {
+ RegExpMacroAssembler::GlobalMode mode = RegExpMacroAssembler::GLOBAL;
+ if (data->tree->min_match() > 0) {
+ mode = RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK;
+ } else if (is_unicode) {
+ mode = RegExpMacroAssembler::GLOBAL_UNICODE;
+ }
+ macro_assembler->set_global_mode(mode);
+ }
+
+ RegExpCompiler::CompilationResult result = compiler.Assemble(
+ isolate, macro_assembler.get(), node, data->capture_count, pattern);
+
+ if (FLAG_correctness_fuzzer_suppressions &&
+ strncmp(result.error_message, "Stack overflow", 15) == 0) {
+ FATAL("Aborting on stack overflow");
+ }
+
+ if (result.error_message != nullptr) {
+ data->error =
+ isolate->factory()->NewStringFromAsciiChecked(result.error_message);
+ }
+ data->code = result.code;
+ data->register_count = result.num_registers;
+
+ return result.Succeeded();
+}
+
+RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
+ Handle<String> subject, Isolate* isolate)
+ : register_array_(nullptr),
+ register_array_size_(0),
+ regexp_(regexp),
+ subject_(subject),
+ isolate_(isolate) {
+ bool interpreted = FLAG_regexp_interpret_all;
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ static const int kAtomRegistersPerMatch = 2;
+ registers_per_match_ = kAtomRegistersPerMatch;
+ // There is no distinction between interpreted and native for atom regexps.
+ interpreted = false;
+ } else {
+ registers_per_match_ = RegExp::IrregexpPrepare(isolate_, regexp_, subject_);
+ if (registers_per_match_ < 0) {
+ num_matches_ = -1; // Signal exception.
+ return;
+ }
+ }
+
+ DCHECK(IsGlobal(regexp->GetFlags()));
+ if (!interpreted) {
+ register_array_size_ =
+ Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
+ max_matches_ = register_array_size_ / registers_per_match_;
+ } else {
+ // Global loop in interpreted regexp is not implemented. We choose
+ // the size of the offsets vector so that it can only store one match.
+ register_array_size_ = registers_per_match_;
+ max_matches_ = 1;
+ }
+
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ register_array_ = NewArray<int32_t>(register_array_size_);
+ } else {
+ register_array_ = isolate->jsregexp_static_offsets_vector();
+ }
+
+ // Set state so that fetching the results the first time triggers a call
+ // to the compiled regexp.
+ current_match_index_ = max_matches_ - 1;
+ num_matches_ = max_matches_;
+ DCHECK_LE(2, registers_per_match_); // Each match has at least one capture.
+ DCHECK_GE(register_array_size_, registers_per_match_);
+ int32_t* last_match =
+ &register_array_[current_match_index_ * registers_per_match_];
+ last_match[0] = -1;
+ last_match[1] = 0;
+}
+
+RegExpGlobalCache::~RegExpGlobalCache() {
+ // Deallocate the register array if we allocated it in the constructor
+ // (as opposed to using the existing jsregexp_static_offsets_vector).
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ DeleteArray(register_array_);
+ }
+}
+
+int RegExpGlobalCache::AdvanceZeroLength(int last_index) {
+ if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() &&
+ unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
+ unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
+ // Advance over the surrogate pair.
+ return last_index + 2;
+ }
+ return last_index + 1;
+}
+
+int32_t* RegExpGlobalCache::FetchNext() {
+ current_match_index_++;
+ if (current_match_index_ >= num_matches_) {
+ // Current batch of results exhausted.
+ // Fail if last batch was not even fully filled.
+ if (num_matches_ < max_matches_) {
+ num_matches_ = 0; // Signal failed match.
+ return nullptr;
+ }
+
+ int32_t* last_match =
+ &register_array_[(current_match_index_ - 1) * registers_per_match_];
+ int last_end_index = last_match[1];
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ num_matches_ =
+ RegExpImpl::AtomExecRaw(isolate_, regexp_, subject_, last_end_index,
+ register_array_, register_array_size_);
+ } else {
+ int last_start_index = last_match[0];
+ if (last_start_index == last_end_index) {
+ // Zero-length match. Advance by one code point.
+ last_end_index = AdvanceZeroLength(last_end_index);
+ }
+ if (last_end_index > subject_->length()) {
+ num_matches_ = 0; // Signal failed match.
+ return nullptr;
+ }
+ num_matches_ = RegExpImpl::IrregexpExecRaw(
+ isolate_, regexp_, subject_, last_end_index, register_array_,
+ register_array_size_);
+ }
+
+ if (num_matches_ <= 0) return nullptr;
+ current_match_index_ = 0;
+ return register_array_;
+ } else {
+ return &register_array_[current_match_index_ * registers_per_match_];
+ }
+}
+
+int32_t* RegExpGlobalCache::LastSuccessfulMatch() {
+ int index = current_match_index_ * registers_per_match_;
+ if (num_matches_ == 0) {
+ // After a failed match we shift back by one result.
+ index -= registers_per_match_;
+ }
+ return &register_array_[index];
+}
+
+Object RegExpResultsCache::Lookup(Heap* heap, String key_string,
+ Object key_pattern,
+ FixedArray* last_match_cache,
+ ResultsCacheType type) {
+ FixedArray cache;
+ if (!key_string.IsInternalizedString()) return Smi::kZero;
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ DCHECK(key_pattern.IsString());
+ if (!key_pattern.IsInternalizedString()) return Smi::kZero;
+ cache = heap->string_split_cache();
+ } else {
+ DCHECK(type == REGEXP_MULTIPLE_INDICES);
+ DCHECK(key_pattern.IsFixedArray());
+ cache = heap->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string.Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+ ~(kArrayEntriesPerCacheEntry - 1));
+ if (cache.get(index + kStringOffset) != key_string ||
+ cache.get(index + kPatternOffset) != key_pattern) {
+ index =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache.get(index + kStringOffset) != key_string ||
+ cache.get(index + kPatternOffset) != key_pattern) {
+ return Smi::kZero;
+ }
+ }
+
+ *last_match_cache = FixedArray::cast(cache.get(index + kLastMatchOffset));
+ return cache.get(index + kArrayOffset);
+}
+
+void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
+ Handle<Object> key_pattern,
+ Handle<FixedArray> value_array,
+ Handle<FixedArray> last_match_cache,
+ ResultsCacheType type) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> cache;
+ if (!key_string->IsInternalizedString()) return;
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ DCHECK(key_pattern->IsString());
+ if (!key_pattern->IsInternalizedString()) return;
+ cache = factory->string_split_cache();
+ } else {
+ DCHECK(type == REGEXP_MULTIPLE_INDICES);
+ DCHECK(key_pattern->IsFixedArray());
+ cache = factory->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string->Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+ ~(kArrayEntriesPerCacheEntry - 1));
+ if (cache->get(index + kStringOffset) == Smi::kZero) {
+ cache->set(index + kStringOffset, *key_string);
+ cache->set(index + kPatternOffset, *key_pattern);
+ cache->set(index + kArrayOffset, *value_array);
+ cache->set(index + kLastMatchOffset, *last_match_cache);
+ } else {
+ uint32_t index2 =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache->get(index2 + kStringOffset) == Smi::kZero) {
+ cache->set(index2 + kStringOffset, *key_string);
+ cache->set(index2 + kPatternOffset, *key_pattern);
+ cache->set(index2 + kArrayOffset, *value_array);
+ cache->set(index2 + kLastMatchOffset, *last_match_cache);
+ } else {
+ cache->set(index2 + kStringOffset, Smi::kZero);
+ cache->set(index2 + kPatternOffset, Smi::kZero);
+ cache->set(index2 + kArrayOffset, Smi::kZero);
+ cache->set(index2 + kLastMatchOffset, Smi::kZero);
+ cache->set(index + kStringOffset, *key_string);
+ cache->set(index + kPatternOffset, *key_pattern);
+ cache->set(index + kArrayOffset, *value_array);
+ cache->set(index + kLastMatchOffset, *last_match_cache);
+ }
+ }
+ // If the array is a reasonably short list of substrings, convert it into a
+ // list of internalized strings.
+ if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
+ for (int i = 0; i < value_array->length(); i++) {
+ Handle<String> str(String::cast(value_array->get(i)), isolate);
+ Handle<String> internalized_str = factory->InternalizeString(str);
+ value_array->set(i, *internalized_str);
+ }
+ }
+ // Convert backing store to a copy-on-write array.
+ value_array->set_map_no_write_barrier(
+ ReadOnlyRoots(isolate).fixed_cow_array_map());
+}
+
+void RegExpResultsCache::Clear(FixedArray cache) {
+ for (int i = 0; i < kRegExpResultsCacheSize; i++) {
+ cache.set(i, Smi::kZero);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
new file mode 100644
index 0000000000..0f3ed463da
--- /dev/null
+++ b/deps/v8/src/regexp/regexp.h
@@ -0,0 +1,177 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_H_
+#define V8_REGEXP_REGEXP_H_
+
+#include "src/objects/js-regexp.h"
+
+namespace v8 {
+namespace internal {
+
+class RegExpNode;
+class RegExpTree;
+
+// TODO(jgruber): Consider splitting between ParseData and CompileData.
+struct RegExpCompileData {
+ // The parsed AST as produced by the RegExpParser.
+ RegExpTree* tree = nullptr;
+
+ // The compiled Node graph as produced by RegExpTree::ToNode methods.
+ RegExpNode* node = nullptr;
+
+ // The generated code as produced by the compiler. Either a Code object (for
+ // irregexp native code) or a ByteArray (for irregexp bytecode).
+ Object code;
+
+ // True, iff the pattern is a 'simple' atom with zero captures. In other
+ // words, the pattern consists of a string with no metacharacters and special
+ // regexp features, and can be implemented as a standard string search.
+ bool simple = true;
+
+ // True, iff the pattern is anchored at the start of the string with '^'.
+ bool contains_anchor = false;
+
+ // Only use if the pattern contains named captures. If so, this contains a
+ // mapping of capture names to capture indices.
+ Handle<FixedArray> capture_name_map;
+
+ // The error message. Only used if an error occurred during parsing or
+ // compilation.
+ Handle<String> error;
+
+ // The number of capture groups, without the global capture \0.
+ int capture_count = 0;
+
+ // The number of registers used by the generated code.
+ int register_count = 0;
+};
+
+class RegExp final : public AllStatic {
+ public:
+ // Whether the irregexp engine generates native code or interpreter bytecode.
+ static bool GeneratesNativeCode() { return !FLAG_regexp_interpret_all; }
+
+ // Parses the RegExp pattern and prepares the JSRegExp object with
+ // generic data and choice of implementation - as well as what
+ // the implementation wants to store in the data field.
+ // Returns false if compilation fails.
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
+ Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
+ JSRegExp::Flags flags);
+
+ // See ECMA-262 section 15.10.6.2.
+ // This function calls the garbage collector if necessary.
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Exec(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ int index, Handle<RegExpMatchInfo> last_match_info);
+
+ // Integral return values used throughout regexp code layers.
+ static constexpr int kInternalRegExpFailure = 0;
+ static constexpr int kInternalRegExpSuccess = 1;
+ static constexpr int kInternalRegExpException = -1;
+ static constexpr int kInternalRegExpRetry = -2;
+
+ enum IrregexpResult {
+ RE_FAILURE = kInternalRegExpFailure,
+ RE_SUCCESS = kInternalRegExpSuccess,
+ RE_EXCEPTION = kInternalRegExpException,
+ };
+
+ // Prepare a RegExp for being executed one or more times (using
+ // IrregexpExecOnce) on the subject.
+ // This ensures that the regexp is compiled for the subject, and that
+ // the subject is flat.
+ // Returns the number of integer spaces required by IrregexpExecOnce
+ // as its "registers" argument. If the regexp cannot be compiled,
+ // an exception is set as pending, and this function returns negative.
+ static int IrregexpPrepare(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> subject);
+
+ // Set last match info. If match is nullptr, then setting captures is
+ // omitted.
+ static Handle<RegExpMatchInfo> SetLastMatchInfo(
+ Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
+ Handle<String> subject, int capture_count, int32_t* match);
+
+ V8_EXPORT_PRIVATE static bool CompileForTesting(Isolate* isolate, Zone* zone,
+ RegExpCompileData* input,
+ JSRegExp::Flags flags,
+ Handle<String> pattern,
+ Handle<String> sample_subject,
+ bool is_one_byte);
+
+ V8_EXPORT_PRIVATE static void DotPrintForTesting(const char* label,
+ RegExpNode* node);
+
+ static const int kRegExpTooLargeToOptimize = 20 * KB;
+};
+
+// Uses a special global mode of irregexp-generated code to perform a global
+// search and return multiple results at once. As such, this is essentially an
+// iterator over multiple results (retrieved batch-wise in advance).
+class RegExpGlobalCache final {
+ public:
+ RegExpGlobalCache(Handle<JSRegExp> regexp, Handle<String> subject,
+ Isolate* isolate);
+
+ ~RegExpGlobalCache();
+
+ // Fetch the next entry in the cache for global regexp match results.
+ // This does not set the last match info. Upon failure, nullptr is
+ // returned. The cause can be checked with Result(). The previous result is
+ // still in available in memory when a failure happens.
+ int32_t* FetchNext();
+
+ int32_t* LastSuccessfulMatch();
+
+ bool HasException() { return num_matches_ < 0; }
+
+ private:
+ int AdvanceZeroLength(int last_index);
+
+ int num_matches_;
+ int max_matches_;
+ int current_match_index_;
+ int registers_per_match_;
+ // Pointer to the last set of captures.
+ int32_t* register_array_;
+ int register_array_size_;
+ Handle<JSRegExp> regexp_;
+ Handle<String> subject_;
+ Isolate* isolate_;
+};
+
+// Caches results for specific regexp queries on the isolate. At the time of
+// writing, this is used during global calls to RegExp.prototype.exec and
+// @@split.
+class RegExpResultsCache final : public AllStatic {
+ public:
+ enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
+
+ // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
+ // On success, the returned result is guaranteed to be a COW-array.
+ static Object Lookup(Heap* heap, String key_string, Object key_pattern,
+ FixedArray* last_match_out, ResultsCacheType type);
+ // Attempt to add value_array to the cache specified by type. On success,
+ // value_array is turned into a COW-array.
+ static void Enter(Isolate* isolate, Handle<String> key_string,
+ Handle<Object> key_pattern, Handle<FixedArray> value_array,
+ Handle<FixedArray> last_match_cache, ResultsCacheType type);
+ static void Clear(FixedArray cache);
+
+ static constexpr int kRegExpResultsCacheSize = 0x100;
+
+ private:
+ static constexpr int kStringOffset = 0;
+ static constexpr int kPatternOffset = 1;
+ static constexpr int kArrayOffset = 2;
+ static constexpr int kLastMatchOffset = 3;
+ static constexpr int kArrayEntriesPerCacheEntry = 4;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_H_
diff --git a/deps/v8/src/roots/OWNERS b/deps/v8/src/roots/OWNERS
new file mode 100644
index 0000000000..2d6e1ae7c2
--- /dev/null
+++ b/deps/v8/src/roots/OWNERS
@@ -0,0 +1,11 @@
+bmeurer@chromium.org
+delphick@chromium.org
+hpayer@chromium.org
+ishell@chromium.org
+jgruber@chromium.org
+jkummerow@chromium.org
+marja@chromium.org
+sigurds@chromium.org
+ulan@chromium.org
+
+# COMPONENT: Blink>JavaScript>GC
diff --git a/deps/v8/src/roots/roots-inl.h b/deps/v8/src/roots/roots-inl.h
index 8153f1758f..4513f7ba97 100644
--- a/deps/v8/src/roots/roots-inl.h
+++ b/deps/v8/src/roots/roots-inl.h
@@ -31,7 +31,8 @@ V8_INLINE constexpr bool operator<(RootIndex lhs, RootIndex rhs) {
return static_cast<type>(lhs) < static_cast<type>(rhs);
}
-V8_INLINE RootIndex operator++(RootIndex& index) {
+V8_INLINE RootIndex
+operator++(RootIndex& index) { // NOLINT(runtime/references)
using type = typename std::underlying_type<RootIndex>::type;
index = static_cast<RootIndex>(static_cast<type>(index) + 1);
return index;
diff --git a/deps/v8/src/roots/roots.h b/deps/v8/src/roots/roots.h
index 5684c28f4e..e6bcd94c01 100644
--- a/deps/v8/src/roots/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -117,7 +117,9 @@ class Symbol;
V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
V(Map, small_ordered_name_dictionary_map, SmallOrderedNameDictionaryMap) \
+ V(Map, source_text_module_map, SourceTextModuleMap) \
V(Map, string_table_map, StringTableMap) \
+ V(Map, synthetic_module_map, SyntheticModuleMap) \
V(Map, uncompiled_data_without_preparse_data_map, \
UncompiledDataWithoutPreparseDataMap) \
V(Map, uncompiled_data_with_preparse_data_map, \
@@ -217,7 +219,6 @@ class Symbol;
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
- V(PropertyCell, regexp_species_protector, RegExpSpeciesProtector) \
V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
V(Cell, string_length_protector, StringLengthProtector) \
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
diff --git a/deps/v8/src/runtime/OWNERS b/deps/v8/src/runtime/OWNERS
new file mode 100644
index 0000000000..450423f878
--- /dev/null
+++ b/deps/v8/src/runtime/OWNERS
@@ -0,0 +1,3 @@
+file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 0c17047795..522e93da3f 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -8,10 +8,10 @@
#include <limits>
#include "src/builtins/accessors.h"
+#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/elements.h"
@@ -150,8 +150,9 @@ inline void SetHomeObject(Isolate* isolate, JSFunction method,
// shared name.
template <typename Dictionary>
MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
- Isolate* isolate, Arguments& args, Smi index, Handle<JSObject> home_object,
- Handle<String> name_prefix, Handle<Object> key) {
+ Isolate* isolate, Arguments& args, // NOLINT(runtime/references)
+ Smi index, Handle<JSObject> home_object, Handle<String> name_prefix,
+ Handle<Object> key) {
int int_index = index.value();
// Class constructor and prototype values do not require post processing.
@@ -185,9 +186,10 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
// This is a simplified version of GetMethodWithSharedNameAndSetHomeObject()
// function above that is used when it's guaranteed that the method has
// shared name.
-Object GetMethodWithSharedNameAndSetHomeObject(Isolate* isolate,
- Arguments& args, Object index,
- JSObject home_object) {
+Object GetMethodWithSharedNameAndSetHomeObject(
+ Isolate* isolate,
+ Arguments& args, // NOLINT(runtime/references)
+ Object index, JSObject home_object) {
DisallowHeapAllocation no_gc;
int int_index = Smi::ToInt(index);
@@ -226,7 +228,8 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(
template <typename Dictionary>
bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
- Handle<JSObject> receiver, Arguments& args,
+ Handle<JSObject> receiver,
+ Arguments& args, // NOLINT(runtime/references)
bool* install_name_accessor = nullptr) {
Handle<Name> name_string = isolate->factory()->name_string();
@@ -284,7 +287,7 @@ bool AddDescriptorsByTemplate(
Isolate* isolate, Handle<Map> map,
Handle<DescriptorArray> descriptors_template,
Handle<NumberDictionary> elements_dictionary_template,
- Handle<JSObject> receiver, Arguments& args) {
+ Handle<JSObject> receiver, Arguments& args) { // NOLINT(runtime/references)
int nof_descriptors = descriptors_template->number_of_descriptors();
Handle<DescriptorArray> descriptors =
@@ -329,7 +332,8 @@ bool AddDescriptorsByTemplate(
value = GetMethodWithSharedNameAndSetHomeObject(isolate, args, value,
*receiver);
}
- details = details.CopyWithRepresentation(value.OptimalRepresentation());
+ details = details.CopyWithRepresentation(
+ value.OptimalRepresentation(isolate));
} else {
DCHECK_EQ(kAccessor, details.kind());
if (value.IsAccessorPair()) {
@@ -391,7 +395,8 @@ bool AddDescriptorsByTemplate(
Handle<NameDictionary> properties_dictionary_template,
Handle<NumberDictionary> elements_dictionary_template,
Handle<FixedArray> computed_properties, Handle<JSObject> receiver,
- bool install_name_accessor, Arguments& args) {
+ bool install_name_accessor,
+ Arguments& args) { // NOLINT(runtime/references)
int computed_properties_length = computed_properties->length();
// Shallow-copy properties template.
@@ -476,7 +481,8 @@ bool InitClassPrototype(Isolate* isolate,
Handle<ClassBoilerplate> class_boilerplate,
Handle<JSObject> prototype,
Handle<HeapObject> prototype_parent,
- Handle<JSFunction> constructor, Arguments& args) {
+ Handle<JSFunction> constructor,
+ Arguments& args) { // NOLINT(runtime/references)
Handle<Map> map(prototype->map(), isolate);
map = Map::CopyDropDescriptors(isolate, map);
map->set_is_prototype_map(true);
@@ -523,7 +529,8 @@ bool InitClassPrototype(Isolate* isolate,
bool InitClassConstructor(Isolate* isolate,
Handle<ClassBoilerplate> class_boilerplate,
Handle<HeapObject> constructor_parent,
- Handle<JSFunction> constructor, Arguments& args) {
+ Handle<JSFunction> constructor,
+ Arguments& args) { // NOLINT(runtime/references)
Handle<Map> map(constructor->map(), isolate);
map = Map::CopyDropDescriptors(isolate, map);
DCHECK(map->is_prototype_map());
@@ -572,11 +579,10 @@ bool InitClassConstructor(Isolate* isolate,
}
}
-MaybeHandle<Object> DefineClass(Isolate* isolate,
- Handle<ClassBoilerplate> class_boilerplate,
- Handle<Object> super_class,
- Handle<JSFunction> constructor,
- Arguments& args) {
+MaybeHandle<Object> DefineClass(
+ Isolate* isolate, Handle<ClassBoilerplate> class_boilerplate,
+ Handle<Object> super_class, Handle<JSFunction> constructor,
+ Arguments& args) { // NOLINT(runtime/references)
Handle<Object> prototype_parent;
Handle<HeapObject> constructor_parent;
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index b3b51ecc07..19c6f8bff5 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -4,12 +4,12 @@
#include "src/asmjs/asm-js.h"
#include "src/codegen/compiler.h"
+#include "src/common/message-template.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/objects/js-array-buffer-inl.h"
@@ -294,7 +294,8 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
return Object();
}
-static Object CompileGlobalEval(Isolate* isolate, Handle<String> source,
+static Object CompileGlobalEval(Isolate* isolate,
+ Handle<i::Object> source_object,
Handle<SharedFunctionInfo> outer_info,
LanguageMode language_mode,
int eval_scope_position, int eval_position) {
@@ -303,9 +304,15 @@ static Object CompileGlobalEval(Isolate* isolate, Handle<String> source,
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings().IsFalse(isolate) &&
- !Compiler::CodeGenerationFromStringsAllowed(isolate, native_context,
- source)) {
+ MaybeHandle<String> source;
+ bool unknown_object;
+ std::tie(source, unknown_object) = Compiler::ValidateDynamicCompilationSource(
+ isolate, native_context, source_object);
+ // If the argument is an unhandled string time, bounce to GlobalEval.
+ if (unknown_object) {
+ return native_context->global_eval_fun();
+ }
+ if (source.is_null()) {
Handle<Object> error_message =
native_context->ErrorMessageForCodeGenerationFromStrings();
Handle<Object> error;
@@ -321,9 +328,9 @@ static Object CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<JSFunction> compiled;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, compiled,
- Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
- restriction, kNoSourcePosition,
- eval_scope_position, eval_position),
+ Compiler::GetFunctionFromEval(
+ source.ToHandleChecked(), outer_info, context, language_mode,
+ restriction, kNoSourcePosition, eval_scope_position, eval_position),
ReadOnlyRoots(isolate).exception());
return *compiled;
}
@@ -336,11 +343,7 @@ RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
// If "eval" didn't refer to the original GlobalEval, it's not a
// direct call to eval.
- // (And even if it is, but the first argument isn't a string, just let
- // execution default to an indirect call to eval, which will also return
- // the first argument without doing anything).
- if (*callee != isolate->native_context()->global_eval_fun() ||
- !args[1].IsString()) {
+ if (*callee != isolate->native_context()->global_eval_fun()) {
return *callee;
}
@@ -350,7 +353,7 @@ RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
DCHECK(args[4].IsSmi());
Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
isolate);
- return CompileGlobalEval(isolate, args.at<String>(1), outer_info,
+ return CompileGlobalEval(isolate, args.at<Object>(1), outer_info,
language_mode, args.smi_at(4), args.smi_at(5));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index afe4a921e6..94320740af 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -283,8 +283,9 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(4, *is_revoked_str);
result->set(5, isolate->heap()->ToBoolean(js_proxy->IsRevoked()));
return factory->NewJSArrayWithElements(result);
- } else if (object->IsJSValue()) {
- Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+ } else if (object->IsJSPrimitiveWrapper()) {
+ Handle<JSPrimitiveWrapper> js_value =
+ Handle<JSPrimitiveWrapper>::cast(object);
Handle<FixedArray> result = factory->NewFixedArray(2);
Handle<String> primitive_value =
@@ -750,6 +751,23 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionEntered) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionSuspended) {
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->PopPromise();
+ isolate->OnAsyncFunctionStateChanged(promise, debug::kAsyncFunctionSuspended);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionResumed) {
+ DCHECK_EQ(1, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->PushPromise(promise);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionFinished) {
DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
@@ -763,14 +781,6 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionFinished) {
return *promise;
}
-RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionSuspended) {
- DCHECK_EQ(1, args.length());
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
- isolate->OnAsyncFunctionStateChanged(promise, debug::kAsyncFunctionSuspended);
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 069ea88e12..7225e43012 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -65,6 +65,7 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
generator->set_context(isolate->context());
generator->set_receiver(*receiver);
generator->set_parameters_and_registers(*parameters_and_registers);
+ generator->set_resume_mode(JSGeneratorObject::ResumeMode::kNext);
generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
if (generator->IsJSAsyncGeneratorObject()) {
Handle<JSAsyncGeneratorObject>::cast(generator)->set_is_awaiting(0);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 21b1b1ef7c..4b8a0e38a1 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -8,11 +8,11 @@
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/prettyprinter.h"
#include "src/builtins/builtins.h"
+#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/numbers/conversions.h"
@@ -94,6 +94,22 @@ RUNTIME_FUNCTION(Runtime_ThrowSymbolAsyncIteratorInvalid) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, call(message_id, arg0, arg1, arg2));
RUNTIME_FUNCTION(Runtime_ThrowRangeError) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ DCHECK_LE(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
+
+ // If the result of a BigInt computation is truncated to 64 bit, Turbofan
+ // can sometimes truncate intermediate results already, which can prevent
+ // those from exceeding the maximum length, effectively preventing a
+ // RangeError from being thrown. As this is a performance optimization, this
+ // behavior is accepted. To prevent the correctness fuzzer from detecting
+ // this difference, we crash the program.
+ if (MessageTemplateFromInt(message_id_smi) ==
+ MessageTemplate::kBigIntTooBig) {
+ FATAL("Aborting on invalid BigInt length");
+ }
+ }
+
THROW_ERROR(isolate, args, NewRangeError);
}
@@ -287,13 +303,25 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
+ DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
+ bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ bool allow_large_object_allocation =
+ AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
CHECK_GT(size, 0);
CHECK(FLAG_young_generation_large_objects ||
size <= kMaxRegularHeapObjectSize);
- return *isolate->factory()->NewFillerObject(size, false,
+ if (!allow_large_object_allocation) {
+ CHECK(size <= kMaxRegularHeapObjectSize);
+ }
+
+ // TODO(v8:9472): Until double-aligned allocation is fixed for new-space
+ // allocations, don't request it.
+ double_align = false;
+
+ return *isolate->factory()->NewFillerObject(size, double_align,
AllocationType::kYoung);
}
@@ -302,9 +330,14 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
+ bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ bool allow_large_object_allocation =
+ AllowLargeObjectAllocationFlag::decode(flags);
CHECK(IsAligned(size, kTaggedSize));
CHECK_GT(size, 0);
- bool double_align = AllocateDoubleAlignFlag::decode(flags);
+ if (!allow_large_object_allocation) {
+ CHECK(size <= kMaxRegularHeapObjectSize);
+ }
return *isolate->factory()->NewFillerObject(size, double_align,
AllocationType::kOld);
}
@@ -695,7 +728,8 @@ RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared_info, 1);
CONVERT_SMI_ARG_CHECKED(slot_id, 2);
- Handle<Context> native_context(isolate->context().native_context(), isolate);
+ Handle<NativeContext> native_context(isolate->context().native_context(),
+ isolate);
return *TemplateObjectDescription::GetTemplateObject(
isolate, native_context, description, shared_info, slot_id);
}
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 48b4d2b6e7..1632554130 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -26,7 +26,9 @@ namespace internal {
namespace {
void AdvanceToOffsetForTracing(
- interpreter::BytecodeArrayIterator& bytecode_iterator, int offset) {
+ interpreter::BytecodeArrayIterator&
+ bytecode_iterator, // NOLINT(runtime/references)
+ int offset) {
while (bytecode_iterator.current_offset() +
bytecode_iterator.current_bytecode_size() <=
offset) {
@@ -39,7 +41,8 @@ void AdvanceToOffsetForTracing(
}
void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
- interpreter::BytecodeArrayIterator& bytecode_iterator,
+ interpreter::BytecodeArrayIterator&
+ bytecode_iterator, // NOLINT(runtime/references)
Handle<Object> accumulator) {
static const char kAccumulator[] = "accumulator";
static const int kRegFieldWidth = static_cast<int>(sizeof(kAccumulator) - 1);
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 67aa097484..0c7a28c279 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -33,10 +33,6 @@ void PreInitializeLiteralSite(Handle<FeedbackVector> vector,
vector->Set(slot, Smi::FromInt(1));
}
-Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
- Handle<Object> description,
- AllocationType allocation);
-
enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
template <class ContextObject>
@@ -86,14 +82,14 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
}
- if (object->map().is_deprecated()) {
- JSObject::MigrateInstance(object);
+ if (object->map(isolate).is_deprecated()) {
+ JSObject::MigrateInstance(isolate, object);
}
Handle<JSObject> copy;
if (copying) {
// JSFunction objects are not allowed to be in normal boilerplates at all.
- DCHECK(!object->IsJSFunction());
+ DCHECK(!object->IsJSFunction(isolate));
Handle<AllocationSite> site_to_pass;
if (site_context()->ShouldCreateMemento(object)) {
site_to_pass = site_context()->current();
@@ -111,23 +107,23 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
HandleScope scope(isolate);
// Deep copy own properties. Arrays only have 1 property "length".
- if (!copy->IsJSArray()) {
- if (copy->HasFastProperties()) {
- Handle<DescriptorArray> descriptors(copy->map().instance_descriptors(),
- isolate);
- int limit = copy->map().NumberOfOwnDescriptors();
+ if (!copy->IsJSArray(isolate)) {
+ if (copy->HasFastProperties(isolate)) {
+ Handle<DescriptorArray> descriptors(
+ copy->map(isolate).instance_descriptors(isolate), isolate);
+ int limit = copy->map(isolate).NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
DCHECK_EQ(kField, descriptors->GetDetails(i).location());
DCHECK_EQ(kData, descriptors->GetDetails(i).kind());
- FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
- if (copy->IsUnboxedDoubleField(index)) continue;
- Object raw = copy->RawFastPropertyAt(index);
- if (raw.IsJSObject()) {
+ FieldIndex index = FieldIndex::ForDescriptor(copy->map(isolate), i);
+ if (copy->IsUnboxedDoubleField(isolate, index)) continue;
+ Object raw = copy->RawFastPropertyAt(isolate, index);
+ if (raw.IsJSObject(isolate)) {
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
if (copying) copy->FastPropertyAtPut(index, *value);
- } else if (copying && raw.IsMutableHeapNumber()) {
+ } else if (copying && raw.IsMutableHeapNumber(isolate)) {
DCHECK(descriptors->GetDetails(i).representation().IsDouble());
uint64_t double_value = MutableHeapNumber::cast(raw).value_as_bits();
auto value =
@@ -136,11 +132,12 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
}
} else {
- Handle<NameDictionary> dict(copy->property_dictionary(), isolate);
- for (int i = 0; i < dict->Capacity(); i++) {
- Object raw = dict->ValueAt(i);
- if (!raw.IsJSObject()) continue;
- DCHECK(dict->KeyAt(i).IsName());
+ Handle<NameDictionary> dict(copy->property_dictionary(isolate), isolate);
+ int capacity = dict->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object raw = dict->ValueAt(isolate, i);
+ if (!raw.IsJSObject(isolate)) continue;
+ DCHECK(dict->KeyAt(isolate, i).IsName());
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -149,19 +146,21 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
// Assume non-arrays don't end up having elements.
- if (copy->elements().length() == 0) return copy;
+ if (copy->elements(isolate).length() == 0) return copy;
}
// Deep copy own elements.
- switch (copy->GetElementsKind()) {
+ switch (copy->GetElementsKind(isolate)) {
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case HOLEY_FROZEN_ELEMENTS:
case HOLEY_SEALED_ELEMENTS:
case HOLEY_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(copy->elements()), isolate);
- if (elements->map() == ReadOnlyRoots(isolate).fixed_cow_array_map()) {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements(isolate)),
+ isolate);
+ if (elements->map(isolate) ==
+ ReadOnlyRoots(isolate).fixed_cow_array_map()) {
#ifdef DEBUG
for (int i = 0; i < elements->length(); i++) {
DCHECK(!elements->get(i).IsJSObject());
@@ -169,8 +168,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
#endif
} else {
for (int i = 0; i < elements->length(); i++) {
- Object raw = elements->get(i);
- if (!raw.IsJSObject()) continue;
+ Object raw = elements->get(isolate, i);
+ if (!raw.IsJSObject(isolate)) continue;
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -180,12 +179,12 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
break;
}
case DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> element_dictionary(copy->element_dictionary(),
- isolate);
+ Handle<NumberDictionary> element_dictionary(
+ copy->element_dictionary(isolate), isolate);
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Object raw = element_dictionary->ValueAt(i);
- if (!raw.IsJSObject()) continue;
+ Object raw = element_dictionary->ValueAt(isolate, i);
+ if (!raw.IsJSObject(isolate)) continue;
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -324,155 +323,182 @@ MaybeHandle<JSObject> DeepCopy(Handle<JSObject> object,
return copy;
}
+Handle<JSObject> CreateObjectLiteral(
+ Isolate* isolate,
+ Handle<ObjectBoilerplateDescription> object_boilerplate_description,
+ int flags, AllocationType allocation);
+
+Handle<JSObject> CreateArrayLiteral(
+ Isolate* isolate,
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description,
+ AllocationType allocation);
+
struct ObjectLiteralHelper {
- static Handle<JSObject> Create(Isolate* isolate,
- Handle<HeapObject> description, int flags,
- AllocationType allocation) {
- Handle<NativeContext> native_context = isolate->native_context();
+ static inline Handle<JSObject> Create(Isolate* isolate,
+ Handle<HeapObject> description,
+ int flags, AllocationType allocation) {
Handle<ObjectBoilerplateDescription> object_boilerplate_description =
Handle<ObjectBoilerplateDescription>::cast(description);
- bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
-
- // In case we have function literals, we want the object to be in
- // slow properties mode for now. We don't go in the map cache because
- // maps with constant functions can't be shared if the functions are
- // not the same (which is the common case).
- int number_of_properties =
- object_boilerplate_description->backing_store_size();
-
- // Ignoring number_of_properties for force dictionary map with
- // __proto__:null.
- Handle<Map> map =
- has_null_prototype
- ? handle(native_context->slow_object_with_null_prototype_map(),
- isolate)
- : isolate->factory()->ObjectLiteralMapFromCache(
- native_context, number_of_properties);
-
- Handle<JSObject> boilerplate =
- map->is_dictionary_map()
- ? isolate->factory()->NewSlowJSObjectFromMap(
- map, number_of_properties, allocation)
- : isolate->factory()->NewJSObjectFromMap(map, allocation);
-
- // Normalize the elements of the boilerplate to save space if needed.
- if (!use_fast_elements) JSObject::NormalizeElements(boilerplate);
-
- // Add the constant properties to the boilerplate.
- int length = object_boilerplate_description->size();
- // TODO(verwaest): Support tracking representations in the boilerplate.
- for (int index = 0; index < length; index++) {
- Handle<Object> key(object_boilerplate_description->name(index), isolate);
- Handle<Object> value(object_boilerplate_description->value(index),
- isolate);
-
- if (value->IsObjectBoilerplateDescription() ||
- value->IsArrayBoilerplateDescription()) {
- value = InnerCreateBoilerplate(isolate, value, allocation);
- }
- uint32_t element_index = 0;
- if (key->ToArrayIndex(&element_index)) {
- // Array index (uint32).
- if (value->IsUninitialized(isolate)) {
- value = handle(Smi::kZero, isolate);
- }
- JSObject::SetOwnElementIgnoreAttributes(boilerplate, element_index,
- value, NONE)
- .Check();
- } else {
- Handle<String> name = Handle<String>::cast(key);
- DCHECK(!name->AsArrayIndex(&element_index));
- JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name, value, NONE)
- .Check();
- }
- }
-
- if (map->is_dictionary_map() && !has_null_prototype) {
- // TODO(cbruni): avoid making the boilerplate fast again, the clone stub
- // supports dict-mode objects directly.
- JSObject::MigrateSlowToFast(boilerplate,
- boilerplate->map().UnusedPropertyFields(),
- "FastLiteral");
- }
- return boilerplate;
+ return CreateObjectLiteral(isolate, object_boilerplate_description, flags,
+ allocation);
}
};
struct ArrayLiteralHelper {
- static Handle<JSObject> Create(Isolate* isolate,
- Handle<HeapObject> description, int flags,
- AllocationType allocation) {
+ static inline Handle<JSObject> Create(Isolate* isolate,
+ Handle<HeapObject> description,
+ int flags_not_used,
+ AllocationType allocation) {
Handle<ArrayBoilerplateDescription> array_boilerplate_description =
Handle<ArrayBoilerplateDescription>::cast(description);
+ return CreateArrayLiteral(isolate, array_boilerplate_description,
+ allocation);
+ }
+};
- ElementsKind constant_elements_kind =
- array_boilerplate_description->elements_kind();
-
- Handle<FixedArrayBase> constant_elements_values(
- array_boilerplate_description->constant_elements(), isolate);
+Handle<JSObject> CreateObjectLiteral(
+ Isolate* isolate,
+ Handle<ObjectBoilerplateDescription> object_boilerplate_description,
+ int flags, AllocationType allocation) {
+ Handle<NativeContext> native_context = isolate->native_context();
+ bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
+ bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
+
+ // In case we have function literals, we want the object to be in
+ // slow properties mode for now. We don't go in the map cache because
+ // maps with constant functions can't be shared if the functions are
+ // not the same (which is the common case).
+ int number_of_properties =
+ object_boilerplate_description->backing_store_size();
+
+ // Ignoring number_of_properties for force dictionary map with
+ // __proto__:null.
+ Handle<Map> map =
+ has_null_prototype
+ ? handle(native_context->slow_object_with_null_prototype_map(),
+ isolate)
+ : isolate->factory()->ObjectLiteralMapFromCache(native_context,
+ number_of_properties);
+
+ Handle<JSObject> boilerplate =
+ isolate->factory()->NewFastOrSlowJSObjectFromMap(
+ map, number_of_properties, allocation);
+
+ // Normalize the elements of the boilerplate to save space if needed.
+ if (!use_fast_elements) JSObject::NormalizeElements(boilerplate);
+
+ // Add the constant properties to the boilerplate.
+ int length = object_boilerplate_description->size();
+ // TODO(verwaest): Support tracking representations in the boilerplate.
+ for (int index = 0; index < length; index++) {
+ Handle<Object> key(object_boilerplate_description->name(isolate, index),
+ isolate);
+ Handle<Object> value(object_boilerplate_description->value(isolate, index),
+ isolate);
+
+ if (value->IsHeapObject()) {
+ if (HeapObject::cast(*value).IsArrayBoilerplateDescription(isolate)) {
+ Handle<ArrayBoilerplateDescription> boilerplate =
+ Handle<ArrayBoilerplateDescription>::cast(value);
+ value = CreateArrayLiteral(isolate, boilerplate, allocation);
+
+ } else if (HeapObject::cast(*value).IsObjectBoilerplateDescription(
+ isolate)) {
+ Handle<ObjectBoilerplateDescription> boilerplate =
+ Handle<ObjectBoilerplateDescription>::cast(value);
+ value = CreateObjectLiteral(isolate, boilerplate, boilerplate->flags(),
+ allocation);
+ }
+ }
- // Create the JSArray.
- Handle<FixedArrayBase> copied_elements_values;
- if (IsDoubleElementsKind(constant_elements_kind)) {
- copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
- Handle<FixedDoubleArray>::cast(constant_elements_values));
+ uint32_t element_index = 0;
+ if (key->ToArrayIndex(&element_index)) {
+ // Array index (uint32).
+ if (value->IsUninitialized(isolate)) {
+ value = handle(Smi::kZero, isolate);
+ }
+ JSObject::SetOwnElementIgnoreAttributes(boilerplate, element_index, value,
+ NONE)
+ .Check();
} else {
- DCHECK(IsSmiOrObjectElementsKind(constant_elements_kind));
- const bool is_cow = (constant_elements_values->map() ==
- ReadOnlyRoots(isolate).fixed_cow_array_map());
- if (is_cow) {
- copied_elements_values = constant_elements_values;
-#if DEBUG
+ Handle<String> name = Handle<String>::cast(key);
+ DCHECK(!name->AsArrayIndex(&element_index));
+ JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name, value, NONE)
+ .Check();
+ }
+ }
+
+ if (map->is_dictionary_map() && !has_null_prototype) {
+ // TODO(cbruni): avoid making the boilerplate fast again, the clone stub
+ // supports dict-mode objects directly.
+ JSObject::MigrateSlowToFast(
+ boilerplate, boilerplate->map().UnusedPropertyFields(), "FastLiteral");
+ }
+ return boilerplate;
+}
+
+Handle<JSObject> CreateArrayLiteral(
+ Isolate* isolate,
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description,
+ AllocationType allocation) {
+ ElementsKind constant_elements_kind =
+ array_boilerplate_description->elements_kind();
+
+ Handle<FixedArrayBase> constant_elements_values(
+ array_boilerplate_description->constant_elements(isolate), isolate);
+
+ // Create the JSArray.
+ Handle<FixedArrayBase> copied_elements_values;
+ if (IsDoubleElementsKind(constant_elements_kind)) {
+ copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
+ Handle<FixedDoubleArray>::cast(constant_elements_values));
+ } else {
+ DCHECK(IsSmiOrObjectElementsKind(constant_elements_kind));
+ const bool is_cow = (constant_elements_values->map(isolate) ==
+ ReadOnlyRoots(isolate).fixed_cow_array_map());
+ if (is_cow) {
+ copied_elements_values = constant_elements_values;
+ if (DEBUG_BOOL) {
Handle<FixedArray> fixed_array_values =
Handle<FixedArray>::cast(copied_elements_values);
for (int i = 0; i < fixed_array_values->length(); i++) {
DCHECK(!fixed_array_values->get(i).IsFixedArray());
}
-#endif
- } else {
- Handle<FixedArray> fixed_array_values =
- Handle<FixedArray>::cast(constant_elements_values);
- Handle<FixedArray> fixed_array_values_copy =
- isolate->factory()->CopyFixedArray(fixed_array_values);
- copied_elements_values = fixed_array_values_copy;
- FOR_WITH_HANDLE_SCOPE(
- isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
- Handle<Object> value(fixed_array_values->get(i), isolate);
-
- if (value->IsArrayBoilerplateDescription() ||
- value->IsObjectBoilerplateDescription()) {
- Handle<Object> result =
- InnerCreateBoilerplate(isolate, value, allocation);
- fixed_array_values_copy->set(i, *result);
- }
- });
+ }
+ } else {
+ Handle<FixedArray> fixed_array_values =
+ Handle<FixedArray>::cast(constant_elements_values);
+ Handle<FixedArray> fixed_array_values_copy =
+ isolate->factory()->CopyFixedArray(fixed_array_values);
+ copied_elements_values = fixed_array_values_copy;
+ for (int i = 0; i < fixed_array_values->length(); i++) {
+ Object value = fixed_array_values_copy->get(isolate, i);
+ HeapObject value_heap_object;
+ if (value.GetHeapObject(isolate, &value_heap_object)) {
+ if (value_heap_object.IsArrayBoilerplateDescription(isolate)) {
+ HandleScope sub_scope(isolate);
+ Handle<ArrayBoilerplateDescription> boilerplate(
+ ArrayBoilerplateDescription::cast(value_heap_object), isolate);
+ Handle<JSObject> result =
+ CreateArrayLiteral(isolate, boilerplate, allocation);
+ fixed_array_values_copy->set(i, *result);
+
+ } else if (value_heap_object.IsObjectBoilerplateDescription(
+ isolate)) {
+ HandleScope sub_scope(isolate);
+ Handle<ObjectBoilerplateDescription> boilerplate(
+ ObjectBoilerplateDescription::cast(value_heap_object), isolate);
+ Handle<JSObject> result = CreateObjectLiteral(
+ isolate, boilerplate, boilerplate->flags(), allocation);
+ fixed_array_values_copy->set(i, *result);
+ }
+ }
}
}
-
- return isolate->factory()->NewJSArrayWithElements(
- copied_elements_values, constant_elements_kind,
- copied_elements_values->length(), allocation);
- }
-};
-
-Handle<Object> InnerCreateBoilerplate(Isolate* isolate,
- Handle<Object> description,
- AllocationType allocation) {
- if (description->IsObjectBoilerplateDescription()) {
- Handle<ObjectBoilerplateDescription> object_boilerplate_description =
- Handle<ObjectBoilerplateDescription>::cast(description);
- return ObjectLiteralHelper::Create(isolate, object_boilerplate_description,
- object_boilerplate_description->flags(),
- allocation);
- } else {
- DCHECK(description->IsArrayBoilerplateDescription());
- Handle<ArrayBoilerplateDescription> array_boilerplate_description =
- Handle<ArrayBoilerplateDescription>::cast(description);
- return ArrayLiteralHelper::Create(
- isolate, array_boilerplate_description,
- array_boilerplate_description->elements_kind(), allocation);
}
+ return isolate->factory()->NewJSArrayWithElements(
+ copied_elements_values, constant_elements_kind,
+ copied_elements_values->length(), allocation);
}
inline DeepCopyHints DecodeCopyHints(int flags) {
@@ -556,6 +582,7 @@ MaybeHandle<JSObject> CreateLiteral(Isolate* isolate,
usage_context.ExitScope(site, boilerplate);
return copy;
}
+
} // namespace
RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 41f21865a6..eb21e0a9a4 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -5,8 +5,8 @@
#include "src/execution/arguments-inl.h"
#include "src/logging/counters.h"
#include "src/objects/js-promise.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/source-text-module.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -33,14 +33,14 @@ RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(module_request, 0);
- Handle<Module> module(isolate->context().module(), isolate);
- return *Module::GetModuleNamespace(isolate, module, module_request);
+ Handle<SourceTextModule> module(isolate->context().module(), isolate);
+ return *SourceTextModule::GetModuleNamespace(isolate, module, module_request);
}
RUNTIME_FUNCTION(Runtime_GetImportMetaObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- Handle<Module> module(isolate->context().module(), isolate);
+ Handle<SourceTextModule> module(isolate->context().module(), isolate);
return *isolate->RunHostInitializeImportMetaObjectCallback(module);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 8b94d83f31..25bd07b535 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/common/message-template.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
@@ -217,6 +217,8 @@ RUNTIME_FUNCTION(Runtime_ObjectGetOwnPropertyNames) {
Object::ToObject(isolate, object));
// Collect the own keys for the {receiver}.
+ // TODO(v8:9401): We should extend the fast path of KeyAccumulator::GetKeys to
+ // also use fast path even when filter = SKIP_SYMBOLS.
Handle<FixedArray> keys;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, keys,
@@ -304,7 +306,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
}
Map map = js_obj->map();
- if (!map.has_hidden_prototype() &&
+ if (!map.IsJSGlobalProxyMap() &&
(key_is_array_index ? !map.has_indexed_interceptor()
: !map.has_named_interceptor())) {
return ReadOnlyRoots(isolate).false_value();
@@ -440,8 +442,8 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
// Conservative upper limit to prevent fuzz tests from going OOM.
if (properties > 100000) return isolate->ThrowIllegalOperation();
if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
- JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties,
- "OptimizeForAdding");
+ JSObject::NormalizeProperties(isolate, object, KEEP_INOBJECT_PROPERTIES,
+ properties, "OptimizeForAdding");
}
return *object;
}
@@ -502,6 +504,76 @@ RUNTIME_FUNCTION(Runtime_ObjectEntriesSkipFastPath) {
return *isolate->factory()->NewJSArrayWithElements(entries);
}
+RUNTIME_FUNCTION(Runtime_ObjectIsExtensible) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+
+ Maybe<bool> result =
+ object->IsJSReceiver()
+ ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
+ : Just(false);
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsThrow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+
+ MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
+ kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
+ return *object;
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverPreventExtensionsDontThrow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+
+ Maybe<bool> result = JSReceiver::PreventExtensions(
+ Handle<JSReceiver>::cast(object), kDontThrow);
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverGetPrototypeOf) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSReceiver::GetPrototype(isolate, receiver));
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverSetPrototypeOfThrow) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, proto, 1);
+
+ MAYBE_RETURN(JSReceiver::SetPrototype(object, proto, true, kThrowOnError),
+ ReadOnlyRoots(isolate).exception());
+
+ return *object;
+}
+
+RUNTIME_FUNCTION(Runtime_JSReceiverSetPrototypeOfDontThrow) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, proto, 1);
+
+ Maybe<bool> result =
+ JSReceiver::SetPrototype(object, proto, true, kDontThrow);
+ MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -786,7 +858,7 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
// code where we can't handle lazy deopts for lack of a suitable bailout
// ID. So we just try migration and signal failure if necessary,
// which will also trigger a deopt.
- if (!JSObject::TryMigrateInstance(js_object)) return Smi::kZero;
+ if (!JSObject::TryMigrateInstance(isolate, js_object)) return Smi::kZero;
return *object;
}
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index dd07234a4a..2543b3f5d4 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -86,5 +86,17 @@ RUNTIME_FUNCTION(Runtime_CheckProxyHasTrapResult) {
return isolate->heap()->ToBoolean(result.FromJust());
}
+RUNTIME_FUNCTION(Runtime_CheckProxyDeleteTrapResult) {
+ HandleScope scope(isolate);
+
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 1);
+
+ Maybe<bool> result = JSProxy::CheckDeleteTrap(isolate, name, target);
+ if (!result.IsJust()) return ReadOnlyRoots(isolate).exception();
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 85c9ebcb1b..76056a7823 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -4,15 +4,16 @@
#include <functional>
+#include "src/common/message-template.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/logging/counters.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/regexp/jsregexp-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/regexp/regexp-utils.h"
+#include "src/regexp/regexp.h"
#include "src/runtime/runtime-utils.h"
#include "src/strings/string-builder-inl.h"
#include "src/strings/string-search.h"
@@ -594,8 +595,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
}
int32_t match_indices[] = {indices->back(), indices->back() + pattern_len};
- RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, 0,
- match_indices);
+ RegExp::SetLastMatchInfo(isolate, last_match_info, subject, 0, match_indices);
TruncateRegexpIndicesList(isolate);
@@ -614,7 +614,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
JSRegExp::Type typeTag = regexp->TypeTag();
if (typeTag == JSRegExp::IRREGEXP) {
// Ensure the RegExp is compiled so we can access the capture-name map.
- if (RegExpImpl::IrregexpPrepare(isolate, regexp, subject) == -1) {
+ if (RegExp::IrregexpPrepare(isolate, regexp, subject) == -1) {
DCHECK(isolate->has_pending_exception());
return ReadOnlyRoots(isolate).exception();
}
@@ -638,7 +638,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
+ RegExpGlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
int32_t* current_match = global_cache.FetchNext();
@@ -679,8 +679,8 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithString(
builder.AddSubjectSlice(prev, subject_length);
}
- RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
- global_cache.LastSuccessfulMatch());
+ RegExp::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
RETURN_RESULT_OR_FAILURE(isolate, builder.ToString());
}
@@ -703,7 +703,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
+ RegExpGlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
int32_t* current_match = global_cache.FetchNext();
@@ -749,8 +749,8 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalRegExpWithEmptyString(
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
- RegExpImpl::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
- global_cache.LastSuccessfulMatch());
+ RegExp::SetLastMatchInfo(isolate, last_match_info, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
if (prev < subject_length) {
// Add substring subject[prev;length] to answer string.
@@ -877,8 +877,8 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
CHECK_LE(0, index);
CHECK_GE(subject->length(), index);
isolate->counters()->regexp_entry_runtime()->Increment();
- RETURN_RESULT_OR_FAILURE(isolate, RegExpImpl::Exec(isolate, regexp, subject,
- index, last_match_info));
+ RETURN_RESULT_OR_FAILURE(
+ isolate, RegExp::Exec(isolate, regexp, subject, index, last_match_info));
}
namespace {
@@ -1108,14 +1108,14 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
isolate->factory()->CopyFixedArrayWithMap(
cached_fixed_array, isolate->factory()->fixed_array_map());
JSArray::SetContent(result_array, copied_fixed_array);
- RegExpImpl::SetLastMatchInfo(isolate, last_match_array, subject,
- capture_count, last_match);
+ RegExp::SetLastMatchInfo(isolate, last_match_array, subject,
+ capture_count, last_match);
DeleteArray(last_match);
return *result_array;
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
+ RegExpGlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return ReadOnlyRoots(isolate).exception();
// Ensured in Runtime_RegExpExecMultiple.
@@ -1216,9 +1216,8 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
subject_length);
}
- RegExpImpl::SetLastMatchInfo(isolate, last_match_array, subject,
- capture_count,
- global_cache.LastSuccessfulMatch());
+ RegExp::SetLastMatchInfo(isolate, last_match_array, subject, capture_count,
+ global_cache.LastSuccessfulMatch());
if (subject_length > kMinLengthToCache) {
// Store the last successful match into the array for caching.
@@ -1282,10 +1281,10 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
// A lastIndex exceeding the string length always returns null (signalling
// failure) in RegExpBuiltinExec, thus we can skip the call.
if (last_index <= static_cast<uint32_t>(string->length())) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, match_indices_obj,
- RegExpImpl::Exec(isolate, regexp, string,
- last_index, last_match_info),
- String);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, match_indices_obj,
+ RegExp::Exec(isolate, regexp, string, last_index, last_match_info),
+ String);
}
if (match_indices_obj->IsNull(isolate)) {
@@ -1414,8 +1413,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
if (last_index <= static_cast<uint32_t>(subject->length())) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, match_indices_obj,
- RegExpImpl::Exec(isolate, regexp, subject, last_index,
- last_match_info));
+ RegExp::Exec(isolate, regexp, subject, last_index, last_match_info));
}
if (match_indices_obj->IsNull(isolate)) {
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 25d10e3395..f67b6922bf 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -6,11 +6,11 @@
#include "src/ast/scopes.h"
#include "src/builtins/accessors.h"
+#include "src/common/message-template.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
@@ -240,7 +240,7 @@ Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
Handle<Object> holder =
Context::Lookup(context, name, DONT_FOLLOW_CHAINS, &index, &attributes,
&init_flag, &mode);
- DCHECK(holder.is_null() || !holder->IsModule());
+ DCHECK(holder.is_null() || !holder->IsSourceTextModule());
DCHECK(!isolate->has_pending_exception());
Handle<JSObject> object;
@@ -715,7 +715,7 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
RUNTIME_FUNCTION(Runtime_PushModuleContext) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Module, module, 0);
+ CONVERT_ARG_HANDLE_CHECKED(SourceTextModule, module, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
Handle<NativeContext> outer(NativeContext::cast(isolate->context()), isolate);
@@ -773,7 +773,7 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
// If the slot was found in a context or in module imports and exports it
// should be DONT_DELETE.
- if (holder->IsContext() || holder->IsModule()) {
+ if (holder->IsContext() || holder->IsSourceTextModule()) {
return ReadOnlyRoots(isolate).false_value();
}
@@ -801,10 +801,11 @@ MaybeHandle<Object> LoadLookupSlot(Isolate* isolate, Handle<String> name,
&attributes, &flag, &mode);
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
- if (!holder.is_null() && holder->IsModule()) {
+ if (!holder.is_null() && holder->IsSourceTextModule()) {
Handle<Object> receiver = isolate->factory()->undefined_value();
if (receiver_return) *receiver_return = receiver;
- return Module::LoadVariable(isolate, Handle<Module>::cast(holder), index);
+ return SourceTextModule::LoadVariable(
+ isolate, Handle<SourceTextModule>::cast(holder), index);
}
if (index != Context::kNotFound) {
DCHECK(holder->IsContext());
@@ -903,9 +904,10 @@ MaybeHandle<Object> StoreLookupSlot(
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
- } else if (holder->IsModule()) {
+ } else if (holder->IsSourceTextModule()) {
if ((attributes & READ_ONLY) == 0) {
- Module::StoreVariable(Handle<Module>::cast(holder), index, value);
+ SourceTextModule::StoreVariable(Handle<SourceTextModule>::cast(holder),
+ index, value);
} else {
THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kConstAssign, name), Object);
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 2e2918e47d..2ddd9d13f7 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -10,7 +10,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
-#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime-utils.h"
#include "src/strings/string-builder-inl.h"
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 85a50fca61..f0caaaa14c 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -11,6 +11,7 @@
#include "src/base/platform/mutex.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
+#include "src/codegen/pending-optimization-table.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/arguments-inl.h"
@@ -218,28 +219,6 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
isolate->concurrent_recompilation_enabled());
}
-namespace {
-
-void RemoveBytecodeFromPendingOptimizeTable(v8::internal::Isolate* isolate,
- Handle<JSFunction> function) {
- // TODO(mythria): Remove the check for undefined, once we fix all tests to
- // add PrepareForOptimization when using OptimizeFunctionOnNextCall.
- if (isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
- return;
- }
-
- Handle<ObjectHashTable> table =
- handle(ObjectHashTable::cast(
- isolate->heap()->pending_optimize_for_test_bytecode()),
- isolate);
- bool was_present;
- table = table->Remove(isolate, table, handle(function->shared(), isolate),
- &was_present);
- isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
-}
-
-} // namespace
-
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
@@ -271,9 +250,9 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
return ReadOnlyRoots(isolate).undefined_value();
}
- if (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
- BailoutReason::kNeverOptimize) {
+ if (!FLAG_opt || (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize)) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -281,20 +260,15 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
return ReadOnlyRoots(isolate).undefined_value();
}
- // Check we called PrepareFunctionForOptimization and hold the bytecode
- // array to prevent it from getting flushed.
- // TODO(mythria): Enable this check once we add PrepareForOptimization in all
- // tests before calling OptimizeFunctionOnNextCall.
- // CHECK(!ObjectHashTable::cast(
- // isolate->heap()->pending_optimize_for_test_bytecode())
- // ->Lookup(handle(function->shared(), isolate))
- // ->IsTheHole());
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::MarkedForOptimization(isolate, function);
+ }
if (function->HasOptimizedCode()) {
DCHECK(function->IsOptimized() || function->ChecksOptimizationMarker());
- // If function is already optimized, remove the bytecode array from the
- // pending optimize for test table and return.
- RemoveBytecodeFromPendingOptimizeTable(isolate, function);
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::FunctionWasOptimized(isolate, function);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -359,8 +333,10 @@ bool EnsureFeedbackVector(Handle<JSFunction> function) {
RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
+ if (!args[0].IsJSFunction()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
EnsureFeedbackVector(function);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -368,6 +344,9 @@ RUNTIME_FUNCTION(Runtime_EnsureFeedbackVectorForFunction) {
RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
+ if (!args[0].IsJSFunction()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (!EnsureFeedbackVector(function)) {
@@ -389,16 +368,9 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
- Handle<ObjectHashTable> table =
- isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
- ? ObjectHashTable::New(isolate, 1)
- : handle(ObjectHashTable::cast(
- isolate->heap()->pending_optimize_for_test_bytecode()),
- isolate);
- table = ObjectHashTable::Put(
- table, handle(function->shared(), isolate),
- handle(function->shared().GetBytecodeArray(), isolate));
- isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::PreparedForOptimization(isolate, function);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -418,26 +390,23 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
if (!it.done()) function = handle(it.frame()->function(), isolate);
if (function.is_null()) return ReadOnlyRoots(isolate).undefined_value();
- if (function->shared().optimization_disabled() &&
- function->shared().disable_optimization_reason() ==
- BailoutReason::kNeverOptimize) {
+ if (!FLAG_opt || (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
+ BailoutReason::kNeverOptimize)) {
return ReadOnlyRoots(isolate).undefined_value();
}
- // Check we called PrepareFunctionForOptimization and hold the bytecode
- // array to prevent it from getting flushed.
- // TODO(mythria): Enable this check once we add PrepareForOptimization in all
- // tests before calling OptimizeOsr.
- // CHECK(!ObjectHashTable::cast(
- // isolate->heap()->pending_optimize_for_test_bytecode())
- // ->Lookup(handle(function->shared(), isolate))
- // ->IsTheHole());
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::MarkedForOptimization(isolate, function);
+ }
if (function->HasOptimizedCode()) {
DCHECK(function->IsOptimized() || function->ChecksOptimizationMarker());
// If function is already optimized, remove the bytecode array from the
// pending optimize for test table and return.
- RemoveBytecodeFromPendingOptimizeTable(isolate, function);
+ if (FLAG_testing_d8_test_runner) {
+ PendingOptimizationTable::FunctionWasOptimized(isolate, function);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -593,14 +562,11 @@ RUNTIME_FUNCTION(Runtime_GetUndetectable) {
}
static void call_as_function(const v8::FunctionCallbackInfo<v8::Value>& args) {
- double v1 = args[0]
- ->NumberValue(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToChecked();
- double v2 = args[1]
- ->NumberValue(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToChecked();
- args.GetReturnValue().Set(
- v8::Number::New(v8::Isolate::GetCurrent(), v1 - v2));
+ double v1 =
+ args[0]->NumberValue(args.GetIsolate()->GetCurrentContext()).ToChecked();
+ double v2 =
+ args[1]->NumberValue(args.GetIsolate()->GetCurrentContext()).ToChecked();
+ args.GetReturnValue().Set(v8::Number::New(args.GetIsolate(), v1 - v2));
}
// Returns a callable object. The object returns the difference of its two
@@ -624,6 +590,9 @@ RUNTIME_FUNCTION(Runtime_GetCallable) {
RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
+ if (!args[0].IsJSFunction()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->ClearTypeFeedbackInfo();
return ReadOnlyRoots(isolate).undefined_value();
@@ -832,7 +801,6 @@ RUNTIME_FUNCTION(Runtime_Abort) {
UNREACHABLE();
}
-
RUNTIME_FUNCTION(Runtime_AbortJS) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -847,6 +815,16 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
UNREACHABLE();
}
+RUNTIME_FUNCTION(Runtime_AbortCSAAssert) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
+ base::OS::PrintError("abort: CSA_ASSERT failed: %s\n",
+ message->ToCString().get());
+ isolate->PrintStack(stderr);
+ base::OS::Abort();
+ UNREACHABLE();
+}
RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
HandleScope scope(isolate);
@@ -1153,6 +1131,19 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
return *module_object;
}
+// Create a new Module object using the same NativeModule.
+RUNTIME_FUNCTION(Runtime_CloneWasmModule) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_object, 0);
+
+ Handle<WasmModuleObject> new_module_object =
+ wasm::WasmEngine::GetWasmEngine()->ImportNativeModule(
+ isolate, module_object->shared_native_module());
+
+ return *new_module_object;
+}
+
RUNTIME_FUNCTION(Runtime_HeapObjectVerify) {
HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 1736ee3939..7fab051cbf 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/common/message-template.h"
#include "src/execution/arguments-inl.h"
-#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 288bfa1141..65acb296cc 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
+#include "src/common/message-template.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug.h"
#include "src/execution/arguments-inl.h"
#include "src/execution/frame-constants.h"
-#include "src/execution/message-template.h"
+#include "src/execution/frames.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/numbers/conversions.h"
@@ -62,7 +63,7 @@ Object ThrowWasmError(Isolate* isolate, MessageTemplate message) {
}
} // namespace
-RUNTIME_FUNCTION(Runtime_WasmIsValidAnyFuncValue) {
+RUNTIME_FUNCTION(Runtime_WasmIsValidFuncRefValue) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, function, 0);
@@ -209,12 +210,13 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
// methods that could trigger a GC are being called.
Address arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_params; ++i) {
-#define CASE_ARG_TYPE(type, ctype) \
- case wasm::type: \
- DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)), \
- sizeof(ctype)); \
- wasm_args[i] = wasm::WasmValue(ReadUnalignedValue<ctype>(arg_buf_ptr)); \
- arg_buf_ptr += sizeof(ctype); \
+#define CASE_ARG_TYPE(type, ctype) \
+ case wasm::type: \
+ DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)), \
+ sizeof(ctype)); \
+ wasm_args[i] = \
+ wasm::WasmValue(base::ReadUnalignedValue<ctype>(arg_buf_ptr)); \
+ arg_buf_ptr += sizeof(ctype); \
break;
switch (sig->GetParam(i)) {
CASE_ARG_TYPE(kWasmI32, uint32_t)
@@ -223,11 +225,12 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
CASE_ARG_TYPE(kWasmF64, double)
#undef CASE_ARG_TYPE
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef: {
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef: {
DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetParam(i)),
kSystemPointerSize);
- Handle<Object> ref(ReadUnalignedValue<Object>(arg_buf_ptr), isolate);
+ Handle<Object> ref(base::ReadUnalignedValue<Object>(arg_buf_ptr),
+ isolate);
wasm_args[i] = wasm::WasmValue(ref);
arg_buf_ptr += kSystemPointerSize;
break;
@@ -259,12 +262,12 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
// also un-boxes reference types from handles into raw pointers.
arg_buf_ptr = arg_buffer;
for (int i = 0; i < num_returns; ++i) {
-#define CASE_RET_TYPE(type, ctype) \
- case wasm::type: \
- DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetReturn(i)), \
- sizeof(ctype)); \
- WriteUnalignedValue<ctype>(arg_buf_ptr, wasm_rets[i].to<ctype>()); \
- arg_buf_ptr += sizeof(ctype); \
+#define CASE_RET_TYPE(type, ctype) \
+ case wasm::type: \
+ DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetReturn(i)), \
+ sizeof(ctype)); \
+ base::WriteUnalignedValue<ctype>(arg_buf_ptr, wasm_rets[i].to<ctype>()); \
+ arg_buf_ptr += sizeof(ctype); \
break;
switch (sig->GetReturn(i)) {
CASE_RET_TYPE(kWasmI32, uint32_t)
@@ -273,11 +276,12 @@ RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
CASE_RET_TYPE(kWasmF64, double)
#undef CASE_RET_TYPE
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef: {
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef: {
DCHECK_EQ(wasm::ValueTypes::ElementSizeInBytes(sig->GetReturn(i)),
kSystemPointerSize);
- WriteUnalignedValue<Object>(arg_buf_ptr, *wasm_rets[i].to_anyref());
+ base::WriteUnalignedValue<Object>(arg_buf_ptr,
+ *wasm_rets[i].to_anyref());
arg_buf_ptr += kSystemPointerSize;
break;
}
@@ -476,116 +480,6 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_WasmIndirectCallCheckSignatureAndGetTargetInstance) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_index, 0);
- CONVERT_UINT32_ARG_CHECKED(entry_index, 1);
- CONVERT_UINT32_ARG_CHECKED(sig_index, 2);
- DCHECK(isolate->context().is_null());
- isolate->set_context(instance->native_context());
-
- DCHECK_LT(table_index, instance->tables().length());
- auto table_obj = handle(
- WasmTableObject::cast(instance->tables().get(table_index)), isolate);
-
- // This check is already done in generated code.
- DCHECK(WasmTableObject::IsInBounds(isolate, table_obj, entry_index));
-
- bool is_valid;
- bool is_null;
- MaybeHandle<WasmInstanceObject> maybe_target_instance;
- int function_index;
- WasmTableObject::GetFunctionTableEntry(
- isolate, table_obj, entry_index, &is_valid, &is_null,
- &maybe_target_instance, &function_index);
-
- CHECK(is_valid);
- if (is_null) {
- // We throw a signature mismatch trap to be in sync with the generated
- // code. There we do a signature check instead of a null-check. Trap
- // reasons are not defined in the spec. Otherwise, a null-check is
- // performed before a signature, according to the spec.
- return ThrowWasmError(isolate, MessageTemplate::kWasmTrapFuncSigMismatch);
- }
-
- // Now we do the signature check.
- Handle<WasmInstanceObject> target_instance =
- maybe_target_instance.ToHandleChecked();
-
- const wasm::WasmModule* target_module =
- target_instance->module_object().native_module()->module();
-
- wasm::FunctionSig* target_sig = target_module->functions[function_index].sig;
-
- auto target_sig_id = instance->module()->signature_map.Find(*target_sig);
- uint32_t expected_sig_id = instance->module()->signature_ids[sig_index];
-
- if (expected_sig_id != static_cast<uint32_t>(target_sig_id)) {
- return ThrowWasmError(isolate, MessageTemplate::kWasmTrapFuncSigMismatch);
- }
-
- if (function_index <
- static_cast<int>(target_instance->module()->num_imported_functions)) {
- // The function in the target instance was imported. Use its imports table,
- // which contains a tuple needed by the import wrapper.
- ImportedFunctionEntry entry(target_instance, function_index);
- return entry.object_ref();
- }
- return *target_instance;
-}
-
-RUNTIME_FUNCTION(Runtime_WasmIndirectCallGetTargetAddress) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- auto instance =
- Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_index, 0);
- CONVERT_UINT32_ARG_CHECKED(entry_index, 1);
-
- DCHECK_LT(table_index, instance->tables().length());
- auto table_obj = handle(
- WasmTableObject::cast(instance->tables().get(table_index)), isolate);
-
- DCHECK(WasmTableObject::IsInBounds(isolate, table_obj, entry_index));
-
- bool is_valid;
- bool is_null;
- MaybeHandle<WasmInstanceObject> maybe_target_instance;
- int function_index;
- WasmTableObject::GetFunctionTableEntry(
- isolate, table_obj, entry_index, &is_valid, &is_null,
- &maybe_target_instance, &function_index);
-
- CHECK(is_valid);
- // The null-check should already have been done in
- // Runtime_WasmIndirectCallCheckSignatureAndGetTargetInstance. That runtime
- // function should always be called first.
- CHECK(!is_null);
-
- Handle<WasmInstanceObject> target_instance =
- maybe_target_instance.ToHandleChecked();
-
- Address call_target = 0;
- if (function_index <
- static_cast<int>(target_instance->module()->num_imported_functions)) {
- // The function in the target instance was imported. Use its imports table,
- // which contains a tuple needed by the import wrapper.
- ImportedFunctionEntry entry(target_instance, function_index);
- call_target = entry.target();
- } else {
- // The function in the target instance was not imported.
- call_target = target_instance->GetCallTarget(function_index);
- }
-
- // The return value is an address and not a SMI. However, the address is
- // always aligned, and a SMI uses the same space as {Address}.
- CHECK(HAS_SMI_TAG(call_target));
- return Smi(call_target);
-}
-
RUNTIME_FUNCTION(Runtime_WasmTableInit) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -609,16 +503,18 @@ RUNTIME_FUNCTION(Runtime_WasmTableInit) {
RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
+ DCHECK(isolate->context().is_null());
+ isolate->set_context(GetNativeContextFromWasmInstanceOnStackTop(isolate));
auto instance =
Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
- CONVERT_UINT32_ARG_CHECKED(table_src_index, 0);
- CONVERT_UINT32_ARG_CHECKED(table_dst_index, 1);
+ CONVERT_UINT32_ARG_CHECKED(table_dst_index, 0);
+ CONVERT_UINT32_ARG_CHECKED(table_src_index, 1);
CONVERT_UINT32_ARG_CHECKED(dst, 2);
CONVERT_UINT32_ARG_CHECKED(src, 3);
CONVERT_UINT32_ARG_CHECKED(count, 4);
bool oob = !WasmInstanceObject::CopyTableEntries(
- isolate, instance, table_src_index, table_dst_index, dst, src, count);
+ isolate, instance, table_dst_index, table_src_index, dst, src, count);
if (oob) return ThrowTableOutOfBounds(isolate, instance);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-weak-refs.cc b/deps/v8/src/runtime/runtime-weak-refs.cc
index fbb5b42344..2720176c30 100644
--- a/deps/v8/src/runtime/runtime-weak-refs.cc
+++ b/deps/v8/src/runtime/runtime-weak-refs.cc
@@ -20,7 +20,8 @@ RUNTIME_FUNCTION(Runtime_FinalizationGroupCleanupJob) {
CONVERT_ARG_HANDLE_CHECKED(JSFinalizationGroup, finalization_group, 0);
finalization_group->set_scheduled_for_cleanup(false);
- JSFinalizationGroup::Cleanup(finalization_group, isolate);
+ Handle<Object> cleanup(finalization_group->cleanup(), isolate);
+ JSFinalizationGroup::Cleanup(isolate, finalization_group, cleanup);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 773a5065e2..92ca9f3142 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -115,8 +115,9 @@ namespace internal {
F(ClearStepping, 0, 1) \
F(CollectGarbage, 1, 1) \
F(DebugAsyncFunctionEntered, 1, 1) \
- F(DebugAsyncFunctionFinished, 2, 1) \
F(DebugAsyncFunctionSuspended, 1, 1) \
+ F(DebugAsyncFunctionResumed, 1, 1) \
+ F(DebugAsyncFunctionFinished, 2, 1) \
F(DebugBreakAtEntry, 1, 1) \
F(DebugCollectCoverage, 0, 1) \
F(DebugGetLoadedScriptIds, 0, 1) \
@@ -202,7 +203,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
F(AccessCheck, 1, 1) \
F(AllocateByteArray, 1, 1) \
- F(AllocateInYoungGeneration, 1, 1) \
+ F(AllocateInYoungGeneration, 2, 1) \
F(AllocateInOldGeneration, 2, 1) \
F(AllocateSeqOneByteString, 1, 1) \
F(AllocateSeqTwoByteString, 1, 1) \
@@ -299,14 +300,20 @@ namespace internal {
I(HasProperty, 2, 1) \
F(InternalSetPrototype, 2, 1) \
I(IsJSReceiver, 1, 1) \
+ F(JSReceiverPreventExtensionsDontThrow, 1, 1) \
+ F(JSReceiverPreventExtensionsThrow, 1, 1) \
+ F(JSReceiverGetPrototypeOf, 1, 1) \
+ F(JSReceiverSetPrototypeOfDontThrow, 2, 1) \
+ F(JSReceiverSetPrototypeOfThrow, 2, 1) \
F(NewObject, 2, 1) \
F(ObjectCreate, 2, 1) \
F(ObjectEntries, 1, 1) \
F(ObjectEntriesSkipFastPath, 1, 1) \
- F(ObjectHasOwnProperty, 2, 1) \
- F(ObjectKeys, 1, 1) \
F(ObjectGetOwnPropertyNames, 1, 1) \
F(ObjectGetOwnPropertyNamesTryFast, 1, 1) \
+ F(ObjectHasOwnProperty, 2, 1) \
+ F(ObjectIsExtensible, 1, 1) \
+ F(ObjectKeys, 1, 1) \
F(ObjectValues, 1, 1) \
F(ObjectValuesSkipFastPath, 1, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
@@ -355,6 +362,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_PROXY(F, I) \
F(CheckProxyGetSetTrapResult, 2, 1) \
F(CheckProxyHasTrapResult, 2, 1) \
+ F(CheckProxyDeleteTrapResult, 2, 1) \
F(GetPropertyWithReceiver, 3, 1) \
F(SetPropertyWithReceiver, 4, 1)
@@ -425,23 +433,27 @@ namespace internal {
F(SymbolIsPrivate, 1, 1)
#define FOR_EACH_INTRINSIC_TEST(F, I) \
- F(ClearMegamorphicStubCache, 0, 1) \
F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
+ F(AbortCSAAssert, 1, 1) \
+ F(ArraySpeciesProtector, 0, 1) \
F(ClearFunctionFeedback, 1, 1) \
+ F(ClearMegamorphicStubCache, 0, 1) \
+ F(CloneWasmModule, 1, 1) \
F(CompleteInobjectSlackTracking, 1, 1) \
F(ConstructConsString, 2, 1) \
- F(ConstructSlicedString, 2, 1) \
F(ConstructDouble, 2, 1) \
+ F(ConstructSlicedString, 2, 1) \
F(DebugPrint, 1, 1) \
F(DebugTrace, 0, 1) \
F(DebugTrackRetainingPath, -1, 1) \
F(DeoptimizeFunction, 1, 1) \
- I(DeoptimizeNow, 0, 1) \
F(DeserializeWasmModule, 2, 1) \
F(DisallowCodegenFromStrings, 1, 1) \
F(DisallowWasmCodegen, 1, 1) \
F(DisassembleFunction, 1, 1) \
+ F(EnableCodeLoggingForTesting, 0, 1) \
+ F(EnsureFeedbackVectorForFunction, 1, 1) \
F(FreezeWasmLazyCompilation, 1, 1) \
F(GetCallable, 0, 1) \
F(GetInitializerFunction, 1, 1) \
@@ -452,7 +464,6 @@ namespace internal {
F(GetWasmRecoveredTrapCount, 0, 1) \
F(GlobalPrint, 1, 1) \
F(HasDictionaryElements, 1, 1) \
- F(HasPackedElements, 1, 1) \
F(HasDoubleElements, 1, 1) \
F(HasElementsInALargeObjectSpace, 1, 1) \
F(HasFastElements, 1, 1) \
@@ -470,6 +481,7 @@ namespace internal {
F(HasFixedUint8Elements, 1, 1) \
F(HasHoleyElements, 1, 1) \
F(HasObjectElements, 1, 1) \
+ F(HasPackedElements, 1, 1) \
F(HasSloppyArgumentsElements, 1, 1) \
F(HasSmiElements, 1, 1) \
F(HasSmiOrObjectElements, 1, 1) \
@@ -479,16 +491,15 @@ namespace internal {
F(InYoungGeneration, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
- F(WasmTierUpFunction, 2, 1) \
F(IsLiftoffFunction, 1, 1) \
+ F(IsThreadInWasm, 0, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
- F(IsThreadInWasm, 0, 1) \
+ F(MapIteratorProtector, 0, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
F(OptimizeOsr, -1, 1) \
- F(EnsureFeedbackVectorForFunction, 1, 1) \
F(PrepareFunctionForOptimization, 1, 1) \
F(PrintWithNameForAssert, 2, 1) \
F(RedirectToWasmInterpreter, 2, 1) \
@@ -496,22 +507,21 @@ namespace internal {
F(SerializeWasmModule, 1, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(SetForceSlowPath, 1, 1) \
+ F(SetIteratorProtector, 0, 1) \
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
- F(ArraySpeciesProtector, 0, 1) \
- F(MapIteratorProtector, 0, 1) \
- F(SetIteratorProtector, 0, 1) \
+ F(SetWasmThreadsEnabled, 1, 1) \
F(StringIteratorProtector, 0, 1) \
F(SystemBreak, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
+ F(TurbofanStaticAssert, 1, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
+ F(WasmTierUpFunction, 2, 1) \
F(WasmTraceMemory, 1, 1) \
- F(SetWasmThreadsEnabled, 1, 1) \
- F(TurbofanStaticAssert, 1, 1) \
- F(EnableCodeLoggingForTesting, 0, 1)
+ I(DeoptimizeNow, 0, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
F(ArrayBufferDetach, 1, 1) \
@@ -520,29 +530,27 @@ namespace internal {
F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
-#define FOR_EACH_INTRINSIC_WASM(F, I) \
- F(ThrowWasmError, 1, 1) \
- F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmI32AtomicWait, 4, 1) \
- F(WasmI64AtomicWait, 5, 1) \
- F(WasmAtomicNotify, 3, 1) \
- F(WasmExceptionGetValues, 1, 1) \
- F(WasmExceptionGetTag, 1, 1) \
- F(WasmMemoryGrow, 2, 1) \
- F(WasmRunInterpreter, 2, 1) \
- F(WasmStackGuard, 0, 1) \
- F(WasmThrowCreate, 2, 1) \
- F(WasmThrowTypeError, 0, 1) \
- F(WasmRefFunc, 1, 1) \
- F(WasmFunctionTableGet, 3, 1) \
- F(WasmFunctionTableSet, 4, 1) \
- F(WasmTableInit, 5, 1) \
- F(WasmTableCopy, 5, 1) \
- F(WasmTableGrow, 3, 1) \
- F(WasmTableFill, 4, 1) \
- F(WasmIndirectCallCheckSignatureAndGetTargetInstance, 3, 1) \
- F(WasmIndirectCallGetTargetAddress, 2, 1) \
- F(WasmIsValidAnyFuncValue, 1, 1) \
+#define FOR_EACH_INTRINSIC_WASM(F, I) \
+ F(ThrowWasmError, 1, 1) \
+ F(ThrowWasmStackOverflow, 0, 1) \
+ F(WasmI32AtomicWait, 4, 1) \
+ F(WasmI64AtomicWait, 5, 1) \
+ F(WasmAtomicNotify, 3, 1) \
+ F(WasmExceptionGetValues, 1, 1) \
+ F(WasmExceptionGetTag, 1, 1) \
+ F(WasmMemoryGrow, 2, 1) \
+ F(WasmRunInterpreter, 2, 1) \
+ F(WasmStackGuard, 0, 1) \
+ F(WasmThrowCreate, 2, 1) \
+ F(WasmThrowTypeError, 0, 1) \
+ F(WasmRefFunc, 1, 1) \
+ F(WasmFunctionTableGet, 3, 1) \
+ F(WasmFunctionTableSet, 4, 1) \
+ F(WasmTableInit, 5, 1) \
+ F(WasmTableCopy, 5, 1) \
+ F(WasmTableGrow, 3, 1) \
+ F(WasmTableFill, 4, 1) \
+ F(WasmIsValidFuncRefValue, 1, 1) \
F(WasmCompileLazy, 2, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR_IMPL(F, I) \
@@ -557,8 +565,6 @@ namespace internal {
F(KeyedStoreIC_Miss, 5, 1) \
F(StoreInArrayLiteralIC_Miss, 5, 1) \
F(KeyedStoreIC_Slow, 3, 1) \
- F(LoadAccessorProperty, 4, 1) \
- F(LoadCallbackProperty, 4, 1) \
F(LoadElementWithInterceptor, 2, 1) \
F(LoadGlobalIC_Miss, 4, 1) \
F(LoadGlobalIC_Slow, 3, 1) \
@@ -765,6 +771,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
class AllocateDoubleAlignFlag : public BitField<bool, 0, 1> {};
+class AllowLargeObjectAllocationFlag : public BitField<bool, 1, 1> {};
+
class DeclareGlobalsEvalFlag : public BitField<bool, 0, 1> {};
// A set of bits returned by Runtime_GetOptimizationStatus.
diff --git a/deps/v8/src/snapshot/OWNERS b/deps/v8/src/snapshot/OWNERS
index f55d5b57e5..2dec1c33db 100644
--- a/deps/v8/src/snapshot/OWNERS
+++ b/deps/v8/src/snapshot/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
delphick@chromium.org
jgruber@chromium.org
petermarshall@chromium.org
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index b4e75a6c20..d7e208eac5 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -96,22 +96,22 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
}
bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
- PagedSpace* read_only_space = isolate()->heap()->read_only_space();
- if (!read_only_space->Contains(obj)) return false;
+ if (!ReadOnlyHeap::Contains(obj)) return false;
- // For objects in RO_SPACE, never serialize the object, but instead create a
- // back reference that encodes the page number as the chunk_index and the
- // offset within the page as the chunk_offset.
+ // For objects on the read-only heap, never serialize the object, but instead
+ // create a back reference that encodes the page number as the chunk_index and
+ // the offset within the page as the chunk_offset.
Address address = obj.address();
Page* page = Page::FromAddress(address);
uint32_t chunk_index = 0;
+ ReadOnlySpace* const read_only_space = isolate()->heap()->read_only_space();
for (Page* p : *read_only_space) {
if (p == page) break;
++chunk_index;
}
uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
- SerializerReference back_reference =
- SerializerReference::BackReference(RO_SPACE, chunk_index, chunk_offset);
+ SerializerReference back_reference = SerializerReference::BackReference(
+ SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
CHECK(SerializeBackReference(obj));
return true;
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
index 4fb600d1dd..0b96a5a050 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/deserializer-allocator.cc
@@ -20,8 +20,9 @@ namespace internal {
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
-Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
- if (space == LO_SPACE) {
+Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
+ const int space_number = static_cast<int>(space);
+ if (space == SnapshotSpace::kLargeObject) {
AlwaysAllocateScope scope(heap_);
// Note that we currently do not support deserialization of large code
// objects.
@@ -30,21 +31,21 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
HeapObject obj = result.ToObjectChecked();
deserialized_large_objects_.push_back(obj);
return obj.address();
- } else if (space == MAP_SPACE) {
+ } else if (space == SnapshotSpace::kMap) {
DCHECK_EQ(Map::kSize, size);
return allocated_maps_[next_map_index_++];
} else {
- DCHECK_LT(space, kNumberOfPreallocatedSpaces);
- Address address = high_water_[space];
+ DCHECK(IsPreAllocatedSpace(space));
+ Address address = high_water_[space_number];
DCHECK_NE(address, kNullAddress);
- high_water_[space] += size;
+ high_water_[space_number] += size;
#ifdef DEBUG
// Assert that the current reserved chunk is still big enough.
- const Heap::Reservation& reservation = reservations_[space];
- int chunk_index = current_chunk_[space];
- DCHECK_LE(high_water_[space], reservation[chunk_index].end);
+ const Heap::Reservation& reservation = reservations_[space_number];
+ int chunk_index = current_chunk_[space_number];
+ DCHECK_LE(high_water_[space_number], reservation[chunk_index].end);
#endif
- if (space == CODE_SPACE)
+ if (space == SnapshotSpace::kCode)
MemoryChunk::FromAddress(address)
->GetCodeObjectRegistry()
->RegisterNewlyAllocatedCodeObject(address);
@@ -52,7 +53,7 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
}
}
-Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
+Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
Address address;
HeapObject obj;
@@ -75,16 +76,17 @@ Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
}
}
-void DeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
- DCHECK_LT(space, kNumberOfPreallocatedSpaces);
- uint32_t chunk_index = current_chunk_[space];
- const Heap::Reservation& reservation = reservations_[space];
+void DeserializerAllocator::MoveToNextChunk(SnapshotSpace space) {
+ DCHECK(IsPreAllocatedSpace(space));
+ const int space_number = static_cast<int>(space);
+ uint32_t chunk_index = current_chunk_[space_number];
+ const Heap::Reservation& reservation = reservations_[space_number];
// Make sure the current chunk is indeed exhausted.
- CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+ CHECK_EQ(reservation[chunk_index].end, high_water_[space_number]);
// Move to next reserved chunk.
- chunk_index = ++current_chunk_[space];
+ chunk_index = ++current_chunk_[space_number];
CHECK_LT(chunk_index, reservation.size());
- high_water_[space] = reservation[chunk_index].start;
+ high_water_[space_number] = reservation[chunk_index].start;
}
HeapObject DeserializerAllocator::GetMap(uint32_t index) {
@@ -97,12 +99,14 @@ HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
return deserialized_large_objects_[index];
}
-HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
+HeapObject DeserializerAllocator::GetObject(SnapshotSpace space,
uint32_t chunk_index,
uint32_t chunk_offset) {
- DCHECK_LT(space, kNumberOfPreallocatedSpaces);
- DCHECK_LE(chunk_index, current_chunk_[space]);
- Address address = reservations_[space][chunk_index].start + chunk_offset;
+ DCHECK(IsPreAllocatedSpace(space));
+ const int space_number = static_cast<int>(space);
+ DCHECK_LE(chunk_index, current_chunk_[space_number]);
+ Address address =
+ reservations_[space_number][chunk_index].start + chunk_offset;
if (next_alignment_ != kWordAligned) {
int padding = Heap::GetFillToAlign(address, next_alignment_);
next_alignment_ = kWordAligned;
@@ -114,8 +118,8 @@ HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
void DeserializerAllocator::DecodeReservation(
const std::vector<SerializedData::Reservation>& res) {
- DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
- int current_space = FIRST_SPACE;
+ DCHECK_EQ(0, reservations_[0].size());
+ int current_space = 0;
for (auto& r : res) {
reservations_[current_space].push_back(
{r.chunk_size(), kNullAddress, kNullAddress});
@@ -127,11 +131,13 @@ void DeserializerAllocator::DecodeReservation(
bool DeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
- for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
+ for (int i = 0; i < kNumberOfSpaces; ++i) {
DCHECK_GT(reservations_[i].size(), 0);
}
#endif // DEBUG
DCHECK(allocated_maps_.empty());
+ // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
+ // implemented.
if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
return false;
}
diff --git a/deps/v8/src/snapshot/deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
index 27cacc79d5..18f9363cdf 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.h
+++ b/deps/v8/src/snapshot/deserializer-allocator.h
@@ -25,9 +25,9 @@ class DeserializerAllocator final {
// ------- Allocation Methods -------
// Methods related to memory allocation during deserialization.
- Address Allocate(AllocationSpace space, int size);
+ Address Allocate(SnapshotSpace space, int size);
- void MoveToNextChunk(AllocationSpace space);
+ void MoveToNextChunk(SnapshotSpace space);
void SetAlignment(AllocationAlignment alignment) {
DCHECK_EQ(kWordAligned, next_alignment_);
DCHECK_LE(kWordAligned, alignment);
@@ -51,7 +51,7 @@ class DeserializerAllocator final {
HeapObject GetMap(uint32_t index);
HeapObject GetLargeObject(uint32_t index);
- HeapObject GetObject(AllocationSpace space, uint32_t chunk_index,
+ HeapObject GetObject(SnapshotSpace space, uint32_t chunk_index,
uint32_t chunk_offset);
// ------- Reservation Methods -------
@@ -69,13 +69,13 @@ class DeserializerAllocator final {
private:
// Raw allocation without considering alignment.
- Address AllocateRaw(AllocationSpace space, int size);
+ Address AllocateRaw(SnapshotSpace space, int size);
private:
static constexpr int kNumberOfPreallocatedSpaces =
- SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
static constexpr int kNumberOfSpaces =
- SerializerDeserializer::kNumberOfSpaces;
+ static_cast<int>(SnapshotSpace::kNumberOfSpaces);
// The address of the next object that will be allocated in each space.
// Each space has a number of chunks reserved by the GC, with each chunk
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 1fd590db26..25e32e2cc0 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -90,10 +90,10 @@ Deserializer::~Deserializer() {
// process. It is also called on the body of each function.
void Deserializer::VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) {
- // We are reading to a location outside of JS heap, so pass NEW_SPACE to
- // avoid triggering write barriers.
- ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end), NEW_SPACE,
- kNullAddress);
+ // We are reading to a location outside of JS heap, so pass kNew to avoid
+ // triggering write barriers.
+ ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end),
+ SnapshotSpace::kNew, kNullAddress);
}
void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
@@ -112,9 +112,10 @@ void Deserializer::DeserializeDeferredObjects() {
break;
}
default: {
- int space = code & kSpaceMask;
- DCHECK_LE(space, kNumberOfSpaces);
- DCHECK_EQ(code - space, kNewObject);
+ const int space_number = code & kSpaceMask;
+ DCHECK_LE(space_number, kNumberOfSpaces);
+ DCHECK_EQ(code - space_number, kNewObject);
+ SnapshotSpace space = static_cast<SnapshotSpace>(space_number);
HeapObject object = GetBackReferencedObject(space);
int size = source_.GetInt() << kTaggedSizeLog2;
Address obj_address = object.address();
@@ -201,7 +202,8 @@ String ForwardStringIfExists(Isolate* isolate, StringTableInsertionKey* key) {
} // namespace
-HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
+HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
+ SnapshotSpace space) {
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
if (obj.IsString()) {
// Uninitialize hash field as we need to recompute the hash.
@@ -209,7 +211,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
string.set_hash_field(String::kEmptyHashField);
// Rehash strings before read-only space is sealed. Strings outside
// read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
- if (space == RO_SPACE) {
+ if (space == SnapshotSpace::kReadOnlyHeap) {
to_rehash_.push_back(obj);
}
} else if (obj.NeedsRehashing()) {
@@ -249,7 +251,7 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
// We flush all code pages after deserializing the startup snapshot.
// Hence we only remember each individual code object when deserializing
// user code.
- if (deserializing_user_code() || space == LO_SPACE) {
+ if (deserializing_user_code() || space == SnapshotSpace::kLargeObject) {
new_code_objects_.push_back(Code::cast(obj));
}
} else if (FLAG_trace_maps && obj.IsMap()) {
@@ -326,16 +328,16 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
return obj;
}
-HeapObject Deserializer::GetBackReferencedObject(int space) {
+HeapObject Deserializer::GetBackReferencedObject(SnapshotSpace space) {
HeapObject obj;
switch (space) {
- case LO_SPACE:
+ case SnapshotSpace::kLargeObject:
obj = allocator()->GetLargeObject(source_.GetInt());
break;
- case MAP_SPACE:
+ case SnapshotSpace::kMap:
obj = allocator()->GetMap(source_.GetInt());
break;
- case RO_SPACE: {
+ case SnapshotSpace::kReadOnlyHeap: {
uint32_t chunk_index = source_.GetInt();
uint32_t chunk_offset = source_.GetInt();
if (isolate()->heap()->deserialization_complete()) {
@@ -347,16 +349,14 @@ HeapObject Deserializer::GetBackReferencedObject(int space) {
Address address = page->OffsetToAddress(chunk_offset);
obj = HeapObject::FromAddress(address);
} else {
- obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
- chunk_index, chunk_offset);
+ obj = allocator()->GetObject(space, chunk_index, chunk_offset);
}
break;
}
default: {
uint32_t chunk_index = source_.GetInt();
uint32_t chunk_offset = source_.GetInt();
- obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
- chunk_index, chunk_offset);
+ obj = allocator()->GetObject(space, chunk_index, chunk_offset);
break;
}
}
@@ -372,49 +372,48 @@ HeapObject Deserializer::GetBackReferencedObject(int space) {
HeapObject Deserializer::ReadObject() {
MaybeObject object;
- // We are reading to a location outside of JS heap, so pass NEW_SPACE to
- // avoid triggering write barriers.
+ // We are reading to a location outside of JS heap, so pass kNew to avoid
+ // triggering write barriers.
bool filled =
ReadData(FullMaybeObjectSlot(&object), FullMaybeObjectSlot(&object + 1),
- NEW_SPACE, kNullAddress);
+ SnapshotSpace::kNew, kNullAddress);
CHECK(filled);
return object.GetHeapObjectAssumeStrong();
}
-HeapObject Deserializer::ReadObject(int space_number) {
+HeapObject Deserializer::ReadObject(SnapshotSpace space) {
const int size = source_.GetInt() << kObjectAlignmentBits;
- Address address =
- allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
+ Address address = allocator()->Allocate(space, size);
HeapObject obj = HeapObject::FromAddress(address);
isolate_->heap()->OnAllocationEvent(obj, size);
MaybeObjectSlot current(address);
MaybeObjectSlot limit(address + size);
- if (ReadData(current, limit, space_number, address)) {
+ if (ReadData(current, limit, space, address)) {
// Only post process if object content has not been deferred.
- obj = PostProcessNewObject(obj, space_number);
+ obj = PostProcessNewObject(obj, space);
}
#ifdef DEBUG
if (obj.IsCode()) {
- DCHECK(space_number == CODE_SPACE || space_number == CODE_LO_SPACE);
+ DCHECK_EQ(space, SnapshotSpace::kCode);
} else {
- DCHECK(space_number != CODE_SPACE && space_number != CODE_LO_SPACE);
+ DCHECK_NE(space, SnapshotSpace::kCode);
}
#endif // DEBUG
return obj;
}
-void Deserializer::ReadCodeObjectBody(int space_number,
+void Deserializer::ReadCodeObjectBody(SnapshotSpace space,
Address code_object_address) {
// At this point the code object is already allocated, its map field is
// initialized and its raw data fields and code stream are also read.
// Now we read the rest of code header's fields.
MaybeObjectSlot current(code_object_address + HeapObject::kHeaderSize);
MaybeObjectSlot limit(code_object_address + Code::kDataStart);
- bool filled = ReadData(current, limit, space_number, code_object_address);
+ bool filled = ReadData(current, limit, space, code_object_address);
CHECK(filled);
// Now iterate RelocInfos the same way it was done by the serialzier and
@@ -517,21 +516,22 @@ static void NoExternalReferencesCallback() {
}
template <typename TSlot>
-bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
+bool Deserializer::ReadData(TSlot current, TSlot limit,
+ SnapshotSpace source_space,
Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
- bool write_barrier_needed =
- (current_object_address != kNullAddress && source_space != NEW_SPACE &&
- source_space != CODE_SPACE);
+ bool write_barrier_needed = (current_object_address != kNullAddress &&
+ source_space != SnapshotSpace::kNew &&
+ source_space != SnapshotSpace::kCode);
while (current < limit) {
byte data = source_.Get();
switch (data) {
-#define CASE_STATEMENT(bytecode, space_number) \
- case bytecode + space_number: \
- STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
+#define CASE_STATEMENT(bytecode, snapshot_space) \
+ case bytecode + static_cast<int>(snapshot_space): \
+ STATIC_ASSERT((static_cast<int>(snapshot_space) & ~kSpaceMask) == 0);
#define CASE_BODY(bytecode, space_number_if_any) \
current = ReadDataCase<TSlot, bytecode, space_number_if_any>( \
@@ -541,18 +541,18 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
-#define ALL_SPACES(bytecode) \
- CASE_STATEMENT(bytecode, NEW_SPACE) \
- CASE_BODY(bytecode, NEW_SPACE) \
- CASE_STATEMENT(bytecode, OLD_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(bytecode, CODE_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(bytecode, MAP_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(bytecode, LO_SPACE) \
- V8_FALLTHROUGH; \
- CASE_STATEMENT(bytecode, RO_SPACE) \
+#define ALL_SPACES(bytecode) \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kNew) \
+ CASE_BODY(bytecode, SnapshotSpace::kNew) \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kOld) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kCode) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kMap) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kLargeObject) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(bytecode, SnapshotSpace::kReadOnlyHeap) \
CASE_BODY(bytecode, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
@@ -579,16 +579,16 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
ALL_SPACES(kBackref)
// Find an object in the roots array and write a pointer to it to the
// current object.
- SINGLE_CASE(kRootArray, RO_SPACE)
+ SINGLE_CASE(kRootArray, SnapshotSpace::kReadOnlyHeap)
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
- SINGLE_CASE(kPartialSnapshotCache, RO_SPACE)
+ SINGLE_CASE(kPartialSnapshotCache, SnapshotSpace::kReadOnlyHeap)
// Find an object in the partial snapshots cache and write a pointer to it
// to the current object.
- SINGLE_CASE(kReadOnlyObjectCache, RO_SPACE)
+ SINGLE_CASE(kReadOnlyObjectCache, SnapshotSpace::kReadOnlyHeap)
// Find an object in the attached references and write a pointer to it to
// the current object.
- SINGLE_CASE(kAttachedReference, RO_SPACE)
+ SINGLE_CASE(kAttachedReference, SnapshotSpace::kReadOnlyHeap)
#undef CASE_STATEMENT
#undef CASE_BODY
@@ -614,7 +614,7 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
case kNextChunk: {
int space = source_.Get();
- allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
+ allocator()->MoveToNextChunk(static_cast<SnapshotSpace>(space));
break;
}
@@ -791,13 +791,15 @@ Address Deserializer::ReadExternalReferenceCase() {
}
template <typename TSlot, SerializerDeserializer::Bytecode bytecode,
- int space_number_if_any>
+ SnapshotSpace space_number_if_any>
TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
Address current_object_address, byte data,
bool write_barrier_needed) {
bool emit_write_barrier = false;
- int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
- : space_number_if_any;
+ SnapshotSpace space = static_cast<SnapshotSpace>(
+ space_number_if_any == kAnyOldSpace
+ ? static_cast<SnapshotSpace>(data & kSpaceMask)
+ : space_number_if_any);
HeapObject heap_object;
HeapObjectReferenceType reference_type =
allocator()->GetAndClearNextReferenceIsWeak()
@@ -805,11 +807,11 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
: HeapObjectReferenceType::STRONG;
if (bytecode == kNewObject) {
- heap_object = ReadObject(space_number);
- emit_write_barrier = (space_number == NEW_SPACE);
+ heap_object = ReadObject(space);
+ emit_write_barrier = (space == SnapshotSpace::kNew);
} else if (bytecode == kBackref) {
- heap_object = GetBackReferencedObject(space_number);
- emit_write_barrier = (space_number == NEW_SPACE);
+ heap_object = GetBackReferencedObject(space);
+ emit_write_barrier = (space == SnapshotSpace::kNew);
} else if (bytecode == kRootArray) {
int id = source_.GetInt();
RootIndex root_index = static_cast<RootIndex>(id);
@@ -819,8 +821,7 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
} else if (bytecode == kReadOnlyObjectCache) {
int cache_index = source_.GetInt();
heap_object = HeapObject::cast(
- isolate->heap()->read_only_heap()->cached_read_only_object(
- cache_index));
+ isolate->read_only_heap()->cached_read_only_object(cache_index));
DCHECK(!Heap::InYoungGeneration(heap_object));
emit_write_barrier = false;
} else if (bytecode == kPartialSnapshotCache) {
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 6e3f497d38..8dce1b3f3f 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -5,6 +5,7 @@
#ifndef V8_SNAPSHOT_DESERIALIZER_H_
#define V8_SNAPSHOT_DESERIALIZER_H_
+#include <utility>
#include <vector>
#include "src/objects/allocation-site.h"
@@ -39,6 +40,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
~Deserializer() override;
void SetRehashability(bool v) { can_rehash_ = v; }
+ std::pair<uint32_t, uint32_t> GetChecksum() const {
+ return source_.GetChecksum();
+ }
protected:
// Create a deserializer from a snapshot byte source.
@@ -65,7 +69,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// This returns the address of an object that has been described in the
// snapshot by chunk index and offset.
- HeapObject GetBackReferencedObject(int space);
+ HeapObject GetBackReferencedObject(SnapshotSpace space);
// Add an object to back an attached reference. The order to add objects must
// mirror the order they are added in the serializer.
@@ -122,11 +126,13 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
// object, i.e. if we are writing a series of tagged values that are not on
// the heap. Return false if the object content has been deferred.
template <typename TSlot>
- bool ReadData(TSlot start, TSlot end, int space, Address object_address);
+ bool ReadData(TSlot start, TSlot end, SnapshotSpace space,
+ Address object_address);
// A helper function for ReadData, templatized on the bytecode for efficiency.
// Returns the new value of {current}.
- template <typename TSlot, Bytecode bytecode, int space_number_if_any>
+ template <typename TSlot, Bytecode bytecode,
+ SnapshotSpace space_number_if_any>
inline TSlot ReadDataCase(Isolate* isolate, TSlot current,
Address current_object_address, byte data,
bool write_barrier_needed);
@@ -135,8 +141,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
inline Address ReadExternalReferenceCase();
HeapObject ReadObject();
- HeapObject ReadObject(int space_number);
- void ReadCodeObjectBody(int space_number, Address code_object_address);
+ HeapObject ReadObject(SnapshotSpace space_number);
+ void ReadCodeObjectBody(SnapshotSpace space_number,
+ Address code_object_address);
public:
void VisitCodeTarget(Code host, RelocInfo* rinfo);
@@ -151,7 +158,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
TSlot ReadRepeatedObject(TSlot current, int repeat_count);
// Special handling for serialized code like hooking up internalized strings.
- HeapObject PostProcessNewObject(HeapObject obj, int space);
+ HeapObject PostProcessNewObject(HeapObject obj, SnapshotSpace space);
// Objects from the attached object descriptions in the serialized user code.
std::vector<Handle<HeapObject>> attached_objects_;
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index 4cee1ac131..f4183b4b87 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -127,7 +127,15 @@ void PlatformEmbeddedFileWriterGeneric::DeclareExternalFilename(
fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
}
-void PlatformEmbeddedFileWriterGeneric::FileEpilogue() {}
+void PlatformEmbeddedFileWriterGeneric::FileEpilogue() {
+ // Omitting this section can imply an executable stack, which is usually
+ // a linker warning/error. C++ compilers add these automatically, but
+ // compiling assembly requires the .note.GNU-stack section to be inserted
+ // manually.
+ // Additional documentation:
+ // https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart
+ fprintf(fp_, ".section .note.GNU-stack,\"\",%%progbits\n");
+}
int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
DataDirective directive) {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index d0758cb42c..69457e11a5 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -11,7 +11,6 @@
#if defined(V8_OS_WIN_X64)
#include "src/builtins/builtins.h"
#include "src/diagnostics/unwinding-info-win64.h"
-#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#endif
@@ -570,11 +569,7 @@ void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
// Replace any Windows style paths (backslashes) with forward
// slashes.
std::string fixed_filename(filename);
- for (auto& c : fixed_filename) {
- if (c == '\\') {
- c = '/';
- }
- }
+ std::replace(fixed_filename.begin(), fixed_filename.end(), '\\', '/');
fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
}
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 6bf198230f..819f7009c2 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -259,12 +259,14 @@ int main(int argc, char** argv) {
// Set code range such that relative jumps for builtins to
// builtin calls in the snapshot are possible.
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- size_t code_range_size =
+ size_t code_range_size_mb =
i::kMaximalCodeRangeSize == 0
? i::kMaxPCRelativeCodeRangeInMB
: std::min(i::kMaximalCodeRangeSize / i::MB,
i::kMaxPCRelativeCodeRangeInMB);
- i_isolate->heap()->ConfigureHeap(0, 0, code_range_size);
+ v8::ResourceConstraints constraints;
+ constraints.set_code_range_size_in_bytes(code_range_size_mb * i::MB);
+ i_isolate->heap()->ConfigureHeap(constraints);
// The isolate contains data from builtin compilation that needs
// to be written out if builtins are embedded.
i_isolate->RegisterEmbeddedFileWriter(&embedded_writer);
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index f294d33b5c..ea2136007b 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -16,7 +16,6 @@ namespace internal {
enum NativeType {
EXTRAS,
- TEST
};
// Extra handling for V8_EXPORT_PRIVATE in combination with USING_V8_SHARED
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 9b56f129df..22854bf14a 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -73,7 +73,8 @@ void PartialDeserializer::DeserializeEmbedderFields(
int space = code & kSpaceMask;
DCHECK_LE(space, kNumberOfSpaces);
DCHECK_EQ(code - space, kNewObject);
- Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
+ Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(
+ static_cast<SnapshotSpace>(space))),
isolate());
int index = source()->GetInt();
int size = source()->GetInt();
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 036f0a0414..7b4ffbb2bf 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -214,7 +214,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
if (DataIsEmpty(data)) continue;
// Restore original values from cleared fields.
EmbedderDataSlot(js_obj, i).store_raw(original_embedder_values[i], no_gc);
- embedder_fields_sink_.Put(kNewObject + reference.space(),
+ embedder_fields_sink_.Put(kNewObject + static_cast<int>(reference.space()),
"embedder field holder");
embedder_fields_sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
embedder_fields_sink_.PutInt(reference.chunk_offset(),
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index 576e644846..5ac5a6444a 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -21,7 +21,7 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
V8::FatalProcessOutOfMemory(isolate, "ReadOnlyDeserializer");
}
- ReadOnlyHeap* ro_heap = isolate->heap()->read_only_heap();
+ ReadOnlyHeap* ro_heap = isolate->read_only_heap();
// No active threads.
DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index f4b45a15cc..4ddaf37773 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -67,7 +67,7 @@ void ReadOnlySerializer::FinalizeSerialization() {
#ifdef DEBUG
// Check that every object on read-only heap is reachable (and was
// serialized).
- ReadOnlyHeapIterator iterator(isolate()->heap()->read_only_heap());
+ ReadOnlyHeapObjectIterator iterator(isolate()->read_only_heap());
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(serialized_objects_.count(object));
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index e7c44236ac..c81e9a1e21 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -12,6 +12,30 @@
namespace v8 {
namespace internal {
+// TODO(goszczycki): Move this somewhere every file in src/snapshot can use it.
+// The spaces suported by the serializer. Spaces after LO_SPACE (NEW_LO_SPACE
+// and CODE_LO_SPACE) are not supported.
+enum class SnapshotSpace {
+ kReadOnlyHeap = RO_SPACE,
+ kNew = NEW_SPACE,
+ kOld = OLD_SPACE,
+ kCode = CODE_SPACE,
+ kMap = MAP_SPACE,
+ kLargeObject = LO_SPACE,
+ kNumberOfPreallocatedSpaces = kCode + 1,
+ kNumberOfSpaces = kLargeObject + 1,
+ kSpecialValueSpace = kNumberOfSpaces,
+ // Number of spaces which should be allocated by the heap. Eventually
+ // kReadOnlyHeap will move to the end of this enum and this will be equal to
+ // it.
+ kNumberOfHeapSpaces = kNumberOfSpaces,
+};
+
+constexpr bool IsPreAllocatedSpace(SnapshotSpace space) {
+ return static_cast<int>(space) <
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
+}
+
class SerializerReference {
private:
enum SpecialValueType {
@@ -21,33 +45,32 @@ class SerializerReference {
kBuiltinReference,
};
- static const int kSpecialValueSpace = LAST_SPACE + 1;
- STATIC_ASSERT(kSpecialValueSpace < (1 << kSpaceTagSize));
+ STATIC_ASSERT(static_cast<int>(SnapshotSpace::kSpecialValueSpace) <
+ (1 << kSpaceTagSize));
SerializerReference(SpecialValueType type, uint32_t value)
- : bitfield_(SpaceBits::encode(kSpecialValueSpace) |
+ : bitfield_(SpaceBits::encode(SnapshotSpace::kSpecialValueSpace) |
SpecialValueTypeBits::encode(type)),
value_(value) {}
public:
SerializerReference() : SerializerReference(kInvalidValue, 0) {}
- SerializerReference(uint32_t space, uint32_t chunk_index,
+ SerializerReference(SnapshotSpace space, uint32_t chunk_index,
uint32_t chunk_offset)
: bitfield_(SpaceBits::encode(space) |
ChunkIndexBits::encode(chunk_index)),
value_(chunk_offset) {}
- static SerializerReference BackReference(AllocationSpace space,
+ static SerializerReference BackReference(SnapshotSpace space,
uint32_t chunk_index,
uint32_t chunk_offset) {
DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- DCHECK_LT(space, LO_SPACE);
return SerializerReference(space, chunk_index, chunk_offset);
}
static SerializerReference MapReference(uint32_t index) {
- return SerializerReference(MAP_SPACE, 0, index);
+ return SerializerReference(SnapshotSpace::kMap, 0, index);
}
static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
@@ -55,7 +78,7 @@ class SerializerReference {
}
static SerializerReference LargeObjectReference(uint32_t index) {
- return SerializerReference(LO_SPACE, 0, index);
+ return SerializerReference(SnapshotSpace::kLargeObject, 0, index);
}
static SerializerReference AttachedReference(uint32_t index) {
@@ -67,17 +90,17 @@ class SerializerReference {
}
bool is_valid() const {
- return SpaceBits::decode(bitfield_) != kSpecialValueSpace ||
+ return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace ||
SpecialValueTypeBits::decode(bitfield_) != kInvalidValue;
}
bool is_back_reference() const {
- return SpaceBits::decode(bitfield_) <= LAST_SPACE;
+ return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace;
}
- AllocationSpace space() const {
+ SnapshotSpace space() const {
DCHECK(is_back_reference());
- return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
+ return SpaceBits::decode(bitfield_);
}
uint32_t chunk_offset() const {
@@ -86,17 +109,17 @@ class SerializerReference {
}
uint32_t chunk_index() const {
- DCHECK(space() != MAP_SPACE && space() != LO_SPACE);
+ DCHECK(IsPreAllocatedSpace(space()));
return ChunkIndexBits::decode(bitfield_);
}
uint32_t map_index() const {
- DCHECK_EQ(MAP_SPACE, SpaceBits::decode(bitfield_));
+ DCHECK_EQ(SnapshotSpace::kMap, SpaceBits::decode(bitfield_));
return value_;
}
bool is_off_heap_backing_store_reference() const {
- return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
SpecialValueTypeBits::decode(bitfield_) == kOffHeapBackingStore;
}
@@ -106,12 +129,12 @@ class SerializerReference {
}
uint32_t large_object_index() const {
- DCHECK_EQ(LO_SPACE, SpaceBits::decode(bitfield_));
+ DCHECK_EQ(SnapshotSpace::kLargeObject, SpaceBits::decode(bitfield_));
return value_;
}
bool is_attached_reference() const {
- return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
SpecialValueTypeBits::decode(bitfield_) == kAttachedReference;
}
@@ -121,7 +144,7 @@ class SerializerReference {
}
bool is_builtin_reference() const {
- return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+ return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
SpecialValueTypeBits::decode(bitfield_) == kBuiltinReference;
}
@@ -131,7 +154,7 @@ class SerializerReference {
}
private:
- class SpaceBits : public BitField<int, 0, kSpaceTagSize> {};
+ class SpaceBits : public BitField<SnapshotSpace, 0, kSpaceTagSize> {};
class ChunkIndexBits
: public BitField<uint32_t, SpaceBits::kNext, 32 - kSpaceTagSize> {};
class SpecialValueTypeBits
diff --git a/deps/v8/src/snapshot/serializer-allocator.cc b/deps/v8/src/snapshot/serializer-allocator.cc
index 763244137f..a709715bdd 100644
--- a/deps/v8/src/snapshot/serializer-allocator.cc
+++ b/deps/v8/src/snapshot/serializer-allocator.cc
@@ -23,42 +23,42 @@ void SerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
custom_chunk_size_ = chunk_size;
}
-static uint32_t PageSizeOfSpace(int space) {
+static uint32_t PageSizeOfSpace(SnapshotSpace space) {
return static_cast<uint32_t>(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
static_cast<AllocationSpace>(space)));
}
-uint32_t SerializerAllocator::TargetChunkSize(int space) {
+uint32_t SerializerAllocator::TargetChunkSize(SnapshotSpace space) {
if (custom_chunk_size_ == 0) return PageSizeOfSpace(space);
DCHECK_LE(custom_chunk_size_, PageSizeOfSpace(space));
return custom_chunk_size_;
}
-SerializerReference SerializerAllocator::Allocate(AllocationSpace space,
+SerializerReference SerializerAllocator::Allocate(SnapshotSpace space,
uint32_t size) {
- DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
+ const int space_number = static_cast<int>(space);
+ DCHECK(IsPreAllocatedSpace(space));
DCHECK(size > 0 && size <= PageSizeOfSpace(space));
// Maps are allocated through AllocateMap.
- DCHECK_NE(MAP_SPACE, space);
- // We tenure large object allocations.
- DCHECK_NE(NEW_LO_SPACE, space);
+ DCHECK_NE(SnapshotSpace::kMap, space);
- uint32_t old_chunk_size = pending_chunk_[space];
+ uint32_t old_chunk_size = pending_chunk_[space_number];
uint32_t new_chunk_size = old_chunk_size + size;
// Start a new chunk if the new size exceeds the target chunk size.
// We may exceed the target chunk size if the single object size does.
if (new_chunk_size > TargetChunkSize(space) && old_chunk_size != 0) {
serializer_->PutNextChunk(space);
- completed_chunks_[space].push_back(pending_chunk_[space]);
- pending_chunk_[space] = 0;
+ completed_chunks_[space_number].push_back(pending_chunk_[space_number]);
+ pending_chunk_[space_number] = 0;
new_chunk_size = size;
}
- uint32_t offset = pending_chunk_[space];
- pending_chunk_[space] = new_chunk_size;
+ uint32_t offset = pending_chunk_[space_number];
+ pending_chunk_[space_number] = new_chunk_size;
return SerializerReference::BackReference(
- space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
+ space, static_cast<uint32_t>(completed_chunks_[space_number].size()),
+ offset);
}
SerializerReference SerializerAllocator::AllocateMap() {
@@ -83,23 +83,25 @@ SerializerReference SerializerAllocator::AllocateOffHeapBackingStore() {
bool SerializerAllocator::BackReferenceIsAlreadyAllocated(
SerializerReference reference) const {
DCHECK(reference.is_back_reference());
- AllocationSpace space = reference.space();
- if (space == LO_SPACE) {
+ SnapshotSpace space = reference.space();
+ if (space == SnapshotSpace::kLargeObject) {
return reference.large_object_index() < seen_large_objects_index_;
- } else if (space == MAP_SPACE) {
+ } else if (space == SnapshotSpace::kMap) {
return reference.map_index() < num_maps_;
- } else if (space == RO_SPACE &&
+ } else if (space == SnapshotSpace::kReadOnlyHeap &&
serializer_->isolate()->heap()->deserialization_complete()) {
// If not deserializing the isolate itself, then we create BackReferences
- // for all RO_SPACE objects without ever allocating.
+ // for all read-only heap objects without ever allocating.
return true;
} else {
+ const int space_number = static_cast<int>(space);
size_t chunk_index = reference.chunk_index();
- if (chunk_index == completed_chunks_[space].size()) {
- return reference.chunk_offset() < pending_chunk_[space];
+ if (chunk_index == completed_chunks_[space_number].size()) {
+ return reference.chunk_offset() < pending_chunk_[space_number];
} else {
- return chunk_index < completed_chunks_[space].size() &&
- reference.chunk_offset() < completed_chunks_[space][chunk_index];
+ return chunk_index < completed_chunks_[space_number].size() &&
+ reference.chunk_offset() <
+ completed_chunks_[space_number][chunk_index];
}
}
}
@@ -109,7 +111,7 @@ std::vector<SerializedData::Reservation>
SerializerAllocator::EncodeReservations() const {
std::vector<SerializedData::Reservation> out;
- for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
+ for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
out.emplace_back(completed_chunks_[i][j]);
}
@@ -120,11 +122,14 @@ SerializerAllocator::EncodeReservations() const {
out.back().mark_as_last();
}
- STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+ STATIC_ASSERT(SnapshotSpace::kMap ==
+ SnapshotSpace::kNumberOfPreallocatedSpaces);
out.emplace_back(num_maps_ * Map::kSize);
out.back().mark_as_last();
- STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+ STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
+ 1);
out.emplace_back(large_objects_total_size_);
out.back().mark_as_last();
@@ -136,21 +141,24 @@ void SerializerAllocator::OutputStatistics() {
PrintF(" Spaces (bytes):\n");
- for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
+ for (int space = 0; space < kNumberOfSpaces; space++) {
PrintF("%16s", Heap::GetSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
- for (int space = FIRST_SPACE; space < kNumberOfPreallocatedSpaces; space++) {
+ for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
size_t s = pending_chunk_[space];
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
PrintF("%16zu", s);
}
- STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+ STATIC_ASSERT(SnapshotSpace::kMap ==
+ SnapshotSpace::kNumberOfPreallocatedSpaces);
PrintF("%16d", num_maps_ * Map::kSize);
- STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+ STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
+ 1);
PrintF("%16d\n", large_objects_total_size_);
}
diff --git a/deps/v8/src/snapshot/serializer-allocator.h b/deps/v8/src/snapshot/serializer-allocator.h
index 0ca968f0fe..0d15c5a91b 100644
--- a/deps/v8/src/snapshot/serializer-allocator.h
+++ b/deps/v8/src/snapshot/serializer-allocator.h
@@ -16,7 +16,7 @@ class SerializerAllocator final {
public:
explicit SerializerAllocator(Serializer* serializer);
- SerializerReference Allocate(AllocationSpace space, uint32_t size);
+ SerializerReference Allocate(SnapshotSpace space, uint32_t size);
SerializerReference AllocateMap();
SerializerReference AllocateLargeObject(uint32_t size);
SerializerReference AllocateOffHeapBackingStore();
@@ -35,12 +35,12 @@ class SerializerAllocator final {
private:
// We try to not exceed this size for every chunk. We will not succeed for
// larger objects though.
- uint32_t TargetChunkSize(int space);
+ uint32_t TargetChunkSize(SnapshotSpace space);
static constexpr int kNumberOfPreallocatedSpaces =
- SerializerDeserializer::kNumberOfPreallocatedSpaces;
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
static constexpr int kNumberOfSpaces =
- SerializerDeserializer::kNumberOfSpaces;
+ static_cast<int>(SnapshotSpace::kNumberOfSpaces);
// Objects from the same space are put into chunks for bulk-allocation
// when deserializing. We have to make sure that each chunk fits into a
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 30da8db662..c845a089a3 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -6,9 +6,9 @@
#define V8_SNAPSHOT_SERIALIZER_COMMON_H_
#include "src/base/bits.h"
+#include "src/base/memory.h"
#include "src/codegen/external-reference-table.h"
#include "src/common/globals.h"
-#include "src/common/v8memory.h"
#include "src/objects/visitors.h"
#include "src/sanitizer/msan.h"
#include "src/snapshot/references.h"
@@ -102,19 +102,6 @@ class SerializerDeserializer : public RootVisitor {
public:
static void Iterate(Isolate* isolate, RootVisitor* visitor);
- // No reservation for large object space necessary.
- // We also handle map space differenly.
- STATIC_ASSERT(MAP_SPACE == CODE_SPACE + 1);
-
- // We do not support young generation large objects and large code objects.
- STATIC_ASSERT(LAST_SPACE == NEW_LO_SPACE);
- STATIC_ASSERT(LAST_SPACE - 2 == LO_SPACE);
- static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;
-
- // The number of spaces supported by the serializer. Spaces after LO_SPACE
- // (NEW_LO_SPACE and CODE_LO_SPACE) are not supported.
- static const int kNumberOfSpaces = LO_SPACE + 1;
-
protected:
static bool CanBeDeferred(HeapObject o);
@@ -123,6 +110,12 @@ class SerializerDeserializer : public RootVisitor {
void RestoreExternalReferenceRedirectors(
const std::vector<CallHandlerInfo>& call_handler_infos);
+ static const int kNumberOfPreallocatedSpaces =
+ static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
+
+ static const int kNumberOfSpaces =
+ static_cast<int>(SnapshotSpace::kNumberOfSpaces);
+
// clang-format off
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
V(0x06) V(0x07) V(0x0e) V(0x0f) \
@@ -259,7 +252,7 @@ class SerializerDeserializer : public RootVisitor {
//
// Some other constants.
//
- static const int kAnyOldSpace = -1;
+ static const SnapshotSpace kAnyOldSpace = SnapshotSpace::kNumberOfSpaces;
// Sentinel after a new object to indicate that double alignment is needed.
static const int kDoubleAlignmentSentinel = 0;
@@ -344,12 +337,13 @@ class SerializedData {
protected:
void SetHeaderValue(uint32_t offset, uint32_t value) {
- WriteLittleEndianValue(reinterpret_cast<Address>(data_) + offset, value);
+ base::WriteLittleEndianValue(reinterpret_cast<Address>(data_) + offset,
+ value);
}
uint32_t GetHeaderValue(uint32_t offset) const {
- return ReadLittleEndianValue<uint32_t>(reinterpret_cast<Address>(data_) +
- offset);
+ return base::ReadLittleEndianValue<uint32_t>(
+ reinterpret_cast<Address>(data_) + offset);
}
void AllocateData(uint32_t size);
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index b2dd6a33e7..5b68aaa87b 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -27,7 +27,7 @@ Serializer::Serializer(Isolate* isolate)
allocator_(this) {
#ifdef OBJECT_PRINT
if (FLAG_serialization_statistics) {
- for (int space = 0; space < LAST_SPACE; ++space) {
+ for (int space = 0; space < kNumberOfSpaces; ++space) {
instance_type_count_[space] = NewArray<int>(kInstanceTypes);
instance_type_size_[space] = NewArray<size_t>(kInstanceTypes);
for (int i = 0; i < kInstanceTypes; i++) {
@@ -36,7 +36,7 @@ Serializer::Serializer(Isolate* isolate)
}
}
} else {
- for (int space = 0; space < LAST_SPACE; ++space) {
+ for (int space = 0; space < kNumberOfSpaces; ++space) {
instance_type_count_[space] = nullptr;
instance_type_size_[space] = nullptr;
}
@@ -47,7 +47,7 @@ Serializer::Serializer(Isolate* isolate)
Serializer::~Serializer() {
if (code_address_map_ != nullptr) delete code_address_map_;
#ifdef OBJECT_PRINT
- for (int space = 0; space < LAST_SPACE; ++space) {
+ for (int space = 0; space < kNumberOfSpaces; ++space) {
if (instance_type_count_[space] != nullptr) {
DeleteArray(instance_type_count_[space]);
DeleteArray(instance_type_size_[space]);
@@ -57,10 +57,11 @@ Serializer::~Serializer() {
}
#ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map map, int size, AllocationSpace space) {
+void Serializer::CountInstanceType(Map map, int size, SnapshotSpace space) {
+ const int space_number = static_cast<int>(space);
int instance_type = map.instance_type();
- instance_type_count_[space][instance_type]++;
- instance_type_size_[space][instance_type] += size;
+ instance_type_count_[space_number][instance_type]++;
+ instance_type_size_[space_number][instance_type] += size;
}
#endif // OBJECT_PRINT
@@ -73,7 +74,7 @@ void Serializer::OutputStatistics(const char* name) {
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name) \
- for (int space = 0; space < LAST_SPACE; ++space) { \
+ for (int space = 0; space < kNumberOfSpaces; ++space) { \
if (instance_type_count_[space][Name]) { \
PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space][Name], \
instance_type_size_[space][Name], \
@@ -173,8 +174,8 @@ bool Serializer::SerializeBackReference(HeapObject obj) {
}
PutAlignmentPrefix(obj);
- AllocationSpace space = reference.space();
- sink_.Put(kBackref + space, "BackRef");
+ SnapshotSpace space = reference.space();
+ sink_.Put(kBackref + static_cast<int>(space), "BackRef");
PutBackReference(obj, reference);
}
return true;
@@ -221,11 +222,11 @@ void Serializer::PutBackReference(HeapObject object,
SerializerReference reference) {
DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
switch (reference.space()) {
- case MAP_SPACE:
+ case SnapshotSpace::kMap:
sink_.PutInt(reference.map_index(), "BackRefMapIndex");
break;
- case LO_SPACE:
+ case SnapshotSpace::kLargeObject:
sink_.PutInt(reference.large_object_index(), "BackRefLargeObjectIndex");
break;
@@ -255,9 +256,9 @@ int Serializer::PutAlignmentPrefix(HeapObject object) {
return 0;
}
-void Serializer::PutNextChunk(int space) {
+void Serializer::PutNextChunk(SnapshotSpace space) {
sink_.Put(kNextChunk, "NextChunk");
- sink_.Put(space, "NextChunkSpace");
+ sink_.Put(static_cast<int>(space), "NextChunkSpace");
}
void Serializer::PutRepeat(int repeat_count) {
@@ -298,7 +299,7 @@ Code Serializer::CopyCode(Code code) {
reinterpret_cast<Address>(&code_buffer_.front())));
}
-void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
+void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
int size, Map map) {
if (serializer_->code_address_map_) {
const char* code_name =
@@ -307,22 +308,23 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
CodeNameEvent(object_.address(), sink_->Position(), code_name));
}
+ const int space_number = static_cast<int>(space);
SerializerReference back_reference;
- if (space == LO_SPACE) {
- sink_->Put(kNewObject + space, "NewLargeObject");
+ if (space == SnapshotSpace::kLargeObject) {
+ sink_->Put(kNewObject + space_number, "NewLargeObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
CHECK(!object_.IsCode());
back_reference = serializer_->allocator()->AllocateLargeObject(size);
- } else if (space == MAP_SPACE) {
+ } else if (space == SnapshotSpace::kMap) {
DCHECK_EQ(Map::kSize, size);
back_reference = serializer_->allocator()->AllocateMap();
- sink_->Put(kNewObject + space, "NewMap");
+ sink_->Put(kNewObject + space_number, "NewMap");
// This is redundant, but we include it anyways.
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
} else {
int fill = serializer_->PutAlignmentPrefix(object_);
back_reference = serializer_->allocator()->Allocate(space, size + fill);
- sink_->Put(kNewObject + space, "NewObject");
+ sink_->Put(kNewObject + space_number, "NewObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
}
@@ -468,8 +470,9 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
ExternalTwoByteString::cast(string).resource()->data());
}
- AllocationSpace space =
- (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
+ SnapshotSpace space = (allocation_size > kMaxRegularHeapObjectSize)
+ ? SnapshotSpace::kLargeObject
+ : SnapshotSpace::kOld;
SerializePrologue(space, allocation_size, map);
// Output the rest of the imaginary string.
@@ -534,8 +537,8 @@ void Serializer::ObjectSerializer::Serialize() {
SerializeExternalString();
return;
} else if (!ReadOnlyHeap::Contains(object_)) {
- // Only clear padding for strings outside RO_SPACE. RO_SPACE should have
- // been cleared elsewhere.
+ // Only clear padding for strings outside the read-only heap. Read-only heap
+ // should have been cleared elsewhere.
if (object_.IsSeqOneByteString()) {
// Clear padding bytes at the end. Done here to avoid having to do this
// at allocation sites in generated code.
@@ -568,11 +571,21 @@ void Serializer::ObjectSerializer::Serialize() {
void Serializer::ObjectSerializer::SerializeObject() {
int size = object_.Size();
Map map = object_.map();
- AllocationSpace space =
- MemoryChunk::FromHeapObject(object_)->owner()->identity();
- // Young generation large objects are tenured.
- if (space == NEW_LO_SPACE) {
- space = LO_SPACE;
+ SnapshotSpace space;
+ if (ReadOnlyHeap::Contains(object_)) {
+ space = SnapshotSpace::kReadOnlyHeap;
+ } else {
+ AllocationSpace heap_space =
+ MemoryChunk::FromHeapObject(object_)->owner_identity();
+ // Large code objects are not supported and cannot be expressed by
+ // SnapshotSpace.
+ DCHECK_NE(heap_space, CODE_LO_SPACE);
+ // Young generation large objects are tenured.
+ if (heap_space == NEW_LO_SPACE) {
+ space = SnapshotSpace::kLargeObject;
+ } else {
+ space = static_cast<SnapshotSpace>(heap_space);
+ }
}
SerializePrologue(space, size, map);
@@ -612,7 +625,8 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
bytes_processed_so_far_ = kTaggedSize;
serializer_->PutAlignmentPrefix(object_);
- sink_->Put(kNewObject + back_reference.space(), "deferred object");
+ sink_->Put(kNewObject + static_cast<int>(back_reference.space()),
+ "deferred object");
serializer_->PutBackReference(object_, back_reference);
sink_->PutInt(size >> kTaggedSizeLog2, "deferred object size");
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index b70c7fd45a..fad2ec8a88 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -205,7 +205,7 @@ class Serializer : public SerializerDeserializer {
void PutAttachedReference(SerializerReference reference);
// Emit alignment prefix if necessary, return required padding space in bytes.
int PutAlignmentPrefix(HeapObject object);
- void PutNextChunk(int space);
+ void PutNextChunk(SnapshotSpace space);
void PutRepeat(int repeat_count);
// Returns true if the object was successfully serialized as a root.
@@ -243,7 +243,7 @@ class Serializer : public SerializerDeserializer {
void OutputStatistics(const char* name);
#ifdef OBJECT_PRINT
- void CountInstanceType(Map map, int size, AllocationSpace space);
+ void CountInstanceType(Map map, int size, SnapshotSpace space);
#endif // OBJECT_PRINT
#ifdef DEBUG
@@ -272,8 +272,8 @@ class Serializer : public SerializerDeserializer {
#ifdef OBJECT_PRINT
static const int kInstanceTypes = LAST_TYPE + 1;
- int* instance_type_count_[LAST_SPACE];
- size_t* instance_type_size_[LAST_SPACE];
+ int* instance_type_count_[kNumberOfSpaces];
+ size_t* instance_type_size_[kNumberOfSpaces];
#endif // OBJECT_PRINT
#ifdef DEBUG
@@ -321,7 +321,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void VisitOffHeapTarget(Code host, RelocInfo* target) override;
private:
- void SerializePrologue(AllocationSpace space, int size, Map map);
+ void SerializePrologue(SnapshotSpace space, int size, Map map);
// This function outputs or skips the raw data between the last pointer and
// up to the current position.
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 61396aaa71..f20f2ad33f 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -5,7 +5,10 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
#define V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
+#include <utility>
+
#include "src/base/logging.h"
+#include "src/snapshot/serializer-common.h"
#include "src/utils/utils.h"
namespace v8 {
@@ -66,6 +69,11 @@ class SnapshotByteSource final {
int position() { return position_; }
void set_position(int position) { position_ = position; }
+ std::pair<uint32_t, uint32_t> GetChecksum() const {
+ Checksum checksum(Vector<const byte>(data_, length_));
+ return {checksum.a(), checksum.b()};
+ }
+
private:
const byte* data_;
int length_;
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index ef933ef83a..4a4da9f755 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -8,7 +8,6 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/objects/objects-inl.h"
#include "src/utils/utils.h"
namespace v8 {
@@ -99,11 +98,12 @@ class Snapshot : public AllStatic {
uint32_t index);
static uint32_t GetHeaderValue(const v8::StartupData* data, uint32_t offset) {
- return ReadLittleEndianValue<uint32_t>(
+ return base::ReadLittleEndianValue<uint32_t>(
reinterpret_cast<Address>(data->data) + offset);
}
static void SetHeaderValue(char* data, uint32_t offset, uint32_t value) {
- WriteLittleEndianValue(reinterpret_cast<Address>(data) + offset, value);
+ base::WriteLittleEndianValue(reinterpret_cast<Address>(data) + offset,
+ value);
}
static void CheckVersion(const v8::StartupData* data);
diff --git a/deps/v8/src/strings/OWNERS b/deps/v8/src/strings/OWNERS
index 037c916f24..35ab9a4634 100644
--- a/deps/v8/src/strings/OWNERS
+++ b/deps/v8/src/strings/OWNERS
@@ -1,5 +1,7 @@
bmeurer@chromium.org
-jgruber@chromium.org
jkummerow@chromium.org
+leszeks@chromium.org
verwaest@chromium.org
yangguo@chromium.org
+
+# COMPONENT: Blink>JavaScript>Runtime
diff --git a/deps/v8/src/strings/char-predicates-inl.h b/deps/v8/src/strings/char-predicates-inl.h
index cdd8ddb4ea..3b9b13aba5 100644
--- a/deps/v8/src/strings/char-predicates-inl.h
+++ b/deps/v8/src/strings/char-predicates-inl.h
@@ -51,6 +51,18 @@ inline constexpr bool IsBinaryDigit(uc32 c) {
return c == '0' || c == '1';
}
+inline constexpr bool IsAsciiLower(uc32 c) { return IsInRange(c, 'a', 'z'); }
+
+inline constexpr bool IsAsciiUpper(uc32 c) { return IsInRange(c, 'A', 'Z'); }
+
+inline constexpr uc32 ToAsciiUpper(uc32 c) {
+ return c & ~(IsAsciiLower(c) << 5);
+}
+
+inline constexpr uc32 ToAsciiLower(uc32 c) {
+ return c | (IsAsciiUpper(c) << 5);
+}
+
inline constexpr bool IsRegExpWord(uc16 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z') || IsDecimalDigit(c) ||
(c == '_');
diff --git a/deps/v8/src/strings/char-predicates.h b/deps/v8/src/strings/char-predicates.h
index 43b4d091d1..0262048ec7 100644
--- a/deps/v8/src/strings/char-predicates.h
+++ b/deps/v8/src/strings/char-predicates.h
@@ -26,6 +26,12 @@ inline constexpr bool IsBinaryDigit(uc32 c);
inline constexpr bool IsRegExpWord(uc32 c);
inline constexpr bool IsRegExpNewline(uc32 c);
+inline constexpr bool IsAsciiLower(uc32 ch);
+inline constexpr bool IsAsciiUpper(uc32 ch);
+
+inline constexpr uc32 ToAsciiUpper(uc32 ch);
+inline constexpr uc32 ToAsciiLower(uc32 ch);
+
// ES#sec-names-and-keywords
// This includes '_', '$' and '\', and ID_Start according to
// http://www.unicode.org/reports/tr31/, which consists of categories
diff --git a/deps/v8/src/strings/string-builder-inl.h b/deps/v8/src/strings/string-builder-inl.h
index 88d69b37b5..9f78884a60 100644
--- a/deps/v8/src/strings/string-builder-inl.h
+++ b/deps/v8/src/strings/string-builder-inl.h
@@ -147,6 +147,13 @@ class IncrementalStringBuilder {
}
}
+ V8_INLINE void AppendInt(int i) {
+ char buffer[kIntToCStringBufferSize];
+ const char* str =
+ IntToCString(i, Vector<char>(buffer, kIntToCStringBufferSize));
+ AppendCString(str);
+ }
+
V8_INLINE bool CurrentPartCanFit(int length) {
return part_length_ - current_index_ > length;
}
@@ -277,9 +284,13 @@ class IncrementalStringBuilder {
Handle<SeqString>::cast(current_part()), current_index_));
}
+ void AppendStringByCopy(Handle<String> string);
+ bool CanAppendByCopy(Handle<String> string);
+
static const int kInitialPartLength = 32;
static const int kMaxPartLength = 16 * 1024;
static const int kPartLengthGrowthFactor = 2;
+ static const int kIntToCStringBufferSize = 100;
Isolate* isolate_;
String::Encoding encoding_;
diff --git a/deps/v8/src/strings/string-builder.cc b/deps/v8/src/strings/string-builder.cc
index f647aed190..cfb9a55412 100644
--- a/deps/v8/src/strings/string-builder.cc
+++ b/deps/v8/src/strings/string-builder.cc
@@ -284,7 +284,41 @@ MaybeHandle<String> IncrementalStringBuilder::Finish() {
return accumulator();
}
+// Short strings can be copied directly to {current_part_}.
+// Requires the IncrementalStringBuilder to either have two byte encoding or
+// the incoming string to have one byte representation "underneath" (The
+// one byte check requires the string to be flat).
+bool IncrementalStringBuilder::CanAppendByCopy(Handle<String> string) {
+ constexpr int kMaxStringLengthForCopy = 16;
+ const bool representation_ok =
+ encoding_ == String::TWO_BYTE_ENCODING ||
+ (string->IsFlat() && String::IsOneByteRepresentationUnderneath(*string));
+
+ return representation_ok && string->length() <= kMaxStringLengthForCopy &&
+ CurrentPartCanFit(string->length());
+}
+
+void IncrementalStringBuilder::AppendStringByCopy(Handle<String> string) {
+ DCHECK(CanAppendByCopy(string));
+
+ Handle<SeqOneByteString> part =
+ Handle<SeqOneByteString>::cast(current_part());
+ {
+ DisallowHeapAllocation no_gc;
+ String::WriteToFlat(*string, part->GetChars(no_gc) + current_index_, 0,
+ string->length());
+ }
+ current_index_ += string->length();
+ DCHECK(current_index_ <= part_length_);
+ if (current_index_ == part_length_) Extend();
+}
+
void IncrementalStringBuilder::AppendString(Handle<String> string) {
+ if (CanAppendByCopy(string)) {
+ AppendStringByCopy(string);
+ return;
+ }
+
ShrinkCurrentPart();
part_length_ = kInitialPartLength; // Allocate conservatively.
Extend(); // Attach current part and allocate new part.
diff --git a/deps/v8/src/strings/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index db1891949e..25a8ffc3c1 100644
--- a/deps/v8/src/strings/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -378,8 +378,9 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
printee.ShortPrint(this);
Add("\n");
if (printee.IsJSObject()) {
- if (printee.IsJSValue()) {
- Add(" value(): %o\n", JSValue::cast(printee).value());
+ if (printee.IsJSPrimitiveWrapper()) {
+ Add(" value(): %o\n",
+ JSPrimitiveWrapper::cast(printee).value());
}
PrintUsingMap(JSObject::cast(printee));
if (printee.IsJSArray()) {
diff --git a/deps/v8/src/tasks/OWNERS b/deps/v8/src/tasks/OWNERS
new file mode 100644
index 0000000000..2c6630da0c
--- /dev/null
+++ b/deps/v8/src/tasks/OWNERS
@@ -0,0 +1,6 @@
+ahaas@chromium.org
+clemensh@chromium.org
+mlippautz@chromium.org
+mstarzinger@chromium.org
+rmcilroy@chromium.org
+ulan@chromium.org
diff --git a/deps/v8/src/third_party/siphash/OWNERS b/deps/v8/src/third_party/siphash/OWNERS
new file mode 100644
index 0000000000..208670527f
--- /dev/null
+++ b/deps/v8/src/third_party/siphash/OWNERS
@@ -0,0 +1,3 @@
+sigurds@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/third_party/utf8-decoder/OWNERS b/deps/v8/src/third_party/utf8-decoder/OWNERS
new file mode 100644
index 0000000000..c008e4cbce
--- /dev/null
+++ b/deps/v8/src/third_party/utf8-decoder/OWNERS
@@ -0,0 +1,2 @@
+mathias@chromium.org
+marja@chromium.org
diff --git a/deps/v8/src/third_party/valgrind/OWNERS b/deps/v8/src/third_party/valgrind/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/third_party/valgrind/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/third_party/vtune/OWNERS b/deps/v8/src/third_party/vtune/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index f26e9b2326..23de121065 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -6,13 +6,16 @@
#define V8_TORQUE_AST_H_
#include <iostream>
+#include <map>
#include <memory>
+#include <set>
#include <string>
#include <vector>
#include "src/base/optional.h"
#include "src/torque/constants.h"
#include "src/torque/source-positions.h"
+#include "src/torque/utils.h"
namespace v8 {
namespace internal {
@@ -52,7 +55,6 @@ namespace torque {
V(IfStatement) \
V(WhileStatement) \
V(ForLoopStatement) \
- V(ForOfLoopStatement) \
V(BreakStatement) \
V(ContinueStatement) \
V(ReturnStatement) \
@@ -143,6 +145,12 @@ struct AstNodeClassCheck {
struct Expression : AstNode {
Expression(Kind kind, SourcePosition pos) : AstNode(kind, pos) {}
DEFINE_AST_NODE_INNER_BOILERPLATE(Expression)
+
+ using VisitCallback = std::function<void(Expression*)>;
+ virtual void VisitAllSubExpressions(VisitCallback callback) {
+ // TODO(szuend): Hoist this up to AstNode and make it a
+ // general Ast visitor.
+ }
};
struct LocationExpression : Expression {
@@ -193,9 +201,14 @@ class Ast {
return result;
}
+ void DeclareImportForCurrentFile(SourceId import_id) {
+ declared_imports_[CurrentSourcePosition::Get().source].insert(import_id);
+ }
+
private:
std::vector<Declaration*> declarations_;
std::vector<std::unique_ptr<AstNode>> nodes_;
+ std::map<SourceId, std::set<SourceId>> declared_imports_;
};
static const char* const kThisParameterName = "this";
@@ -227,6 +240,11 @@ struct IdentifierExpression : LocationExpression {
std::vector<TypeExpression*> args = {})
: IdentifierExpression(pos, {}, name, std::move(args)) {}
bool IsThis() const { return name->value == kThisParameterName; }
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callback(this);
+ }
+
std::vector<std::string> namespace_qualification;
Identifier* name;
std::vector<TypeExpression*> generic_arguments;
@@ -241,6 +259,14 @@ struct IntrinsicCallExpression : Expression {
name(std::move(name)),
generic_arguments(std::move(generic_arguments)),
arguments(std::move(arguments)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ for (auto argument : arguments) {
+ argument->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
std::string name;
std::vector<TypeExpression*> generic_arguments;
std::vector<Expression*> arguments;
@@ -257,6 +283,16 @@ struct CallMethodExpression : Expression {
method(method),
arguments(std::move(arguments)),
labels(std::move(labels)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ target->VisitAllSubExpressions(callback);
+ method->VisitAllSubExpressions(callback);
+ for (auto argument : arguments) {
+ argument->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
Expression* target;
IdentifierExpression* method;
std::vector<Expression*> arguments;
@@ -272,6 +308,15 @@ struct CallExpression : Expression {
callee(callee),
arguments(std::move(arguments)),
labels(std::move(labels)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callee->VisitAllSubExpressions(callback);
+ for (auto argument : arguments) {
+ argument->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
IdentifierExpression* callee;
std::vector<Expression*> arguments;
std::vector<Identifier*> labels;
@@ -289,6 +334,14 @@ struct StructExpression : Expression {
: Expression(kKind, pos),
type(type),
initializers(std::move(initializers)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ for (auto& initializer : initializers) {
+ initializer.expression->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
TypeExpression* type;
std::vector<NameAndExpression> initializers;
};
@@ -297,6 +350,13 @@ struct LogicalOrExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LogicalOrExpression)
LogicalOrExpression(SourcePosition pos, Expression* left, Expression* right)
: Expression(kKind, pos), left(left), right(right) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ left->VisitAllSubExpressions(callback);
+ right->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* left;
Expression* right;
};
@@ -305,6 +365,13 @@ struct LogicalAndExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LogicalAndExpression)
LogicalAndExpression(SourcePosition pos, Expression* left, Expression* right)
: Expression(kKind, pos), left(left), right(right) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ left->VisitAllSubExpressions(callback);
+ right->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* left;
Expression* right;
};
@@ -313,6 +380,12 @@ struct SpreadExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(SpreadExpression)
SpreadExpression(SourcePosition pos, Expression* spreadee)
: Expression(kKind, pos), spreadee(spreadee) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ spreadee->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* spreadee;
};
@@ -324,6 +397,14 @@ struct ConditionalExpression : Expression {
condition(condition),
if_true(if_true),
if_false(if_false) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ condition->VisitAllSubExpressions(callback);
+ if_true->VisitAllSubExpressions(callback);
+ if_false->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* condition;
Expression* if_true;
Expression* if_false;
@@ -333,6 +414,11 @@ struct StringLiteralExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StringLiteralExpression)
StringLiteralExpression(SourcePosition pos, std::string literal)
: Expression(kKind, pos), literal(std::move(literal)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callback(this);
+ }
+
std::string literal;
};
@@ -340,6 +426,11 @@ struct NumberLiteralExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(NumberLiteralExpression)
NumberLiteralExpression(SourcePosition pos, std::string name)
: Expression(kKind, pos), number(std::move(name)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ callback(this);
+ }
+
std::string number;
};
@@ -348,6 +439,13 @@ struct ElementAccessExpression : LocationExpression {
ElementAccessExpression(SourcePosition pos, Expression* array,
Expression* index)
: LocationExpression(kKind, pos), array(array), index(index) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ array->VisitAllSubExpressions(callback);
+ index->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* array;
Expression* index;
};
@@ -357,6 +455,12 @@ struct FieldAccessExpression : LocationExpression {
FieldAccessExpression(SourcePosition pos, Expression* object,
Identifier* field)
: LocationExpression(kKind, pos), object(object), field(field) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ object->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* object;
Identifier* field;
};
@@ -365,6 +469,12 @@ struct DereferenceExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(DereferenceExpression)
DereferenceExpression(SourcePosition pos, Expression* reference)
: LocationExpression(kKind, pos), reference(reference) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ reference->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* reference;
};
@@ -379,6 +489,13 @@ struct AssignmentExpression : Expression {
location(location),
op(std::move(op)),
value(value) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ location->VisitAllSubExpressions(callback);
+ value->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* location;
base::Optional<std::string> op;
Expression* value;
@@ -391,6 +508,12 @@ struct IncrementDecrementExpression : Expression {
IncrementDecrementExpression(SourcePosition pos, Expression* location,
IncrementDecrementOperator op, bool postfix)
: Expression(kKind, pos), location(location), op(op), postfix(postfix) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ location->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
Expression* location;
IncrementDecrementOperator op;
bool postfix;
@@ -408,6 +531,12 @@ struct AssumeTypeImpossibleExpression : Expression {
: Expression(kKind, pos),
excluded_type(excluded_type),
expression(expression) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ expression->VisitAllSubExpressions(callback);
+ callback(this);
+ }
+
TypeExpression* excluded_type;
Expression* expression;
};
@@ -419,18 +548,30 @@ struct NewExpression : Expression {
: Expression(kKind, pos),
type(type),
initializers(std::move(initializers)) {}
+
+ void VisitAllSubExpressions(VisitCallback callback) override {
+ for (auto& initializer : initializers) {
+ initializer.expression->VisitAllSubExpressions(callback);
+ }
+ callback(this);
+ }
+
TypeExpression* type;
std::vector<NameAndExpression> initializers;
};
+enum class ImplicitKind { kNoImplicit, kJSImplicit, kImplicit };
+
struct ParameterList {
std::vector<Identifier*> names;
std::vector<TypeExpression*> types;
- size_t implicit_count;
- bool has_varargs;
- std::string arguments_variable;
+ ImplicitKind implicit_kind = ImplicitKind::kNoImplicit;
+ SourcePosition implicit_kind_pos = SourcePosition::Invalid();
+ size_t implicit_count = 0;
+ bool has_varargs = false;
+ std::string arguments_variable = "";
- static ParameterList Empty() { return ParameterList{{}, {}, 0, false, ""}; }
+ static ParameterList Empty() { return {}; }
std::vector<TypeExpression*> GetImplicitTypes() {
return std::vector<TypeExpression*>(types.begin(),
types.begin() + implicit_count);
@@ -445,14 +586,17 @@ struct BasicTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BasicTypeExpression)
BasicTypeExpression(SourcePosition pos,
std::vector<std::string> namespace_qualification,
- std::string name)
+ std::string name,
+ std::vector<TypeExpression*> generic_arguments)
: TypeExpression(kKind, pos),
namespace_qualification(std::move(namespace_qualification)),
is_constexpr(IsConstexprName(name)),
- name(std::move(name)) {}
+ name(std::move(name)),
+ generic_arguments(std::move(generic_arguments)) {}
std::vector<std::string> namespace_qualification;
bool is_constexpr;
std::string name;
+ std::vector<TypeExpression*> generic_arguments;
};
struct FunctionTypeExpression : TypeExpression {
@@ -605,31 +749,6 @@ struct ForLoopStatement : Statement {
Statement* body;
};
-struct RangeExpression {
- base::Optional<Expression*> begin;
- base::Optional<Expression*> end;
-};
-
-struct ForOfLoopStatement : Statement {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(ForOfLoopStatement)
- ForOfLoopStatement(SourcePosition pos, Statement* decl, Expression* iterable,
- base::Optional<RangeExpression> range, Statement* body)
- : Statement(kKind, pos),
- var_declaration(VarDeclarationStatement::cast(decl)),
- iterable(iterable),
- body(body) {
- if (range) {
- begin = range->begin;
- end = range->end;
- }
- }
- VarDeclarationStatement* var_declaration;
- Expression* iterable;
- base::Optional<Expression*> begin;
- base::Optional<Expression*> end;
- Statement* body;
-};
-
struct LabelBlock : AstNode {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LabelBlock)
LabelBlock(SourcePosition pos, Identifier* label,
@@ -710,6 +829,11 @@ struct NameAndTypeExpression {
TypeExpression* type;
};
+struct ImplicitParameters {
+ Identifier* kind;
+ std::vector<NameAndTypeExpression> parameters;
+};
+
struct StructFieldExpression {
NameAndTypeExpression name_and_type;
bool const_qualified;
@@ -769,7 +893,12 @@ struct MacroDeclaration : CallableNode {
const LabelAndTypesVector& labels)
: CallableNode(kind, pos, transitioning, std::move(name),
std::move(parameters), return_type, labels),
- op(std::move(op)) {}
+ op(std::move(op)) {
+ if (parameters.implicit_kind == ImplicitKind::kJSImplicit) {
+ Error("Cannot use \"js-implicit\" with macros, use \"implicit\" instead.")
+ .Position(parameters.implicit_kind_pos);
+ }
+ }
base::Optional<std::string> op;
};
@@ -793,7 +922,11 @@ struct IntrinsicDeclaration : CallableNode {
IntrinsicDeclaration(SourcePosition pos, std::string name,
ParameterList parameters, TypeExpression* return_type)
: CallableNode(kKind, pos, false, std::move(name), std::move(parameters),
- return_type, {}) {}
+ return_type, {}) {
+ if (parameters.implicit_kind != ImplicitKind::kNoImplicit) {
+ Error("Intinsics cannot have implicit parameters.");
+ }
+ }
};
struct TorqueMacroDeclaration : MacroDeclaration {
@@ -817,7 +950,21 @@ struct BuiltinDeclaration : CallableNode {
TypeExpression* return_type)
: CallableNode(kind, pos, transitioning, std::move(name),
std::move(parameters), return_type, {}),
- javascript_linkage(javascript_linkage) {}
+ javascript_linkage(javascript_linkage) {
+ if (parameters.implicit_kind == ImplicitKind::kJSImplicit &&
+ !javascript_linkage) {
+ Error(
+ "\"js-implicit\" is for implicit parameters passed according to the "
+ "JavaScript calling convention. Use \"implicit\" instead.");
+ }
+ if (parameters.implicit_kind == ImplicitKind::kImplicit &&
+ javascript_linkage) {
+ Error(
+ "The JavaScript calling convention implicitly passes a fixed set of "
+ "values. Use \"js-implicit\" to refer to those.")
+ .Position(parameters.implicit_kind_pos);
+ }
+ }
bool javascript_linkage;
};
@@ -926,12 +1073,17 @@ struct StructDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StructDeclaration)
StructDeclaration(SourcePosition pos, Identifier* name,
std::vector<Declaration*> methods,
- std::vector<StructFieldExpression> fields)
+ std::vector<StructFieldExpression> fields,
+ std::vector<Identifier*> generic_parameters)
: TypeDeclaration(kKind, pos, name),
methods(std::move(methods)),
- fields(std::move(fields)) {}
+ fields(std::move(fields)),
+ generic_parameters(std::move(generic_parameters)) {}
std::vector<Declaration*> methods;
std::vector<StructFieldExpression> fields;
+ std::vector<Identifier*> generic_parameters;
+
+ bool IsGeneric() const { return !generic_parameters.empty(); }
};
struct ClassDeclaration : TypeDeclaration {
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
index 650b134140..4ad3a6ec3c 100644
--- a/deps/v8/src/torque/constants.h
+++ b/deps/v8/src/torque/constants.h
@@ -18,16 +18,20 @@ static const char* const CONSTEXPR_TYPE_PREFIX = "constexpr ";
static const char* const NEVER_TYPE_STRING = "never";
static const char* const CONSTEXPR_BOOL_TYPE_STRING = "constexpr bool";
static const char* const CONSTEXPR_INTPTR_TYPE_STRING = "constexpr intptr";
+static const char* const CONSTEXPR_INSTANCE_TYPE_TYPE_STRING =
+ "constexpr InstanceType";
static const char* const BOOL_TYPE_STRING = "bool";
static const char* const VOID_TYPE_STRING = "void";
static const char* const ARGUMENTS_TYPE_STRING = "Arguments";
static const char* const CONTEXT_TYPE_STRING = "Context";
+static const char* const JS_FUNCTION_TYPE_STRING = "JSFunction";
static const char* const MAP_TYPE_STRING = "Map";
static const char* const OBJECT_TYPE_STRING = "Object";
static const char* const HEAP_OBJECT_TYPE_STRING = "HeapObject";
static const char* const JSOBJECT_TYPE_STRING = "JSObject";
static const char* const SMI_TYPE_STRING = "Smi";
static const char* const TAGGED_TYPE_STRING = "Tagged";
+static const char* const UNINITIALIZED_TYPE_STRING = "Uninitialized";
static const char* const RAWPTR_TYPE_STRING = "RawPtr";
static const char* const CONST_STRING_TYPE_STRING = "constexpr string";
static const char* const STRING_TYPE_STRING = "String";
diff --git a/deps/v8/src/torque/contextual.h b/deps/v8/src/torque/contextual.h
index 628d5b8514..92d2bdf3d7 100644
--- a/deps/v8/src/torque/contextual.h
+++ b/deps/v8/src/torque/contextual.h
@@ -14,6 +14,9 @@ namespace v8 {
namespace internal {
namespace torque {
+template <class Variable>
+V8_EXPORT_PRIVATE typename Variable::VariableType*& ContextualVariableTop();
+
// {ContextualVariable} provides a clean alternative to a global variable.
// The contextual variable is mutable, and supports managing the value of
// a variable in a well-nested fashion via the {Scope} class.
@@ -66,7 +69,9 @@ class ContextualVariable {
}
private:
- V8_EXPORT_PRIVATE static VarType*& Top();
+ template <class T>
+ friend typename T::VariableType*& ContextualVariableTop();
+ static VarType*& Top() { return ContextualVariableTop<Derived>(); }
static bool HasScope() { return Top() != nullptr; }
friend class MessageBuilder;
@@ -77,12 +82,11 @@ class ContextualVariable {
struct VarName \
: v8::internal::torque::ContextualVariable<VarName, __VA_ARGS__> {}
-#define DEFINE_CONTEXTUAL_VARIABLE(VarName) \
- template <> \
- V8_EXPORT_PRIVATE VarName::VariableType*& \
- ContextualVariable<VarName, VarName::VariableType>::Top() { \
- static thread_local VarName::VariableType* top = nullptr; \
- return top; \
+#define DEFINE_CONTEXTUAL_VARIABLE(VarName) \
+ template <> \
+ V8_EXPORT_PRIVATE VarName::VariableType*& ContextualVariableTop<VarName>() { \
+ static thread_local VarName::VariableType* top = nullptr; \
+ return top; \
}
// By inheriting from {ContextualClass} a class can become a contextual variable
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index a29f832e7d..6a798a2707 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -56,14 +56,10 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
}
void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
- std::string file = SourceFileMap::GetSource(pos.source);
+ const std::string& file = SourceFileMap::AbsolutePath(pos.source);
if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
// Lines in Torque SourcePositions are zero-based, while the
// CodeStubAssembler and downwind systems are one-based.
- for (auto& c : file) {
- if (c == '\\')
- c = '/';
- }
out_ << " ca_.SetSourcePosition(\"" << file << "\", "
<< (pos.start.line + 1) << ");\n";
previous_position_ = pos;
@@ -260,9 +256,8 @@ void CSAGenerator::EmitInstruction(const CallIntrinsicInstruction& instruction,
} else if (instruction.intrinsic->ExternalName() == "%Allocate") {
out_ << "ca_.UncheckedCast<" << return_type->GetGeneratedTNodeTypeName()
<< ">(CodeStubAssembler(state_).Allocate";
- } else if (instruction.intrinsic->ExternalName() ==
- "%AllocateInternalClass") {
- out_ << "CodeStubAssembler(state_).AllocateUninitializedFixedArray";
+ } else if (instruction.intrinsic->ExternalName() == "%GetStructMap") {
+ out_ << "CodeStubAssembler(state_).GetStructMap";
} else {
ReportError("no built in intrinsic with name " +
instruction.intrinsic->ExternalName());
@@ -318,8 +313,7 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
out_ << ") = ";
} else {
if (results.size() == 1) {
- out_ << results[0] << " = ca_.UncheckedCast<"
- << return_type->GetGeneratedTNodeTypeName() << ">(";
+ out_ << results[0] << " = ";
} else {
DCHECK_EQ(0, results.size());
}
@@ -334,7 +328,6 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
if (needs_flattening) {
out_ << ").Flatten();\n";
} else {
- if (results.size() == 1) out_ << ")";
out_ << ");\n";
}
PostCallableExceptionPreparation(catch_name, return_type,
@@ -528,9 +521,9 @@ std::string CSAGenerator::PreCallableExceptionPreparation(
if (catch_block) {
catch_name = FreshCatchName();
out_ << " compiler::CodeAssemblerExceptionHandlerLabel " << catch_name
- << "_label(&ca_, compiler::CodeAssemblerLabel::kDeferred);\n";
+ << "__label(&ca_, compiler::CodeAssemblerLabel::kDeferred);\n";
out_ << " { compiler::CodeAssemblerScopedExceptionHandler s(&ca_, &"
- << catch_name << "_label);\n";
+ << catch_name << "__label);\n";
}
return catch_name;
}
@@ -541,7 +534,7 @@ void CSAGenerator::PostCallableExceptionPreparation(
if (catch_block) {
std::string block_name = BlockName(*catch_block);
out_ << " }\n";
- out_ << " if (" << catch_name << "_label.is_used()) {\n";
+ out_ << " if (" << catch_name << "__label.is_used()) {\n";
out_ << " compiler::CodeAssemblerLabel " << catch_name
<< "_skip(&ca_);\n";
if (!return_type->IsNever()) {
@@ -549,7 +542,7 @@ void CSAGenerator::PostCallableExceptionPreparation(
}
out_ << " compiler::TNode<Object> " << catch_name
<< "_exception_object;\n";
- out_ << " ca_.Bind(&" << catch_name << "_label, &" << catch_name
+ out_ << " ca_.Bind(&" << catch_name << "__label, &" << catch_name
<< "_exception_object);\n";
out_ << " ca_.Goto(&" << block_name;
for (size_t i = 0; i < stack->Size(); ++i) {
@@ -695,8 +688,8 @@ void CSAGenerator::EmitInstruction(const AbortInstruction& instruction,
out_ << " CodeStubAssembler(state_).DebugBreak();\n";
break;
case AbortInstruction::Kind::kAssertionFailure: {
- std::string file =
- StringLiteralQuote(SourceFileMap::GetSource(instruction.pos.source));
+ std::string file = StringLiteralQuote(
+ SourceFileMap::PathFromV8Root(instruction.pos.source));
out_ << " CodeStubAssembler(state_).FailAssert("
<< StringLiteralQuote(instruction.message) << ", " << file << ", "
<< instruction.pos.start.line + 1 << ");\n";
@@ -723,12 +716,8 @@ void CSAGenerator::EmitInstruction(
out_ << " compiler::TNode<IntPtrT> " << offset_name
<< " = ca_.IntPtrConstant(";
- if (instruction.class_type->IsExtern()) {
out_ << field.aggregate->GetGeneratedTNodeTypeName() << "::k"
<< CamelifyString(field.name_and_type.name) << "Offset";
- } else {
- out_ << "FixedArray::kHeaderSize + " << field.offset;
- }
out_ << ");\n"
<< " USE(" << stack->Top() << ");\n";
}
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index afa6d50d94..cf6fd2554b 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -50,6 +50,7 @@ class Declarable {
kRuntimeFunction,
kIntrinsic,
kGeneric,
+ kGenericStructType,
kTypeAlias,
kExternConstant,
kNamespaceConstant
@@ -64,6 +65,7 @@ class Declarable {
bool IsBuiltin() const { return kind() == kBuiltin; }
bool IsRuntimeFunction() const { return kind() == kRuntimeFunction; }
bool IsGeneric() const { return kind() == kGeneric; }
+ bool IsGenericStructType() const { return kind() == kGenericStructType; }
bool IsTypeAlias() const { return kind() == kTypeAlias; }
bool IsExternConstant() const { return kind() == kExternConstant; }
bool IsNamespaceConstant() const { return kind() == kNamespaceConstant; }
@@ -183,15 +185,9 @@ class Namespace : public Scope {
const std::string& name() const { return name_; }
bool IsDefaultNamespace() const;
bool IsTestNamespace() const;
- std::ostream& source_stream() { return source_stream_; }
- std::ostream& header_stream() { return header_stream_; }
- std::string source() { return source_stream_.str(); }
- std::string header() { return header_stream_.str(); }
private:
std::string name_;
- std::stringstream header_stream_;
- std::stringstream source_stream_;
};
inline Namespace* CurrentNamespace() {
@@ -318,16 +314,23 @@ class Macro : public Callable {
return Callable::ShouldBeInlined();
}
+ void SetUsed() { used_ = true; }
+ bool IsUsed() const { return used_; }
+
protected:
Macro(Declarable::Kind kind, std::string external_name,
std::string readable_name, const Signature& signature,
bool transitioning, base::Optional<Statement*> body)
: Callable(kind, std::move(external_name), std::move(readable_name),
- signature, transitioning, body) {
+ signature, transitioning, body),
+ used_(false) {
if (signature.parameter_types.var_args) {
ReportError("Varargs are not supported for macros.");
}
}
+
+ private:
+ bool used_;
};
class ExternMacro : public Macro {
@@ -449,26 +452,43 @@ class Intrinsic : public Callable {
}
};
-class Generic : public Declarable {
+template <class T>
+class SpecializationMap {
+ private:
+ using Map = std::unordered_map<TypeVector, T*, base::hash<TypeVector>>;
+
public:
- DECLARE_DECLARABLE_BOILERPLATE(Generic, generic)
+ SpecializationMap() {}
- GenericDeclaration* declaration() const { return declaration_; }
- const std::vector<Identifier*> generic_parameters() const {
- return declaration()->generic_parameters;
- }
- const std::string& name() const { return name_; }
- void AddSpecialization(const TypeVector& type_arguments,
- Callable* specialization) {
+ void Add(const TypeVector& type_arguments, T* specialization) {
DCHECK_EQ(0, specializations_.count(type_arguments));
specializations_[type_arguments] = specialization;
}
- base::Optional<Callable*> GetSpecialization(
- const TypeVector& type_arguments) const {
+ base::Optional<T*> Get(const TypeVector& type_arguments) const {
auto it = specializations_.find(type_arguments);
if (it != specializations_.end()) return it->second;
return base::nullopt;
}
+
+ using iterator = typename Map::const_iterator;
+ iterator begin() const { return specializations_.begin(); }
+ iterator end() const { return specializations_.end(); }
+
+ private:
+ Map specializations_;
+};
+
+class Generic : public Declarable {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(Generic, generic)
+
+ const std::string& name() const { return name_; }
+ GenericDeclaration* declaration() const { return declaration_; }
+ const std::vector<Identifier*> generic_parameters() const {
+ return declaration()->generic_parameters;
+ }
+ SpecializationMap<Callable>& specializations() { return specializations_; }
+
base::Optional<TypeVector> InferSpecializationTypes(
const TypeVector& explicit_specialization_types,
const TypeVector& arguments);
@@ -481,9 +501,8 @@ class Generic : public Declarable {
declaration_(declaration) {}
std::string name_;
- std::unordered_map<TypeVector, Callable*, base::hash<TypeVector>>
- specializations_;
GenericDeclaration* declaration_;
+ SpecializationMap<Callable> specializations_;
};
struct SpecializationKey {
@@ -491,6 +510,32 @@ struct SpecializationKey {
TypeVector specialized_types;
};
+class GenericStructType : public Declarable {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(GenericStructType, generic_type)
+ const std::string& name() const { return name_; }
+ StructDeclaration* declaration() const { return declaration_; }
+ const std::vector<Identifier*>& generic_parameters() const {
+ return declaration_->generic_parameters;
+ }
+ SpecializationMap<const StructType>& specializations() {
+ return specializations_;
+ }
+
+ private:
+ friend class Declarations;
+ GenericStructType(const std::string& name, StructDeclaration* declaration)
+ : Declarable(Declarable::kGenericStructType),
+ name_(name),
+ declaration_(declaration) {
+ DCHECK_GT(declaration->generic_parameters.size(), 0);
+ }
+
+ std::string name_;
+ StructDeclaration* declaration_;
+ SpecializationMap<const StructType> specializations_;
+};
+
class TypeAlias : public Declarable {
public:
DECLARE_DECLARABLE_BOILERPLATE(TypeAlias, type_alias)
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index 34914d7b72..e0e996f33b 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -76,28 +76,12 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Builtin::Kind kind = !javascript ? Builtin::kStub
: varargs ? Builtin::kVarArgsJavaScript
: Builtin::kFixedArgsJavaScript;
- const Type* context_type =
- Declarations::LookupGlobalType(CONTEXT_TYPE_STRING);
- if (signature.types().size() == 0 ||
- !(signature.types()[0] == context_type)) {
- Error("First parameter to builtin ", decl->name, " must be of type ",
- *context_type);
- }
if (varargs && !javascript) {
Error("Rest parameters require ", decl->name,
" to be a JavaScript builtin");
}
- if (javascript) {
- if (signature.types().size() >= 2 &&
- !(signature.types()[1] ==
- Declarations::LookupGlobalType(OBJECT_TYPE_STRING))) {
- Error("Second parameter to javascript builtin ", decl->name, " is ",
- *signature.types()[1], " but should be Object");
- }
- }
-
for (size_t i = 0; i < signature.types().size(); ++i) {
if (const StructType* type =
StructType::DynamicCast(signature.types()[i])) {
@@ -136,8 +120,7 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
const Signature& signature,
base::Optional<Statement*> body) {
if (signature.parameter_types.types.size() == 0 ||
- !(signature.parameter_types.types[0] ==
- Declarations::LookupGlobalType(CONTEXT_TYPE_STRING))) {
+ !(signature.parameter_types.types[0] == TypeOracle::GetContextType())) {
ReportError(
"first parameter to runtime functions has to be the context and have "
"type Context, but found type ",
@@ -350,7 +333,7 @@ Callable* DeclarationVisitor::Specialize(
<< std::to_string(generic_parameter_count) << ")";
ReportError(stream.str());
}
- if (key.generic->GetSpecialization(key.specialized_types)) {
+ if (key.generic->specializations().Get(key.specialized_types)) {
ReportError("cannot redeclare specialization of ", key.generic->name(),
" with types <", key.specialized_types, ">");
}
@@ -381,7 +364,7 @@ Callable* DeclarationVisitor::Specialize(
callable = CreateBuiltin(builtin, generated_name, readable_name.str(),
type_signature, *body);
}
- key.generic->AddSpecialization(key.specialized_types, callable);
+ key.generic->specializations().Add(key.specialized_types, callable);
return callable;
}
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index 4c6053d86a..dbd28f4b87 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -37,6 +37,13 @@ class PredeclarationVisitor {
static void Predeclare(TypeDeclaration* decl) {
Declarations::PredeclareTypeAlias(decl->name, decl, false);
}
+ static void Predeclare(StructDeclaration* decl) {
+ if (decl->IsGeneric()) {
+ Declarations::DeclareGenericStructType(decl->name->value, decl);
+ } else {
+ Declarations::PredeclareTypeAlias(decl->name, decl, false);
+ }
+ }
static void Predeclare(GenericDeclaration* decl) {
Declarations::DeclareGeneric(decl->callable->name, decl);
}
@@ -59,6 +66,11 @@ class DeclarationVisitor {
// are reported even if the type is unused.
Declarations::LookupType(decl->name);
}
+ static void Visit(StructDeclaration* decl) {
+ if (!decl->IsGeneric()) {
+ Declarations::LookupType(decl->name);
+ }
+ }
static Builtin* CreateBuiltin(BuiltinDeclaration* decl,
std::string external_name,
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index f3f3e84cad..73d46d6998 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -133,6 +133,12 @@ Generic* Declarations::LookupUniqueGeneric(const QualifiedName& name) {
"generic");
}
+GenericStructType* Declarations::LookupUniqueGenericStructType(
+ const QualifiedName& name) {
+ return EnsureUnique(FilterDeclarables<GenericStructType>(Lookup(name)), name,
+ "generic struct");
+}
+
Namespace* Declarations::DeclareNamespace(const std::string& name) {
return Declare(name, std::unique_ptr<Namespace>(new Namespace(name)));
}
@@ -278,6 +284,12 @@ Generic* Declarations::DeclareGeneric(const std::string& name,
return Declare(name, std::unique_ptr<Generic>(new Generic(name, generic)));
}
+GenericStructType* Declarations::DeclareGenericStructType(
+ const std::string& name, StructDeclaration* decl) {
+ return Declare(name, std::unique_ptr<GenericStructType>(
+ new GenericStructType(name, decl)));
+}
+
std::string Declarations::GetGeneratedCallableName(
const std::string& name, const TypeVector& specialized_types) {
std::string result = name;
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 0dd9be9974..00e0facefe 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -15,8 +15,13 @@ namespace internal {
namespace torque {
static constexpr const char* const kFromConstexprMacroName = "FromConstexpr";
-static constexpr const char* kTrueLabelName = "_True";
-static constexpr const char* kFalseLabelName = "_False";
+static constexpr const char* kTrueLabelName = "__True";
+static constexpr const char* kFalseLabelName = "__False";
+static constexpr const char* kMacroEndLabelName = "__macro_end";
+static constexpr const char* kBreakLabelName = "__break";
+static constexpr const char* kContinueLabelName = "__continue";
+static constexpr const char* kCatchLabelName = "__catch";
+static constexpr const char* kNextCaseLabelName = "__NextCase";
template <class T>
std::vector<T*> FilterDeclarables(const std::vector<Declarable*> list) {
@@ -71,6 +76,9 @@ class Declarations {
static std::vector<Generic*> LookupGeneric(const std::string& name);
static Generic* LookupUniqueGeneric(const QualifiedName& name);
+ static GenericStructType* LookupUniqueGenericStructType(
+ const QualifiedName& name);
+
static Namespace* DeclareNamespace(const std::string& name);
static TypeAlias* DeclareType(const Identifier* name, const Type* type);
@@ -124,6 +132,8 @@ class Declarations {
static Generic* DeclareGeneric(const std::string& name,
GenericDeclaration* generic);
+ static GenericStructType* DeclareGenericStructType(const std::string& name,
+ StructDeclaration* decl);
template <class T>
static T* Declare(const std::string& name, T* d) {
diff --git a/deps/v8/src/torque/earley-parser.cc b/deps/v8/src/torque/earley-parser.cc
index ff12d4a449..9ebb132c82 100644
--- a/deps/v8/src/torque/earley-parser.cc
+++ b/deps/v8/src/torque/earley-parser.cc
@@ -159,26 +159,21 @@ Symbol* Lexer::MatchToken(InputPosition* pos, InputPosition end) {
symbol = &pair.second;
}
}
- // Check if matched pattern coincides with a keyword. Prefer the keyword in
- // this case.
- if (*pos != token_start) {
- auto found_keyword = keywords_.find(std::string(token_start, *pos));
- if (found_keyword != keywords_.end()) {
- return &found_keyword->second;
- }
- return symbol;
- }
- // Now check for a keyword (that doesn't overlap with a pattern).
- // Iterate from the end to ensure that if one keyword is a prefix of another,
- // we first try to match the longer one.
+ size_t pattern_size = *pos - token_start;
+
+ // Now check for keywords. Prefer keywords over patterns unless the pattern is
+ // longer. Iterate from the end to ensure that if one keyword is a prefix of
+ // another, we first try to match the longer one.
for (auto it = keywords_.rbegin(); it != keywords_.rend(); ++it) {
const std::string& keyword = it->first;
- if (static_cast<size_t>(end - *pos) < keyword.size()) continue;
- if (keyword == std::string(*pos, *pos + keyword.size())) {
- *pos += keyword.size();
+ if (static_cast<size_t>(end - token_start) < keyword.size()) continue;
+ if (keyword.size() >= pattern_size &&
+ keyword == std::string(token_start, token_start + keyword.size())) {
+ *pos = token_start + keyword.size();
return &it->second;
}
}
+ if (pattern_size > 0) return symbol;
return nullptr;
}
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 9d9cfb02c0..d3d0c89c42 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -53,6 +53,8 @@ enum class ParseResultHolderBase::TypeId {
kLabelBlockPtr,
kOptionalLabelBlockPtr,
kNameAndTypeExpression,
+ kImplicitParameters,
+ kOptionalImplicitParameters,
kNameAndExpression,
kConditionalAnnotation,
kOptionalConditionalAnnotation,
@@ -70,8 +72,6 @@ enum class ParseResultHolderBase::TypeId {
kStdVectorOfExpressionPtr,
kExpressionWithSource,
kParameterList,
- kRangeExpression,
- kOptionalRangeExpression,
kTypeList,
kOptionalTypeList,
kLabelAndTypes,
diff --git a/deps/v8/src/torque/global-context.cc b/deps/v8/src/torque/global-context.cc
new file mode 100644
index 0000000000..f258f18474
--- /dev/null
+++ b/deps/v8/src/torque/global-context.cc
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/global-context.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+GlobalContext::GlobalContext(Ast ast)
+ : collect_language_server_data_(false),
+ force_assert_statements_(false),
+ ast_(std::move(ast)) {
+ CurrentScope::Scope current_scope(nullptr);
+ CurrentSourcePosition::Scope current_source_position(
+ SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
+ default_namespace_ =
+ RegisterDeclarable(base::make_unique<Namespace>(kBaseNamespaceName));
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index aa70b23fb5..e103a22575 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -7,9 +7,9 @@
#include <map>
+#include "src/torque/ast.h"
+#include "src/torque/contextual.h"
#include "src/torque/declarable.h"
-#include "src/torque/declarations.h"
-#include "src/torque/type-oracle.h"
namespace v8 {
namespace internal {
@@ -19,16 +19,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
public:
GlobalContext(GlobalContext&&) V8_NOEXCEPT = default;
GlobalContext& operator=(GlobalContext&&) V8_NOEXCEPT = default;
- explicit GlobalContext(Ast ast)
- : collect_language_server_data_(false),
- force_assert_statements_(false),
- ast_(std::move(ast)) {
- CurrentScope::Scope current_scope(nullptr);
- CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
- default_namespace_ =
- RegisterDeclarable(base::make_unique<Namespace>(kBaseNamespaceName));
- }
+ explicit GlobalContext(Ast ast);
+
static Namespace* GetDefaultNamespace() { return Get().default_namespace_; }
template <class T>
T* RegisterDeclarable(std::unique_ptr<T> d) {
@@ -41,16 +33,6 @@ class GlobalContext : public ContextualClass<GlobalContext> {
return Get().declarables_;
}
- static const std::vector<Namespace*> GetNamespaces() {
- std::vector<Namespace*> result;
- for (auto& declarable : AllDeclarables()) {
- if (Namespace* n = Namespace::DynamicCast(declarable.get())) {
- result.push_back(n);
- }
- }
- return result;
- }
-
static void RegisterClass(const TypeAlias* alias) {
DCHECK(alias->ParentScope()->IsNamespace());
Get().classes_.push_back(alias);
@@ -82,6 +64,14 @@ class GlobalContext : public ContextualClass<GlobalContext> {
static Ast* ast() { return &Get().ast_; }
static size_t FreshId() { return Get().fresh_id_++; }
+ struct PerFileStreams {
+ std::stringstream csa_headerfile;
+ std::stringstream csa_ccfile;
+ };
+ static PerFileStreams& GeneratedPerFile(SourceId file) {
+ return Get().generated_per_file_[file];
+ }
+
private:
bool collect_language_server_data_;
bool force_assert_statements_;
@@ -89,6 +79,7 @@ class GlobalContext : public ContextualClass<GlobalContext> {
Ast ast_;
std::vector<std::unique_ptr<Declarable>> declarables_;
std::vector<std::string> cpp_includes_;
+ std::map<SourceId, PerFileStreams> generated_per_file_;
GlobalClassList classes_;
size_t fresh_id_ = 0;
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index d4798b28cb..a0aeeee81b 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -48,61 +48,62 @@ const Type* ImplementationVisitor::Visit(Statement* stmt) {
return result;
}
-void ImplementationVisitor::BeginNamespaceFile(Namespace* nspace) {
- std::ostream& source = nspace->source_stream();
- std::ostream& header = nspace->header_stream();
+void ImplementationVisitor::BeginCSAFiles() {
+ for (SourceId file : SourceFileMap::AllSources()) {
+ std::ostream& source = GlobalContext::GeneratedPerFile(file).csa_ccfile;
+ std::ostream& header = GlobalContext::GeneratedPerFile(file).csa_headerfile;
- for (const std::string& include_path : GlobalContext::CppIncludes()) {
- source << "#include " << StringLiteralQuote(include_path) << "\n";
- }
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ source << "#include " << StringLiteralQuote(include_path) << "\n";
+ }
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- source << "#include \"torque-generated/builtins-" +
- DashifyString(n->name()) + "-gen-tq.h\"\n";
- }
- source << "\n";
+ for (SourceId file : SourceFileMap::AllSources()) {
+ source << "#include \"torque-generated/" +
+ SourceFileMap::PathFromV8RootWithoutExtension(file) +
+ "-tq-csa.h\"\n";
+ }
+ source << "\n";
- source << "namespace v8 {\n"
- << "namespace internal {\n"
- << "\n";
+ source << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
- std::string upper_name(nspace->name());
- transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
- ::toupper);
- std::string headerDefine =
- "V8_GEN_TORQUE_GENERATED_" + upper_name + "_NAMESPACE_TQ_H_";
- header << "#ifndef " << headerDefine << "\n";
- header << "#define " << headerDefine << "\n\n";
- header << "#include \"src/compiler/code-assembler.h\"\n";
- header << "#include \"src/codegen/code-stub-assembler.h\"\n";
- header << "#include \"src/utils/utils.h\"\n";
- header << "#include \"torque-generated/field-offsets-tq.h\"\n";
- header << "#include \"torque-generated/csa-types-tq.h\"\n";
- header << "\n";
+ std::string headerDefine =
+ "V8_GEN_TORQUE_GENERATED_" +
+ UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
+ header << "#ifndef " << headerDefine << "\n";
+ header << "#define " << headerDefine << "\n\n";
+ header << "#include \"src/compiler/code-assembler.h\"\n";
+ header << "#include \"src/codegen/code-stub-assembler.h\"\n";
+ header << "#include \"src/utils/utils.h\"\n";
+ header << "#include \"torque-generated/field-offsets-tq.h\"\n";
+ header << "#include \"torque-generated/csa-types-tq.h\"\n";
+ header << "\n";
- header << "namespace v8 {\n"
- << "namespace internal {\n"
- << "\n";
+ header << "namespace v8 {\n"
+ << "namespace internal {\n"
+ << "\n";
+ }
}
-void ImplementationVisitor::EndNamespaceFile(Namespace* nspace) {
- std::ostream& source = nspace->source_stream();
- std::ostream& header = nspace->header_stream();
+void ImplementationVisitor::EndCSAFiles() {
+ for (SourceId file : SourceFileMap::AllSources()) {
+ std::ostream& source = GlobalContext::GeneratedPerFile(file).csa_ccfile;
+ std::ostream& header = GlobalContext::GeneratedPerFile(file).csa_headerfile;
- std::string upper_name(nspace->name());
- transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
- ::toupper);
- std::string headerDefine =
- "V8_GEN_TORQUE_GENERATED_" + upper_name + "_NAMESPACE_V8_H_";
+ std::string headerDefine =
+ "V8_GEN_TORQUE_GENERATED_" +
+ UnderlinifyPath(SourceFileMap::PathFromV8Root(file)) + "_H_";
- source << "} // namespace internal\n"
- << "} // namespace v8\n"
- << "\n";
+ source << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
- header << "} // namespace internal\n"
- << "} // namespace v8\n"
- << "\n";
- header << "#endif // " << headerDefine << "\n";
+ header << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "\n";
+ header << "#endif // " << headerDefine << "\n";
+ }
}
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
@@ -179,14 +180,15 @@ VisitResult ImplementationVisitor::InlineMacro(
DCHECK(macro->IsMethod());
LocalValue this_value = LocalValue{!this_reference->IsVariableAccess(),
this_reference->GetVisitResult()};
- parameter_bindings.Add(kThisParameterName, this_value);
+ parameter_bindings.Add(kThisParameterName, this_value, true);
}
size_t i = 0;
for (auto arg : arguments) {
if (this_reference && i == signature.implicit_count) i++;
+ const bool mark_as_used = signature.implicit_count > i;
const Identifier* name = macro->parameter_names()[i++];
- parameter_bindings.Add(name, LocalValue{true, arg});
+ parameter_bindings.Add(name, LocalValue{true, arg}, mark_as_used);
}
DCHECK_EQ(label_blocks.size(), signature.labels.size());
@@ -217,7 +219,7 @@ VisitResult ImplementationVisitor::InlineMacro(
}
}
macro_end = assembler().NewBlock(std::move(stack));
- macro_end_binding.emplace(&LabelBindingsManager::Get(), "_macro_end",
+ macro_end_binding.emplace(&LabelBindingsManager::Get(), kMacroEndLabelName,
LocalLabel{macro_end, {return_type}});
} else {
SetReturnValue(VisitResult::NeverResult());
@@ -380,13 +382,15 @@ namespace {
std::string AddParameter(size_t i, Builtin* builtin,
Stack<std::string>* parameters,
Stack<const Type*>* parameter_types,
- BlockBindings<LocalValue>* parameter_bindings) {
+ BlockBindings<LocalValue>* parameter_bindings,
+ bool mark_as_used) {
const Identifier* name = builtin->signature().parameter_names[i];
const Type* type = builtin->signature().types()[i];
std::string external_name = "parameter" + std::to_string(i);
parameters->Push(external_name);
StackRange range = parameter_types->PushMany(LowerType(type));
- parameter_bindings->Add(name, LocalValue{true, VisitResult(type, range)});
+ parameter_bindings->Add(name, LocalValue{true, VisitResult(type, range)},
+ mark_as_used);
return external_name;
}
@@ -395,15 +399,15 @@ std::string AddParameter(size_t i, Builtin* builtin,
void ImplementationVisitor::Visit(Builtin* builtin) {
if (builtin->IsExternal()) return;
CurrentScope::Scope current_scope(builtin);
+ CurrentCallable::Scope current_callable(builtin);
+ CurrentReturnValue::Scope current_return_value;
+
const std::string& name = builtin->ExternalName();
const Signature& signature = builtin->signature();
source_out() << "TF_BUILTIN(" << name << ", CodeStubAssembler) {\n"
<< " compiler::CodeAssemblerState* state_ = state();"
<< " compiler::CodeAssembler ca_(state());\n";
- CurrentCallable::Scope current_callable(builtin);
- CurrentReturnValue::Scope current_return_value;
-
Stack<const Type*> parameter_types;
Stack<std::string> parameters;
@@ -411,58 +415,128 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
BlockBindings<LocalValue> parameter_bindings(&ValueBindingsManager::Get());
- // Context
- std::string parameter0 = AddParameter(0, builtin, &parameters,
- &parameter_types, &parameter_bindings);
- source_out() << " TNode<Context> " << parameter0
- << " = UncheckedCast<Context>(Parameter("
- << "Descriptor::kContext));\n";
- source_out() << " USE(" << parameter0 << ");\n";
-
- size_t first = 1;
- if (builtin->IsVarArgsJavaScript()) {
- DCHECK(signature.parameter_types.var_args);
- source_out()
- << " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
- std::string parameter1 = AddParameter(
- 1, builtin, &parameters, &parameter_types, &parameter_bindings);
- source_out()
- << " TNode<IntPtrT> arguments_length(ChangeInt32ToIntPtr(argc));\n";
- source_out() << " TNode<RawPtrT> arguments_frame = "
- "UncheckedCast<RawPtrT>(LoadFramePointer());\n";
- source_out() << " TorqueStructArguments "
- "torque_arguments(GetFrameArguments(arguments_frame, "
- "arguments_length));\n";
- source_out() << " CodeStubArguments arguments(this, torque_arguments);\n";
-
- source_out() << " TNode<Object> " << parameter1
- << " = arguments.GetReceiver();\n";
- source_out() << "USE(" << parameter1 << ");\n";
- parameters.Push("torque_arguments.frame");
- parameters.Push("torque_arguments.base");
- parameters.Push("torque_arguments.length");
- const Type* arguments_type = TypeOracle::GetArgumentsType();
- StackRange range = parameter_types.PushMany(LowerType(arguments_type));
- parameter_bindings.Add(
- *signature.arguments_variable,
- LocalValue{true, VisitResult(arguments_type, range)});
-
- first = 2;
- }
-
- for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
- if (i < first) continue;
- const std::string& parameter_name = signature.parameter_names[i]->value;
- const Type* type = signature.types()[i];
- std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
- &parameter_bindings);
- source_out() << " " << type->GetGeneratedTypeName() << " " << var << " = "
- << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
- << ">(Parameter(Descriptor::k"
- << CamelifyString(parameter_name) << "));\n";
- source_out() << " USE(" << var << ");\n";
- }
+ if (builtin->IsVarArgsJavaScript() || builtin->IsFixedArgsJavaScript()) {
+ if (builtin->IsVarArgsJavaScript()) {
+ DCHECK(signature.parameter_types.var_args);
+ if (signature.ExplicitCount() > 0) {
+ Error("Cannot mix explicit parameters with varargs.")
+ .Position(signature.parameter_names[signature.implicit_count]->pos);
+ }
+ source_out()
+ << " Node* argc = Parameter(Descriptor::kJSActualArgumentsCount);\n";
+ source_out()
+ << " TNode<IntPtrT> arguments_length(ChangeInt32ToIntPtr(argc));\n";
+ source_out() << " TNode<RawPtrT> arguments_frame = "
+ "UncheckedCast<RawPtrT>(LoadFramePointer());\n";
+ source_out() << " TorqueStructArguments "
+ "torque_arguments(GetFrameArguments(arguments_frame, "
+ "arguments_length));\n";
+ source_out()
+ << " CodeStubArguments arguments(this, torque_arguments);\n";
+
+ parameters.Push("torque_arguments.frame");
+ parameters.Push("torque_arguments.base");
+ parameters.Push("torque_arguments.length");
+ const Type* arguments_type = TypeOracle::GetArgumentsType();
+ StackRange range = parameter_types.PushMany(LowerType(arguments_type));
+ parameter_bindings.Add(
+ *signature.arguments_variable,
+ LocalValue{true, VisitResult(arguments_type, range)}, true);
+ }
+
+ for (size_t i = 0; i < signature.implicit_count; ++i) {
+ const std::string& param_name = signature.parameter_names[i]->value;
+ SourcePosition param_pos = signature.parameter_names[i]->pos;
+ std::string generated_name = AddParameter(
+ i, builtin, &parameters, &parameter_types, &parameter_bindings, true);
+ const Type* actual_type = signature.parameter_types.types[i];
+ const Type* expected_type;
+ if (param_name == "context") {
+ source_out() << " TNode<Context> " << generated_name
+ << " = UncheckedCast<Context>(Parameter("
+ << "Descriptor::kContext));\n";
+ source_out() << " USE(" << generated_name << ");\n";
+ expected_type = TypeOracle::GetContextType();
+ } else if (param_name == "receiver") {
+ source_out()
+ << " TNode<Object> " << generated_name << " = "
+ << (builtin->IsVarArgsJavaScript()
+ ? "arguments.GetReceiver()"
+ : "UncheckedCast<Object>(Parameter(Descriptor::kReceiver))")
+ << ";\n";
+ source_out() << "USE(" << generated_name << ");\n";
+ expected_type = TypeOracle::GetObjectType();
+ } else if (param_name == "newTarget") {
+ source_out() << " TNode<Object> " << generated_name
+ << " = UncheckedCast<Object>(Parameter("
+ << "Descriptor::kJSNewTarget));\n";
+ source_out() << "USE(" << generated_name << ");\n";
+ expected_type = TypeOracle::GetObjectType();
+ } else if (param_name == "target") {
+ source_out() << " TNode<JSFunction> " << generated_name
+ << " = UncheckedCast<JSFunction>(Parameter("
+ << "Descriptor::kJSTarget));\n";
+ source_out() << "USE(" << generated_name << ");\n";
+ expected_type = TypeOracle::GetJSFunctionType();
+ } else {
+ Error(
+ "Unexpected implicit parameter \"", param_name,
+ "\" for JavaScript calling convention, "
+ "expected \"context\", \"receiver\", \"target\", or \"newTarget\"")
+ .Position(param_pos);
+ expected_type = actual_type;
+ }
+ if (actual_type != expected_type) {
+ Error("According to JavaScript calling convention, expected parameter ",
+ param_name, " to have type ", *expected_type, " but found type ",
+ *actual_type)
+ .Position(param_pos);
+ }
+ }
+
+ for (size_t i = signature.implicit_count;
+ i < signature.parameter_names.size(); ++i) {
+ const std::string& parameter_name = signature.parameter_names[i]->value;
+ const Type* type = signature.types()[i];
+ const bool mark_as_used = signature.implicit_count > i;
+ std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
+ &parameter_bindings, mark_as_used);
+ source_out() << " " << type->GetGeneratedTypeName() << " " << var
+ << " = "
+ << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
+ << ">(Parameter(Descriptor::k"
+ << CamelifyString(parameter_name) << "));\n";
+ source_out() << " USE(" << var << ");\n";
+ }
+
+ } else {
+ DCHECK(builtin->IsStub());
+
+ // Context
+ const bool context_is_implicit = signature.implicit_count > 0;
+ std::string parameter0 =
+ AddParameter(0, builtin, &parameters, &parameter_types,
+ &parameter_bindings, context_is_implicit);
+ source_out() << " TNode<Context> " << parameter0
+ << " = UncheckedCast<Context>(Parameter("
+ << "Descriptor::kContext));\n";
+ source_out() << " USE(" << parameter0 << ");\n";
+
+ for (size_t i = 1; i < signature.parameter_names.size(); ++i) {
+ const std::string& parameter_name = signature.parameter_names[i]->value;
+ const Type* type = signature.types()[i];
+ const bool mark_as_used = signature.implicit_count > i;
+ std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
+ &parameter_bindings, mark_as_used);
+ source_out() << " " << type->GetGeneratedTypeName() << " " << var
+ << " = "
+ << "UncheckedCast<" << type->GetGeneratedTNodeTypeName()
+ << ">(Parameter(Descriptor::k"
+ << CamelifyString(parameter_name) << "));\n";
+ source_out() << " USE(" << var << ");\n";
+ }
+ }
assembler_ = CfgAssembler(parameter_types);
const Type* body_result = Visit(*builtin->body());
if (body_result != TypeOracle::GetNeverType()) {
@@ -961,6 +1035,26 @@ const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
"Torque assert '" + FormatAssertSource(stmt->source) + "' failed"});
assembler().Bind(true_block);
+ } else {
+ // Visit the expression so bindings only used in asserts are marked
+ // as such. Otherwise they might be wrongly reported as unused bindings
+ // in release builds.
+ stmt->expression->VisitAllSubExpressions([](Expression* expression) {
+ if (auto id = IdentifierExpression::DynamicCast(expression)) {
+ ValueBindingsManager::Get().TryLookup(id->name->value);
+ } else if (auto call = CallExpression::DynamicCast(expression)) {
+ for (Identifier* label : call->labels) {
+ LabelBindingsManager::Get().TryLookup(label->value);
+ }
+ // TODO(szuend): In case the call expression resolves to a macro
+ // callable, mark the macro as used as well.
+ } else if (auto call = CallMethodExpression::DynamicCast(expression)) {
+ for (Identifier* label : call->labels) {
+ LabelBindingsManager::Get().TryLookup(label->value);
+ }
+ // TODO(szuend): Mark the underlying macro as used.
+ }
+ });
}
return TypeOracle::GetVoidType();
}
@@ -978,7 +1072,7 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
ReportError(s.str());
}
LocalLabel* end =
- current_callable->IsMacro() ? LookupLabel("_macro_end") : nullptr;
+ current_callable->IsMacro() ? LookupLabel(kMacroEndLabelName) : nullptr;
if (current_callable->HasReturnValue()) {
if (!stmt->value) {
std::stringstream s;
@@ -1016,81 +1110,6 @@ const Type* ImplementationVisitor::Visit(ReturnStatement* stmt) {
return TypeOracle::GetNeverType();
}
-const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
- VisitResult expression_result = Visit(stmt->iterable);
- VisitResult begin = stmt->begin
- ? Visit(*stmt->begin)
- : VisitResult(TypeOracle::GetConstInt31Type(), "0");
-
- VisitResult end = stmt->end
- ? Visit(*stmt->end)
- : GenerateCall(".length", {{expression_result}, {}});
-
- const Type* common_type = GetCommonType(begin.type(), end.type());
- VisitResult index = GenerateImplicitConvert(common_type, begin);
-
- Block* body_block = assembler().NewBlock();
- Block* increment_block = assembler().NewBlock(assembler().CurrentStack());
- Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
-
- Block* header_block = assembler().NewBlock();
-
- assembler().Goto(header_block);
-
- assembler().Bind(header_block);
-
- BreakContinueActivator activator(exit_block, increment_block);
-
- {
- StackScope comparison_scope(this);
- VisitResult result = GenerateCall("<", {{index, end}, {}});
- if (result.type() != TypeOracle::GetBoolType()) {
- ReportError("operator < with arguments(", *index.type(), ", ",
- *end.type(),
- ") used in for-of loop has to return type bool, but "
- "returned type ",
- *result.type());
- }
- comparison_scope.Yield(result);
- }
- assembler().Branch(body_block, exit_block);
-
- assembler().Bind(body_block);
- {
- VisitResult element_result;
- {
- StackScope element_scope(this);
- VisitResult result = GenerateCall("[]", {{expression_result, index}, {}});
- if (stmt->var_declaration->type) {
- const Type* declared_type =
- TypeVisitor::ComputeType(*stmt->var_declaration->type);
- result = GenerateImplicitConvert(declared_type, result);
- }
- element_result = element_scope.Yield(result);
- }
- Binding<LocalValue> element_var_binding{&ValueBindingsManager::Get(),
- stmt->var_declaration->name->value,
- LocalValue{true, element_result}};
- Visit(stmt->body);
- }
- assembler().Goto(increment_block);
-
- assembler().Bind(increment_block);
- {
- Arguments increment_args;
- increment_args.parameters = {index, {TypeOracle::GetConstInt31Type(), "1"}};
- VisitResult increment_result = GenerateCall("+", increment_args);
-
- GenerateAssignToLocation(LocationReference::VariableAccess(index),
- increment_result);
- }
-
- assembler().Goto(header_block);
-
- assembler().Bind(exit_block);
- return TypeOracle::GetVoidType();
-}
-
VisitResult ImplementationVisitor::TemporaryUninitializedStruct(
const StructType* struct_type, const std::string& reason) {
StackRange range = assembler().TopRange(0);
@@ -1346,43 +1365,51 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
InitializerResults initializer_results =
VisitInitializerResults(class_type, expr->initializers);
- // Output the code to generate an uninitialized object of the class size in
- // the GC heap.
- VisitResult allocate_result;
+ VisitResult object_map;
+ const Field& map_field = class_type->LookupField("map");
+ if (map_field.offset != 0) {
+ ReportError("class initializers must have a map as first parameter");
+ }
+ const std::map<std::string, VisitResult>& initializer_fields =
+ initializer_results.field_value_map;
+ auto it_object_map = initializer_fields.find(map_field.name_and_type.name);
if (class_type->IsExtern()) {
- const Field& map_field = class_type->LookupField("map");
- if (map_field.offset != 0) {
- ReportError(
- "external classes initializers must have a map as first parameter");
- }
- NameValueMap initializer_fields = initializer_results.field_value_map;
- if (initializer_fields.find(map_field.name_and_type.name) ==
- initializer_fields.end()) {
+ if (it_object_map == initializer_fields.end()) {
ReportError("Constructor for ", class_type->name(),
" needs Map argument!");
}
- VisitResult object_map = initializer_fields[map_field.name_and_type.name];
- Arguments size_arguments;
- size_arguments.parameters.push_back(object_map);
- VisitResult object_size = GenerateCall("%GetAllocationBaseSize",
- size_arguments, {class_type}, false);
-
- object_size =
- AddVariableObjectSize(object_size, class_type, initializer_results);
-
- Arguments allocate_arguments;
- allocate_arguments.parameters.push_back(object_size);
- allocate_result =
- GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
- DCHECK(allocate_result.IsOnStack());
+ object_map = it_object_map->second;
} else {
- Arguments allocate_arguments;
- allocate_arguments.parameters.push_back(
- VisitResult(TypeOracle::GetConstexprIntPtrType(),
- std::to_string(class_type->size() / kTaggedSize)));
- allocate_result = GenerateCall("%AllocateInternalClass", allocate_arguments,
- {class_type}, false);
- }
+ if (it_object_map != initializer_fields.end()) {
+ ReportError(
+ "Constructor for ", class_type->name(),
+ " must not specify Map argument; it is automatically inserted.");
+ }
+ Arguments get_struct_map_arguments;
+ get_struct_map_arguments.parameters.push_back(
+ VisitResult(TypeOracle::GetConstexprInstanceTypeType(),
+ CapifyStringWithUnderscores(class_type->name()) + "_TYPE"));
+ object_map =
+ GenerateCall("%GetStructMap", get_struct_map_arguments, {}, false);
+ CurrentSourcePosition::Scope current_pos(expr->pos);
+ initializer_results.names.insert(initializer_results.names.begin(),
+ MakeNode<Identifier>("map"));
+ initializer_results.field_value_map[map_field.name_and_type.name] =
+ object_map;
+ }
+ Arguments size_arguments;
+ size_arguments.parameters.push_back(object_map);
+ VisitResult object_size = GenerateCall("%GetAllocationBaseSize",
+ size_arguments, {class_type}, false);
+
+ object_size =
+ AddVariableObjectSize(object_size, class_type, initializer_results);
+
+ Arguments allocate_arguments;
+ allocate_arguments.parameters.push_back(object_size);
+ VisitResult allocate_result =
+ GenerateCall("%Allocate", allocate_arguments, {class_type}, false);
+ DCHECK(allocate_result.IsOnStack());
InitializeAggregate(class_type, allocate_result, initializer_results);
@@ -1390,7 +1417,8 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
}
const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
- base::Optional<Binding<LocalLabel>*> break_label = TryLookupLabel("_break");
+ base::Optional<Binding<LocalLabel>*> break_label =
+ TryLookupLabel(kBreakLabelName);
if (!break_label) {
ReportError("break used outside of loop");
}
@@ -1400,7 +1428,7 @@ const Type* ImplementationVisitor::Visit(BreakStatement* stmt) {
const Type* ImplementationVisitor::Visit(ContinueStatement* stmt) {
base::Optional<Binding<LocalLabel>*> continue_label =
- TryLookupLabel("_continue");
+ TryLookupLabel(kContinueLabelName);
if (!continue_label) {
ReportError("continue used outside of loop");
}
@@ -1466,17 +1494,21 @@ VisitResult ImplementationVisitor::Visit(SpreadExpression* expr) {
"initialization expressions");
}
-void ImplementationVisitor::GenerateImplementation(const std::string& dir,
- Namespace* nspace) {
- std::string new_source(nspace->source());
- std::string base_file_name =
- "builtins-" + DashifyString(nspace->name()) + "-gen-tq";
+void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
+ for (SourceId file : SourceFileMap::AllSources()) {
+ std::string path_from_root =
+ SourceFileMap::PathFromV8RootWithoutExtension(file);
- std::string source_file_name = dir + "/" + base_file_name + ".cc";
- WriteFile(source_file_name, new_source);
- std::string new_header(nspace->header());
- std::string header_file_name = dir + "/" + base_file_name + ".h";
- WriteFile(header_file_name, new_header);
+ std::string new_source(
+ GlobalContext::GeneratedPerFile(file).csa_ccfile.str());
+
+ std::string source_file_name = dir + "/" + path_from_root + "-tq-csa.cc";
+ WriteFile(source_file_name, new_source);
+ std::string new_header(
+ GlobalContext::GeneratedPerFile(file).csa_headerfile.str());
+ std::string header_file_name = dir + "/" + path_from_root + "-tq-csa.h";
+ WriteFile(header_file_name, new_header);
+ }
}
void ImplementationVisitor::GenerateMacroFunctionDeclaration(
@@ -1569,7 +1601,7 @@ void FailCallableLookup(const std::string& reason, const QualifiedName& name,
Callable* GetOrCreateSpecialization(const SpecializationKey& key) {
if (base::Optional<Callable*> specialization =
- key.generic->GetSpecialization(key.specialized_types)) {
+ key.generic->specializations().Get(key.specialized_types)) {
return *specialization;
}
return DeclarationVisitor::SpecializeImplicit(key);
@@ -1876,7 +1908,7 @@ LocationReference ImplementationVisitor::GetLocationReference(
return LocationReference::Temporary(
(*value)->value, "constant value " + expr->name->value);
}
- return LocationReference::VariableAccess((*value)->value);
+ return LocationReference::VariableAccess((*value)->value, *value);
}
}
@@ -1973,6 +2005,12 @@ void ImplementationVisitor::GenerateAssignToLocation(
GenerateImplicitConvert(variable.type(), assignment_value);
assembler().Poke(variable.stack_range(), converted_value.stack_range(),
variable.type());
+
+ // Local variables are detected by the existence of a binding. Assignment
+ // to local variables is recorded to support lint errors.
+ if (reference.binding()) {
+ (*reference.binding())->SetWritten();
+ }
} else if (reference.IsIndexedFieldAccess()) {
ReportError("assigning a value directly to an indexed field isn't allowed");
} else if (reference.IsHeapReference()) {
@@ -2167,6 +2205,7 @@ VisitResult ImplementationVisitor::GenerateCall(
if (is_tailcall) {
ReportError("can't tail call a macro");
}
+ macro->SetUsed();
if (return_type->IsConstexpr()) {
DCHECK_EQ(0, arguments.labels.size());
std::stringstream result;
@@ -2534,6 +2573,7 @@ std::string ImplementationVisitor::ExternalParameterName(
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::ValueBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::LabelBindingsManager)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentCallable)
+DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentFileStreams)
DEFINE_CONTEXTUAL_VARIABLE(ImplementationVisitor::CurrentReturnValue)
bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
@@ -2556,7 +2596,7 @@ bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
base::Optional<Block*> ImplementationVisitor::GetCatchBlock() {
base::Optional<Block*> catch_block;
if (base::Optional<Binding<LocalLabel>*> catch_handler =
- TryLookupLabel("_catch")) {
+ TryLookupLabel(kCatchLabelName)) {
catch_block = assembler().NewBlock(base::nullopt, true);
}
return catch_block;
@@ -2566,7 +2606,7 @@ void ImplementationVisitor::GenerateCatchBlock(
base::Optional<Block*> catch_block) {
if (catch_block) {
base::Optional<Binding<LocalLabel>*> catch_handler =
- TryLookupLabel("_catch");
+ TryLookupLabel(kCatchLabelName);
if (assembler().CurrentBlockIsComplete()) {
assembler().Bind(*catch_block);
assembler().Goto((*catch_handler)->block, 1);
@@ -2594,6 +2634,12 @@ void ImplementationVisitor::VisitAllDeclarables() {
void ImplementationVisitor::Visit(Declarable* declarable) {
CurrentScope::Scope current_scope(declarable->ParentScope());
CurrentSourcePosition::Scope current_source_position(declarable->Position());
+ CurrentFileStreams::Scope current_file_streams(
+ &GlobalContext::GeneratedPerFile(declarable->Position().source));
+ if (Callable* callable = Callable::DynamicCast(declarable)) {
+ if (!callable->ShouldGenerateExternalCode())
+ CurrentFileStreams::Get() = nullptr;
+ }
switch (declarable->kind()) {
case Declarable::kExternMacro:
return Visit(ExternMacro::cast(declarable));
@@ -2612,6 +2658,7 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
case Declarable::kExternConstant:
case Declarable::kNamespace:
case Declarable::kGeneric:
+ case Declarable::kGenericStructType:
return;
}
}
@@ -2891,9 +2938,81 @@ class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
private:
std::ostream& out_;
};
-
} // namespace
+void ImplementationVisitor::GenerateInstanceTypes(
+ const std::string& output_directory) {
+ std::stringstream header;
+ std::string file_name = "instance-types-tq.h";
+ {
+ IncludeGuardScope(header, file_name);
+
+ header << "#define TORQUE_DEFINED_INSTANCE_TYPES(V) \\\n";
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (type->IsExtern()) continue;
+ std::string type_name =
+ CapifyStringWithUnderscores(type->name()) + "_TYPE";
+ header << " V(" << type_name << ") \\\n";
+ }
+ header << "\n\n";
+
+ header << "#define TORQUE_STRUCT_LIST_GENERATOR(V, _) \\\n";
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (type->IsExtern()) continue;
+ std::string type_name =
+ CapifyStringWithUnderscores(type->name()) + "_TYPE";
+ std::string variable_name = SnakeifyString(type->name());
+ header << " V(_, " << type_name << ", " << type->name() << ", "
+ << variable_name << ") \\\n";
+ }
+ header << "\n";
+ }
+ std::string output_header_path = output_directory + "/" + file_name;
+ WriteFile(output_header_path, header.str());
+}
+
+void ImplementationVisitor::GenerateCppForInternalClasses(
+ const std::string& output_directory) {
+ std::stringstream header;
+ std::stringstream inl;
+ std::string base_name = "internal-class-definitions-tq";
+ {
+ IncludeGuardScope header_guard(header, base_name + ".h");
+ header << "#include \"src/objects/objects.h\"\n";
+ header << "#include \"src/objects/struct.h\"\n";
+ header << "#include \"src/objects/js-objects.h\"\n";
+ header << "#include \"src/utils/utils.h\"\n";
+ header << "#include \"torque-generated/class-definitions-tq.h\"\n";
+ IncludeObjectMacrosScope header_macros(header);
+ NamespaceScope header_namespaces(header, {"v8", "internal"});
+
+ IncludeGuardScope inl_guard(inl, base_name + "-inl.h");
+ inl << "#include \"torque-generated/" << base_name << ".h\"\n";
+ inl << "#include \"torque-generated/class-definitions-tq-inl.h\"\n";
+ IncludeObjectMacrosScope inl_macros(inl);
+ NamespaceScope inl_namespaces(inl, {"v8", "internal"});
+
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (type->IsExtern()) continue;
+ const ClassType* super = type->GetSuperClass();
+ std::string parent = "TorqueGenerated" + type->name() + "<" +
+ type->name() + ", " + super->name() + ">";
+ header << "class " << type->name() << ": public " << parent << " {\n";
+ header << " public:\n";
+ header << " TQ_OBJECT_CONSTRUCTORS(" << type->name() << ")\n";
+ header << "};\n\n";
+
+ inl << "TQ_OBJECT_CONSTRUCTORS_IMPL(" << type->name() << ")\n";
+ }
+ }
+ std::string dir_basename = output_directory + "/" + base_name;
+ WriteFile(dir_basename + ".h", header.str());
+ WriteFile(dir_basename + "-inl.h", inl.str());
+}
+
void ImplementationVisitor::GenerateClassFieldOffsets(
const std::string& output_directory) {
std::stringstream header;
@@ -2903,7 +3022,6 @@ void ImplementationVisitor::GenerateClassFieldOffsets(
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
- if (!type->IsExtern()) continue;
// TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
// to generate field offsets without the use of macros.
@@ -2938,8 +3056,8 @@ class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
hdr_ << " static constexpr int " << field << " = " << previous_field_end_
<< ";\n";
hdr_ << " static constexpr int " << field_end << " = " << field << " + "
- << size_string << ";\n";
- previous_field_end_ = field_end;
+ << size_string << " - 1;\n";
+ previous_field_end_ = field_end + " + 1";
}
virtual void WriteMarker(const std::string& marker) {
hdr_ << " static constexpr int " << marker << " = " << previous_field_end_
@@ -3148,7 +3266,7 @@ void CppClassGenerator::GenerateFieldAccessorForSmi(const Field& f) {
// Generate implementation in inline header.
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
- inl_ << " return Smi::cast(READ_FIELD(*this, " << offset << "));\n";
+ inl_ << " return TaggedField<Smi, " << offset << ">::load(*this);\n";
inl_ << "}\n";
inl_ << "template <class D, class P>\n";
@@ -3173,6 +3291,7 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
hdr_ << " // Torque type: " << field_type->ToString() << "\n";
}
hdr_ << " inline " << type << " " << name << "() const;\n";
+ hdr_ << " inline " << type << " " << name << "(Isolate* isolate) const;\n";
hdr_ << " inline void set_" << name << "(" << type
<< " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
@@ -3185,10 +3304,20 @@ void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
// Generate implementation in inline header.
inl_ << "template <class D, class P>\n";
inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
- inl_ << " Object value = READ_FIELD(*this, " << offset << ");\n";
+ inl_ << " Isolate* isolate = GetIsolateForPtrCompr(*this);\n";
+ inl_ << " return " << gen_name_ << "::" << name << "(isolate);\n";
+ inl_ << "}\n";
+
+ inl_ << "template <class D, class P>\n";
+ inl_ << type << " " << gen_name_ << "<D, P>::" << name
+ << "(Isolate* isolate) const {\n";
if (class_type) {
- inl_ << " return " << type << "::cast(value);\n";
+ inl_ << " return TaggedField<" << type << ", " << offset
+ << ">::load(isolate, *this);\n";
} else {
+ // TODO(tebbi): load value as HeapObject when possible
+ inl_ << " Object value = TaggedField<Object, " << offset
+ << ">::load(isolate, *this);\n";
inl_ << " DCHECK(" << type_check << ");\n";
inl_ << " return value;\n";
}
@@ -3235,14 +3364,18 @@ void ImplementationVisitor::GenerateClassDefinitions(
<< "#include \"torque-generated/class-definitions-tq.h\"\n\n";
implementation << "#include \"torque-generated/class-verifiers-tq.h\"\n\n";
implementation << "#include \"src/objects/struct-inl.h\"\n\n";
+ implementation
+ << "#include "
+ "\"torque-generated/internal-class-definitions-tq-inl.h\"\n\n";
NamespaceScope implementation_namespaces(implementation,
{"v8", "internal"});
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
- if (!type->GenerateCppClassDefinitions()) continue;
- CppClassGenerator g(type, header, inline_header, implementation);
- g.GenerateClass();
+ if (type->GenerateCppClassDefinitions()) {
+ CppClassGenerator g(type, header, inline_header, implementation);
+ g.GenerateClass();
+ }
}
}
WriteFile(file_basename + ".h", header.str());
@@ -3282,6 +3415,8 @@ void ImplementationVisitor::GeneratePrintDefinitions(
impl << "#include \"src/objects/objects.h\"\n\n";
impl << "#include <iosfwd>\n\n";
+ impl << "#include "
+ "\"torque-generated/internal-class-definitions-tq-inl.h\"\n";
impl << "#include \"src/objects/struct-inl.h\"\n\n";
impl << "#include \"src/objects/template-objects-inl.h\"\n\n";
@@ -3291,7 +3426,7 @@ void ImplementationVisitor::GeneratePrintDefinitions(
const ClassType* type = ClassType::DynamicCast(alias->type());
if (!type->ShouldGeneratePrint()) continue;
- if (type->IsExtern() && type->GenerateCppClassDefinitions()) {
+ if (type->GenerateCppClassDefinitions()) {
const ClassType* super = type->GetSuperClass();
std::string gen_name = "TorqueGenerated" + type->name();
std::string gen_name_T =
@@ -3319,8 +3454,10 @@ void GenerateClassFieldVerifier(const std::string& class_name,
if (!f.generate_verify) return;
const Type* field_type = f.name_and_type.type;
- // We only verify tagged types, not raw numbers or pointers.
- if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) return;
+ // We only verify tagged types, not raw numbers or pointers. Note that this
+ // must check against GetObjectType not GetTaggedType, because Uninitialized
+ // is a Tagged but should not be verified.
+ if (!field_type->IsSubtypeOf(TypeOracle::GetObjectType())) return;
if (f.index) {
if ((*f.index)->name_and_type.type != TypeOracle::GetSmiType()) {
@@ -3328,26 +3465,24 @@ void GenerateClassFieldVerifier(const std::string& class_name,
}
// We already verified the index field because it was listed earlier, so we
// can assume it's safe to read here.
- cc_contents << " for (int i = 0; i < Smi::ToInt(READ_FIELD(o, "
- << class_name << "::k"
- << CamelifyString((*f.index)->name_and_type.name)
- << "Offset)); ++i) {\n";
+ cc_contents << " for (int i = 0; i < TaggedField<Smi, " << class_name
+ << "::k" << CamelifyString((*f.index)->name_and_type.name)
+ << "Offset>::load(o).value(); ++i) {\n";
} else {
cc_contents << " {\n";
}
const char* object_type = f.is_weak ? "MaybeObject" : "Object";
- const char* read_fn = f.is_weak ? "READ_WEAK_FIELD" : "READ_FIELD";
const char* verify_fn =
f.is_weak ? "VerifyMaybeObjectPointer" : "VerifyPointer";
- const char* index_offset = f.index ? " + i * kTaggedSize" : "";
+ const char* index_offset = f.index ? "i * kTaggedSize" : "0";
// Name the local var based on the field name for nicer CHECK output.
- const std::string value = f.name_and_type.name + "_value";
+ const std::string value = f.name_and_type.name + "__value";
// Read the field.
- cc_contents << " " << object_type << " " << value << " = " << read_fn
- << "(o, " << class_name << "::k"
- << CamelifyString(f.name_and_type.name) << "Offset"
+ cc_contents << " " << object_type << " " << value << " = TaggedField<"
+ << object_type << ", " << class_name << "::k"
+ << CamelifyString(f.name_and_type.name) << "Offset>::load(o, "
<< index_offset << ");\n";
// Call VerifyPointer or VerifyMaybeObjectPointer on it.
@@ -3365,16 +3500,6 @@ void GenerateClassFieldVerifier(const std::string& class_name,
if (!type_check.empty()) type_check += " || ";
type_check += strong_value + ".Is" + runtime_type + "()";
}
- // Many subtypes of JSObject can be verified in partially-initialized states
- // where their fields are all undefined. We explicitly allow that here. For
- // any such fields that should never be undefined, we can include extra code
- // in the custom verifier functions for them.
- // TODO(1240798): If Factory::InitializeJSObjectFromMap is updated to use
- // correct initial values based on the type of the field, then make this
- // check stricter too.
- if (class_type.IsSubtypeOf(TypeOracle::GetJSObjectType())) {
- type_check += " || " + strong_value + ".IsUndefined(isolate)";
- }
cc_contents << " CHECK(" << type_check << ");\n";
}
cc_contents << " }\n";
@@ -3398,6 +3523,8 @@ void ImplementationVisitor::GenerateClassVerifiers(
cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
}
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
+ cc_contents << "#include "
+ "\"torque-generated/internal-class-definitions-tq-inl.h\"\n";
IncludeObjectMacrosScope object_macros(cc_contents);
@@ -3408,7 +3535,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
h_contents << "class Isolate;\n";
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
- if (!type->IsExtern() || !type->ShouldGenerateVerify()) continue;
+ if (!type->ShouldGenerateVerify()) continue;
h_contents << "class " << type->name() << ";\n";
}
@@ -3420,7 +3547,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
for (const TypeAlias* alias : GlobalContext::GetClasses()) {
const ClassType* type = ClassType::DynamicCast(alias->type());
std::string name = type->name();
- if (!type->IsExtern() || !type->ShouldGenerateVerify()) continue;
+ if (!type->ShouldGenerateVerify()) continue;
std::string method_name = name + "Verify";
@@ -3483,10 +3610,14 @@ void ImplementationVisitor::GenerateExportedMacrosAssembler(
h_contents << "#include \"src/compiler/code-assembler.h\"\n";
h_contents << "#include \"src/execution/frames.h\"\n";
h_contents << "#include \"torque-generated/csa-types-tq.h\"\n";
+ h_contents
+ << "#include \"torque-generated/internal-class-definitions-tq.h\"\n";
cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- cc_contents << "#include \"torque-generated/builtins-" +
- DashifyString(n->name()) + "-gen-tq.h\"\n";
+
+ for (SourceId file : SourceFileMap::AllSources()) {
+ cc_contents << "#include \"torque-generated/" +
+ SourceFileMap::PathFromV8RootWithoutExtension(file) +
+ "-tq-csa.h\"\n";
}
NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
@@ -3541,13 +3672,13 @@ void ImplementationVisitor::GenerateCSATypes(
NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
- for (auto& declarable : GlobalContext::AllDeclarables()) {
- TypeAlias* alias = TypeAlias::DynamicCast(declarable.get());
- if (!alias || alias->IsRedeclaration()) continue;
- const StructType* struct_type = StructType::DynamicCast(alias->type());
+ // Generates headers for all structs in a topologically-sorted order, since
+ // TypeOracle keeps them in the order of their resolution
+ for (auto& type : *TypeOracle::GetAggregateTypes()) {
+ const StructType* struct_type = StructType::DynamicCast(type.get());
if (!struct_type) continue;
- const std::string& name = struct_type->name();
- h_contents << "struct TorqueStruct" << name << " {\n";
+ h_contents << "struct " << struct_type->GetGeneratedTypeNameImpl()
+ << " {\n";
for (auto& field : struct_type->fields()) {
h_contents << " " << field.name_and_type.type->GetGeneratedTypeName();
h_contents << " " << field.name_and_type.name << ";\n";
@@ -3583,6 +3714,33 @@ void ImplementationVisitor::GenerateCSATypes(
WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
}
+void ReportAllUnusedMacros() {
+ for (const auto& declarable : GlobalContext::AllDeclarables()) {
+ if (!declarable->IsMacro() || declarable->IsExternMacro()) continue;
+
+ Macro* macro = Macro::cast(declarable.get());
+ if (macro->IsUsed()) continue;
+
+ if (macro->IsTorqueMacro() && TorqueMacro::cast(macro)->IsExportedToCSA()) {
+ continue;
+ }
+
+ std::vector<std::string> ignored_prefixes = {"Convert<", "Cast<",
+ "FromConstexpr<"};
+ const std::string name = macro->ReadableName();
+ const bool ignore =
+ std::any_of(ignored_prefixes.begin(), ignored_prefixes.end(),
+ [&name](const std::string& prefix) {
+ return StringStartsWith(name, prefix);
+ });
+
+ if (!ignore) {
+ Lint("Macro '", macro->ReadableName(), "' is never used.")
+ .Position(macro->IdentifierPosition());
+ }
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index e79c768e5c..a572ebb936 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/torque/ast.h"
#include "src/torque/cfg.h"
+#include "src/torque/declarations.h"
#include "src/torque/global-context.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -18,6 +19,10 @@ namespace v8 {
namespace internal {
namespace torque {
+template <typename T>
+class Binding;
+struct LocalValue;
+
// LocationReference is the representation of an l-value, so a value that might
// allow for assignment. For uniformity, this class can also represent
// unassignable temporaries. Assignable values fall in two categories:
@@ -26,10 +31,13 @@ namespace torque {
class LocationReference {
public:
// An assignable stack range.
- static LocationReference VariableAccess(VisitResult variable) {
+ static LocationReference VariableAccess(
+ VisitResult variable,
+ base::Optional<Binding<LocalValue>*> binding = base::nullopt) {
DCHECK(variable.IsOnStack());
LocationReference result;
result.variable_ = std::move(variable);
+ result.binding_ = binding;
return result;
}
// An unassignable value. {description} is only used for error messages.
@@ -145,6 +153,10 @@ class LocationReference {
DCHECK(IsCallAccess());
return *assign_function_;
}
+ base::Optional<Binding<LocalValue>*> binding() const {
+ DCHECK(IsVariableAccess());
+ return binding_;
+ }
private:
base::Optional<VisitResult> variable_;
@@ -155,13 +167,14 @@ class LocationReference {
base::Optional<std::string> assign_function_;
VisitResultVector call_arguments_;
base::Optional<std::string> index_field_;
+ base::Optional<Binding<LocalValue>*> binding_;
LocationReference() = default;
};
struct InitializerResults {
std::vector<Identifier*> names;
- NameValueMap field_value_map;
+ std::map<std::string, VisitResult> field_value_map;
};
template <class T>
@@ -171,7 +184,15 @@ template <class T>
class BindingsManager {
public:
base::Optional<Binding<T>*> TryLookup(const std::string& name) {
- return current_bindings_[name];
+ if (name.length() >= 2 && name[0] == '_' && name[1] != '_') {
+ Error("Trying to reference '", name, "' which is marked as unused.")
+ .Throw();
+ }
+ auto binding = current_bindings_[name];
+ if (binding) {
+ (*binding)->SetUsed();
+ }
+ return binding;
}
private:
@@ -188,7 +209,9 @@ class Binding : public T {
: T(std::forward<Args>(args)...),
manager_(manager),
name_(name),
- previous_binding_(this) {
+ previous_binding_(this),
+ used_(false),
+ written_(false) {
std::swap(previous_binding_, manager_->current_bindings_[name]);
}
template <class... Args>
@@ -196,16 +219,43 @@ class Binding : public T {
: Binding(manager, name->value, std::forward<Args>(args)...) {
declaration_position_ = name->pos;
}
- ~Binding() { manager_->current_bindings_[name_] = previous_binding_; }
+ ~Binding() {
+ if (!used_ && !SkipLintCheck()) {
+ Lint(BindingTypeString(), "'", name_,
+ "' is never used. Prefix with '_' if this is intentional.")
+ .Position(declaration_position_);
+ }
+
+ if (CheckWritten() && !written_ && !SkipLintCheck()) {
+ Lint(BindingTypeString(), "'", name_,
+ "' is never assigned to. Use 'const' instead of 'let'.")
+ .Position(declaration_position_);
+ }
+
+ manager_->current_bindings_[name_] = previous_binding_;
+ }
+
+ std::string BindingTypeString() const;
+ bool CheckWritten() const;
const std::string& name() const { return name_; }
SourcePosition declaration_position() const { return declaration_position_; }
+ bool Used() const { return used_; }
+ void SetUsed() { used_ = true; }
+
+ bool Written() const { return written_; }
+ void SetWritten() { written_ = true; }
+
private:
+ bool SkipLintCheck() const { return name_.length() > 0 && name_[0] == '_'; }
+
BindingsManager<T>* manager_;
const std::string name_;
base::Optional<Binding*> previous_binding_;
SourcePosition declaration_position_ = CurrentSourcePosition::Get();
+ bool used_;
+ bool written_;
DISALLOW_COPY_AND_ASSIGN(Binding);
};
@@ -213,16 +263,20 @@ template <class T>
class BlockBindings {
public:
explicit BlockBindings(BindingsManager<T>* manager) : manager_(manager) {}
- void Add(std::string name, T value) {
+ void Add(std::string name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name);
- bindings_.push_back(base::make_unique<Binding<T>>(manager_, std::move(name),
- std::move(value)));
+ auto binding =
+ base::make_unique<Binding<T>>(manager_, name, std::move(value));
+ if (mark_as_used) binding->SetUsed();
+ bindings_.push_back(std::move(binding));
}
- void Add(const Identifier* name, T value) {
+ void Add(const Identifier* name, T value, bool mark_as_used = false) {
ReportErrorIfAlreadyBound(name->value);
- bindings_.push_back(
- base::make_unique<Binding<T>>(manager_, name, std::move(value)));
+ auto binding =
+ base::make_unique<Binding<T>>(manager_, name, std::move(value));
+ if (mark_as_used) binding->SetUsed();
+ bindings_.push_back(std::move(binding));
}
std::vector<Binding<T>*> bindings() const {
@@ -264,6 +318,25 @@ struct LocalLabel {
: block(block), parameter_types(std::move(parameter_types)) {}
};
+template <>
+inline std::string Binding<LocalValue>::BindingTypeString() const {
+ return "Variable ";
+}
+template <>
+inline bool Binding<LocalValue>::CheckWritten() const {
+ // Do the check only for non-const variables and non struct types.
+ auto binding = *manager_->current_bindings_[name_];
+ return !binding->is_const && !binding->value.type()->IsStructType();
+}
+template <>
+inline std::string Binding<LocalLabel>::BindingTypeString() const {
+ return "Label ";
+}
+template <>
+inline bool Binding<LocalLabel>::CheckWritten() const {
+ return false;
+}
+
struct Arguments {
VisitResultVector parameters;
std::vector<Binding<LocalLabel>*> labels;
@@ -279,9 +352,11 @@ class ImplementationVisitor {
void GenerateClassFieldOffsets(const std::string& output_directory);
void GeneratePrintDefinitions(const std::string& output_directory);
void GenerateClassDefinitions(const std::string& output_directory);
+ void GenerateInstanceTypes(const std::string& output_directory);
void GenerateClassVerifiers(const std::string& output_directory);
void GenerateExportedMacrosAssembler(const std::string& output_directory);
void GenerateCSATypes(const std::string& output_directory);
+ void GenerateCppForInternalClasses(const std::string& output_directory);
VisitResult Visit(Expression* expr);
const Type* Visit(Statement* stmt);
@@ -365,22 +440,23 @@ class ImplementationVisitor {
const Type* Visit(VarDeclarationStatement* stmt);
const Type* Visit(VarDeclarationStatement* stmt,
BlockBindings<LocalValue>* block_bindings);
- const Type* Visit(ForOfLoopStatement* stmt);
const Type* Visit(BlockStatement* block);
const Type* Visit(ExpressionStatement* stmt);
const Type* Visit(DebugStatement* stmt);
const Type* Visit(AssertStatement* stmt);
- void BeginNamespaceFile(Namespace* nspace);
- void EndNamespaceFile(Namespace* nspace);
+ void BeginCSAFiles();
+ void EndCSAFiles();
- void GenerateImplementation(const std::string& dir, Namespace* nspace);
+ void GenerateImplementation(const std::string& dir);
DECLARE_CONTEXTUAL_VARIABLE(ValueBindingsManager,
BindingsManager<LocalValue>);
DECLARE_CONTEXTUAL_VARIABLE(LabelBindingsManager,
BindingsManager<LocalLabel>);
DECLARE_CONTEXTUAL_VARIABLE(CurrentCallable, Callable*);
+ DECLARE_CONTEXTUAL_VARIABLE(CurrentFileStreams,
+ GlobalContext::PerFileStreams*);
DECLARE_CONTEXTUAL_VARIABLE(CurrentReturnValue, base::Optional<VisitResult>);
// A BindingsManagersScope has to be active for local bindings to be created.
@@ -463,9 +539,9 @@ class ImplementationVisitor {
class BreakContinueActivator {
public:
BreakContinueActivator(Block* break_block, Block* continue_block)
- : break_binding_{&LabelBindingsManager::Get(), "_break",
+ : break_binding_{&LabelBindingsManager::Get(), kBreakLabelName,
LocalLabel{break_block}},
- continue_binding_{&LabelBindingsManager::Get(), "_continue",
+ continue_binding_{&LabelBindingsManager::Get(), kContinueLabelName,
LocalLabel{continue_block}} {}
private:
@@ -567,20 +643,16 @@ class ImplementationVisitor {
std::string ExternalParameterName(const std::string& name);
std::ostream& source_out() {
- Callable* callable = CurrentCallable::Get();
- if (!callable || callable->ShouldGenerateExternalCode()) {
- return CurrentNamespace()->source_stream();
- } else {
- return null_stream_;
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->csa_ccfile;
}
+ return null_stream_;
}
std::ostream& header_out() {
- Callable* callable = CurrentCallable::Get();
- if (!callable || callable->ShouldGenerateExternalCode()) {
- return CurrentNamespace()->header_stream();
- } else {
- return null_stream_;
+ if (auto* streams = CurrentFileStreams::Get()) {
+ return streams->csa_headerfile;
}
+ return null_stream_;
}
CfgAssembler& assembler() { return *assembler_; }
@@ -607,6 +679,8 @@ class ImplementationVisitor {
bool is_dry_run_;
};
+void ReportAllUnusedMacros();
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/ls/json-parser.cc b/deps/v8/src/torque/ls/json-parser.cc
index 708b75fd4c..c13449bfcc 100644
--- a/deps/v8/src/torque/ls/json-parser.cc
+++ b/deps/v8/src/torque/ls/json-parser.cc
@@ -183,7 +183,7 @@ class JsonGrammar : public Grammar {
JsonParserResult ParseJson(const std::string& input) {
// Torque needs a CurrentSourceFile scope during parsing.
// As JSON lives in memory only, a unknown file scope is created.
- SourceFileMap::Scope source_map_scope;
+ SourceFileMap::Scope source_map_scope("");
TorqueMessages::Scope messages_scope;
CurrentSourceFile::Scope unkown_file(SourceFileMap::AddSource("<json>"));
diff --git a/deps/v8/src/torque/ls/message-handler.cc b/deps/v8/src/torque/ls/message-handler.cc
index d55c11af56..6ec124b5a2 100644
--- a/deps/v8/src/torque/ls/message-handler.cc
+++ b/deps/v8/src/torque/ls/message-handler.cc
@@ -53,7 +53,7 @@ JsonValue ReadMessage() {
return ParseJson(content).value;
}
-void WriteMessage(JsonValue& message) {
+void WriteMessage(JsonValue message) {
std::string content = SerializeToString(message);
Logger::Log("[outgoing] ", content, "\n\n");
@@ -69,12 +69,12 @@ void ResetCompilationErrorDiagnostics(MessageWriter writer) {
PublishDiagnosticsNotification notification;
notification.set_method("textDocument/publishDiagnostics");
- std::string error_file = SourceFileMap::GetSource(source);
+ std::string error_file = SourceFileMap::AbsolutePath(source);
notification.params().set_uri(error_file);
// Trigger empty array creation.
USE(notification.params().diagnostics_size());
- writer(notification.GetJsonValue());
+ writer(std::move(notification.GetJsonValue()));
}
DiagnosticsFiles::Get() = {};
}
@@ -115,7 +115,7 @@ class DiagnosticCollector {
notification.set_method("textDocument/publishDiagnostics");
std::string file =
- id.IsValid() ? SourceFileMap::GetSource(id) : "<unknown>";
+ id.IsValid() ? SourceFileMap::AbsolutePath(id) : "<unknown>";
notification.params().set_uri(file);
return notification;
}
@@ -151,7 +151,7 @@ void SendCompilationDiagnostics(const TorqueCompilerResult& result,
for (auto& pair : collector.notifications()) {
PublishDiagnosticsNotification& notification = pair.second;
- writer(notification.GetJsonValue());
+ writer(std::move(notification.GetJsonValue()));
// Record all source files for which notifications are sent, so they
// can be reset before the next compiler run.
@@ -164,7 +164,7 @@ void SendCompilationDiagnostics(const TorqueCompilerResult& result,
void CompilationFinished(TorqueCompilerResult result, MessageWriter writer) {
LanguageServerData::Get() = std::move(result.language_server_data);
- SourceFileMap::Get() = result.source_file_map;
+ SourceFileMap::Get() = *result.source_file_map;
SendCompilationDiagnostics(result, writer);
}
@@ -205,7 +205,7 @@ void HandleInitializeRequest(InitializeRequest request, MessageWriter writer) {
// "workspace/didChangeWatchedFiles" capability.
// TODO(szuend): Check if client supports "LocationLink". This will
// influence the result of "goto definition".
- writer(response.GetJsonValue());
+ writer(std::move(response.GetJsonValue()));
}
void HandleInitializedNotification(MessageWriter writer) {
@@ -224,7 +224,7 @@ void HandleInitializedNotification(MessageWriter writer) {
reg.set_id("did-change-id");
reg.set_method("workspace/didChangeWatchedFiles");
- writer(request.GetJsonValue());
+ writer(std::move(request.GetJsonValue()));
}
void HandleTorqueFileListNotification(TorqueFileListNotification notification,
@@ -258,7 +258,7 @@ void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
// the definition not beeing found.
if (!id.IsValid()) {
response.SetNull("result");
- writer(response.GetJsonValue());
+ writer(std::move(response.GetJsonValue()));
return;
}
@@ -272,7 +272,7 @@ void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
response.SetNull("result");
}
- writer(response.GetJsonValue());
+ writer(std::move(response.GetJsonValue()));
}
void HandleChangeWatchedFilesNotification(
@@ -325,13 +325,13 @@ void HandleDocumentSymbolRequest(DocumentSymbolRequest request,
// Trigger empty array creation in case no symbols were found.
USE(response.result_size());
- writer(response.GetJsonValue());
+ writer(std::move(response.GetJsonValue()));
}
} // namespace
-void HandleMessage(JsonValue& raw_message, MessageWriter writer) {
- Request<bool> request(raw_message);
+void HandleMessage(JsonValue raw_message, MessageWriter writer) {
+ Request<bool> request(std::move(raw_message));
// We ignore responses for now. They are matched to requests
// by id and don't have a method set.
@@ -344,21 +344,23 @@ void HandleMessage(JsonValue& raw_message, MessageWriter writer) {
const std::string method = request.method();
if (method == "initialize") {
- HandleInitializeRequest(InitializeRequest(request.GetJsonValue()), writer);
+ HandleInitializeRequest(
+ InitializeRequest(std::move(request.GetJsonValue())), writer);
} else if (method == "initialized") {
HandleInitializedNotification(writer);
} else if (method == "torque/fileList") {
HandleTorqueFileListNotification(
- TorqueFileListNotification(request.GetJsonValue()), writer);
+ TorqueFileListNotification(std::move(request.GetJsonValue())), writer);
} else if (method == "textDocument/definition") {
- HandleGotoDefinitionRequest(GotoDefinitionRequest(request.GetJsonValue()),
- writer);
+ HandleGotoDefinitionRequest(
+ GotoDefinitionRequest(std::move(request.GetJsonValue())), writer);
} else if (method == "workspace/didChangeWatchedFiles") {
HandleChangeWatchedFilesNotification(
- DidChangeWatchedFilesNotification(request.GetJsonValue()), writer);
+ DidChangeWatchedFilesNotification(std::move(request.GetJsonValue())),
+ writer);
} else if (method == "textDocument/documentSymbol") {
- HandleDocumentSymbolRequest(DocumentSymbolRequest(request.GetJsonValue()),
- writer);
+ HandleDocumentSymbolRequest(
+ DocumentSymbolRequest(std::move(request.GetJsonValue())), writer);
} else {
Logger::Log("[error] Message of type ", method, " is not handled!\n\n");
}
diff --git a/deps/v8/src/torque/ls/message-handler.h b/deps/v8/src/torque/ls/message-handler.h
index 3be5cf03e4..7f8a82b869 100644
--- a/deps/v8/src/torque/ls/message-handler.h
+++ b/deps/v8/src/torque/ls/message-handler.h
@@ -24,9 +24,9 @@ namespace ls {
// The message handler might send responses or follow up requests.
// To allow unit testing, the "sending" function is configurable.
-using MessageWriter = void (*)(JsonValue& message);
+using MessageWriter = std::function<void(JsonValue)>;
-V8_EXPORT_PRIVATE void HandleMessage(JsonValue& raw_message, MessageWriter);
+V8_EXPORT_PRIVATE void HandleMessage(JsonValue raw_message, MessageWriter);
// Called when a compilation run finishes. Exposed for testability.
V8_EXPORT_PRIVATE void CompilationFinished(TorqueCompilerResult result,
diff --git a/deps/v8/src/torque/ls/message-pipe.h b/deps/v8/src/torque/ls/message-pipe.h
index 981fed4b2f..0fbdfe4f1c 100644
--- a/deps/v8/src/torque/ls/message-pipe.h
+++ b/deps/v8/src/torque/ls/message-pipe.h
@@ -14,7 +14,7 @@ namespace torque {
namespace ls {
JsonValue ReadMessage();
-void WriteMessage(JsonValue& message);
+void WriteMessage(JsonValue message);
} // namespace ls
} // namespace torque
diff --git a/deps/v8/src/torque/ls/message.h b/deps/v8/src/torque/ls/message.h
index 4389e9265d..0d84d2ffaf 100644
--- a/deps/v8/src/torque/ls/message.h
+++ b/deps/v8/src/torque/ls/message.h
@@ -73,7 +73,7 @@ class Message : public BaseJsonAccessor {
value_ = JsonValue::From(JsonObject{});
set_jsonrpc("2.0");
}
- explicit Message(JsonValue& value) : value_(std::move(value)) {
+ explicit Message(JsonValue value) : value_(std::move(value)) {
CHECK(value_.tag == JsonValue::OBJECT);
}
@@ -241,7 +241,7 @@ class Location : public NestedJsonAccessor {
JSON_OBJECT_ACCESSORS(Range, range)
void SetTo(SourcePosition position) {
- set_uri(SourceFileMap::GetSource(position.source));
+ set_uri(SourceFileMap::AbsolutePath(position.source));
range().start().set_line(position.start.line);
range().start().set_character(position.start.column);
range().end().set_line(position.end.line);
@@ -323,7 +323,7 @@ class SymbolInformation : public NestedJsonAccessor {
template <class T>
class Request : public Message {
public:
- explicit Request(JsonValue& value) : Message(value) {}
+ explicit Request(JsonValue value) : Message(std::move(value)) {}
Request() : Message() {}
JSON_INT_ACCESSORS(id)
@@ -341,7 +341,7 @@ using DocumentSymbolRequest = Request<DocumentSymbolParams>;
template <class T>
class Response : public Message {
public:
- explicit Response(JsonValue& value) : Message(value) {}
+ explicit Response(JsonValue value) : Message(std::move(value)) {}
Response() : Message() {}
JSON_INT_ACCESSORS(id)
@@ -355,7 +355,7 @@ using GotoDefinitionResponse = Response<Location>;
template <class T>
class ResponseArrayResult : public Message {
public:
- explicit ResponseArrayResult(JsonValue& value) : Message(value) {}
+ explicit ResponseArrayResult(JsonValue value) : Message(std::move(value)) {}
ResponseArrayResult() : Message() {}
JSON_INT_ACCESSORS(id)
diff --git a/deps/v8/src/torque/ls/torque-language-server.cc b/deps/v8/src/torque/ls/torque-language-server.cc
index 4cf0b4c9fb..21e2c3957c 100644
--- a/deps/v8/src/torque/ls/torque-language-server.cc
+++ b/deps/v8/src/torque/ls/torque-language-server.cc
@@ -21,7 +21,7 @@ int WrappedMain(int argc, const char** argv) {
Logger::Scope log_scope;
TorqueFileList::Scope files_scope;
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope source_file_map_scope;
+ SourceFileMap::Scope source_file_map_scope("");
DiagnosticsFiles::Scope diagnostics_files_scope;
for (int i = 1; i < argc; ++i) {
@@ -32,13 +32,13 @@ int WrappedMain(int argc, const char** argv) {
}
while (true) {
- auto message = ReadMessage();
+ JsonValue message = ReadMessage();
// TODO(szuend): We should probably offload the actual message handling
// (even the parsing) to a background thread, so we can
// keep receiving messages. We might also receive
// $/cancelRequests or contet updates, that require restarts.
- HandleMessage(message, &WriteMessage);
+ HandleMessage(std::move(message), &WriteMessage);
}
return 0;
}
diff --git a/deps/v8/src/torque/server-data.h b/deps/v8/src/torque/server-data.h
index ebaafb2fd0..04cd0b317f 100644
--- a/deps/v8/src/torque/server-data.h
+++ b/deps/v8/src/torque/server-data.h
@@ -13,6 +13,7 @@
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
#include "src/torque/source-positions.h"
+#include "src/torque/type-oracle.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/torque/source-positions.cc b/deps/v8/src/torque/source-positions.cc
index b10c98f125..69be0e0911 100644
--- a/deps/v8/src/torque/source-positions.cc
+++ b/deps/v8/src/torque/source-positions.cc
@@ -4,6 +4,9 @@
#include "src/torque/source-positions.h"
+#include <fstream>
+#include "src/torque/utils.h"
+
namespace v8 {
namespace internal {
namespace torque {
@@ -12,6 +15,62 @@ DEFINE_CONTEXTUAL_VARIABLE(CurrentSourceFile)
DEFINE_CONTEXTUAL_VARIABLE(CurrentSourcePosition)
DEFINE_CONTEXTUAL_VARIABLE(SourceFileMap)
+// static
+const std::string& SourceFileMap::PathFromV8Root(SourceId file) {
+ CHECK(file.IsValid());
+ return Get().sources_[file.id_];
+}
+
+// static
+std::string SourceFileMap::AbsolutePath(SourceId file) {
+ const std::string& root_path = PathFromV8Root(file);
+ if (StringStartsWith(root_path, "file://")) return root_path;
+ return Get().v8_root_ + "/" + PathFromV8Root(file);
+}
+
+// static
+std::string SourceFileMap::PathFromV8RootWithoutExtension(SourceId file) {
+ std::string path_from_root = PathFromV8Root(file);
+ if (!StringEndsWith(path_from_root, ".tq")) {
+ Error("Not a .tq file: ", path_from_root).Throw();
+ }
+ path_from_root.resize(path_from_root.size() - strlen(".tq"));
+ return path_from_root;
+}
+
+// static
+SourceId SourceFileMap::AddSource(std::string path) {
+ Get().sources_.push_back(std::move(path));
+ return SourceId(static_cast<int>(Get().sources_.size()) - 1);
+}
+
+// static
+SourceId SourceFileMap::GetSourceId(const std::string& path) {
+ for (size_t i = 0; i < Get().sources_.size(); ++i) {
+ if (Get().sources_[i] == path) {
+ return SourceId(static_cast<int>(i));
+ }
+ }
+ return SourceId::Invalid();
+}
+
+// static
+std::vector<SourceId> SourceFileMap::AllSources() {
+ SourceFileMap& self = Get();
+ std::vector<SourceId> result;
+ for (int i = 0; i < static_cast<int>(self.sources_.size()); ++i) {
+ result.push_back(SourceId(i));
+ }
+ return result;
+}
+
+// static
+bool SourceFileMap::FileRelativeToV8RootExists(const std::string& path) {
+ const std::string file = Get().v8_root_ + "/" + path;
+ std::ifstream stream(file);
+ return stream.good();
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/source-positions.h b/deps/v8/src/torque/source-positions.h
index c609d4600b..69ce78cf7c 100644
--- a/deps/v8/src/torque/source-positions.h
+++ b/deps/v8/src/torque/source-positions.h
@@ -71,34 +71,24 @@ struct SourcePosition {
DECLARE_CONTEXTUAL_VARIABLE(CurrentSourceFile, SourceId);
DECLARE_CONTEXTUAL_VARIABLE(CurrentSourcePosition, SourcePosition);
-class SourceFileMap : public ContextualClass<SourceFileMap> {
+class V8_EXPORT_PRIVATE SourceFileMap : public ContextualClass<SourceFileMap> {
public:
- SourceFileMap() = default;
- static const std::string& GetSource(SourceId source) {
- CHECK(source.IsValid());
- return Get().sources_[source.id_];
- }
-
- static SourceId AddSource(std::string path) {
- Get().sources_.push_back(std::move(path));
- return SourceId(static_cast<int>(Get().sources_.size()) - 1);
- }
-
- static SourceId GetSourceId(const std::string& path) {
- for (size_t i = 0; i < Get().sources_.size(); ++i) {
- if (Get().sources_[i] == path) {
- return SourceId(static_cast<int>(i));
- }
- }
- return SourceId::Invalid();
- }
+ explicit SourceFileMap(std::string v8_root) : v8_root_(std::move(v8_root)) {}
+ static const std::string& PathFromV8Root(SourceId file);
+ static std::string PathFromV8RootWithoutExtension(SourceId file);
+ static std::string AbsolutePath(SourceId file);
+ static SourceId AddSource(std::string path);
+ static SourceId GetSourceId(const std::string& path);
+ static std::vector<SourceId> AllSources();
+ static bool FileRelativeToV8RootExists(const std::string& path);
private:
std::vector<std::string> sources_;
+ std::string v8_root_;
};
inline std::string PositionAsString(SourcePosition pos) {
- return SourceFileMap::GetSource(pos.source) + ":" +
+ return SourceFileMap::PathFromV8Root(pos.source) + ":" +
std::to_string(pos.start.line + 1) + ":" +
std::to_string(pos.start.column + 1);
}
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index d761b3ab53..a3da95c747 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -31,7 +31,7 @@ void ReadAndParseTorqueFile(const std::string& path) {
CurrentSourceFile::Scope source_id_scope(source_id);
// path might be either a normal file path or an encoded URI.
- auto maybe_content = ReadFile(path);
+ auto maybe_content = ReadFile(SourceFileMap::AbsolutePath(source_id));
if (!maybe_content) {
if (auto maybe_path = FileUriDecode(path)) {
maybe_content = ReadFile(*maybe_path);
@@ -57,27 +57,27 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
// Two-step process of predeclaration + resolution allows to resolve type
// declarations independent of the order they are given.
- PredeclarationVisitor::Predeclare(GlobalContext::Get().ast());
+ PredeclarationVisitor::Predeclare(GlobalContext::ast());
PredeclarationVisitor::ResolvePredeclarations();
// Process other declarations.
- DeclarationVisitor::Visit(GlobalContext::Get().ast());
+ DeclarationVisitor::Visit(GlobalContext::ast());
// A class types' fields are resolved here, which allows two class fields to
// mutually refer to each others.
- TypeOracle::FinalizeClassTypes();
+ TypeOracle::FinalizeAggregateTypes();
std::string output_directory = options.output_directory;
ImplementationVisitor implementation_visitor;
implementation_visitor.SetDryRun(output_directory.length() == 0);
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- implementation_visitor.BeginNamespaceFile(n);
- }
+ implementation_visitor.BeginCSAFiles();
implementation_visitor.VisitAllDeclarables();
+ ReportAllUnusedMacros();
+
implementation_visitor.GenerateBuiltinDefinitions(output_directory);
implementation_visitor.GenerateClassFieldOffsets(output_directory);
implementation_visitor.GeneratePrintDefinitions(output_directory);
@@ -85,11 +85,11 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
implementation_visitor.GenerateClassVerifiers(output_directory);
implementation_visitor.GenerateExportedMacrosAssembler(output_directory);
implementation_visitor.GenerateCSATypes(output_directory);
+ implementation_visitor.GenerateInstanceTypes(output_directory);
+ implementation_visitor.GenerateCppForInternalClasses(output_directory);
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- implementation_visitor.EndNamespaceFile(n);
- implementation_visitor.GenerateImplementation(output_directory, n);
- }
+ implementation_visitor.EndCSAFiles();
+ implementation_visitor.GenerateImplementation(output_directory);
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::SetGlobalContext(std::move(GlobalContext::Get()));
@@ -101,8 +101,9 @@ void CompileCurrentAst(TorqueCompilerOptions options) {
TorqueCompilerResult CompileTorque(const std::string& source,
TorqueCompilerOptions options) {
- SourceFileMap::Scope source_map_scope;
- CurrentSourceFile::Scope no_file_scope(SourceFileMap::AddSource("<torque>"));
+ SourceFileMap::Scope source_map_scope(options.v8_root);
+ CurrentSourceFile::Scope no_file_scope(
+ SourceFileMap::AddSource("dummy-filename.tq"));
CurrentAst::Scope ast_scope;
TorqueMessages::Scope messages_scope;
LanguageServerData::Scope server_data_scope;
@@ -125,7 +126,7 @@ TorqueCompilerResult CompileTorque(const std::string& source,
TorqueCompilerResult CompileTorque(std::vector<std::string> files,
TorqueCompilerOptions options) {
- SourceFileMap::Scope source_map_scope;
+ SourceFileMap::Scope source_map_scope(options.v8_root);
CurrentSourceFile::Scope unknown_source_file_scope(SourceId::Invalid());
CurrentAst::Scope ast_scope;
TorqueMessages::Scope messages_scope;
@@ -133,7 +134,9 @@ TorqueCompilerResult CompileTorque(std::vector<std::string> files,
TorqueCompilerResult result;
try {
- for (const auto& path : files) ReadAndParseTorqueFile(path);
+ for (const auto& path : files) {
+ ReadAndParseTorqueFile(path);
+ }
CompileCurrentAst(options);
} catch (TorqueAbortCompilation&) {
// Do nothing. The relevant TorqueMessage is part of the
diff --git a/deps/v8/src/torque/torque-compiler.h b/deps/v8/src/torque/torque-compiler.h
index 8e412d1be0..32680986fd 100644
--- a/deps/v8/src/torque/torque-compiler.h
+++ b/deps/v8/src/torque/torque-compiler.h
@@ -17,6 +17,7 @@ namespace torque {
struct TorqueCompilerOptions {
std::string output_directory = "";
+ std::string v8_root = "";
bool collect_language_server_data = false;
// assert(...) are only generated for debug builds. The provide
@@ -29,7 +30,7 @@ struct TorqueCompilerResult {
// Map translating SourceIds to filenames. This field is
// set on errors, so the SourcePosition of the error can be
// resolved.
- SourceFileMap source_file_map;
+ base::Optional<SourceFileMap> source_file_map;
// Eagerly collected data needed for the LanguageServer.
// Set the corresponding options flag to enable.
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index 619096c6a5..0a371b79f9 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/torque/constants.h"
+#include "src/torque/declarations.h"
#include "src/torque/earley-parser.h"
#include "src/torque/torque-parser.h"
#include "src/torque/utils.h"
@@ -128,6 +129,14 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultTypeId::kStdVectorOfNameAndTypeExpression;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<ImplicitParameters>::id =
+ ParseResultTypeId::kImplicitParameters;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<ImplicitParameters>>::id =
+ ParseResultTypeId::kOptionalImplicitParameters;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<std::vector<NameAndExpression>>::id =
ParseResultTypeId::kStdVectorOfNameAndExpression;
template <>
@@ -170,14 +179,6 @@ template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<ParameterList>::id =
ParseResultTypeId::kParameterList;
template <>
-V8_EXPORT_PRIVATE const ParseResultTypeId
- ParseResultHolder<RangeExpression>::id =
- ParseResultTypeId::kRangeExpression;
-template <>
-V8_EXPORT_PRIVATE const ParseResultTypeId
- ParseResultHolder<base::Optional<RangeExpression>>::id =
- ParseResultTypeId::kOptionalRangeExpression;
-template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<TypeList>::id =
ParseResultTypeId::kTypeList;
template <>
@@ -226,11 +227,23 @@ base::Optional<ParseResult> AddGlobalDeclarations(
return base::nullopt;
}
+void NamingConventionError(const std::string& type, const std::string& name,
+ const std::string& convention,
+ SourcePosition pos = CurrentSourcePosition::Get()) {
+ Lint(type, " \"", name, "\" does not follow \"", convention,
+ "\" naming convention.")
+ .Position(pos);
+}
+
+void NamingConventionError(const std::string& type, const Identifier* name,
+ const std::string& convention) {
+ NamingConventionError(type, name->value, convention, name->pos);
+}
+
void LintGenericParameters(const GenericParameters& parameters) {
- for (const Identifier* parameter : parameters) {
+ for (auto parameter : parameters) {
if (!IsUpperCamelCase(parameter->value)) {
- NamingConventionError("Generic parameter", parameter->value,
- "UpperCamelCase");
+ NamingConventionError("Generic parameter", parameter, "UpperCamelCase");
}
}
}
@@ -277,7 +290,7 @@ Expression* MakeCall(IdentifierExpression* callee,
continue;
}
}
- auto label_name = std::string("_label") + std::to_string(label_id++);
+ auto label_name = std::string("__label") + std::to_string(label_id++);
auto label_id = MakeNode<Identifier>(label_name);
label_id->pos = SourcePosition::Invalid();
labels.push_back(label_id);
@@ -371,60 +384,61 @@ base::Optional<ParseResult> MakeSpreadExpression(
return ParseResult{result};
}
-template <bool has_varargs>
-base::Optional<ParseResult> MakeParameterListFromTypes(
+base::Optional<ParseResult> MakeImplicitParameterList(
ParseResultIterator* child_results) {
- auto implicit_params =
- child_results->NextAs<std::vector<NameAndTypeExpression>>();
- auto explicit_types = child_results->NextAs<TypeList>();
- ParameterList result;
- result.has_varargs = has_varargs;
- result.implicit_count = implicit_params.size();
- for (NameAndTypeExpression& implicit_param : implicit_params) {
- if (!IsLowerCamelCase(implicit_param.name->value)) {
- NamingConventionError("Parameter", implicit_param.name->value,
- "lowerCamelCase");
- }
- result.names.push_back(implicit_param.name);
- result.types.push_back(implicit_param.type);
- }
- for (auto* explicit_type : explicit_types) {
- result.types.push_back(explicit_type);
+ auto kind = child_results->NextAs<Identifier*>();
+ auto parameters = child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ return ParseResult{ImplicitParameters{kind, parameters}};
+}
+
+void AddParameter(ParameterList* parameter_list,
+ const NameAndTypeExpression& param) {
+ if (!IsLowerCamelCase(param.name->value)) {
+ NamingConventionError("Parameter", param.name, "lowerCamelCase");
}
- return ParseResult{std::move(result)};
+ parameter_list->names.push_back(param.name);
+ parameter_list->types.push_back(param.type);
}
-template <bool has_varargs>
-base::Optional<ParseResult> MakeParameterListFromNameAndTypeList(
+template <bool has_varargs, bool has_explicit_parameter_names>
+base::Optional<ParseResult> MakeParameterList(
ParseResultIterator* child_results) {
auto implicit_params =
- child_results->NextAs<std::vector<NameAndTypeExpression>>();
- auto explicit_params =
- child_results->NextAs<std::vector<NameAndTypeExpression>>();
- std::string arguments_variable = "";
- if (child_results->HasNext()) {
- arguments_variable = child_results->NextAs<std::string>();
- }
+ child_results->NextAs<base::Optional<ImplicitParameters>>();
ParameterList result;
- for (NameAndTypeExpression& pair : implicit_params) {
- if (!IsLowerCamelCase(pair.name->value)) {
- NamingConventionError("Parameter", pair.name->value, "lowerCamelCase");
+ result.has_varargs = has_varargs;
+ result.implicit_count = 0;
+ result.implicit_kind = ImplicitKind::kNoImplicit;
+ if (implicit_params) {
+ result.implicit_count = implicit_params->parameters.size();
+ if (implicit_params->kind->value == "implicit") {
+ result.implicit_kind = ImplicitKind::kImplicit;
+ } else {
+ DCHECK_EQ(implicit_params->kind->value, "js-implicit");
+ result.implicit_kind = ImplicitKind::kJSImplicit;
+ }
+ result.implicit_kind_pos = implicit_params->kind->pos;
+ for (NameAndTypeExpression& implicit_param : implicit_params->parameters) {
+ AddParameter(&result, implicit_param);
}
-
- result.names.push_back(std::move(pair.name));
- result.types.push_back(pair.type);
}
- for (NameAndTypeExpression& pair : explicit_params) {
- if (!IsLowerCamelCase(pair.name->value)) {
- NamingConventionError("Parameter", pair.name->value, "lowerCamelCase");
+ if (has_explicit_parameter_names) {
+ auto explicit_params =
+ child_results->NextAs<std::vector<NameAndTypeExpression>>();
+ std::string arguments_variable = "";
+ if (has_varargs) {
+ arguments_variable = child_results->NextAs<std::string>();
+ }
+ for (NameAndTypeExpression& param : explicit_params) {
+ AddParameter(&result, param);
+ }
+ result.arguments_variable = arguments_variable;
+ } else {
+ auto explicit_types = child_results->NextAs<TypeList>();
+ for (auto* explicit_type : explicit_types) {
+ result.types.push_back(explicit_type);
}
-
- result.names.push_back(pair.name);
- result.types.push_back(pair.type);
}
- result.implicit_count = implicit_params.size();
- result.has_varargs = has_varargs;
- result.arguments_variable = arguments_variable;
return ParseResult{std::move(result)};
}
@@ -447,8 +461,8 @@ base::Optional<ParseResult> MakeDebugStatement(
}
base::Optional<ParseResult> MakeVoidType(ParseResultIterator* child_results) {
- TypeExpression* result =
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "void");
+ TypeExpression* result = MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, "void", std::vector<TypeExpression*>{});
return ParseResult{result};
}
@@ -567,7 +581,7 @@ base::Optional<ParseResult> MakeConstDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<Identifier*>();
if (!IsValidNamespaceConstName(name->value)) {
- NamingConventionError("Constant", name->value, "kUpperCamelCase");
+ NamingConventionError("Constant", name, "kUpperCamelCase");
}
auto type = child_results->NextAs<TypeExpression*>();
@@ -599,7 +613,7 @@ base::Optional<ParseResult> MakeAbstractTypeDeclaration(
auto transient = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
- NamingConventionError("Type", name->value, "UpperCamelCase");
+ NamingConventionError("Type", name, "UpperCamelCase");
}
auto extends = child_results->NextAs<base::Optional<Identifier*>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
@@ -700,7 +714,7 @@ base::Optional<ParseResult> MakeClassDeclaration(
if (transient) flags |= ClassFlag::kTransient;
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
- NamingConventionError("Type", name->value, "UpperCamelCase");
+ NamingConventionError("Type", name, "UpperCamelCase");
}
auto extends = child_results->NextAs<base::Optional<TypeExpression*>>();
if (extends && !BasicTypeExpression::DynamicCast(*extends)) {
@@ -760,12 +774,15 @@ base::Optional<ParseResult> MakeStructDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
- NamingConventionError("Struct", name->value, "UpperCamelCase");
+ NamingConventionError("Struct", name, "UpperCamelCase");
}
+ auto generic_parameters = child_results->NextAs<GenericParameters>();
+ LintGenericParameters(generic_parameters);
auto methods = child_results->NextAs<std::vector<Declaration*>>();
auto fields = child_results->NextAs<std::vector<StructFieldExpression>>();
Declaration* result =
- MakeNode<StructDeclaration>(name, std::move(methods), std::move(fields));
+ MakeNode<StructDeclaration>(name, std::move(methods), std::move(fields),
+ std::move(generic_parameters));
return ParseResult{result};
}
@@ -777,6 +794,25 @@ base::Optional<ParseResult> MakeCppIncludeDeclaration(
return ParseResult{result};
}
+base::Optional<ParseResult> ProcessTorqueImportDeclaration(
+ ParseResultIterator* child_results) {
+ auto import_path = child_results->NextAs<std::string>();
+ if (!SourceFileMap::FileRelativeToV8RootExists(import_path)) {
+ Error("File '", import_path, "' not found.");
+ }
+
+ auto import_id = SourceFileMap::GetSourceId(import_path);
+ if (!import_id.IsValid()) {
+ // TODO(szuend): Instead of reporting and error. Queue the file up
+ // for compilation.
+ Error("File '", import_path, "'is not part of the source set.").Throw();
+ }
+
+ CurrentAst::Get().DeclareImportForCurrentFile(import_id);
+
+ return base::nullopt;
+}
+
base::Optional<ParseResult> MakeExternalBuiltin(
ParseResultIterator* child_results) {
auto transitioning = child_results->NextAs<bool>();
@@ -822,9 +858,12 @@ base::Optional<ParseResult> MakeBasicTypeExpression(
child_results->NextAs<std::vector<std::string>>();
auto is_constexpr = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
+ auto generic_arguments =
+ child_results->NextAs<std::vector<TypeExpression*>>();
TypeExpression* result = MakeNode<BasicTypeExpression>(
std::move(namespace_qualification),
- is_constexpr ? GetConstexprName(name) : std::move(name));
+ is_constexpr ? GetConstexprName(name) : std::move(name),
+ std::move(generic_arguments));
return ParseResult{result};
}
@@ -920,14 +959,14 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
{
CurrentSourcePosition::Scope current_source_position(expression->pos);
current_block->statements.push_back(MakeNode<VarDeclarationStatement>(
- true, MakeNode<Identifier>("_value"), base::nullopt, expression));
+ true, MakeNode<Identifier>("__value"), base::nullopt, expression));
}
TypeExpression* accumulated_types;
for (size_t i = 0; i < cases.size(); ++i) {
CurrentSourcePosition::Scope current_source_position(cases[i].pos);
Expression* value =
- MakeNode<IdentifierExpression>(MakeNode<Identifier>("_value"));
+ MakeNode<IdentifierExpression>(MakeNode<Identifier>("__value"));
if (i >= 1) {
value =
MakeNode<AssumeTypeImpossibleExpression>(accumulated_types, value);
@@ -939,12 +978,12 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
std::vector<Expression*>{value},
std::vector<Statement*>{MakeNode<ExpressionStatement>(
MakeNode<IdentifierExpression>(
- MakeNode<Identifier>("_NextCase")))});
+ MakeNode<Identifier>(kNextCaseLabelName)))});
case_block = MakeNode<BlockStatement>();
} else {
case_block = current_block;
}
- std::string name = "_case_value";
+ std::string name = "__case_value";
if (cases[i].name) name = *cases[i].name;
case_block->statements.push_back(MakeNode<VarDeclarationStatement>(
true, MakeNode<Identifier>(name), cases[i].type, value));
@@ -954,7 +993,7 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
current_block->statements.push_back(
MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
false, MakeNode<StatementExpression>(case_block),
- MakeNode<LabelBlock>(MakeNode<Identifier>("_NextCase"),
+ MakeNode<LabelBlock>(MakeNode<Identifier>(kNextCaseLabelName),
ParameterList::Empty(), next_block))));
current_block = next_block;
}
@@ -1004,7 +1043,7 @@ base::Optional<ParseResult> MakeVarDeclarationStatement(
if (!const_qualified) DCHECK_EQ("let", kind->value);
auto name = child_results->NextAs<Identifier*>();
if (!IsLowerCamelCase(name->value)) {
- NamingConventionError("Variable", name->value, "lowerCamelCase");
+ NamingConventionError("Variable", name, "lowerCamelCase");
}
auto type = child_results->NextAs<base::Optional<TypeExpression*>>();
@@ -1068,19 +1107,6 @@ base::Optional<ParseResult> MakeTryLabelExpression(
return ParseResult{result};
}
-base::Optional<ParseResult> MakeForOfLoopStatement(
- ParseResultIterator* child_results) {
- auto var_decl = child_results->NextAs<Statement*>();
- CheckNotDeferredStatement(var_decl);
- auto iterable = child_results->NextAs<Expression*>();
- auto range = child_results->NextAs<base::Optional<RangeExpression>>();
- auto body = child_results->NextAs<Statement*>();
- CheckNotDeferredStatement(body);
- Statement* result =
- MakeNode<ForOfLoopStatement>(var_decl, iterable, range, body);
- return ParseResult{result};
-}
-
base::Optional<ParseResult> MakeForLoopStatement(
ParseResultIterator* child_results) {
auto var_decl = child_results->NextAs<base::Optional<Statement*>>();
@@ -1098,7 +1124,7 @@ base::Optional<ParseResult> MakeForLoopStatement(
base::Optional<ParseResult> MakeLabelBlock(ParseResultIterator* child_results) {
auto label = child_results->NextAs<Identifier*>();
if (!IsUpperCamelCase(label->value)) {
- NamingConventionError("Label", label->value, "UpperCamelCase");
+ NamingConventionError("Label", label, "UpperCamelCase");
}
auto parameters = child_results->NextAs<ParameterList>();
auto body = child_results->NextAs<Statement*>();
@@ -1114,19 +1140,11 @@ base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
}
ParameterList parameters;
parameters.names.push_back(MakeNode<Identifier>(variable));
- parameters.types.push_back(
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "Object"));
+ parameters.types.push_back(MakeNode<BasicTypeExpression>(
+ std::vector<std::string>{}, "Object", std::vector<TypeExpression*>{}));
parameters.has_varargs = false;
- LabelBlock* result = MakeNode<LabelBlock>(MakeNode<Identifier>("_catch"),
- std::move(parameters), body);
- return ParseResult{result};
-}
-
-base::Optional<ParseResult> MakeRangeExpression(
- ParseResultIterator* child_results) {
- auto begin = child_results->NextAs<base::Optional<Expression*>>();
- auto end = child_results->NextAs<base::Optional<Expression*>>();
- RangeExpression result = {begin, end};
+ LabelBlock* result = MakeNode<LabelBlock>(
+ MakeNode<Identifier>(kCatchLabelName), std::move(parameters), body);
return ParseResult{result};
}
@@ -1149,6 +1167,17 @@ base::Optional<ParseResult> MakeIdentifierFromMatchedInput(
MakeNode<Identifier>(child_results->matched_input().ToString())};
}
+base::Optional<ParseResult> MakeRightShiftIdentifier(
+ ParseResultIterator* child_results) {
+ std::string str = child_results->matched_input().ToString();
+ for (auto character : str) {
+ if (character != '>') {
+ ReportError("right-shift operators may not contain any whitespace");
+ }
+ }
+ return ParseResult{MakeNode<Identifier>(str)};
+}
+
base::Optional<ParseResult> MakeIdentifierExpression(
ParseResultIterator* child_results) {
auto namespace_qualification =
@@ -1265,7 +1294,7 @@ base::Optional<ParseResult> MakeLabelAndTypes(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<Identifier*>();
if (!IsUpperCamelCase(name->value)) {
- NamingConventionError("Label", name->value, "UpperCamelCase");
+ NamingConventionError("Label", name, "UpperCamelCase");
}
auto types = child_results->NextAs<std::vector<TypeExpression*>>();
return ParseResult{LabelAndTypes{name, std::move(types)}};
@@ -1357,9 +1386,12 @@ struct TorqueGrammar : Grammar {
}
static bool MatchIdentifier(InputPosition* pos) {
- if (!MatchChar(std::isalpha, pos)) return false;
- while (MatchChar(std::isalnum, pos) || MatchString("_", pos)) {
+ InputPosition current = *pos;
+ MatchString("_", &current);
+ if (!MatchChar(std::isalpha, &current)) return false;
+ while (MatchChar(std::isalnum, &current) || MatchString("_", &current)) {
}
+ *pos = current;
return true;
}
@@ -1476,7 +1508,9 @@ struct TorqueGrammar : Grammar {
Symbol simpleType = {
Rule({Token("("), &type, Token(")")}),
Rule({List<std::string>(Sequence({&identifier, Token("::")})),
- CheckIf(Token("constexpr")), &identifier},
+ CheckIf(Token("constexpr")), &identifier,
+ TryOrDefault<std::vector<TypeExpression*>>(
+ &genericSpecializationTypeList)},
MakeBasicTypeExpression),
Rule({Token("builtin"), Token("("), typeList, Token(")"), Token("=>"),
&simpleType},
@@ -1501,20 +1535,22 @@ struct TorqueGrammar : Grammar {
// Result: base::Optional<TypeList>
Symbol* optionalGenericParameters = Optional<TypeList>(&genericParameters);
+ Symbol implicitParameterList{
+ Rule({Token("("), OneOf({"implicit", "js-implicit"}),
+ List<NameAndTypeExpression>(&nameAndType, Token(",")), Token(")")},
+ MakeImplicitParameterList)};
+
Symbol* optionalImplicitParameterList{
- TryOrDefault<std::vector<NameAndTypeExpression>>(
- Sequence({Token("("), Token("implicit"),
- List<NameAndTypeExpression>(&nameAndType, Token(",")),
- Token(")")}))};
+ Optional<ImplicitParameters>(&implicitParameterList)};
// Result: ParameterList
Symbol typeListMaybeVarArgs = {
Rule({optionalImplicitParameterList, Token("("),
List<TypeExpression*>(Sequence({&type, Token(",")})), Token("..."),
Token(")")},
- MakeParameterListFromTypes<true>),
+ MakeParameterList<true, false>),
Rule({optionalImplicitParameterList, Token("("), typeList, Token(")")},
- MakeParameterListFromTypes<false>)};
+ MakeParameterList<false, false>)};
// Result: LabelAndTypes
Symbol labelParameter = {Rule(
@@ -1561,15 +1597,15 @@ struct TorqueGrammar : Grammar {
Symbol parameterListNoVararg = {
Rule({optionalImplicitParameterList, Token("("),
List<NameAndTypeExpression>(&nameAndType, Token(",")), Token(")")},
- MakeParameterListFromNameAndTypeList<false>)};
+ MakeParameterList<false, true>)};
// Result: ParameterList
Symbol parameterListAllowVararg = {
Rule({&parameterListNoVararg}),
Rule({optionalImplicitParameterList, Token("("),
- NonemptyList<NameAndTypeExpression>(&nameAndType, Token(",")),
- Token(","), Token("..."), &identifier, Token(")")},
- MakeParameterListFromNameAndTypeList<true>)};
+ List<NameAndTypeExpression>(Sequence({&nameAndType, Token(",")})),
+ Token("..."), &identifier, Token(")")},
+ MakeParameterList<true, true>)};
// Result: Identifier*
Symbol* OneOf(const std::vector<std::string>& alternatives) {
@@ -1672,9 +1708,14 @@ struct TorqueGrammar : Grammar {
Symbol* additiveExpression =
BinaryOperator(multiplicativeExpression, OneOf({"+", "-"}));
+ // Result: Identifier*
+ Symbol shiftOperator = {
+ Rule({Token("<<")}, MakeIdentifierFromMatchedInput),
+ Rule({Token(">"), Token(">")}, MakeRightShiftIdentifier),
+ Rule({Token(">"), Token(">"), Token(">")}, MakeRightShiftIdentifier)};
+
// Result: Expression*
- Symbol* shiftExpression =
- BinaryOperator(additiveExpression, OneOf({"<<", ">>", ">>>"}));
+ Symbol* shiftExpression = BinaryOperator(additiveExpression, &shiftOperator);
// Do not allow expressions like a < b > c because this is never
// useful and ambiguous with template parameters.
@@ -1742,12 +1783,6 @@ struct TorqueGrammar : Grammar {
// Result: ExpressionWithSource
Symbol expressionWithSource = {Rule({expression}, MakeExpressionWithSource)};
- // Result: RangeExpression
- Symbol rangeSpecifier = {
- Rule({Token("["), Optional<Expression*>(expression), Token(":"),
- Optional<Expression*>(expression), Token("]")},
- MakeRangeExpression)};
-
Symbol* optionalTypeSpecifier =
Optional<TypeExpression*>(Sequence({Token(":"), &type}));
@@ -1800,9 +1835,6 @@ struct TorqueGrammar : Grammar {
MakeAssertStatement),
Rule({Token("while"), Token("("), expression, Token(")"), &statement},
MakeWhileStatement),
- Rule({Token("for"), Token("("), &varDeclaration, Token("of"), expression,
- Optional<RangeExpression>(&rangeSpecifier), Token(")"), &statement},
- MakeForOfLoopStatement),
Rule({Token("for"), Token("("),
Optional<Statement*>(&varDeclarationWithInitialization), Token(";"),
Optional<Expression*>(expression), Token(";"),
@@ -1845,7 +1877,9 @@ struct TorqueGrammar : Grammar {
Token("{"), List<Declaration*>(&method),
List<ClassFieldExpression>(&classField), Token("}")},
AsSingletonVector<Declaration*, MakeClassDeclaration>()),
- Rule({Token("struct"), &name, Token("{"), List<Declaration*>(&method),
+ Rule({Token("struct"), &name,
+ TryOrDefault<GenericParameters>(&genericParameters), Token("{"),
+ List<Declaration*>(&method),
List<StructFieldExpression>(&structField), Token("}")},
AsSingletonVector<Declaration*, MakeStructDeclaration>()),
Rule({CheckIf(Token("transient")), Token("type"), &name,
@@ -1909,7 +1943,9 @@ struct TorqueGrammar : Grammar {
Token("}")},
AsSingletonVector<Declaration*, MakeNamespaceDeclaration>())};
- Symbol file = {Rule({&file, &namespaceDeclaration}, AddGlobalDeclarations),
+ Symbol file = {Rule({&file, Token("import"), &externalString},
+ ProcessTorqueImportDeclaration),
+ Rule({&file, &namespaceDeclaration}, AddGlobalDeclarations),
Rule({&file, &declaration}, AddGlobalDeclarations), Rule({})};
};
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index 6b596aab39..e759ce613c 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -20,21 +20,29 @@ std::string ErrorPrefixFor(TorqueMessage::Kind kind) {
int WrappedMain(int argc, const char** argv) {
std::string output_directory;
+ std::string v8_root;
std::vector<std::string> files;
for (int i = 1; i < argc; ++i) {
// Check for options
- if (!strcmp("-o", argv[i])) {
+ if (std::string(argv[i]) == "-o") {
output_directory = argv[++i];
- continue;
+ } else if (std::string(argv[i]) == "-v8-root") {
+ v8_root = std::string(argv[++i]);
+ } else {
+ // Otherwise it's a .tq file. Remember it for compilation.
+ files.emplace_back(argv[i]);
+ if (!StringEndsWith(files.back(), ".tq")) {
+ std::cerr << "Unexpected command-line argument \"" << files.back()
+ << "\", expected a .tq file.\n";
+ base::OS::Abort();
+ }
}
-
- // Otherwise it's a .tq file. Remember it for compilation.
- files.emplace_back(argv[i]);
}
TorqueCompilerOptions options;
- options.output_directory = output_directory;
+ options.output_directory = std::move(output_directory);
+ options.v8_root = std::move(v8_root);
options.collect_language_server_data = false;
options.force_assert_statements = false;
@@ -42,7 +50,7 @@ int WrappedMain(int argc, const char** argv) {
// PositionAsString requires the SourceFileMap to be set to
// resolve the file name. Needed to report errors and lint warnings.
- SourceFileMap::Scope source_file_map_scope(result.source_file_map);
+ SourceFileMap::Scope source_file_map_scope(*result.source_file_map);
for (const TorqueMessage& message : result.messages) {
if (message.position) {
diff --git a/deps/v8/src/torque/type-oracle.cc b/deps/v8/src/torque/type-oracle.cc
index 7c266a419a..47331543fc 100644
--- a/deps/v8/src/torque/type-oracle.cc
+++ b/deps/v8/src/torque/type-oracle.cc
@@ -11,8 +11,14 @@ namespace torque {
DEFINE_CONTEXTUAL_VARIABLE(TypeOracle)
// static
-void TypeOracle::FinalizeClassTypes() {
- for (const std::unique_ptr<AggregateType>& p : Get().struct_types_) {
+const std::vector<std::unique_ptr<AggregateType>>*
+TypeOracle::GetAggregateTypes() {
+ return &Get().aggregate_types_;
+}
+
+// static
+void TypeOracle::FinalizeAggregateTypes() {
+ for (const std::unique_ptr<AggregateType>& p : Get().aggregate_types_) {
p->Finalize();
}
}
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index c9d6bb0bf3..405cb41e75 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -32,7 +32,7 @@ class TypeOracle : public ContextualClass<TypeOracle> {
static StructType* GetStructType(const std::string& name) {
StructType* result = new StructType(CurrentNamespace(), name);
- Get().struct_types_.push_back(std::unique_ptr<StructType>(result));
+ Get().aggregate_types_.push_back(std::unique_ptr<StructType>(result));
return result;
}
@@ -42,7 +42,7 @@ class TypeOracle : public ContextualClass<TypeOracle> {
const TypeAlias* alias) {
ClassType* result = new ClassType(parent, CurrentNamespace(), name, flags,
generates, decl, alias);
- Get().struct_types_.push_back(std::unique_ptr<ClassType>(result));
+ Get().aggregate_types_.push_back(std::unique_ptr<ClassType>(result));
return result;
}
@@ -107,6 +107,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(CONSTEXPR_INTPTR_TYPE_STRING);
}
+ static const Type* GetConstexprInstanceTypeType() {
+ return Get().GetBuiltinType(CONSTEXPR_INSTANCE_TYPE_TYPE_STRING);
+ }
+
static const Type* GetVoidType() {
return Get().GetBuiltinType(VOID_TYPE_STRING);
}
@@ -135,6 +139,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(TAGGED_TYPE_STRING);
}
+ static const Type* GetUninitializedType() {
+ return Get().GetBuiltinType(UNINITIALIZED_TYPE_STRING);
+ }
+
static const Type* GetSmiType() {
return Get().GetBuiltinType(SMI_TYPE_STRING);
}
@@ -203,11 +211,19 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(CONST_INT32_TYPE_STRING);
}
+ static const Type* GetContextType() {
+ return Get().GetBuiltinType(CONTEXT_TYPE_STRING);
+ }
+
+ static const Type* GetJSFunctionType() {
+ return Get().GetBuiltinType(JS_FUNCTION_TYPE_STRING);
+ }
+
static bool IsImplicitlyConvertableFrom(const Type* to, const Type* from) {
for (Generic* from_constexpr :
Declarations::LookupGeneric(kFromConstexprMacroName)) {
- if (base::Optional<Callable*> specialization =
- from_constexpr->GetSpecialization({to, from})) {
+ if (base::Optional<const Callable*> specialization =
+ from_constexpr->specializations().Get({to, from})) {
if ((*specialization)->signature().GetExplicitTypes() ==
TypeVector{from}) {
return true;
@@ -217,7 +233,9 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return false;
}
- static void FinalizeClassTypes();
+ static const std::vector<std::unique_ptr<AggregateType>>* GetAggregateTypes();
+
+ static void FinalizeAggregateTypes();
private:
const Type* GetBuiltinType(const std::string& name) {
@@ -229,7 +247,7 @@ class TypeOracle : public ContextualClass<TypeOracle> {
Deduplicator<UnionType> union_types_;
Deduplicator<ReferenceType> reference_types_;
std::vector<std::unique_ptr<Type>> nominal_types_;
- std::vector<std::unique_ptr<AggregateType>> struct_types_;
+ std::vector<std::unique_ptr<AggregateType>> aggregate_types_;
std::vector<std::unique_ptr<Type>> top_types_;
};
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
index e9fd50c02a..37be0df006 100644
--- a/deps/v8/src/torque/type-visitor.cc
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -57,6 +57,12 @@ const AbstractType* TypeVisitor::ComputeType(AbstractTypeDeclaration* decl) {
const Type* parent_type = nullptr;
if (decl->extends) {
parent_type = Declarations::LookupType(*decl->extends);
+ if (parent_type->IsUnionType()) {
+ // UnionType::IsSupertypeOf requires that types can only extend from non-
+ // union types in order to work correctly.
+ ReportError("type \"", decl->name->value,
+ "\" cannot extend a type union");
+ }
}
if (generates == "" && parent_type) {
@@ -104,9 +110,25 @@ void DeclareMethods(AggregateType* container_type,
}
}
+namespace {
+std::string ComputeStructName(StructDeclaration* decl) {
+ TypeVector args;
+ if (decl->IsGeneric()) {
+ args.resize(decl->generic_parameters.size());
+ std::transform(
+ decl->generic_parameters.begin(), decl->generic_parameters.end(),
+ args.begin(), [](Identifier* parameter) {
+ return Declarations::LookupTypeAlias(QualifiedName(parameter->value))
+ ->type();
+ });
+ }
+ return StructType::ComputeName(decl->name->value, args);
+}
+} // namespace
+
const StructType* TypeVisitor::ComputeType(StructDeclaration* decl) {
CurrentSourcePosition::Scope position_activator(decl->pos);
- StructType* struct_type = TypeOracle::GetStructType(decl->name->value);
+ StructType* struct_type = TypeOracle::GetStructType(ComputeStructName(decl));
size_t offset = 0;
for (auto& field : decl->fields) {
CurrentSourcePosition::Scope position_activator(
@@ -156,28 +178,78 @@ const ClassType* TypeVisitor::ComputeType(ClassDeclaration* decl) {
new_class = TypeOracle::GetClassType(super_type, decl->name->value,
decl->flags, generates, decl, alias);
} else {
- if (decl->super) {
- ReportError("Only extern classes can inherit.");
+ if (!decl->super) {
+ ReportError("Intern class ", decl->name->value,
+ " must extend class Struct.");
+ }
+ const Type* super_type = TypeVisitor::ComputeType(*decl->super);
+ const ClassType* super_class = ClassType::DynamicCast(super_type);
+ const Type* struct_type = Declarations::LookupGlobalType("Struct");
+ if (!super_class || super_class != struct_type) {
+ ReportError("Intern class ", decl->name->value,
+ " must extend class Struct.");
}
if (decl->generates) {
ReportError("Only extern classes can specify a generated type.");
}
- new_class =
- TypeOracle::GetClassType(TypeOracle::GetTaggedType(), decl->name->value,
- decl->flags, "FixedArray", decl, alias);
+ new_class = TypeOracle::GetClassType(
+ super_type, decl->name->value,
+ decl->flags | ClassFlag::kGeneratePrint | ClassFlag::kGenerateVerify,
+ decl->name->value, decl, alias);
}
return new_class;
}
const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
- const TypeAlias* alias = Declarations::LookupTypeAlias(
- QualifiedName{basic->namespace_qualification, basic->name});
+ QualifiedName qualified_name{basic->namespace_qualification, basic->name};
+ auto& args = basic->generic_arguments;
+ const Type* type;
+ SourcePosition pos = SourcePosition::Invalid();
+
+ if (args.empty()) {
+ auto* alias = Declarations::LookupTypeAlias(qualified_name);
+ type = alias->type();
+ pos = alias->GetDeclarationPosition();
+ } else {
+ auto* generic_struct =
+ Declarations::LookupUniqueGenericStructType(qualified_name);
+ auto& params = generic_struct->generic_parameters();
+ auto& specializations = generic_struct->specializations();
+ if (params.size() != args.size()) {
+ ReportError("Generic struct takes ", params.size(),
+ " parameters, but only ", args.size(), " were given");
+ }
+
+ std::vector<const Type*> arg_types = ComputeTypeVector(args);
+ if (auto specialization = specializations.Get(arg_types)) {
+ type = *specialization;
+ } else {
+ CurrentScope::Scope generic_scope(generic_struct->ParentScope());
+ // Create a temporary fake-namespace just to temporarily declare the
+ // specialization aliases for the generic types to create a signature.
+ Namespace tmp_namespace("_tmp");
+ CurrentScope::Scope tmp_namespace_scope(&tmp_namespace);
+ auto arg_types_iterator = arg_types.begin();
+ for (auto param : params) {
+ TypeAlias* alias =
+ Declarations::DeclareType(param, *arg_types_iterator);
+ alias->SetIsUserDefined(false);
+ arg_types_iterator++;
+ }
+
+ auto struct_type = ComputeType(generic_struct->declaration());
+ specializations.Add(arg_types, struct_type);
+ type = struct_type;
+ }
+ pos = generic_struct->declaration()->name->pos;
+ }
+
if (GlobalContext::collect_language_server_data()) {
- LanguageServerData::AddDefinition(type_expression->pos,
- alias->GetDeclarationPosition());
+ LanguageServerData::AddDefinition(type_expression->pos, pos);
}
- return alias->type();
+ return type;
+
} else if (auto* union_type =
UnionTypeExpression::DynamicCast(type_expression)) {
return TypeOracle::GetUnionType(ComputeType(union_type->a),
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 1d7ca1d5f2..37a328b1dc 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -272,7 +272,25 @@ const Field& AggregateType::LookupField(const std::string& name) const {
}
std::string StructType::GetGeneratedTypeNameImpl() const {
- return "TorqueStruct" + name();
+ return "TorqueStruct" + MangledName();
+}
+
+// static
+std::string StructType::ComputeName(const std::string& basename,
+ const std::vector<const Type*>& args) {
+ if (args.size() == 0) return basename;
+ std::stringstream s;
+ s << basename << "<";
+ bool first = true;
+ for (auto t : args) {
+ if (!first) {
+ s << ", ";
+ }
+ s << t->ToString();
+ first = false;
+ }
+ s << ">";
+ return s.str();
}
std::vector<Method*> AggregateType::Methods(const std::string& name) const {
@@ -349,7 +367,7 @@ void ClassType::Finalize() const {
TypeVisitor::VisitClassFieldsAndMethods(const_cast<ClassType*>(this),
this->decl_);
is_finalized_ = true;
- if (GenerateCppClassDefinitions()) {
+ if (GenerateCppClassDefinitions() || !IsExtern()) {
for (const Field& f : fields()) {
if (f.is_weak) {
Error("Generation of C++ class for Torque class ", name(),
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 0d79c1f405..f6180c4250 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -282,6 +282,8 @@ class V8_EXPORT_PRIVATE BuiltinPointerType final : public Type {
}
size_t function_pointer_type_id() const { return function_pointer_type_id_; }
+ std::vector<std::string> GetRuntimeTypes() const override { return {"Smi"}; }
+
private:
friend class TypeOracle;
BuiltinPointerType(const Type* parent, TypeVector parameter_types,
@@ -500,6 +502,18 @@ class StructType final : public AggregateType {
DECLARE_TYPE_BOILERPLATE(StructType)
std::string ToExplicitString() const override;
std::string GetGeneratedTypeNameImpl() const override;
+ std::string MangledName() const override {
+ // TODO(gsps): Generate more readable mangled names
+ std::string str(name());
+ std::replace(str.begin(), str.end(), ',', '_');
+ std::replace(str.begin(), str.end(), ' ', '_');
+ std::replace(str.begin(), str.end(), '<', '_');
+ std::replace(str.begin(), str.end(), '>', '_');
+ return str;
+ }
+
+ static std::string ComputeName(const std::string& basename,
+ const std::vector<const Type*>& args);
private:
friend class TypeOracle;
@@ -526,10 +540,10 @@ class ClassType final : public AggregateType {
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsExtern() const { return flags_ & ClassFlag::kExtern; }
bool ShouldGeneratePrint() const {
- return flags_ & ClassFlag::kGeneratePrint;
+ return flags_ & ClassFlag::kGeneratePrint || !IsExtern();
}
bool ShouldGenerateVerify() const {
- return flags_ & ClassFlag::kGenerateVerify;
+ return flags_ & ClassFlag::kGenerateVerify || !IsExtern();
}
bool IsTransient() const override { return flags_ & ClassFlag::kTransient; }
bool IsAbstract() const { return flags_ & ClassFlag::kAbstract; }
@@ -540,7 +554,7 @@ class ClassType final : public AggregateType {
return flags_ & ClassFlag::kHasSameInstanceTypeAsParent;
}
bool GenerateCppClassDefinitions() const {
- return flags_ & ClassFlag::kGenerateCppClassDefinitions;
+ return flags_ & ClassFlag::kGenerateCppClassDefinitions || !IsExtern();
}
bool HasIndexedField() const override;
size_t size() const { return size_; }
@@ -606,8 +620,6 @@ class VisitResult {
base::Optional<StackRange> stack_range_;
};
-using NameValueMap = std::map<std::string, VisitResult>;
-
VisitResult ProjectStructField(VisitResult structure,
const std::string& fieldname);
@@ -669,6 +681,7 @@ struct Signature {
base::Optional<std::string> arguments_variable;
ParameterTypes parameter_types;
size_t implicit_count;
+ size_t ExplicitCount() const { return types().size() - implicit_count; }
const Type* return_type;
LabelDeclarationVector labels;
bool HasSameTypesAs(
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 3e2f715f0d..244d1587db 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -123,12 +123,6 @@ std::string CurrentPositionAsString() {
return PositionAsString(CurrentSourcePosition::Get());
}
-void NamingConventionError(const std::string& type, const std::string& name,
- const std::string& convention) {
- Lint(type, " \"", name, "\" does not follow \"", convention,
- "\" naming convention.");
-}
-
MessageBuilder::MessageBuilder(const std::string& message,
TorqueMessage::Kind kind) {
base::Optional<SourcePosition> position;
@@ -162,7 +156,7 @@ bool ContainsUpperCase(const std::string& s) {
// keywords, e.g.: 'True', 'Undefined', etc.
// These do not need to follow the default naming convention for constants.
bool IsKeywordLikeName(const std::string& s) {
- static const char* const keyword_like_constants[]{"True", "False", "Hole",
+ static const char* const keyword_like_constants[]{"True", "False", "TheHole",
"Null", "Undefined"};
return std::find(std::begin(keyword_like_constants),
@@ -186,12 +180,16 @@ bool IsMachineType(const std::string& s) {
bool IsLowerCamelCase(const std::string& s) {
if (s.empty()) return false;
- return islower(s[0]) && !ContainsUnderscore(s);
+ size_t start = 0;
+ if (s[0] == '_') start = 1;
+ return islower(s[start]) && !ContainsUnderscore(s.substr(start));
}
bool IsUpperCamelCase(const std::string& s) {
if (s.empty()) return false;
- return isupper(s[0]) && !ContainsUnderscore(s);
+ size_t start = 0;
+ if (s[0] == '_') start = 1;
+ return isupper(s[start]) && !ContainsUnderscore(s.substr(1));
}
bool IsSnakeCase(const std::string& s) {
@@ -248,12 +246,34 @@ std::string CamelifyString(const std::string& underscore_string) {
return result;
}
+std::string SnakeifyString(const std::string& camel_string) {
+ std::string result;
+ bool previousWasLower = false;
+ for (auto current : camel_string) {
+ if (previousWasLower && isupper(current)) {
+ result += "_";
+ }
+ result += tolower(current);
+ previousWasLower = (islower(current));
+ }
+ return result;
+}
+
std::string DashifyString(const std::string& underscore_string) {
std::string result = underscore_string;
std::replace(result.begin(), result.end(), '_', '-');
return result;
}
+std::string UnderlinifyPath(std::string path) {
+ std::replace(path.begin(), path.end(), '-', '_');
+ std::replace(path.begin(), path.end(), '/', '_');
+ std::replace(path.begin(), path.end(), '\\', '_');
+ std::replace(path.begin(), path.end(), '.', '_');
+ transform(path.begin(), path.end(), path.begin(), ::toupper);
+ return path;
+}
+
void ReplaceFileContentsIfDifferent(const std::string& file_path,
const std::string& contents) {
std::ifstream old_contents_stream(file_path.c_str());
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index 10b91ce7d4..fb4ad59f99 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -81,11 +81,6 @@ MessageBuilder Lint(Args&&... args) {
return Message(TorqueMessage::Kind::kLint, std::forward<Args>(args)...);
}
-// Report a LintError with the format "{type} '{name}' doesn't follow
-// '{convention}' naming convention".
-void NamingConventionError(const std::string& type, const std::string& name,
- const std::string& convention);
-
bool IsLowerCamelCase(const std::string& s);
bool IsUpperCamelCase(const std::string& s);
bool IsSnakeCase(const std::string& s);
@@ -99,7 +94,9 @@ template <class... Args>
std::string CapifyStringWithUnderscores(const std::string& camellified_string);
std::string CamelifyString(const std::string& underscore_string);
+std::string SnakeifyString(const std::string& camel_string);
std::string DashifyString(const std::string& underscore_string);
+std::string UnderlinifyPath(std::string path);
void ReplaceFileContentsIfDifferent(const std::string& file_path,
const std::string& contents);
@@ -350,6 +347,15 @@ class NullOStream : public std::ostream {
NullStreambuf buffer_;
};
+inline bool StringStartsWith(const std::string& s, const std::string& prefix) {
+ if (s.size() < prefix.size()) return false;
+ return s.substr(0, prefix.size()) == prefix;
+}
+inline bool StringEndsWith(const std::string& s, const std::string& suffix) {
+ if (s.size() < suffix.size()) return false;
+ return s.substr(s.size() - suffix.size()) == suffix;
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/tracing/OWNERS b/deps/v8/src/tracing/OWNERS
index 6afd4d0fee..7ab7c063da 100644
--- a/deps/v8/src/tracing/OWNERS
+++ b/deps/v8/src/tracing/OWNERS
@@ -1,2 +1,4 @@
alph@chromium.org
petermarshall@chromium.org
+
+# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index b5d2e7d866..53839ba4b1 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -32,7 +32,9 @@ enum CategoryGroupEnabledFlags {
kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3,
};
-// By default, const char* asrgument values are assumed to have long-lived scope
+// TODO(petermarshall): Remove with the old tracing implementation - Perfetto
+// copies const char* arguments by default.
+// By default, const char* argument values are assumed to have long-lived scope
// and will not be copied. Use this macro to force a const char* to be copied.
#define TRACE_STR_COPY(str) v8::internal::tracing::TraceStringWithCopy(str)
diff --git a/deps/v8/src/trap-handler/OWNERS b/deps/v8/src/trap-handler/OWNERS
index ac0d46af0e..f6f3bc07ec 100644
--- a/deps/v8/src/trap-handler/OWNERS
+++ b/deps/v8/src/trap-handler/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
titzer@chromium.org
ahaas@chromium.org
diff --git a/deps/v8/src/utils/OWNERS b/deps/v8/src/utils/OWNERS
index 852d438bb0..3f9de7e204 100644
--- a/deps/v8/src/utils/OWNERS
+++ b/deps/v8/src/utils/OWNERS
@@ -1 +1,3 @@
file://COMMON_OWNERS
+
+# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index 27db17a479..af32e90088 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -84,7 +84,7 @@ v8::PageAllocator* SetPlatformPageAllocatorForTesting(
return old_page_allocator;
}
-void* Malloced::New(size_t size) {
+void* Malloced::operator new(size_t size) {
void* result = AllocWithRetry(size);
if (result == nullptr) {
V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
@@ -92,7 +92,7 @@ void* Malloced::New(size_t size) {
return result;
}
-void Malloced::Delete(void* p) { free(p); }
+void Malloced::operator delete(void* p) { free(p); }
char* StrDup(const char* str) {
size_t length = strlen(str);
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index fa3e6f3d7d..2f7074acb0 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -29,11 +29,8 @@ class Isolate;
// Superclass for classes managed with new & delete.
class V8_EXPORT_PRIVATE Malloced {
public:
- void* operator new(size_t size) { return New(size); }
- void operator delete(void* p) { Delete(p); }
-
- static void* New(size_t size);
- static void Delete(void* p);
+ static void* operator new(size_t size);
+ static void operator delete(void* p);
};
template <typename T>
@@ -70,8 +67,8 @@ char* StrNDup(const char* str, int n);
// and free. Used as the default policy for lists.
class FreeStoreAllocationPolicy {
public:
- V8_INLINE void* New(size_t size) { return Malloced::New(size); }
- V8_INLINE static void Delete(void* p) { Malloced::Delete(p); }
+ V8_INLINE void* New(size_t size) { return Malloced::operator new(size); }
+ V8_INLINE static void Delete(void* p) { Malloced::operator delete(p); }
};
// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
diff --git a/deps/v8/src/utils/splay-tree-inl.h b/deps/v8/src/utils/splay-tree-inl.h
deleted file mode 100644
index bda453fd8f..0000000000
--- a/deps/v8/src/utils/splay-tree-inl.h
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UTILS_SPLAY_TREE_INL_H_
-#define V8_UTILS_SPLAY_TREE_INL_H_
-
-#include <vector>
-
-#include "src/utils/splay-tree.h"
-
-namespace v8 {
-namespace internal {
-
-
-template<typename Config, class Allocator>
-SplayTree<Config, Allocator>::~SplayTree() {
- NodeDeleter deleter;
- ForEachNode(&deleter);
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Insert(const Key& key,
- Locator* locator) {
- if (is_empty()) {
- // If the tree is empty, insert the new node.
- root_ = new(allocator_) Node(key, Config::NoValue());
- } else {
- // Splay on the key to move the last node on the search path
- // for the key to the root of the tree.
- Splay(key);
- // Ignore repeated insertions with the same key.
- int cmp = Config::Compare(key, root_->key_);
- if (cmp == 0) {
- locator->bind(root_);
- return false;
- }
- // Insert the new node.
- Node* node = new(allocator_) Node(key, Config::NoValue());
- InsertInternal(cmp, node);
- }
- locator->bind(root_);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::InsertInternal(int cmp, Node* node) {
- if (cmp > 0) {
- node->left_ = root_;
- node->right_ = root_->right_;
- root_->right_ = nullptr;
- } else {
- node->right_ = root_;
- node->left_ = root_->left_;
- root_->left_ = nullptr;
- }
- root_ = node;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
- if (is_empty())
- return false;
- Splay(key);
- return Config::Compare(key, root_->key_) == 0;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Contains(const Key& key) {
- return FindInternal(key);
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
- if (FindInternal(key)) {
- locator->bind(root_);
- return true;
- } else {
- return false;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindGreatestLessThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the greatest node in
- // the left subtree.
- int cmp = Config::Compare(root_->key_, key);
- if (cmp <= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->left_;
- bool result = FindGreatest(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindLeastGreaterThan(const Key& key,
- Locator* locator) {
- if (is_empty())
- return false;
- // Splay on the key to move the node with the given key or the last
- // node on the search path to the top of the tree.
- Splay(key);
- // Now the result is either the root node or the least node in
- // the right subtree.
- int cmp = Config::Compare(root_->key_, key);
- if (cmp >= 0) {
- locator->bind(root_);
- return true;
- } else {
- Node* temp = root_;
- root_ = root_->right_;
- bool result = FindLeast(locator);
- root_ = temp;
- return result;
- }
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindGreatest(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->right_ != nullptr) current = current->right_;
- locator->bind(current);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::FindLeast(Locator* locator) {
- if (is_empty())
- return false;
- Node* current = root_;
- while (current->left_ != nullptr) current = current->left_;
- locator->bind(current);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Move(const Key& old_key,
- const Key& new_key) {
- if (!FindInternal(old_key))
- return false;
- Node* node_to_move = root_;
- RemoveRootNode(old_key);
- Splay(new_key);
- int cmp = Config::Compare(new_key, root_->key_);
- if (cmp == 0) {
- // A node with the target key already exists.
- delete node_to_move;
- return false;
- }
- node_to_move->key_ = new_key;
- InsertInternal(cmp, node_to_move);
- return true;
-}
-
-
-template<typename Config, class Allocator>
-bool SplayTree<Config, Allocator>::Remove(const Key& key) {
- if (!FindInternal(key))
- return false;
- Node* node_to_remove = root_;
- RemoveRootNode(key);
- delete node_to_remove;
- return true;
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::RemoveRootNode(const Key& key) {
- if (root_->left_ == nullptr) {
- // No left child, so the new tree is just the right child.
- root_ = root_->right_;
- } else {
- // Left child exists.
- Node* right = root_->right_;
- // Make the original left child the new root.
- root_ = root_->left_;
- // Splay to make sure that the new root has an empty right child.
- Splay(key);
- // Insert the original right child as the right child of the new
- // root.
- root_->right_ = right;
- }
-}
-
-
-template<typename Config, class Allocator>
-void SplayTree<Config, Allocator>::Splay(const Key& key) {
- if (is_empty())
- return;
- Node dummy_node(Config::kNoKey, Config::NoValue());
- // Create a dummy node. The use of the dummy node is a bit
- // counter-intuitive: The right child of the dummy node will hold
- // the L tree of the algorithm. The left child of the dummy node
- // will hold the R tree of the algorithm. Using a dummy node, left
- // and right will always be nodes and we avoid special cases.
- Node* dummy = &dummy_node;
- Node* left = dummy;
- Node* right = dummy;
- Node* current = root_;
- while (true) {
- int cmp = Config::Compare(key, current->key_);
- if (cmp < 0) {
- if (current->left_ == nullptr) break;
- if (Config::Compare(key, current->left_->key_) < 0) {
- // Rotate right.
- Node* temp = current->left_;
- current->left_ = temp->right_;
- temp->right_ = current;
- current = temp;
- if (current->left_ == nullptr) break;
- }
- // Link right.
- right->left_ = current;
- right = current;
- current = current->left_;
- } else if (cmp > 0) {
- if (current->right_ == nullptr) break;
- if (Config::Compare(key, current->right_->key_) > 0) {
- // Rotate left.
- Node* temp = current->right_;
- current->right_ = temp->left_;
- temp->left_ = current;
- current = temp;
- if (current->right_ == nullptr) break;
- }
- // Link left.
- left->right_ = current;
- left = current;
- current = current->right_;
- } else {
- break;
- }
- }
- // Assemble.
- left->right_ = current->left_;
- right->left_ = current->right_;
- current->left_ = dummy->right_;
- current->right_ = dummy->left_;
- root_ = current;
-}
-
-
-template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
- NodeToPairAdaptor<Callback> callback_adaptor(callback);
- ForEachNode(&callback_adaptor);
-}
-
-
-template <typename Config, class Allocator> template <class Callback>
-void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
- if (root_ == nullptr) return;
- // Pre-allocate some space for tiny trees.
- std::vector<Node*> nodes_to_visit;
- nodes_to_visit.push_back(root_);
- size_t pos = 0;
- while (pos < nodes_to_visit.size()) {
- Node* node = nodes_to_visit[pos++];
- if (node->left() != nullptr) nodes_to_visit.push_back(node->left());
- if (node->right() != nullptr) nodes_to_visit.push_back(node->right());
- callback->Call(node);
- }
-}
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_UTILS_SPLAY_TREE_INL_H_
diff --git a/deps/v8/src/utils/splay-tree.h b/deps/v8/src/utils/splay-tree.h
deleted file mode 100644
index 47633f39db..0000000000
--- a/deps/v8/src/utils/splay-tree.h
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UTILS_SPLAY_TREE_H_
-#define V8_UTILS_SPLAY_TREE_H_
-
-#include "src/utils/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// A splay tree. The config type parameter encapsulates the different
-// configurations of a concrete splay tree:
-//
-// typedef Key: the key type
-// typedef Value: the value type
-// static const Key kNoKey: the dummy key used when no key is set
-// static Value kNoValue(): the dummy value used to initialize nodes
-// static int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
-//
-// The tree is also parameterized by an allocation policy
-// (Allocator). The policy is used for allocating lists in the C free
-// store or the zone; see zone.h.
-
-// Forward defined as
-// template <typename Config, class Allocator = FreeStoreAllocationPolicy>
-// class SplayTree;
-template <typename Config, class AllocationPolicy>
-class SplayTree {
- public:
- using Key = typename Config::Key;
- using Value = typename Config::Value;
-
- class Locator;
-
- explicit SplayTree(AllocationPolicy allocator = AllocationPolicy())
- : root_(nullptr), allocator_(allocator) {}
- ~SplayTree();
-
- V8_INLINE void* operator new(
- size_t size, AllocationPolicy allocator = AllocationPolicy()) {
- return allocator.New(static_cast<int>(size));
- }
- V8_INLINE void operator delete(void* p) { AllocationPolicy::Delete(p); }
- // Please the MSVC compiler. We should never have to execute this.
- V8_INLINE void operator delete(void* p, AllocationPolicy policy) {
- UNREACHABLE();
- }
-
- AllocationPolicy allocator() { return allocator_; }
-
- // Checks if there is a mapping for the key.
- bool Contains(const Key& key);
-
- // Inserts the given key in this tree with the given value. Returns
- // true if a node was inserted, otherwise false. If found the locator
- // is enabled and provides access to the mapping for the key.
- bool Insert(const Key& key, Locator* locator);
-
- // Looks up the key in this tree and returns true if it was found,
- // otherwise false. If the node is found the locator is enabled and
- // provides access to the mapping for the key.
- bool Find(const Key& key, Locator* locator);
-
- // Finds the mapping with the greatest key less than or equal to the
- // given key.
- bool FindGreatestLessThan(const Key& key, Locator* locator);
-
- // Find the mapping with the greatest key in this tree.
- bool FindGreatest(Locator* locator);
-
- // Finds the mapping with the least key greater than or equal to the
- // given key.
- bool FindLeastGreaterThan(const Key& key, Locator* locator);
-
- // Find the mapping with the least key in this tree.
- bool FindLeast(Locator* locator);
-
- // Move the node from one key to another.
- bool Move(const Key& old_key, const Key& new_key);
-
- // Remove the node with the given key from the tree.
- bool Remove(const Key& key);
-
- // Remove all keys from the tree.
- void Clear() { ResetRoot(); }
-
- bool is_empty() { return root_ == nullptr; }
-
- // Perform the splay operation for the given key. Moves the node with
- // the given key to the top of the tree. If no node has the given
- // key, the last node on the search path is moved to the top of the
- // tree.
- void Splay(const Key& key);
-
- class Node {
- public:
- Node(const Key& key, const Value& value)
- : key_(key), value_(value), left_(nullptr), right_(nullptr) {}
-
- V8_INLINE void* operator new(size_t size, AllocationPolicy allocator) {
- return allocator.New(static_cast<int>(size));
- }
- V8_INLINE void operator delete(void* p) {
- return AllocationPolicy::Delete(p);
- }
- // Please the MSVC compiler. We should never have to execute
- // this.
- V8_INLINE void operator delete(void* p, AllocationPolicy allocator) {
- UNREACHABLE();
- }
-
- Key key() { return key_; }
- Value value() { return value_; }
- Node* left() { return left_; }
- Node* right() { return right_; }
-
- private:
- friend class SplayTree;
- friend class Locator;
- Key key_;
- Value value_;
- Node* left_;
- Node* right_;
- };
-
- // A locator provides access to a node in the tree without actually
- // exposing the node.
- class Locator {
- public:
- explicit Locator(Node* node) : node_(node) {}
- Locator() : node_(nullptr) {}
- const Key& key() { return node_->key_; }
- Value& value() { return node_->value_; }
- void set_value(const Value& value) { node_->value_ = value; }
- inline void bind(Node* node) { node_ = node; }
-
- private:
- Node* node_;
- };
-
- template <class Callback>
- void ForEach(Callback* callback);
-
- protected:
- // Resets tree root. Existing nodes become unreachable.
- void ResetRoot() { root_ = nullptr; }
-
- private:
- // Search for a node with a given key. If found, root_ points
- // to the node.
- bool FindInternal(const Key& key);
-
- // Inserts a node assuming that root_ is already set up.
- void InsertInternal(int cmp, Node* node);
-
- // Removes root_ node.
- void RemoveRootNode(const Key& key);
-
- template <class Callback>
- class NodeToPairAdaptor {
- public:
- explicit NodeToPairAdaptor(Callback* callback) : callback_(callback) {}
- void Call(Node* node) { callback_->Call(node->key(), node->value()); }
-
- private:
- Callback* callback_;
-
- DISALLOW_COPY_AND_ASSIGN(NodeToPairAdaptor);
- };
-
- class NodeDeleter {
- public:
- NodeDeleter() = default;
- void Call(Node* node) { AllocationPolicy::Delete(node); }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(NodeDeleter);
- };
-
- template <class Callback>
- void ForEachNode(Callback* callback);
-
- Node* root_;
- AllocationPolicy allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(SplayTree);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_UTILS_SPLAY_TREE_H_
diff --git a/deps/v8/src/utils/utils.h b/deps/v8/src/utils/utils.h
index 17e07d3042..20d85aae10 100644
--- a/deps/v8/src/utils/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -777,36 +777,16 @@ inline T truncate_to_intn(T x, unsigned n) {
return (x & ((static_cast<T>(1) << n) - 1));
}
-#define INT_1_TO_63_LIST(V) \
- V(1) \
- V(2) \
- V(3) \
- V(4) \
- V(5) \
- V(6) \
- V(7) \
- V(8) \
- V(9) \
- V(10) \
- V(11) \
- V(12) \
- V(13) \
- V(14) \
- V(15) \
- V(16) \
- V(17) \
- V(18) \
- V(19) \
- V(20) \
- V(21) \
- V(22) \
- V(23) \
- V(24) \
- V(25) \
- V(26) V(27) V(28) V(29) V(30) V(31) V(32) V(33) V(34) V(35) V(36) V(37) \
- V(38) V(39) V(40) V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) \
- V(50) V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
- V(61) V(62) V(63)
+// clang-format off
+#define INT_1_TO_63_LIST(V) \
+ V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) V(9) V(10) \
+ V(11) V(12) V(13) V(14) V(15) V(16) V(17) V(18) V(19) V(20) \
+ V(21) V(22) V(23) V(24) V(25) V(26) V(27) V(28) V(29) V(30) \
+ V(31) V(32) V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+ V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) V(50) \
+ V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
+ V(61) V(62) V(63)
+// clang-format on
#define DECLARE_IS_INT_N(N) \
inline bool is_int##N(int64_t x) { return is_intn(x, N); }
@@ -875,12 +855,6 @@ class BailoutId {
int ToInt() const { return id_; }
static BailoutId None() { return BailoutId(kNoneId); }
- static BailoutId ScriptContext() { return BailoutId(kScriptContextId); }
- static BailoutId FunctionContext() { return BailoutId(kFunctionContextId); }
- static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
- static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
- static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
- static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
// Special bailout id support for deopting into the {JSConstructStub} stub.
// The following hard-coded deoptimization points are supported by the stub:
@@ -905,25 +879,10 @@ class BailoutId {
static const int kNoneId = -1;
// Using 0 could disguise errors.
- static const int kScriptContextId = 1;
- static const int kFunctionContextId = 2;
- static const int kFunctionEntryId = 3;
-
- // This AST id identifies the point after the declarations have been visited.
- // We need it to capture the environment effects of declarations that emit
- // code (function declarations).
- static const int kDeclarationsId = 4;
-
- // Every FunctionState starts with this id.
- static const int kFirstUsableId = 5;
-
- // Every compiled stub starts with this id.
- static const int kStubEntryId = 6;
-
// Builtin continuations bailout ids start here. If you need to add a
// non-builtin BailoutId, add it before this id so that this Id has the
// highest number.
- static const int kFirstBuiltinContinuationId = 7;
+ static const int kFirstBuiltinContinuationId = 1;
int id_;
};
diff --git a/deps/v8/src/utils/vector.h b/deps/v8/src/utils/vector.h
index 5b6c878e34..dd5c59e553 100644
--- a/deps/v8/src/utils/vector.h
+++ b/deps/v8/src/utils/vector.h
@@ -230,6 +230,8 @@ constexpr Vector<const uint8_t> StaticCharVector(const char (&array)[N]) {
return Vector<const uint8_t>::cast(Vector<const char>(array, N - 1));
}
+// The resulting vector does not contain a null-termination byte. If you want
+// the null byte, use ArrayVector("foo").
inline Vector<const char> CStrVector(const char* data) {
return Vector<const char>(data, strlen(data));
}
@@ -250,6 +252,9 @@ inline Vector<char> MutableCStrVector(char* data, size_t max) {
return Vector<char>(data, strnlen(data, max));
}
+// For string literals, ArrayVector("foo") returns a vector ['f', 'o', 'o', \0]
+// with length 4 and null-termination.
+// If you want ['f', 'o', 'o'], use CStrVector("foo").
template <typename T, size_t N>
inline constexpr Vector<T> ArrayVector(T (&arr)[N]) {
return Vector<T>{arr, N};
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index c9b1aa4d78..8aa6e24739 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
ahaas@chromium.org
bbudge@chromium.org
binji@chromium.org
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index b2cd566873..834eb181d8 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("arm " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -223,7 +221,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
int LiftoffAssembler::PrepareStackFrame() {
if (!CpuFeatures::IsSupported(ARMv7)) {
- BAILOUT("Armv6 not supported");
+ bailout(kUnsupportedArchitecture, "Armv6 not supported");
return 0;
}
uint32_t offset = static_cast<uint32_t>(pc_offset());
@@ -247,7 +245,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) {
- BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
+ bailout(kOtherReason,
+ "Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
@@ -750,7 +749,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
if (!CpuFeatures::IsSupported(SUDIV)) {
- BAILOUT("i32_divs");
+ bailout(kMissingCPUFeature, "i32_divs");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -778,7 +777,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
if (!CpuFeatures::IsSupported(SUDIV)) {
- BAILOUT("i32_divu");
+ bailout(kMissingCPUFeature, "i32_divu");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -793,7 +792,7 @@ void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support.
- BAILOUT("i32_rems");
+ bailout(kMissingCPUFeature, "i32_rems");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -814,7 +813,7 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support.
- BAILOUT("i32_remu");
+ bailout(kMissingCPUFeature, "i32_remu");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -1564,6 +1563,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index b1d71dce2f..57a157d3a7 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("arm64 " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -135,7 +133,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
if (!IsImmAddSub(bytes)) {
// Stack greater than 4M! Because this is a quite improbable case, we
// just fallback to Turbofan.
- BAILOUT("Stack too big");
+ bailout(kOtherReason, "Stack too big");
return;
}
}
@@ -144,7 +142,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) {
- BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
+ bailout(kOtherReason,
+ "Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
@@ -173,7 +172,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
patching_assembler.PatchSubSp(bytes);
}
-void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
+void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
@@ -1088,6 +1087,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 1b5ca87c3d..7bc3596d2e 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define REQUIRE_CPU_FEATURE(name, ...) \
- if (!CpuFeatures::IsSupported(name)) { \
- bailout("no " #name); \
- return __VA_ARGS__; \
- } \
+#define REQUIRE_CPU_FEATURE(name, ...) \
+ if (!CpuFeatures::IsSupported(name)) { \
+ bailout(kMissingCPUFeature, "no " #name); \
+ return __VA_ARGS__; \
+ } \
CpuFeatureScope feature(this, name);
namespace liftoff {
@@ -1390,7 +1390,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout("no SSE4.1");
+ assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 40e1636b6e..766ce71db1 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -11,8 +11,8 @@
#include "src/base/bits.h"
#include "src/base/small-vector.h"
#include "src/codegen/macro-assembler.h"
-#include "src/execution/frames.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
+#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -635,13 +635,16 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
const CacheState* cache_state() const { return &cache_state_; }
- bool did_bailout() { return bailout_reason_ != nullptr; }
- const char* bailout_reason() const { return bailout_reason_; }
+ bool did_bailout() { return bailout_reason_ != kSuccess; }
+ LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
+ const char* bailout_detail() const { return bailout_detail_; }
- void bailout(const char* reason) {
- if (bailout_reason_ != nullptr) return;
+ void bailout(LiftoffBailoutReason reason, const char* detail) {
+ DCHECK_NE(kSuccess, reason);
+ if (bailout_reason_ != kSuccess) return;
AbortCompilation();
bailout_reason_ = reason;
+ bailout_detail_ = detail;
}
private:
@@ -655,7 +658,8 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
uint32_t num_used_spill_slots_ = 0;
- const char* bailout_reason_ = nullptr;
+ LiftoffBailoutReason bailout_reason_ = kSuccess;
+ const char* bailout_detail_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index caf00a24ca..7a87ae1a95 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -174,7 +174,8 @@ class LiftoffCompiler {
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_) {}
- bool ok() const { return ok_; }
+ bool did_bailout() const { return bailout_reason_ != kSuccess; }
+ LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
void GetCode(CodeDesc* desc) {
asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
@@ -195,30 +196,51 @@ class LiftoffCompiler {
return __ GetTotalFrameSlotCount();
}
- void unsupported(FullDecoder* decoder, const char* reason) {
- ok_ = false;
- TRACE("unsupported: %s\n", reason);
+ void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
+ const char* detail) {
+ DCHECK_NE(kSuccess, reason);
+ if (did_bailout()) return;
+ bailout_reason_ = reason;
+ TRACE("unsupported: %s\n", detail);
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
- reason);
+ detail);
UnuseLabels(decoder);
}
bool DidAssemblerBailout(FullDecoder* decoder) {
if (decoder->failed() || !__ did_bailout()) return false;
- unsupported(decoder, __ bailout_reason());
+ unsupported(decoder, __ bailout_reason(), __ bailout_detail());
return true;
}
+ LiftoffBailoutReason BailoutReasonForType(ValueType type) {
+ switch (type) {
+ case kWasmS128:
+ return kSimd;
+ case kWasmAnyRef:
+ case kWasmFuncRef:
+ case kWasmNullRef:
+ return kAnyRef;
+ case kWasmExnRef:
+ return kExceptionHandling;
+ case kWasmBottom:
+ return kMultiValue;
+ default:
+ return kOtherReason;
+ }
+ }
+
bool CheckSupportedType(FullDecoder* decoder,
Vector<const ValueType> supported_types,
ValueType type, const char* context) {
- char buffer[128];
// Check supported types.
for (ValueType supported : supported_types) {
if (type == supported) return true;
}
- SNPrintF(ArrayVector(buffer), "%s %s", ValueTypes::TypeName(type), context);
- unsupported(decoder, buffer);
+ LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
+ EmbeddedVector<char, 128> buffer;
+ SNPrintF(buffer, "%s %s", ValueTypes::TypeName(type), context);
+ unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -394,17 +416,17 @@ class LiftoffCompiler {
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
}
- void GenerateOutOfLineCode(OutOfLineCode& ool) {
- __ bind(ool.label.get());
- const bool is_stack_check = ool.stub == WasmCode::kWasmStackGuard;
+ void GenerateOutOfLineCode(OutOfLineCode* ool) {
+ __ bind(ool->label.get());
+ const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;
const bool is_mem_out_of_bounds =
- ool.stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
+ ool->stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
if (is_mem_out_of_bounds && env_->use_trap_handler) {
uint32_t pc = static_cast<uint32_t>(__ pc_offset());
DCHECK_EQ(pc, __ pc_offset());
protected_instructions_.emplace_back(
- trap_handler::ProtectedInstructionData{ool.pc, pc});
+ trap_handler::ProtectedInstructionData{ool->pc, pc});
}
if (!env_->runtime_exception_support) {
@@ -419,16 +441,16 @@ class LiftoffCompiler {
return;
}
- if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
+ if (!ool->regs_to_save.is_empty()) __ PushRegisters(ool->regs_to_save);
source_position_table_builder_.AddPosition(
- __ pc_offset(), SourcePosition(ool.position), false);
- __ CallRuntimeStub(ool.stub);
+ __ pc_offset(), SourcePosition(ool->position), false);
+ __ CallRuntimeStub(ool->stub);
safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
- DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
- if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
+ DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
+ if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
if (is_stack_check) {
- __ emit_jump(ool.continuation.get());
+ __ emit_jump(ool->continuation.get());
} else {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
@@ -437,7 +459,7 @@ class LiftoffCompiler {
void FinishFunction(FullDecoder* decoder) {
if (DidAssemblerBailout(decoder)) return;
for (OutOfLineCode& ool : out_of_line_code_) {
- GenerateOutOfLineCode(ool);
+ GenerateOutOfLineCode(&ool);
}
__ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
__ GetTotalFrameSlotCount());
@@ -449,7 +471,7 @@ class LiftoffCompiler {
}
void OnFirstError(FullDecoder* decoder) {
- ok_ = false;
+ if (!did_bailout()) bailout_reason_ = kDecodeError;
UnuseLabels(decoder);
asm_.AbortCompilation();
}
@@ -481,19 +503,20 @@ class LiftoffCompiler {
}
void Try(FullDecoder* decoder, Control* block) {
- unsupported(decoder, "try");
+ unsupported(decoder, kExceptionHandling, "try");
}
void Catch(FullDecoder* decoder, Control* block, Value* exception) {
- unsupported(decoder, "catch");
+ unsupported(decoder, kExceptionHandling, "catch");
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
- if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
- return unsupported(decoder, "multi-value if");
+ if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1) {
+ return unsupported(decoder, kMultiValue, "multi-value if");
+ }
// Allocate the else state.
if_block->else_state = base::make_unique<ElseState>();
@@ -773,8 +796,23 @@ class LiftoffCompiler {
__ emit_i64_eqz(dst.gp(), src);
});
break;
+ case WasmOpcode::kExprI64Clz:
+ case WasmOpcode::kExprI64Ctz:
+ case WasmOpcode::kExprI64Popcnt:
+ return unsupported(decoder, kComplexOperation,
+ WasmOpcodes::OpcodeName(opcode));
+ case WasmOpcode::kExprI32SConvertSatF32:
+ case WasmOpcode::kExprI32UConvertSatF32:
+ case WasmOpcode::kExprI32SConvertSatF64:
+ case WasmOpcode::kExprI32UConvertSatF64:
+ case WasmOpcode::kExprI64SConvertSatF32:
+ case WasmOpcode::kExprI64UConvertSatF32:
+ case WasmOpcode::kExprI64SConvertSatF64:
+ case WasmOpcode::kExprI64UConvertSatF64:
+ return unsupported(decoder, kNonTrappingFloatToInt,
+ WasmOpcodes::OpcodeName(opcode));
default:
- return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ UNREACHABLE();
}
#undef CASE_I32_UNOP
#undef CASE_I32_SIGN_EXTENSION
@@ -1104,8 +1142,12 @@ class LiftoffCompiler {
}
});
break;
+ case WasmOpcode::kExprI64Rol:
+ case WasmOpcode::kExprI64Ror:
+ return unsupported(decoder, kComplexOperation,
+ WasmOpcodes::OpcodeName(opcode));
default:
- return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
+ UNREACHABLE();
}
#undef CASE_I32_BINOP
#undef CASE_I32_BINOPI
@@ -1153,11 +1195,11 @@ class LiftoffCompiler {
}
void RefNull(FullDecoder* decoder, Value* result) {
- unsupported(decoder, "ref_null");
+ unsupported(decoder, kAnyRef, "ref_null");
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- unsupported(decoder, "func");
+ unsupported(decoder, kAnyRef, "func");
}
void Drop(FullDecoder* decoder, const Value& value) {
@@ -1169,7 +1211,9 @@ class LiftoffCompiler {
void ReturnImpl(FullDecoder* decoder) {
size_t num_returns = decoder->sig_->return_count();
- if (num_returns > 1) return unsupported(decoder, "multi-return");
+ if (num_returns > 1) {
+ return unsupported(decoder, kMultiValue, "multi-return");
+ }
if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
@@ -1201,24 +1245,24 @@ class LiftoffCompiler {
}
}
- void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
+ void SetLocalFromStackSlot(LiftoffAssembler::VarState* dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
- ValueType type = dst_slot.type();
- if (dst_slot.is_reg()) {
- LiftoffRegister slot_reg = dst_slot.reg();
+ ValueType type = dst_slot->type();
+ if (dst_slot->is_reg()) {
+ LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
- __ Fill(dst_slot.reg(), state.stack_height() - 1, type);
+ __ Fill(dst_slot->reg(), state.stack_height() - 1, type);
return;
}
state.dec_used(slot_reg);
- dst_slot.MakeStack();
+ dst_slot->MakeStack();
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
- dst_slot = LiftoffAssembler::VarState(type, dst_reg);
+ *dst_slot = LiftoffAssembler::VarState(type, dst_reg);
__ cache_state()->inc_used(dst_reg);
}
@@ -1237,7 +1281,7 @@ class LiftoffCompiler {
target_slot = source_slot;
break;
case kStack:
- SetLocalFromStackSlot(target_slot, local_index);
+ SetLocalFromStackSlot(&target_slot, local_index);
break;
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
@@ -1254,12 +1298,12 @@ class LiftoffCompiler {
}
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
- LiftoffRegList& pinned, uint32_t* offset) {
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
+ LiftoffRegList* pinned, uint32_t* offset) {
+ Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
- global->index * sizeof(Address), kPointerLoadType, pinned);
+ global->index * sizeof(Address), kPointerLoadType, *pinned);
*offset = 0;
} else {
LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
@@ -1275,7 +1319,7 @@ class LiftoffCompiler {
return;
LiftoffRegList pinned;
uint32_t offset = 0;
- Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
+ Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LoadType type = LoadType::ForValueType(global->type);
@@ -1290,20 +1334,20 @@ class LiftoffCompiler {
return;
LiftoffRegList pinned;
uint32_t offset = 0;
- Register addr = GetGlobalBaseAndOffset(global, pinned, &offset);
+ Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueType(global->type);
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
- void GetTable(FullDecoder* decoder, const Value& index, Value* result,
- TableIndexImmediate<validate>& imm) {
- unsupported(decoder, "table_get");
+ void TableGet(FullDecoder* decoder, const Value& index, Value* result,
+ const TableIndexImmediate<validate>& imm) {
+ unsupported(decoder, kAnyRef, "table_get");
}
- void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
- TableIndexImmediate<validate>& imm) {
- unsupported(decoder, "table_set");
+ void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
+ const TableIndexImmediate<validate>& imm) {
+ unsupported(decoder, kAnyRef, "table_set");
}
void Unreachable(FullDecoder* decoder) {
@@ -1370,8 +1414,8 @@ class LiftoffCompiler {
// Generate a branch table case, potentially reusing previously generated
// stack transfer code.
void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
- std::map<uint32_t, MovableLabel>& br_targets) {
- MovableLabel& label = br_targets[br_depth];
+ std::map<uint32_t, MovableLabel>* br_targets) {
+ MovableLabel& label = (*br_targets)[br_depth];
if (label.get()->is_bound()) {
__ jmp(label.get());
} else {
@@ -1384,13 +1428,13 @@ class LiftoffCompiler {
// TODO(wasm): Generate a real branch table (like TF TableSwitch).
void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
LiftoffRegister value, uint32_t min, uint32_t max,
- BranchTableIterator<validate>& table_iterator,
- std::map<uint32_t, MovableLabel>& br_targets) {
+ BranchTableIterator<validate>* table_iterator,
+ std::map<uint32_t, MovableLabel>* br_targets) {
DCHECK_LT(min, max);
// Check base case.
if (max == min + 1) {
- DCHECK_EQ(min, table_iterator.cur_index());
- GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ DCHECK_EQ(min, table_iterator->cur_index());
+ GenerateBrCase(decoder, table_iterator->next(), br_targets);
return;
}
@@ -1422,14 +1466,14 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
value.gp(), tmp.gp());
- GenerateBrTable(decoder, tmp, value, 0, imm.table_count, table_iterator,
- br_targets);
+ GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
+ &br_targets);
__ bind(&case_default);
}
// Generate the default case.
- GenerateBrCase(decoder, table_iterator.next(), br_targets);
+ GenerateBrCase(decoder, table_iterator.next(), &br_targets);
DCHECK(!table_iterator.has_next());
}
@@ -1593,7 +1637,7 @@ class LiftoffCompiler {
}
Register AddMemoryMasking(Register index, uint32_t* offset,
- LiftoffRegList& pinned) {
+ LiftoffRegList* pinned) {
if (!FLAG_untrusted_code_mitigations || env_->use_trap_handler) {
return index;
}
@@ -1601,11 +1645,11 @@ class LiftoffCompiler {
// Make sure that we can overwrite {index}.
if (__ cache_state()->is_used(LiftoffRegister(index))) {
Register old_index = index;
- pinned.clear(LiftoffRegister(old_index));
- index = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ pinned->clear(LiftoffRegister(old_index));
+ index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
if (index != old_index) __ Move(index, old_index, kWasmI32);
}
- Register tmp = __ GetUnusedRegister(kGpReg, pinned).gp();
+ Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
__ emit_ptrsize_add(index, index, *offset);
LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
__ emit_ptrsize_and(index, index, tmp);
@@ -1625,7 +1669,7 @@ class LiftoffCompiler {
return;
}
uint32_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, pinned);
+ index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("Load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -1659,7 +1703,7 @@ class LiftoffCompiler {
return;
}
uint32_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, pinned);
+ index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("Store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
@@ -1720,12 +1764,14 @@ class LiftoffCompiler {
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
- if (imm.sig->return_count() > 1)
- return unsupported(decoder, "multi-return");
+ if (imm.sig->return_count() > 1) {
+ return unsupported(decoder, kMultiValue, "multi-return");
+ }
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
- "return"))
+ "return")) {
return;
+ }
auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
@@ -1783,10 +1829,10 @@ class LiftoffCompiler {
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1) {
- return unsupported(decoder, "multi-return");
+ return unsupported(decoder, kMultiValue, "multi-return");
}
if (imm.table_index != 0) {
- return unsupported(decoder, "table index != 0");
+ return unsupported(decoder, kAnyRef, "table index != 0");
}
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
@@ -1918,96 +1964,99 @@ class LiftoffCompiler {
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
- unsupported(decoder, "return_call");
+ unsupported(decoder, kTailCall, "return_call");
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
- unsupported(decoder, "return_call_indirect");
+ unsupported(decoder, kTailCall, "return_call_indirect");
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
const Vector<Value> inputs, Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate>& imm, const Value& input,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
- unsupported(decoder, "simd");
+ unsupported(decoder, kSimd, "simd");
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
const Vector<Value>& args) {
- unsupported(decoder, "throw");
+ unsupported(decoder, kExceptionHandling, "throw");
}
void Rethrow(FullDecoder* decoder, const Value& exception) {
- unsupported(decoder, "rethrow");
+ unsupported(decoder, kExceptionHandling, "rethrow");
}
void BrOnException(FullDecoder* decoder, const Value& exception,
const ExceptionIndexImmediate<validate>& imm,
uint32_t depth, Vector<Value> values) {
- unsupported(decoder, "br_on_exn");
+ unsupported(decoder, kExceptionHandling, "br_on_exn");
}
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
- unsupported(decoder, "atomicop");
+ unsupported(decoder, kAtomics, "atomicop");
+ }
+ void AtomicFence(FullDecoder* decoder) {
+ unsupported(decoder, kAtomics, "atomic.fence");
}
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- unsupported(decoder, "memory.init");
+ unsupported(decoder, kBulkMemory, "memory.init");
}
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
- unsupported(decoder, "data.drop");
+ unsupported(decoder, kBulkMemory, "data.drop");
}
void MemoryCopy(FullDecoder* decoder,
const MemoryCopyImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
- unsupported(decoder, "memory.copy");
+ unsupported(decoder, kBulkMemory, "memory.copy");
}
void MemoryFill(FullDecoder* decoder,
const MemoryIndexImmediate<validate>& imm, const Value& dst,
const Value& value, const Value& size) {
- unsupported(decoder, "memory.fill");
+ unsupported(decoder, kBulkMemory, "memory.fill");
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
Vector<Value> args) {
- unsupported(decoder, "table.init");
+ unsupported(decoder, kBulkMemory, "table.init");
}
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
- unsupported(decoder, "elem.drop");
+ unsupported(decoder, kBulkMemory, "elem.drop");
}
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- unsupported(decoder, "table.copy");
+ unsupported(decoder, kBulkMemory, "table.copy");
}
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- Value& value, Value& delta, Value* result) {
- unsupported(decoder, "table.grow");
+ const Value& value, const Value& delta, Value* result) {
+ unsupported(decoder, kAnyRef, "table.grow");
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value* result) {
- unsupported(decoder, "table.size");
+ unsupported(decoder, kAnyRef, "table.size");
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
- Value& start, Value& value, Value& count) {
- unsupported(decoder, "table.fill");
+ const Value& start, const Value& value, const Value& count) {
+ unsupported(decoder, kAnyRef, "table.fill");
}
private:
LiftoffAssembler asm_;
compiler::CallDescriptor* const descriptor_;
CompilationEnv* const env_;
- bool ok_ = true;
+ LiftoffBailoutReason bailout_reason_ = kSuccess;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
@@ -2066,11 +2115,17 @@ WasmCompilationResult ExecuteLiftoffCompilation(AccountingAllocator* allocator,
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
- if (decoder.failed()) {
- compiler->OnFirstError(&decoder);
- return WasmCompilationResult{};
- }
- if (!compiler->ok()) {
+ if (decoder.failed()) compiler->OnFirstError(&decoder);
+
+ // Check that the histogram for the bailout reasons has the correct size.
+ DCHECK_EQ(0, counters->liftoff_bailout_reasons()->min());
+ DCHECK_EQ(kNumBailoutReasons - 1, counters->liftoff_bailout_reasons()->max());
+ DCHECK_EQ(kNumBailoutReasons,
+ counters->liftoff_bailout_reasons()->num_buckets());
+ // Register the bailout reason (can also be {kSuccess}).
+ counters->liftoff_bailout_reasons()->AddSample(
+ static_cast<int>(compiler->bailout_reason()));
+ if (compiler->did_bailout()) {
// Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
return WasmCompilationResult{};
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index f310b9a54b..d40b92bef4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -19,6 +19,38 @@ struct CompilationEnv;
struct FunctionBody;
struct WasmFeatures;
+// Note: If this list changes, also the histogram "V8.LiftoffBailoutReasons"
+// on the chromium side needs to be updated.
+// Deprecating entries is always fine. Repurposing works if you don't care about
+// temporary mix-ups. Increasing the number of reasons {kNumBailoutReasons} is
+// more tricky, and might require introducing a new (updated) histogram.
+enum LiftoffBailoutReason : int8_t {
+ // Nothing actually failed.
+ kSuccess = 0,
+ // Compilation failed, but not because of Liftoff.
+ kDecodeError = 1,
+ // Liftoff is not implemented on that architecture.
+ kUnsupportedArchitecture = 2,
+ // More complex code would be needed because a CPU feature is not present.
+ kMissingCPUFeature = 3,
+ // Liftoff does not implement a complex (and rare) instruction.
+ kComplexOperation = 4,
+ // Unimplemented proposals:
+ kSimd = 5,
+ kAnyRef = 6,
+ kExceptionHandling = 7,
+ kMultiValue = 8,
+ kTailCall = 9,
+ kAtomics = 10,
+ kBulkMemory = 11,
+ kNonTrappingFloatToInt = 12,
+ // A little gap, for forward compatibility.
+ // Any other reason (use rarely; introduce new reasons if this spikes).
+ kOtherReason = 20,
+ // Marker:
+ kNumBailoutReasons
+};
+
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
Counters*, WasmFeatures* detected_features);
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 5be769685c..e82ffe8f67 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("mips " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -854,7 +852,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f32_copysign");
+ bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -881,7 +879,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f64_copysign");
+ bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
@@ -1026,10 +1024,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
- } else {
- BAILOUT("emit_type_conversion kExprI32SConvertF64");
- return true;
}
+ bailout(kUnsupportedArchitecture, "kExprI32SConvertF64");
+ return true;
}
case kExprI32UConvertF64: {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -1049,10 +1046,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
- } else {
- BAILOUT("emit_type_conversion kExprI32UConvertF64");
- return true;
}
+ bailout(kUnsupportedArchitecture, "kExprI32UConvertF64");
+ return true;
}
case kExprI32ReinterpretF32:
mfc1(dst.gp(), src.fp());
@@ -1116,26 +1112,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -1239,29 +1235,29 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
- Condition condition) {
+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
+ bool* predicate) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kUnequal:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
@@ -1287,7 +1283,7 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1316,7 +1312,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1511,6 +1507,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 1da72cb9b8..9c87dca733 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("mips64 " reason)
-
namespace v8 {
namespace internal {
namespace wasm {
@@ -742,7 +740,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f32_copysign");
+ bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -769,7 +767,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("f64_copysign");
+ bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
@@ -1010,26 +1008,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -1096,29 +1094,29 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
namespace liftoff {
-inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
- Condition condition) {
+inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
+ bool* predicate) {
switch (condition) {
case kEqual:
- predicate = true;
+ *predicate = true;
return EQ;
case kUnequal:
- predicate = false;
+ *predicate = false;
return EQ;
case kUnsignedLessThan:
- predicate = true;
+ *predicate = true;
return OLT;
case kUnsignedGreaterEqual:
- predicate = false;
+ *predicate = false;
return OLT;
case kUnsignedLessEqual:
- predicate = true;
+ *predicate = true;
return OLE;
case kUnsignedGreaterThan:
- predicate = false;
+ *predicate = false;
return OLE;
default:
- predicate = true;
+ *predicate = true;
break;
}
UNREACHABLE();
@@ -1144,7 +1142,7 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF32(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1173,7 +1171,7 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
TurboAssembler::li(dst, 1);
bool predicate;
- FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
+ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate);
TurboAssembler::CompareF64(fcond, lhs, rhs);
if (predicate) {
TurboAssembler::LoadZeroIfNotFPUCondition(dst);
@@ -1351,6 +1349,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
-#undef BAILOUT
-
#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 577df835e8..a690a1c090 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -7,20 +7,19 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("ppc " reason)
namespace v8 {
namespace internal {
namespace wasm {
int LiftoffAssembler::PrepareStackFrame() {
- BAILOUT("PrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
- BAILOUT("PatchPrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
}
void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
@@ -29,136 +28,136 @@ void LiftoffAssembler::AbortCompilation() { FinishCode(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- BAILOUT("LoadConstant");
+ bailout(kUnsupportedArchitecture, "LoadConstant");
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
- BAILOUT("LoadFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
uint32_t offset) {
- BAILOUT("LoadTaggedPointerFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
void LiftoffAssembler::SpillInstance(Register instance) {
- BAILOUT("SpillInstance");
+ bailout(kUnsupportedArchitecture, "SpillInstance");
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- BAILOUT("FillInstanceInto");
+ bailout(kUnsupportedArchitecture, "FillInstanceInto");
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
LiftoffRegList pinned) {
- BAILOUT("LoadTaggedPointer");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- BAILOUT("Load");
+ bailout(kUnsupportedArchitecture, "Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- BAILOUT("Store");
+ bailout(kUnsupportedArchitecture, "Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- BAILOUT("MoveStackValue");
+ bailout(kUnsupportedArchitecture, "MoveStackValue");
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- BAILOUT("Move Register");
+ bailout(kUnsupportedArchitecture, "Move Register");
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
- BAILOUT("Move DoubleRegister");
+ bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
- BAILOUT("Spill register");
+ bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- BAILOUT("Spill value");
+ bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
- BAILOUT("Fill");
+ bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
- BAILOUT("FillI64Half");
+ bailout(kUnsupportedArchitecture, "FillI64Half");
}
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("i32 binop:: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop:: " #name); \
}
#define UNIMPLEMENTED_I32_BINOP_I(name) \
UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
int32_t imm) { \
- BAILOUT("i32 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- BAILOUT("i64 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP_I(name) \
UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t imm) { \
- BAILOUT("i64_i binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64_i binop: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i32 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
}
#define UNIMPLEMENTED_I64_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i64 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
}
UNIMPLEMENTED_I32_BINOP_I(i32_add)
@@ -227,65 +226,65 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i32_divs");
+ bailout(kUnsupportedArchitecture, "i32_divs");
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_divu");
+ bailout(kUnsupportedArchitecture, "i32_divu");
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_rems");
+ bailout(kUnsupportedArchitecture, "i32_rems");
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_remu");
+ bailout(kUnsupportedArchitecture, "i32_remu");
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
- BAILOUT("i32_shr");
+ bailout(kUnsupportedArchitecture, "i32_shr");
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i64_divs");
+ bailout(kUnsupportedArchitecture, "i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_divu");
+ bailout(kUnsupportedArchitecture, "i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_rems");
+ bailout(kUnsupportedArchitecture, "i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_remu");
+ bailout(kUnsupportedArchitecture, "i64_remu");
return true;
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
int amount) {
- BAILOUT("i64_shr");
+ bailout(kUnsupportedArchitecture, "i64_shr");
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_PPC64
- BAILOUT("emit_i32_to_intptr");
+ bailout(kUnsupportedArchitecture, "emit_i32_to_intptr");
#else
// This is a nop on ppc32.
#endif
@@ -294,96 +293,100 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- BAILOUT("emit_type_conversion");
+ bailout(kUnsupportedArchitecture, "emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
}
-void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
-void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
- BAILOUT("emit_cond_jump");
+ bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- BAILOUT("emit_i32_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i32_eqz");
}
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- BAILOUT("emit_i32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- BAILOUT("emit_i64_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i64_eqz");
}
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- BAILOUT("emit_i64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
}
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
}
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- BAILOUT("StackCheck");
+ bailout(kUnsupportedArchitecture, "StackCheck");
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- BAILOUT("CallTrapCallbackForTesting");
+ bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ bailout(kUnsupportedArchitecture, "AssertUnreachable");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ bailout(kUnsupportedArchitecture, "PushRegisters");
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ bailout(kUnsupportedArchitecture, "PopRegisters");
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- BAILOUT("DropStackSlotsAndRet");
+ bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -391,33 +394,33 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- BAILOUT("CallC");
+ bailout(kUnsupportedArchitecture, "CallC");
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ bailout(kUnsupportedArchitecture, "CallIndirect");
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- BAILOUT("CallRuntimeStub");
+ bailout(kUnsupportedArchitecture, "CallRuntimeStub");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ bailout(kUnsupportedArchitecture, "AllocateStackSlot");
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
- asm_->BAILOUT("LiftoffStackSlots::Construct");
+ asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1e01bec407..d17c7dada1 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -7,20 +7,19 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#define BAILOUT(reason) bailout("s390 " reason)
namespace v8 {
namespace internal {
namespace wasm {
int LiftoffAssembler::PrepareStackFrame() {
- BAILOUT("PrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PrepareStackFrame");
return 0;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
uint32_t stack_slots) {
- BAILOUT("PatchPrepareStackFrame");
+ bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
}
void LiftoffAssembler::FinishCode() {}
@@ -29,136 +28,136 @@ void LiftoffAssembler::AbortCompilation() {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- BAILOUT("LoadConstant");
+ bailout(kUnsupportedArchitecture, "LoadConstant");
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
int size) {
- BAILOUT("LoadFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
uint32_t offset) {
- BAILOUT("LoadTaggedPointerFromInstance");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
void LiftoffAssembler::SpillInstance(Register instance) {
- BAILOUT("SpillInstance");
+ bailout(kUnsupportedArchitecture, "SpillInstance");
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- BAILOUT("FillInstanceInto");
+ bailout(kUnsupportedArchitecture, "FillInstanceInto");
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
uint32_t offset_imm,
LiftoffRegList pinned) {
- BAILOUT("LoadTaggedPointer");
+ bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- BAILOUT("Load");
+ bailout(kUnsupportedArchitecture, "Load");
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- BAILOUT("Store");
+ bailout(kUnsupportedArchitecture, "Store");
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
- BAILOUT("MoveStackValue");
+ bailout(kUnsupportedArchitecture, "MoveStackValue");
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- BAILOUT("Move Register");
+ bailout(kUnsupportedArchitecture, "Move Register");
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueType type) {
- BAILOUT("Move DoubleRegister");
+ bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
- BAILOUT("Spill register");
+ bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
- BAILOUT("Spill value");
+ bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
- BAILOUT("Fill");
+ bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
- BAILOUT("FillI64Half");
+ bailout(kUnsupportedArchitecture, "FillI64Half");
}
#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("i32 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop: " #name); \
}
#define UNIMPLEMENTED_I32_BINOP_I(name) \
UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
int32_t imm) { \
- BAILOUT("i32 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- BAILOUT("i64 binop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP_I(name) \
UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
int32_t imm) { \
- BAILOUT("i64 binop_i: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 binop_i: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop: " #name); \
+ bailout(kUnsupportedArchitecture, "fp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_I32_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i32 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
}
#define UNIMPLEMENTED_I64_SHIFTOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
Register amount, LiftoffRegList pinned) { \
- BAILOUT("i64 shiftop: " #name); \
+ bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
}
UNIMPLEMENTED_I32_BINOP_I(i32_add)
@@ -227,65 +226,65 @@ UNIMPLEMENTED_FP_UNOP(f64_sqrt)
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i32_divs");
+ bailout(kUnsupportedArchitecture, "i32_divs");
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_divu");
+ bailout(kUnsupportedArchitecture, "i32_divu");
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_rems");
+ bailout(kUnsupportedArchitecture, "i32_rems");
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- BAILOUT("i32_remu");
+ bailout(kUnsupportedArchitecture, "i32_remu");
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
- BAILOUT("i32_shr");
+ bailout(kUnsupportedArchitecture, "i32_shr");
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- BAILOUT("i64_divs");
+ bailout(kUnsupportedArchitecture, "i64_divs");
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_divu");
+ bailout(kUnsupportedArchitecture, "i64_divu");
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_rems");
+ bailout(kUnsupportedArchitecture, "i64_rems");
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- BAILOUT("i64_remu");
+ bailout(kUnsupportedArchitecture, "i64_remu");
return true;
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister lhs,
int amount) {
- BAILOUT("i64_shr");
+ bailout(kUnsupportedArchitecture, "i64_shr");
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
- BAILOUT("emit_i32_to_intptr");
+ bailout(kUnsupportedArchitecture, "emit_i32_to_intptr");
#else
// This is a nop on s390.
#endif
@@ -294,96 +293,100 @@ void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister dst,
LiftoffRegister src, Label* trap) {
- BAILOUT("emit_type_conversion");
+ bailout(kUnsupportedArchitecture, "emit_type_conversion");
return true;
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- BAILOUT("emit_i32_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i8");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i16");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
- BAILOUT("emit_i64_signextend_i32");
+ bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
}
-void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Label* label) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
-void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) {
+ bailout(kUnsupportedArchitecture, "emit_jump");
+}
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
- BAILOUT("emit_cond_jump");
+ bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- BAILOUT("emit_i32_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i32_eqz");
}
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- BAILOUT("emit_i32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- BAILOUT("emit_i64_eqz");
+ bailout(kUnsupportedArchitecture, "emit_i64_eqz");
}
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- BAILOUT("emit_i64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
}
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f32_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
}
void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
- BAILOUT("emit_f64_set_cond");
+ bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- BAILOUT("StackCheck");
+ bailout(kUnsupportedArchitecture, "StackCheck");
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- BAILOUT("CallTrapCallbackForTesting");
+ bailout(kUnsupportedArchitecture, "CallTrapCallbackForTesting");
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ bailout(kUnsupportedArchitecture, "AssertUnreachable");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ bailout(kUnsupportedArchitecture, "PushRegisters");
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ bailout(kUnsupportedArchitecture, "PopRegisters");
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- BAILOUT("DropStackSlotsAndRet");
+ bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
@@ -391,33 +394,33 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- BAILOUT("CallC");
+ bailout(kUnsupportedArchitecture, "CallC");
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ bailout(kUnsupportedArchitecture, "CallIndirect");
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
- BAILOUT("CallRuntimeStub");
+ bailout(kUnsupportedArchitecture, "CallRuntimeStub");
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ bailout(kUnsupportedArchitecture, "AllocateStackSlot");
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
- asm_->BAILOUT("LiftoffStackSlots::Construct");
+ asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index cbff0d4da9..43637985d0 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define REQUIRE_CPU_FEATURE(name, ...) \
- if (!CpuFeatures::IsSupported(name)) { \
- bailout("no " #name); \
- return __VA_ARGS__; \
- } \
+#define REQUIRE_CPU_FEATURE(name, ...) \
+ if (!CpuFeatures::IsSupported(name)) { \
+ bailout(kMissingCPUFeature, "no " #name); \
+ return __VA_ARGS__; \
+ } \
CpuFeatureScope feature(this, name);
namespace liftoff {
@@ -1260,7 +1260,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout("no SSE4.1");
+ assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index e5c1fa4686..86bba189b8 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -28,577 +28,131 @@
#include "include/libplatform/libplatform.h"
#include "src/api/api-inl.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-instantiate.h"
+#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
-// BEGIN FILE wasm-bin.cc
-
namespace wasm {
-namespace bin {
-
-////////////////////////////////////////////////////////////////////////////////
-// Encoding
-
-void encode_header(char*& ptr) {
- std::memcpy(ptr,
- "\x00"
- "asm\x01\x00\x00\x00",
- 8);
- ptr += 8;
-}
-
-void encode_size32(char*& ptr, size_t n) {
- assert(n <= 0xffffffff);
- for (int i = 0; i < 5; ++i) {
- *ptr++ = (n & 0x7f) | (i == 4 ? 0x00 : 0x80);
- n = n >> 7;
- }
-}
-void encode_valtype(char*& ptr, const ValType* type) {
- switch (type->kind()) {
- case I32:
- *ptr++ = 0x7f;
- break;
- case I64:
- *ptr++ = 0x7e;
- break;
- case F32:
- *ptr++ = 0x7d;
- break;
- case F64:
- *ptr++ = 0x7c;
- break;
- case FUNCREF:
- *ptr++ = 0x70;
- break;
- case ANYREF:
- *ptr++ = 0x6f;
- break;
- default:
- UNREACHABLE();
- }
-}
-
-auto zero_size(const ValType* type) -> size_t {
- switch (type->kind()) {
- case I32:
- return 1;
- case I64:
- return 1;
- case F32:
- return 4;
- case F64:
- return 8;
- case FUNCREF:
- return 0;
- case ANYREF:
- return 0;
- default:
- UNREACHABLE();
- }
-}
-
-void encode_const_zero(char*& ptr, const ValType* type) {
- switch (type->kind()) {
- case I32:
- *ptr++ = 0x41;
- break;
- case I64:
- *ptr++ = 0x42;
- break;
- case F32:
- *ptr++ = 0x43;
- break;
- case F64:
- *ptr++ = 0x44;
- break;
- default:
- UNREACHABLE();
- }
- for (size_t i = 0; i < zero_size(type); ++i) *ptr++ = 0;
-}
-
-auto wrapper(const FuncType* type) -> vec<byte_t> {
- auto in_arity = type->params().size();
- auto out_arity = type->results().size();
- auto size = 39 + in_arity + out_arity;
- auto binary = vec<byte_t>::make_uninitialized(size);
- auto ptr = binary.get();
-
- encode_header(ptr);
-
- *ptr++ = i::wasm::kTypeSectionCode;
- encode_size32(ptr, 12 + in_arity + out_arity); // size
- *ptr++ = 1; // length
- *ptr++ = i::wasm::kWasmFunctionTypeCode;
- encode_size32(ptr, in_arity);
- for (size_t i = 0; i < in_arity; ++i) {
- encode_valtype(ptr, type->params()[i].get());
- }
- encode_size32(ptr, out_arity);
- for (size_t i = 0; i < out_arity; ++i) {
- encode_valtype(ptr, type->results()[i].get());
- }
-
- *ptr++ = i::wasm::kImportSectionCode;
- *ptr++ = 5; // size
- *ptr++ = 1; // length
- *ptr++ = 0; // module length
- *ptr++ = 0; // name length
- *ptr++ = i::wasm::kExternalFunction;
- *ptr++ = 0; // type index
-
- *ptr++ = i::wasm::kExportSectionCode;
- *ptr++ = 4; // size
- *ptr++ = 1; // length
- *ptr++ = 0; // name length
- *ptr++ = i::wasm::kExternalFunction;
- *ptr++ = 0; // func index
-
- assert(ptr - binary.get() == static_cast<ptrdiff_t>(size));
- return binary;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// Decoding
-
-// Numbers
-
-auto u32(const byte_t*& pos) -> uint32_t {
- uint32_t n = 0;
- uint32_t shift = 0;
- byte_t b;
- do {
- b = *pos++;
- n += (b & 0x7f) << shift;
- shift += 7;
- } while ((b & 0x80) != 0);
- return n;
-}
+namespace {
-auto u64(const byte_t*& pos) -> uint64_t {
+auto ReadLebU64(const byte_t** pos) -> uint64_t {
uint64_t n = 0;
uint64_t shift = 0;
byte_t b;
do {
- b = *pos++;
+ b = **pos;
+ (*pos)++;
n += (b & 0x7f) << shift;
shift += 7;
} while ((b & 0x80) != 0);
return n;
}
-void u32_skip(const byte_t*& pos) { bin::u32(pos); }
-
-// Names
-
-auto name(const byte_t*& pos) -> Name {
- auto size = bin::u32(pos);
- auto start = pos;
- auto name = Name::make_uninitialized(size);
- std::memcpy(name.get(), start, size);
- pos += size;
- return name;
-}
-
-// Types
-
-auto valtype(const byte_t*& pos) -> own<wasm::ValType*> {
- switch (*pos++) {
- case i::wasm::kLocalI32:
- return ValType::make(I32);
- case i::wasm::kLocalI64:
- return ValType::make(I64);
- case i::wasm::kLocalF32:
- return ValType::make(F32);
- case i::wasm::kLocalF64:
- return ValType::make(F64);
- case i::wasm::kLocalAnyFunc:
- return ValType::make(FUNCREF);
- case i::wasm::kLocalAnyRef:
- return ValType::make(ANYREF);
+ValKind V8ValueTypeToWasm(i::wasm::ValueType v8_valtype) {
+ switch (v8_valtype) {
+ case i::wasm::kWasmI32:
+ return I32;
+ case i::wasm::kWasmI64:
+ return I64;
+ case i::wasm::kWasmF32:
+ return F32;
+ case i::wasm::kWasmF64:
+ return F64;
+ case i::wasm::kWasmFuncRef:
+ return FUNCREF;
+ case i::wasm::kWasmAnyRef:
+ return ANYREF;
default:
// TODO(wasm+): support new value types
UNREACHABLE();
}
- return {};
-}
-
-auto mutability(const byte_t*& pos) -> Mutability {
- return *pos++ ? VAR : CONST;
-}
-
-auto limits(const byte_t*& pos) -> Limits {
- auto tag = *pos++;
- auto min = bin::u32(pos);
- if ((tag & 0x01) == 0) {
- return Limits(min);
- } else {
- auto max = bin::u32(pos);
- return Limits(min, max);
- }
-}
-
-auto stacktype(const byte_t*& pos) -> vec<ValType*> {
- size_t size = bin::u32(pos);
- auto v = vec<ValType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) v[i] = bin::valtype(pos);
- return v;
-}
-
-auto functype(const byte_t*& pos) -> own<FuncType*> {
- assert(*pos == i::wasm::kWasmFunctionTypeCode);
- ++pos;
- auto params = bin::stacktype(pos);
- auto results = bin::stacktype(pos);
- return FuncType::make(std::move(params), std::move(results));
-}
-
-auto globaltype(const byte_t*& pos) -> own<GlobalType*> {
- auto content = bin::valtype(pos);
- auto mutability = bin::mutability(pos);
- return GlobalType::make(std::move(content), mutability);
-}
-
-auto tabletype(const byte_t*& pos) -> own<TableType*> {
- auto elem = bin::valtype(pos);
- auto limits = bin::limits(pos);
- return TableType::make(std::move(elem), limits);
}
-auto memorytype(const byte_t*& pos) -> own<MemoryType*> {
- auto limits = bin::limits(pos);
- return MemoryType::make(limits);
-}
-
-// Expressions
-
-void expr_skip(const byte_t*& pos) {
- switch (*pos++) {
- case i::wasm::kExprI32Const:
- case i::wasm::kExprI64Const:
- case i::wasm::kExprGetGlobal: {
- bin::u32_skip(pos);
- } break;
- case i::wasm::kExprF32Const: {
- pos += 4;
- } break;
- case i::wasm::kExprF64Const: {
- pos += 8;
- } break;
- default: {
- // TODO(wasm+): support new expression forms
+i::wasm::ValueType WasmValKindToV8(ValKind kind) {
+ switch (kind) {
+ case I32:
+ return i::wasm::kWasmI32;
+ case I64:
+ return i::wasm::kWasmI64;
+ case F32:
+ return i::wasm::kWasmF32;
+ case F64:
+ return i::wasm::kWasmF64;
+ case FUNCREF:
+ return i::wasm::kWasmFuncRef;
+ case ANYREF:
+ return i::wasm::kWasmAnyRef;
+ default:
+ // TODO(wasm+): support new value types
UNREACHABLE();
- }
}
- ++pos; // end
-}
-
-// Sections
-
-auto section(const vec<const byte_t>& binary, i::wasm::SectionCode sec)
- -> const byte_t* {
- const byte_t* end = binary.get() + binary.size();
- const byte_t* pos = binary.get() + 8; // skip header
- while (pos < end && *pos++ != sec) {
- auto size = bin::u32(pos);
- pos += size;
- }
- if (pos == end) return nullptr;
- bin::u32_skip(pos);
- return pos;
-}
-
-// Only for asserts/DCHECKs.
-auto section_end(const vec<const byte_t>& binary, i::wasm::SectionCode sec)
- -> const byte_t* {
- const byte_t* end = binary.get() + binary.size();
- const byte_t* pos = binary.get() + 8; // skip header
- while (pos < end && *pos != sec) {
- ++pos;
- auto size = bin::u32(pos);
- pos += size;
- }
- if (pos == end) return nullptr;
- ++pos;
- auto size = bin::u32(pos);
- return pos + size;
-}
-
-// Type section
-
-auto types(const vec<const byte_t>& binary) -> vec<FuncType*> {
- auto pos = bin::section(binary, i::wasm::kTypeSectionCode);
- if (pos == nullptr) return vec<FuncType*>::make();
- size_t size = bin::u32(pos);
- // TODO(wasm+): support new deftypes
- auto v = vec<FuncType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- v[i] = bin::functype(pos);
- }
- assert(pos == bin::section_end(binary, i::wasm::kTypeSectionCode));
- return v;
-}
-
-// Import section
-
-auto imports(const vec<const byte_t>& binary, const vec<FuncType*>& types)
- -> vec<ImportType*> {
- auto pos = bin::section(binary, i::wasm::kImportSectionCode);
- if (pos == nullptr) return vec<ImportType*>::make();
- size_t size = bin::u32(pos);
- auto v = vec<ImportType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- auto module = bin::name(pos);
- auto name = bin::name(pos);
- own<ExternType*> type;
- switch (*pos++) {
- case i::wasm::kExternalFunction:
- type = types[bin::u32(pos)]->copy();
- break;
- case i::wasm::kExternalTable:
- type = bin::tabletype(pos);
- break;
- case i::wasm::kExternalMemory:
- type = bin::memorytype(pos);
- break;
- case i::wasm::kExternalGlobal:
- type = bin::globaltype(pos);
- break;
- default:
- UNREACHABLE();
- }
- v[i] =
- ImportType::make(std::move(module), std::move(name), std::move(type));
- }
- assert(pos == bin::section_end(binary, i::wasm::kImportSectionCode));
- return v;
-}
-
-auto count(const vec<ImportType*>& imports, ExternKind kind) -> uint32_t {
- uint32_t n = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- if (imports[i]->type()->kind() == kind) ++n;
- }
- return n;
}
-// Function section
-
-auto funcs(const vec<const byte_t>& binary, const vec<ImportType*>& imports,
- const vec<FuncType*>& types) -> vec<FuncType*> {
- auto pos = bin::section(binary, i::wasm::kFunctionSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v =
- vec<FuncType*>::make_uninitialized(size + count(imports, EXTERN_FUNC));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_FUNC) {
- v[j++] = et->func()->copy();
- }
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = types[bin::u32(pos)]->copy();
- }
- assert(pos == bin::section_end(binary, i::wasm::kFunctionSectionCode));
- }
- return v;
+Name GetNameFromWireBytes(const i::wasm::WireBytesRef& ref,
+ const i::Vector<const uint8_t>& wire_bytes) {
+ DCHECK_LE(ref.offset(), wire_bytes.length());
+ DCHECK_LE(ref.end_offset(), wire_bytes.length());
+ Name name = Name::make_uninitialized(ref.length());
+ std::memcpy(name.get(), wire_bytes.begin() + ref.offset(), ref.length());
+ return name;
}
-// Global section
-
-auto globals(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<GlobalType*> {
- auto pos = bin::section(binary, i::wasm::kGlobalSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v = vec<GlobalType*>::make_uninitialized(size +
- count(imports, EXTERN_GLOBAL));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_GLOBAL) {
- v[j++] = et->global()->copy();
- }
+own<FuncType*> FunctionSigToFuncType(const i::wasm::FunctionSig* sig) {
+ size_t param_count = sig->parameter_count();
+ vec<ValType*> params = vec<ValType*>::make_uninitialized(param_count);
+ for (size_t i = 0; i < param_count; i++) {
+ params[i] = ValType::make(V8ValueTypeToWasm(sig->GetParam(i)));
}
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::globaltype(pos);
- expr_skip(pos);
- }
- assert(pos == bin::section_end(binary, i::wasm::kGlobalSectionCode));
+ size_t return_count = sig->return_count();
+ vec<ValType*> results = vec<ValType*>::make_uninitialized(return_count);
+ for (size_t i = 0; i < return_count; i++) {
+ results[i] = ValType::make(V8ValueTypeToWasm(sig->GetReturn(i)));
}
- return v;
+ return FuncType::make(std::move(params), std::move(results));
}
-// Table section
-
-auto tables(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<TableType*> {
- auto pos = bin::section(binary, i::wasm::kTableSectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v =
- vec<TableType*>::make_uninitialized(size + count(imports, EXTERN_TABLE));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_TABLE) {
- v[j++] = et->table()->copy();
- }
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::tabletype(pos);
+own<ExternType*> GetImportExportType(const i::wasm::WasmModule* module,
+ const i::wasm::ImportExportKindCode kind,
+ const uint32_t index) {
+ switch (kind) {
+ case i::wasm::kExternalFunction: {
+ return FunctionSigToFuncType(module->functions[index].sig);
}
- assert(pos == bin::section_end(binary, i::wasm::kTableSectionCode));
- }
- return v;
-}
-
-// Memory section
-
-auto memories(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
- -> vec<MemoryType*> {
- auto pos = bin::section(binary, i::wasm::kMemorySectionCode);
- size_t size = pos != nullptr ? bin::u32(pos) : 0;
- auto v = vec<MemoryType*>::make_uninitialized(size +
- count(imports, EXTERN_MEMORY));
- size_t j = 0;
- for (uint32_t i = 0; i < imports.size(); ++i) {
- auto et = imports[i]->type();
- if (et->kind() == EXTERN_MEMORY) {
- v[j++] = et->memory()->copy();
+ case i::wasm::kExternalTable: {
+ const i::wasm::WasmTable& table = module->tables[index];
+ own<ValType*> elem = ValType::make(V8ValueTypeToWasm(table.type));
+ Limits limits(table.initial_size,
+ table.has_maximum_size ? table.maximum_size : -1);
+ return TableType::make(std::move(elem), limits);
}
- }
- if (pos != nullptr) {
- for (; j < v.size(); ++j) {
- v[j] = bin::memorytype(pos);
+ case i::wasm::kExternalMemory: {
+ DCHECK(module->has_memory);
+ Limits limits(module->initial_pages,
+ module->has_maximum_pages ? module->maximum_pages : -1);
+ return MemoryType::make(limits);
}
- assert(pos == bin::section_end(binary, i::wasm::kMemorySectionCode));
- }
- return v;
-}
-
-// Export section
-
-auto exports(const vec<const byte_t>& binary, const vec<FuncType*>& funcs,
- const vec<GlobalType*>& globals, const vec<TableType*>& tables,
- const vec<MemoryType*>& memories) -> vec<ExportType*> {
- auto pos = bin::section(binary, i::wasm::kExportSectionCode);
- if (pos == nullptr) return vec<ExportType*>::make();
- size_t size = bin::u32(pos);
- auto exports = vec<ExportType*>::make_uninitialized(size);
- for (uint32_t i = 0; i < size; ++i) {
- auto name = bin::name(pos);
- auto tag = *pos++;
- auto index = bin::u32(pos);
- own<ExternType*> type;
- switch (tag) {
- case i::wasm::kExternalFunction:
- type = funcs[index]->copy();
- break;
- case i::wasm::kExternalTable:
- type = tables[index]->copy();
- break;
- case i::wasm::kExternalMemory:
- type = memories[index]->copy();
- break;
- case i::wasm::kExternalGlobal:
- type = globals[index]->copy();
- break;
- default:
- UNREACHABLE();
+ case i::wasm::kExternalGlobal: {
+ const i::wasm::WasmGlobal& global = module->globals[index];
+ own<ValType*> content = ValType::make(V8ValueTypeToWasm(global.type));
+ Mutability mutability = global.mutability ? VAR : CONST;
+ return GlobalType::make(std::move(content), mutability);
}
- exports[i] = ExportType::make(std::move(name), std::move(type));
- }
- assert(pos == bin::section_end(binary, i::wasm::kExportSectionCode));
- return exports;
-}
-
-auto imports(const vec<const byte_t>& binary) -> vec<ImportType*> {
- return bin::imports(binary, bin::types(binary));
-}
-
-auto exports(const vec<const byte_t>& binary) -> vec<ExportType*> {
- auto types = bin::types(binary);
- auto imports = bin::imports(binary, types);
- auto funcs = bin::funcs(binary, imports, types);
- auto globals = bin::globals(binary, imports);
- auto tables = bin::tables(binary, imports);
- auto memories = bin::memories(binary, imports);
- return bin::exports(binary, funcs, globals, tables, memories);
-}
-
-} // namespace bin
-} // namespace wasm
-
-// BEGIN FILE wasm-v8-lowlevel.cc
-
-namespace v8 {
-namespace wasm {
-
-// Foreign pointers
-
-auto foreign_new(v8::Isolate* isolate, void* ptr) -> v8::Local<v8::Value> {
- auto foreign = v8::FromCData(reinterpret_cast<i::Isolate*>(isolate),
- reinterpret_cast<i::Address>(ptr));
- return v8::Utils::ToLocal(foreign);
-}
-
-auto foreign_get(v8::Local<v8::Value> val) -> void* {
- auto foreign = v8::Utils::OpenHandle(*val);
- if (!foreign->IsForeign()) return nullptr;
- auto addr = v8::ToCData<i::Address>(*foreign);
- return reinterpret_cast<void*>(addr);
-}
-
-// Types
-
-auto v8_valtype_to_wasm(i::wasm::ValueType v8_valtype) -> ::wasm::ValKind {
- switch (v8_valtype) {
- case i::wasm::kWasmI32:
- return ::wasm::I32;
- case i::wasm::kWasmI64:
- return ::wasm::I64;
- case i::wasm::kWasmF32:
- return ::wasm::F32;
- case i::wasm::kWasmF64:
- return ::wasm::F64;
- default:
- // TODO(wasm+): support new value types
- UNREACHABLE();
- }
-}
-
-i::wasm::ValueType wasm_valtype_to_v8(::wasm::ValKind type) {
- switch (type) {
- case ::wasm::I32:
- return i::wasm::kWasmI32;
- case ::wasm::I64:
- return i::wasm::kWasmI64;
- case ::wasm::F32:
- return i::wasm::kWasmF32;
- case ::wasm::F64:
- return i::wasm::kWasmF64;
- default:
- // TODO(wasm+): support new value types
+ case i::wasm::kExternalException:
UNREACHABLE();
+ return {};
}
}
-} // namespace wasm
-} // namespace v8
+} // namespace
/// BEGIN FILE wasm-v8.cc
-namespace wasm {
-
///////////////////////////////////////////////////////////////////////////////
// Auxiliaries
@@ -695,6 +249,7 @@ void Engine::operator delete(void* p) { ::operator delete(p); }
auto Engine::make(own<Config*>&& config) -> own<Engine*> {
i::FLAG_expose_gc = true;
+ i::FLAG_experimental_wasm_anyref = true;
i::FLAG_experimental_wasm_bigint = true;
i::FLAG_experimental_wasm_mv = true;
auto engine = new (std::nothrow) EngineImpl;
@@ -714,7 +269,6 @@ StoreImpl::~StoreImpl() {
v8::kGCCallbackFlagForced);
#endif
context()->Exit();
- isolate_->Exit();
isolate_->Dispose();
delete create_params_.array_buffer_allocator;
}
@@ -739,7 +293,6 @@ auto Store::make(Engine*) -> own<Store*> {
if (!isolate) return own<Store*>();
{
- v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
// Create context.
@@ -750,8 +303,10 @@ auto Store::make(Engine*) -> own<Store*> {
store->isolate_ = isolate;
store->context_ = v8::Eternal<v8::Context>(isolate, context);
}
-
- store->isolate()->Enter();
+ // We intentionally do not call isolate->Enter() here, because that would
+ // prevent embedders from using stores with overlapping but non-nested
+ // lifetimes. The consequence is that Isolate::Current() is dysfunctional
+ // and hence must not be called by anything reachable via this file.
store->context()->Enter();
isolate->SetData(0, store.get());
@@ -831,7 +386,8 @@ struct FuncTypeImpl : ExternTypeImpl {
vec<ValType*> params;
vec<ValType*> results;
- FuncTypeImpl(vec<ValType*>& params, vec<ValType*>& results)
+ FuncTypeImpl(vec<ValType*>& params, // NOLINT(runtime/references)
+ vec<ValType*>& results) // NOLINT(runtime/references)
: ExternTypeImpl(EXTERN_FUNC),
params(std::move(params)),
results(std::move(results)) {}
@@ -884,7 +440,8 @@ struct GlobalTypeImpl : ExternTypeImpl {
own<ValType*> content;
Mutability mutability;
- GlobalTypeImpl(own<ValType*>& content, Mutability mutability)
+ GlobalTypeImpl(own<ValType*>& content, // NOLINT(runtime/references)
+ Mutability mutability)
: ExternTypeImpl(EXTERN_GLOBAL),
content(std::move(content)),
mutability(mutability) {}
@@ -936,7 +493,8 @@ struct TableTypeImpl : ExternTypeImpl {
own<ValType*> element;
Limits limits;
- TableTypeImpl(own<ValType*>& element, Limits limits)
+ TableTypeImpl(own<ValType*>& element, // NOLINT(runtime/references)
+ Limits limits)
: ExternTypeImpl(EXTERN_TABLE),
element(std::move(element)),
limits(limits) {}
@@ -1028,7 +586,9 @@ struct ImportTypeImpl {
Name name;
own<ExternType*> type;
- ImportTypeImpl(Name& module, Name& name, own<ExternType*>& type)
+ ImportTypeImpl(Name& module, // NOLINT(runtime/references)
+ Name& name, // NOLINT(runtime/references)
+ own<ExternType*>& type) // NOLINT(runtime/references)
: module(std::move(module)),
name(std::move(name)),
type(std::move(type)) {}
@@ -1071,7 +631,8 @@ struct ExportTypeImpl {
Name name;
own<ExternType*> type;
- ExportTypeImpl(Name& name, own<ExternType*>& type)
+ ExportTypeImpl(Name& name, // NOLINT(runtime/references)
+ own<ExternType*>& type) // NOLINT(runtime/references)
: name(std::move(name)), type(std::move(type)) {}
~ExportTypeImpl() {}
@@ -1103,89 +664,14 @@ auto ExportType::type() const -> const ExternType* {
return impl(this)->type.get();
}
-///////////////////////////////////////////////////////////////////////////////
-// Conversions of values from and to V8 objects
-
-auto val_to_v8(StoreImpl* store, const Val& v) -> v8::Local<v8::Value> {
- auto isolate = store->isolate();
- switch (v.kind()) {
- case I32:
- return v8::Integer::NewFromUnsigned(isolate, v.i32());
- case I64:
- return v8::BigInt::New(isolate, v.i64());
- case F32:
- return v8::Number::New(isolate, v.f32());
- case F64:
- return v8::Number::New(isolate, v.f64());
- case ANYREF:
- case FUNCREF: {
- if (v.ref() == nullptr) {
- return v8::Null(isolate);
- } else {
- WASM_UNIMPLEMENTED("ref value");
- }
- }
- default:
- UNREACHABLE();
- }
-}
-
-own<Val> v8_to_val(i::Isolate* isolate, i::Handle<i::Object> value,
- ValKind kind) {
- switch (kind) {
- case I32:
- do {
- if (value->IsSmi()) return Val(i::Smi::ToInt(*value));
- if (value->IsHeapNumber()) {
- return Val(i::DoubleToInt32(i::HeapNumber::cast(*value).value()));
- }
- value = i::Object::ToInt32(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case I64:
- if (value->IsBigInt()) return Val(i::BigInt::cast(*value).AsInt64());
- return Val(
- i::BigInt::FromObject(isolate, value).ToHandleChecked()->AsInt64());
- case F32:
- do {
- if (value->IsSmi()) {
- return Val(static_cast<float32_t>(i::Smi::ToInt(*value)));
- }
- if (value->IsHeapNumber()) {
- return Val(i::DoubleToFloat32(i::HeapNumber::cast(*value).value()));
- }
- value = i::Object::ToNumber(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case F64:
- do {
- if (value->IsSmi()) {
- return Val(static_cast<float64_t>(i::Smi::ToInt(*value)));
- }
- if (value->IsHeapNumber()) {
- return Val(i::HeapNumber::cast(*value).value());
- }
- value = i::Object::ToNumber(isolate, value).ToHandleChecked();
- // This will loop back at most once.
- } while (true);
- UNREACHABLE();
- case ANYREF:
- case FUNCREF: {
- if (value->IsNull(isolate)) {
- return Val(nullptr);
- } else {
- WASM_UNIMPLEMENTED("ref value");
- }
- }
- }
-}
-
i::Handle<i::String> VecToString(i::Isolate* isolate,
const vec<byte_t>& chars) {
+ size_t length = chars.size();
+ // Some, but not all, {chars} vectors we get here are null-terminated,
+ // so let's be robust to that.
+ if (length > 0 && chars[length - 1] == 0) length--;
return isolate->factory()
- ->NewStringFromUtf8({chars.get(), chars.size()})
+ ->NewStringFromUtf8({chars.get(), length})
.ToHandleChecked();
}
@@ -1327,11 +813,12 @@ Foreign::~Foreign() {}
auto Foreign::copy() const -> own<Foreign*> { return impl(this)->copy(); }
auto Foreign::make(Store* store_abs) -> own<Foreign*> {
- auto store = impl(store_abs);
- auto isolate = store->i_isolate();
+ StoreImpl* store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
- auto obj = i::Handle<i::JSReceiver>();
+ i::Handle<i::JSObject> obj =
+ isolate->factory()->NewJSObject(isolate->object_function());
return implement<Foreign>::type::make(store, obj);
}
@@ -1379,22 +866,37 @@ auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module*> {
}
auto Module::imports() const -> vec<ImportType*> {
- i::Vector<const uint8_t> wire_bytes =
- impl(this)->v8_object()->native_module()->wire_bytes();
- vec<const byte_t> binary = vec<const byte_t>::adopt(
- wire_bytes.size(), reinterpret_cast<const byte_t*>(wire_bytes.begin()));
- auto imports = wasm::bin::imports(binary);
- binary.release();
+ const i::wasm::NativeModule* native_module =
+ impl(this)->v8_object()->native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ const std::vector<i::wasm::WasmImport>& import_table = module->import_table;
+ size_t size = import_table.size();
+ vec<ImportType*> imports = vec<ImportType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; i++) {
+ const i::wasm::WasmImport& imp = import_table[i];
+ Name module_name = GetNameFromWireBytes(imp.module_name, wire_bytes);
+ Name name = GetNameFromWireBytes(imp.field_name, wire_bytes);
+ own<ExternType*> type = GetImportExportType(module, imp.kind, imp.index);
+ imports[i] = ImportType::make(std::move(module_name), std::move(name),
+ std::move(type));
+ }
return imports;
}
vec<ExportType*> ExportsImpl(i::Handle<i::WasmModuleObject> module_obj) {
- i::Vector<const uint8_t> wire_bytes =
- module_obj->native_module()->wire_bytes();
- vec<const byte_t> binary = vec<const byte_t>::adopt(
- wire_bytes.size(), reinterpret_cast<const byte_t*>(wire_bytes.begin()));
- auto exports = wasm::bin::exports(binary);
- binary.release();
+ const i::wasm::NativeModule* native_module = module_obj->native_module();
+ const i::wasm::WasmModule* module = native_module->module();
+ const i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ const std::vector<i::wasm::WasmExport>& export_table = module->export_table;
+ size_t size = export_table.size();
+ vec<ExportType*> exports = vec<ExportType*>::make_uninitialized(size);
+ for (uint32_t i = 0; i < size; i++) {
+ const i::wasm::WasmExport& exp = export_table[i];
+ Name name = GetNameFromWireBytes(exp.name, wire_bytes);
+ own<ExternType*> type = GetImportExportType(module, exp.kind, exp.index);
+ exports[i] = ExportType::make(std::move(name), std::move(type));
+ }
return exports;
}
@@ -1430,7 +932,7 @@ auto Module::deserialize(Store* store_abs, const vec<byte_t>& serialized)
i::Isolate* isolate = store->i_isolate();
i::HandleScope handle_scope(isolate);
const byte_t* ptr = serialized.get();
- uint64_t binary_size = wasm::bin::u64(ptr);
+ uint64_t binary_size = ReadLebU64(&ptr);
ptrdiff_t size_size = ptr - serialized.get();
size_t serial_size = serialized.size() - size_size - binary_size;
i::Handle<i::WasmModuleObject> module_obj;
@@ -1597,16 +1099,14 @@ class SignatureHelper : public i::AllStatic {
int index = 0;
// TODO(jkummerow): Consider making vec<> range-based for-iterable.
for (size_t i = 0; i < type->results().size(); i++) {
- sig->set(index++,
- v8::wasm::wasm_valtype_to_v8(type->results()[i]->kind()));
+ sig->set(index++, WasmValKindToV8(type->results()[i]->kind()));
}
// {sig->set} needs to take the address of its second parameter,
// so we can't pass in the static const kMarker directly.
i::wasm::ValueType marker = kMarker;
sig->set(index++, marker);
for (size_t i = 0; i < type->params().size(); i++) {
- sig->set(index++,
- v8::wasm::wasm_valtype_to_v8(type->params()[i]->kind()));
+ sig->set(index++, WasmValKindToV8(type->params()[i]->kind()));
}
return sig;
}
@@ -1619,11 +1119,11 @@ class SignatureHelper : public i::AllStatic {
int i = 0;
for (; i < result_arity; ++i) {
- results[i] = ValType::make(v8::wasm::v8_valtype_to_wasm(sig.get(i)));
+ results[i] = ValType::make(V8ValueTypeToWasm(sig.get(i)));
}
i++; // Skip marker.
for (int p = 0; i < sig.length(); ++i, ++p) {
- params[p] = ValType::make(v8::wasm::v8_valtype_to_wasm(sig.get(i)));
+ params[p] = ValType::make(V8ValueTypeToWasm(sig.get(i)));
}
return FuncType::make(std::move(params), std::move(results));
}
@@ -1684,22 +1184,8 @@ auto Func::type() const -> own<FuncType*> {
DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*func));
i::Handle<i::WasmExportedFunction> function =
i::Handle<i::WasmExportedFunction>::cast(func);
- i::wasm::FunctionSig* sig =
- function->instance().module()->functions[function->function_index()].sig;
- uint32_t param_arity = static_cast<uint32_t>(sig->parameter_count());
- uint32_t result_arity = static_cast<uint32_t>(sig->return_count());
- auto params = vec<ValType*>::make_uninitialized(param_arity);
- auto results = vec<ValType*>::make_uninitialized(result_arity);
-
- for (size_t i = 0; i < params.size(); ++i) {
- auto kind = v8::wasm::v8_valtype_to_wasm(sig->GetParam(i));
- params[i] = ValType::make(kind);
- }
- for (size_t i = 0; i < results.size(); ++i) {
- auto kind = v8::wasm::v8_valtype_to_wasm(sig->GetReturn(i));
- results[i] = ValType::make(kind);
- }
- return FuncType::make(std::move(params), std::move(results));
+ return FunctionSigToFuncType(
+ function->instance().module()->functions[function->function_index()].sig);
}
auto Func::param_arity() const -> size_t {
@@ -1728,74 +1214,183 @@ auto Func::result_arity() const -> size_t {
return sig->return_count();
}
+namespace {
+
+void PrepareFunctionData(i::Isolate* isolate,
+ i::Handle<i::WasmExportedFunctionData> function_data,
+ i::wasm::FunctionSig* sig) {
+ // If the data is already populated, return immediately.
+ if (!function_data->c_wrapper_code().IsSmi()) return;
+ // Compile wrapper code.
+ i::Handle<i::Code> wrapper_code =
+ i::compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
+ function_data->set_c_wrapper_code(*wrapper_code);
+ // Compute packed args size.
+ function_data->set_packed_args_size(
+ i::wasm::CWasmArgumentsPacker::TotalSize(sig));
+ // Get call target (function table offset). This is an Address, we store
+ // it as a pseudo-Smi by shifting it by one bit, so the GC leaves it alone.
+ i::Address call_target =
+ function_data->instance().GetCallTarget(function_data->function_index());
+ i::Smi smi_target((call_target << i::kSmiTagSize) | i::kSmiTag);
+ function_data->set_wasm_call_target(smi_target);
+}
+
+void PushArgs(i::wasm::FunctionSig* sig, const Val args[],
+ i::wasm::CWasmArgumentsPacker* packer) {
+ for (size_t i = 0; i < sig->parameter_count(); i++) {
+ i::wasm::ValueType type = sig->GetParam(i);
+ switch (type) {
+ case i::wasm::kWasmI32:
+ packer->Push(args[i].i32());
+ break;
+ case i::wasm::kWasmI64:
+ packer->Push(args[i].i64());
+ break;
+ case i::wasm::kWasmF32:
+ packer->Push(args[i].f32());
+ break;
+ case i::wasm::kWasmF64:
+ packer->Push(args[i].f64());
+ break;
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmFuncRef:
+ packer->Push(impl(args[i].ref())->v8_object()->ptr());
+ break;
+ case i::wasm::kWasmExnRef:
+ // TODO(jkummerow): Implement these.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
+
+void PopArgs(i::wasm::FunctionSig* sig, Val results[],
+ i::wasm::CWasmArgumentsPacker* packer, StoreImpl* store) {
+ packer->Reset();
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ i::wasm::ValueType type = sig->GetReturn(i);
+ switch (type) {
+ case i::wasm::kWasmI32:
+ results[i] = Val(packer->Pop<int32_t>());
+ break;
+ case i::wasm::kWasmI64:
+ results[i] = Val(packer->Pop<int64_t>());
+ break;
+ case i::wasm::kWasmF32:
+ results[i] = Val(packer->Pop<float>());
+ break;
+ case i::wasm::kWasmF64:
+ results[i] = Val(packer->Pop<double>());
+ break;
+ case i::wasm::kWasmAnyRef:
+ case i::wasm::kWasmFuncRef: {
+ i::Address raw = packer->Pop<i::Address>();
+ if (raw == i::kNullAddress) {
+ results[i] = Val(nullptr);
+ } else {
+ i::JSReceiver raw_obj = i::JSReceiver::cast(i::Object(raw));
+ i::Handle<i::JSReceiver> obj(raw_obj, store->i_isolate());
+ results[i] = Val(implement<Ref>::type::make(store, obj));
+ }
+ break;
+ }
+ case i::wasm::kWasmExnRef:
+ // TODO(jkummerow): Implement these.
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ }
+}
+
+own<Trap*> CallWasmCapiFunction(i::WasmCapiFunctionData data, const Val args[],
+ Val results[]) {
+ FuncData* func_data = reinterpret_cast<FuncData*>(data.embedder_data());
+ if (func_data->kind == FuncData::kCallback) {
+ return (func_data->callback)(args, results);
+ }
+ DCHECK(func_data->kind == FuncData::kCallbackWithEnv);
+ return (func_data->callback_with_env)(func_data->env, args, results);
+}
+
+} // namespace
+
auto Func::call(const Val args[], Val results[]) const -> own<Trap*> {
auto func = impl(this);
auto store = func->store();
- auto isolate = store->isolate();
- auto i_isolate = store->i_isolate();
- v8::HandleScope handle_scope(isolate);
-
- int num_params;
- int num_results;
- ValKind result_kind;
- i::Handle<i::JSFunction> v8_func = func->v8_object();
- if (i::WasmExportedFunction::IsWasmExportedFunction(*v8_func)) {
- i::WasmExportedFunction wef = i::WasmExportedFunction::cast(*v8_func);
- i::wasm::FunctionSig* sig =
- wef.instance().module()->functions[wef.function_index()].sig;
- num_params = static_cast<int>(sig->parameter_count());
- num_results = static_cast<int>(sig->return_count());
- if (num_results > 0) {
- result_kind = v8::wasm::v8_valtype_to_wasm(sig->GetReturn(0));
- }
-#if DEBUG
- for (int i = 0; i < num_params; i++) {
- DCHECK_EQ(args[i].kind(), v8::wasm::v8_valtype_to_wasm(sig->GetParam(i)));
+ auto isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
+ i::Object raw_function_data = func->v8_object()->shared().function_data();
+
+ // WasmCapiFunctions can be called directly.
+ if (raw_function_data.IsWasmCapiFunctionData()) {
+ return CallWasmCapiFunction(
+ i::WasmCapiFunctionData::cast(raw_function_data), args, results);
+ }
+
+ DCHECK(raw_function_data.IsWasmExportedFunctionData());
+ i::Handle<i::WasmExportedFunctionData> function_data(
+ i::WasmExportedFunctionData::cast(raw_function_data), isolate);
+ i::Handle<i::WasmInstanceObject> instance(function_data->instance(), isolate);
+ int function_index = function_data->function_index();
+ // Caching {sig} would give a ~10% reduction in overhead.
+ i::wasm::FunctionSig* sig = instance->module()->functions[function_index].sig;
+ PrepareFunctionData(isolate, function_data, sig);
+ i::Handle<i::Code> wrapper_code = i::Handle<i::Code>(
+ i::Code::cast(function_data->c_wrapper_code()), isolate);
+ i::Address call_target =
+ function_data->wasm_call_target().ptr() >> i::kSmiTagSize;
+
+ i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
+ PushArgs(sig, args, &packer);
+
+ i::Handle<i::Object> object_ref = instance;
+ if (function_index <
+ static_cast<int>(instance->module()->num_imported_functions)) {
+ object_ref = i::handle(
+ instance->imported_function_refs().get(function_index), isolate);
+ if (object_ref->IsTuple2()) {
+ i::JSFunction jsfunc =
+ i::JSFunction::cast(i::Tuple2::cast(*object_ref).value2());
+ i::Object data = jsfunc.shared().function_data();
+ if (data.IsWasmCapiFunctionData()) {
+ return CallWasmCapiFunction(i::WasmCapiFunctionData::cast(data), args,
+ results);
+ }
+ // TODO(jkummerow): Imported and then re-exported JavaScript functions
+ // are not supported yet. If we support C-API + JavaScript, we'll need
+ // to call those here.
+ UNIMPLEMENTED();
+ } else {
+ // A WasmFunction from another module.
+ DCHECK(object_ref->IsWasmInstanceObject());
}
-#endif
- } else {
- DCHECK(i::WasmCapiFunction::IsWasmCapiFunction(*v8_func));
- UNIMPLEMENTED();
- }
- // TODO(rossberg): cache v8_args array per thread.
- auto v8_args = std::unique_ptr<i::Handle<i::Object>[]>(
- new (std::nothrow) i::Handle<i::Object>[num_params]);
- for (int i = 0; i < num_params; ++i) {
- v8_args[i] = v8::Utils::OpenHandle(*val_to_v8(store, args[i]));
- }
-
- // TODO(jkummerow): Use Execution::TryCall instead of manual TryCatch.
- v8::TryCatch handler(isolate);
- i::MaybeHandle<i::Object> maybe_val = i::Execution::Call(
- i_isolate, func->v8_object(), i_isolate->factory()->undefined_value(),
- num_params, v8_args.get());
-
- if (handler.HasCaught()) {
- i_isolate->OptionalRescheduleException(true);
- i::Handle<i::Object> exception =
- v8::Utils::OpenHandle(*handler.Exception());
+ }
+
+ i::Execution::CallWasm(isolate, wrapper_code, call_target, object_ref,
+ packer.argv());
+
+ if (isolate->has_pending_exception()) {
+ i::Handle<i::Object> exception(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
if (!exception->IsJSReceiver()) {
i::MaybeHandle<i::String> maybe_string =
- i::Object::ToString(i_isolate, exception);
+ i::Object::ToString(isolate, exception);
i::Handle<i::String> string = maybe_string.is_null()
- ? i_isolate->factory()->empty_string()
+ ? isolate->factory()->empty_string()
: maybe_string.ToHandleChecked();
exception =
- i_isolate->factory()->NewError(i_isolate->error_function(), string);
+ isolate->factory()->NewError(isolate->error_function(), string);
}
return implement<Trap>::type::make(
store, i::Handle<i::JSReceiver>::cast(exception));
}
- auto val = maybe_val.ToHandleChecked();
- if (num_results == 0) {
- assert(val->IsUndefined(i_isolate));
- } else if (num_results == 1) {
- assert(!val->IsUndefined(i_isolate));
- new (&results[0]) Val(v8_to_val(i_isolate, val, result_kind));
- } else {
- WASM_UNIMPLEMENTED("multiple results");
- }
+ PopArgs(sig, results, &packer, store);
return nullptr;
}
@@ -1814,24 +1409,24 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
for (int i = 0; i < num_param_types; ++i) {
switch (param_types[i]->kind()) {
case I32:
- params[i] = Val(i::ReadUnalignedValue<int32_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<int32_t>(p));
p += 4;
break;
case I64:
- params[i] = Val(i::ReadUnalignedValue<int64_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<int64_t>(p));
p += 8;
break;
case F32:
- params[i] = Val(i::ReadUnalignedValue<float32_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<float32_t>(p));
p += 4;
break;
case F64:
- params[i] = Val(i::ReadUnalignedValue<float64_t>(p));
+ params[i] = Val(v8::base::ReadUnalignedValue<float64_t>(p));
p += 8;
break;
case ANYREF:
case FUNCREF: {
- i::Address raw = i::ReadUnalignedValue<i::Address>(p);
+ i::Address raw = v8::base::ReadUnalignedValue<i::Address>(p);
p += sizeof(raw);
if (raw == i::kNullAddress) {
params[i] = Val(nullptr);
@@ -1864,27 +1459,28 @@ i::Address FuncData::v8_callback(void* data, i::Address argv) {
for (int i = 0; i < num_result_types; ++i) {
switch (result_types[i]->kind()) {
case I32:
- i::WriteUnalignedValue(p, results[i].i32());
+ v8::base::WriteUnalignedValue(p, results[i].i32());
p += 4;
break;
case I64:
- i::WriteUnalignedValue(p, results[i].i64());
+ v8::base::WriteUnalignedValue(p, results[i].i64());
p += 8;
break;
case F32:
- i::WriteUnalignedValue(p, results[i].f32());
+ v8::base::WriteUnalignedValue(p, results[i].f32());
p += 4;
break;
case F64:
- i::WriteUnalignedValue(p, results[i].f64());
+ v8::base::WriteUnalignedValue(p, results[i].f64());
p += 8;
break;
case ANYREF:
case FUNCREF: {
if (results[i].ref() == nullptr) {
- i::WriteUnalignedValue(p, i::kNullAddress);
+ v8::base::WriteUnalignedValue(p, i::kNullAddress);
} else {
- i::WriteUnalignedValue(p, impl(results[i].ref())->v8_object()->ptr());
+ v8::base::WriteUnalignedValue(
+ p, impl(results[i].ref())->v8_object()->ptr());
}
p += sizeof(i::Address);
break;
@@ -1917,8 +1513,7 @@ auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
DCHECK_EQ(type->content()->kind(), val.kind());
- i::wasm::ValueType i_type =
- v8::wasm::wasm_valtype_to_v8(type->content()->kind());
+ i::wasm::ValueType i_type = WasmValKindToV8(type->content()->kind());
bool is_mutable = (type->mutability() == VAR);
const int32_t offset = 0;
i::Handle<i::WasmGlobalObject> obj =
@@ -1935,7 +1530,7 @@ auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
auto Global::type() const -> own<GlobalType*> {
i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
- ValKind kind = v8::wasm::v8_valtype_to_wasm(v8_global->type());
+ ValKind kind = V8ValueTypeToWasm(v8_global->type());
Mutability mutability = v8_global->is_mutable() ? VAR : CONST;
return GlobalType::make(ValType::make(kind), mutability);
}
@@ -1951,9 +1546,16 @@ auto Global::get() const -> Val {
return Val(v8_global->GetF32());
case F64:
return Val(v8_global->GetF64());
- case ANYREF:
- case FUNCREF:
- WASM_UNIMPLEMENTED("globals of reference type");
+ case ANYREF: {
+ i::Handle<i::JSReceiver> obj =
+ i::Handle<i::JSReceiver>::cast(v8_global->GetRef());
+ return Val(RefImpl<Ref, i::JSReceiver>::make(impl(this)->store(), obj));
+ }
+ case FUNCREF: {
+ i::Handle<i::JSFunction> obj =
+ i::Handle<i::JSFunction>::cast(v8_global->GetRef());
+ return Val(implement<Func>::type::make(impl(this)->store(), obj));
+ }
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -1972,8 +1574,14 @@ void Global::set(const Val& val) {
case F64:
return v8_global->SetF64(val.f64());
case ANYREF:
- case FUNCREF:
- WASM_UNIMPLEMENTED("globals of reference type");
+ return v8_global->SetAnyRef(impl(val.ref())->v8_object());
+ case FUNCREF: {
+ bool result = v8_global->SetFuncRef(impl(this)->store()->i_isolate(),
+ impl(val.ref())->v8_object());
+ DCHECK(result);
+ USE(result);
+ return;
+ }
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -2002,7 +1610,7 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
i::wasm::ValueType i_type;
switch (type->element()->kind()) {
case FUNCREF:
- i_type = i::wasm::kWasmAnyFunc;
+ i_type = i::wasm::kWasmFuncRef;
break;
case ANYREF:
if (enabled_features.anyref) {
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 440267bd25..abb7b8ee86 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -10,8 +10,8 @@
#include <memory>
#include "src/base/compiler-specific.h"
+#include "src/base/memory.h"
#include "src/codegen/signature.h"
-#include "src/common/v8memory.h"
#include "src/flags/flags.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -299,7 +299,7 @@ class Decoder {
} else if (!validate_size(pc, sizeof(IntType), msg)) {
return IntType{0};
}
- return ReadLittleEndianValue<IntType>(reinterpret_cast<Address>(pc));
+ return base::ReadLittleEndianValue<IntType>(reinterpret_cast<Address>(pc));
}
template <typename IntType>
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index eb895a25b3..9f1ca23c62 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -13,6 +13,7 @@
#include "src/utils/bit-vector.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/value-type.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
@@ -64,7 +65,7 @@ struct WasmException;
#define ATOMIC_OP_LIST(V) \
V(AtomicNotify, Uint32) \
V(I32AtomicWait, Uint32) \
- V(I64AtomicWait, Uint32) \
+ V(I64AtomicWait, Uint64) \
V(I32AtomicLoad, Uint32) \
V(I64AtomicLoad, Uint64) \
V(I32AtomicLoad8U, Uint8) \
@@ -229,17 +230,17 @@ inline bool decode_local_type(uint8_t val, ValueType* result) {
case kLocalS128:
*result = kWasmS128;
return true;
- case kLocalAnyFunc:
- *result = kWasmAnyFunc;
+ case kLocalFuncRef:
+ *result = kWasmFuncRef;
return true;
case kLocalAnyRef:
*result = kWasmAnyRef;
return true;
- case kLocalExceptRef:
- *result = kWasmExceptRef;
+ case kLocalExnRef:
+ *result = kWasmExnRef;
return true;
default:
- *result = kWasmVar;
+ *result = kWasmBottom;
return false;
}
}
@@ -296,20 +297,20 @@ struct BlockTypeImmediate {
}
uint32_t in_arity() const {
- if (type != kWasmVar) return 0;
+ if (type != kWasmBottom) return 0;
return static_cast<uint32_t>(sig->parameter_count());
}
uint32_t out_arity() const {
if (type == kWasmStmt) return 0;
- if (type != kWasmVar) return 1;
+ if (type != kWasmBottom) return 1;
return static_cast<uint32_t>(sig->return_count());
}
ValueType in_type(uint32_t index) {
- DCHECK_EQ(kWasmVar, type);
+ DCHECK_EQ(kWasmBottom, type);
return sig->GetParam(index);
}
ValueType out_type(uint32_t index) {
- if (type == kWasmVar) return sig->GetReturn(index);
+ if (type == kWasmBottom) return sig->GetReturn(index);
DCHECK_NE(kWasmStmt, type);
DCHECK_EQ(0, index);
return type;
@@ -573,14 +574,14 @@ struct ElemDropImmediate {
template <Decoder::ValidateFlag validate>
struct TableCopyImmediate {
- TableIndexImmediate<validate> table_src;
TableIndexImmediate<validate> table_dst;
+ TableIndexImmediate<validate> table_src;
unsigned length = 0;
inline TableCopyImmediate(Decoder* decoder, const byte* pc) {
- table_src = TableIndexImmediate<validate>(decoder, pc + 1);
- table_dst =
- TableIndexImmediate<validate>(decoder, pc + 1 + table_src.length);
+ table_dst = TableIndexImmediate<validate>(decoder, pc + 1);
+ table_src =
+ TableIndexImmediate<validate>(decoder, pc + 1 + table_dst.length);
length = table_src.length + table_dst.length;
}
};
@@ -718,9 +719,9 @@ struct ControlBase {
const LocalIndexImmediate<validate>& imm) \
F(GetGlobal, Value* result, const GlobalIndexImmediate<validate>& imm) \
F(SetGlobal, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(GetTable, const Value& index, Value* result, \
+ F(TableGet, const Value& index, Value* result, \
const TableIndexImmediate<validate>& imm) \
- F(SetTable, const Value& index, const Value& value, \
+ F(TableSet, const Value& index, const Value& value, \
const TableIndexImmediate<validate>& imm) \
F(Unreachable) \
F(Select, const Value& cond, const Value& fval, const Value& tval, \
@@ -759,6 +760,7 @@ struct ControlBase {
Vector<Value> values) \
F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(AtomicFence) \
F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
const Value& src, const Value& size) \
F(DataDrop, const DataDropImmediate<validate>& imm) \
@@ -849,18 +851,18 @@ class WasmDecoder : public Decoder {
}
decoder->error(decoder->pc() - 1, "invalid local type");
return false;
- case kLocalAnyFunc:
+ case kLocalFuncRef:
if (enabled.anyref) {
- type = kWasmAnyFunc;
+ type = kWasmFuncRef;
break;
}
decoder->error(decoder->pc() - 1,
- "local type 'anyfunc' is not enabled with "
+ "local type 'funcref' is not enabled with "
"--experimental-wasm-anyref");
return false;
- case kLocalExceptRef:
+ case kLocalExnRef:
if (enabled.eh) {
- type = kWasmExceptRef;
+ type = kWasmExnRef;
break;
}
decoder->error(decoder->pc() - 1, "invalid local type");
@@ -1015,8 +1017,8 @@ class WasmDecoder : public Decoder {
return false;
}
if (!VALIDATE(module_ != nullptr &&
- module_->tables[imm.table_index].type == kWasmAnyFunc)) {
- error("table of call_indirect must be of type anyfunc");
+ module_->tables[imm.table_index].type == kWasmFuncRef)) {
+ error("table of call_indirect must be of type funcref");
return false;
}
if (!Complete(pc, imm)) {
@@ -1049,6 +1051,12 @@ class WasmDecoder : public Decoder {
SimdLaneImmediate<validate>& imm) {
uint8_t num_lanes = 0;
switch (opcode) {
+ case kExprF64x2ExtractLane:
+ case kExprF64x2ReplaceLane:
+ case kExprI64x2ExtractLane:
+ case kExprI64x2ReplaceLane:
+ num_lanes = 2;
+ break;
case kExprF32x4ExtractLane:
case kExprF32x4ReplaceLane:
case kExprI32x4ExtractLane:
@@ -1079,6 +1087,11 @@ class WasmDecoder : public Decoder {
SimdShiftImmediate<validate>& imm) {
uint8_t max_shift = 0;
switch (opcode) {
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU:
+ max_shift = 64;
+ break;
case kExprI32x4Shl:
case kExprI32x4ShrS:
case kExprI32x4ShrU:
@@ -1121,7 +1134,7 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(BlockTypeImmediate<validate>& imm) {
- if (imm.type != kWasmVar) return true;
+ if (imm.type != kWasmBottom) return true;
if (!VALIDATE(module_ && imm.sig_index < module_->signatures.size())) {
return false;
}
@@ -1238,8 +1251,8 @@ class WasmDecoder : public Decoder {
GlobalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
- case kExprGetTable:
- case kExprSetTable: {
+ case kExprTableGet:
+ case kExprTableSet: {
TableIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
@@ -1405,6 +1418,12 @@ class WasmDecoder : public Decoder {
MemoryAccessImmediate<validate> imm(decoder, pc + 1, UINT32_MAX);
return 2 + imm.length;
}
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ {
+ return 2 + 1;
+ }
default:
decoder->error(pc, "invalid Atomics opcode");
return 2;
@@ -1428,11 +1447,11 @@ class WasmDecoder : public Decoder {
case kExprSelect:
case kExprSelectWithType:
return {3, 1};
- case kExprSetTable:
+ case kExprTableSet:
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
return {2, 0};
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- case kExprGetTable:
+ case kExprTableGet:
case kExprTeeLocal:
case kExprMemoryGrow:
return {1, 1};
@@ -1536,7 +1555,6 @@ template <Decoder::ValidateFlag validate, typename Interface>
class WasmFullDecoder : public WasmDecoder<validate> {
using Value = typename Interface::Value;
using Control = typename Interface::Control;
- using MergeValues = Merge<Value>;
using ArgVector = base::SmallVector<Value, 8>;
// All Value types should be trivially copyable for performance. We push, pop,
@@ -1658,7 +1676,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
static Value UnreachableValue(const uint8_t* pc) {
- return Value{pc, kWasmVar};
+ return Value{pc, kWasmBottom};
}
bool CheckHasMemory() {
@@ -1760,7 +1778,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kExprRethrow: {
CHECK_PROTOTYPE_OPCODE(eh);
- auto exception = Pop(0, kWasmExceptRef);
+ auto exception = Pop(0, kWasmExnRef);
CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
EndControl();
break;
@@ -1806,7 +1824,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
FallThruTo(c);
stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
c->reachability = control_at(1)->innerReachability();
- auto* exception = Push(kWasmExceptRef);
+ auto* exception = Push(kWasmExnRef);
CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
break;
}
@@ -1816,7 +1834,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_, imm.depth, control_.size())) break;
if (!this->Validate(this->pc_ + imm.depth.length, imm.index)) break;
Control* c = control_at(imm.depth.depth);
- auto exception = Pop(0, kWasmExceptRef);
+ auto exception = Pop(0, kWasmExnRef);
const WasmExceptionSig* sig = imm.index.exception->sig;
size_t value_count = sig->parameter_count();
// TODO(mstarzinger): This operand stack mutation is an ugly hack to
@@ -1825,15 +1843,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// special handling for both and do minimal/no stack mutation here.
for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
Vector<Value> values(stack_.data() + c->stack_depth, value_count);
- if (!TypeCheckBranch(c)) break;
- if (control_.back().reachable()) {
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrOnException, exception, imm.index, imm.depth.depth,
values);
c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
for (size_t i = 0; i < value_count; ++i) Pop();
- auto* pexception = Push(kWasmExceptRef);
+ auto* pexception = Push(kWasmExnRef);
*pexception = exception;
break;
}
@@ -1875,7 +1895,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error(this->pc_, "else already present for if");
break;
}
- if (!TypeCheckFallThru(c)) break;
+ if (!TypeCheckFallThru()) break;
c->kind = kControlIfElse;
CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
if (c->reachable()) c->end_merge.reached = true;
@@ -1902,7 +1922,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- if (!TypeCheckFallThru(c)) break;
+ if (!TypeCheckFallThru()) break;
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
@@ -1917,7 +1937,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
control_.clear();
break;
}
-
PopControl(c);
break;
}
@@ -1925,8 +1944,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto cond = Pop(2, kWasmI32);
auto fval = Pop();
auto tval = Pop(0, fval.type);
- ValueType type = tval.type == kWasmVar ? fval.type : tval.type;
- if (ValueTypes::IsSubType(kWasmAnyRef, type)) {
+ ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
+ if (ValueTypes::IsSubType(type, kWasmAnyRef)) {
this->error(
"select without type is only valid for value type inputs");
break;
@@ -1951,12 +1970,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchDepthImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm, control_.size())) break;
Control* c = control_at(imm.depth);
- if (!TypeCheckBranch(c)) break;
- if (imm.depth == control_.size() - 1) {
- DoReturn();
- } else if (control_.back().reachable()) {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, false);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ if (imm.depth == control_.size() - 1) {
+ DoReturn();
+ } else {
+ CALL_INTERFACE(Br, c);
+ c->br_merge()->reached = true;
+ }
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
EndControl();
@@ -1968,10 +1991,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (this->failed()) break;
if (!this->Validate(this->pc_, imm, control_.size())) break;
Control* c = control_at(imm.depth);
- if (!TypeCheckBranch(c)) break;
- if (control_.back().reachable()) {
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
CALL_INTERFACE(BrIf, cond, imm.depth);
c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
}
len = 1 + imm.length;
break;
@@ -1982,42 +2007,45 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto key = Pop(0, kWasmI32);
if (this->failed()) break;
if (!this->Validate(this->pc_, imm, control_.size())) break;
- uint32_t br_arity = 0;
+
+ // Cache the branch targets during the iteration, so that we can set
+ // all branch targets as reachable after the {CALL_INTERFACE} call.
std::vector<bool> br_targets(control_.size());
+
+ // The result types of the br_table instruction. We have to check the
+ // stack against these types. Only needed during validation.
+ std::vector<ValueType> result_types;
+
while (iterator.has_next()) {
- const uint32_t i = iterator.cur_index();
+ const uint32_t index = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
- if (!VALIDATE(target < control_.size())) {
- this->errorf(pos,
- "improper branch in br_table target %u (depth %u)",
- i, target);
- break;
- }
+ if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) break;
// Avoid redundant branch target checks.
if (br_targets[target]) continue;
br_targets[target] = true;
- // Check that label types match up.
- Control* c = control_at(target);
- uint32_t arity = c->br_merge()->arity;
- if (i == 0) {
- br_arity = arity;
- } else if (!VALIDATE(br_arity == arity)) {
- this->errorf(pos,
- "inconsistent arity in br_table target %u"
- " (previous was %u, this one %u)",
- i, br_arity, arity);
+
+ if (validate) {
+ if (index == 0) {
+ // With the first branch target, initialize the result types.
+ result_types = InitializeBrTableResultTypes(target);
+ } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
+ index)) {
+ break;
+ }
}
- if (!TypeCheckBranch(c)) break;
}
- if (this->failed()) break;
+
+ if (!VALIDATE(TypeCheckBrTable(result_types))) break;
+
+ DCHECK(this->ok());
if (control_.back().reachable()) {
CALL_INTERFACE(BrTable, imm, key);
- for (uint32_t depth = control_depth(); depth-- > 0;) {
- if (!br_targets[depth]) continue;
- control_at(depth)->br_merge()->reached = true;
+ for (int i = 0, e = control_depth(); i < e; ++i) {
+ if (!br_targets[i]) continue;
+ control_at(i)->br_merge()->reached = true;
}
}
@@ -2026,8 +2054,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
case kExprReturn: {
- if (!TypeCheckReturn()) break;
- DoReturn();
+ if (V8_LIKELY(control_.back().reachable())) {
+ if (!VALIDATE(TypeCheckReturn())) break;
+ DoReturn();
+ } else {
+ // We pop all return values from the stack to check their type.
+ // Since we deal with unreachable code, we do not have to keep the
+ // values.
+ int num_returns = static_cast<int>(this->sig_->return_count());
+ for (int i = 0; i < num_returns; ++i) {
+ Pop(i, this->sig_->GetReturn(i));
+ }
+ }
+
EndControl();
break;
}
@@ -2075,7 +2114,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(anyref);
FunctionIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(kWasmAnyFunc);
+ auto* value = Push(kWasmFuncRef);
CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
len = 1 + imm.length;
break;
@@ -2131,7 +2170,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_REACHABLE(SetGlobal, value, imm);
break;
}
- case kExprGetTable: {
+ case kExprTableGet: {
CHECK_PROTOTYPE_OPCODE(anyref);
TableIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
@@ -2139,17 +2178,17 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK_NOT_NULL(this->module_);
auto index = Pop(0, kWasmI32);
auto* result = Push(this->module_->tables[imm.index].type);
- CALL_INTERFACE_IF_REACHABLE(GetTable, index, result, imm);
+ CALL_INTERFACE_IF_REACHABLE(TableGet, index, result, imm);
break;
}
- case kExprSetTable: {
+ case kExprTableSet: {
CHECK_PROTOTYPE_OPCODE(anyref);
TableIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
auto value = Pop(1, this->module_->tables[imm.index].type);
auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(SetTable, index, value, imm);
+ CALL_INTERFACE_IF_REACHABLE(TableSet, index, value, imm);
break;
}
@@ -2328,7 +2367,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
case kAtomicPrefix: {
CHECK_PROTOTYPE_OPCODE(threads);
- if (!CheckHasSharedMemory()) break;
len++;
byte atomic_index =
this->template read_u8<validate>(this->pc_ + 1, "atomic index");
@@ -2348,8 +2386,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
default: {
// Deal with special asmjs opcodes.
- if (this->module_ != nullptr &&
- this->module_->origin == kAsmJsOrigin) {
+ if (this->module_ != nullptr && is_asmjs_module(this->module_)) {
FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) {
BuildSimpleOperator(opcode, sig);
@@ -2520,6 +2557,90 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return imm.length;
}
+ bool ValidateBrTableTarget(uint32_t target, const byte* pos, int index) {
+ if (!VALIDATE(target < this->control_.size())) {
+ this->errorf(pos, "improper branch in br_table target %u (depth %u)",
+ index, target);
+ return false;
+ }
+ return true;
+ }
+
+ std::vector<ValueType> InitializeBrTableResultTypes(uint32_t target) {
+ auto* merge = control_at(target)->br_merge();
+ int br_arity = merge->arity;
+ std::vector<ValueType> result(br_arity);
+ for (int i = 0; i < br_arity; ++i) {
+ result[i] = (*merge)[i].type;
+ }
+ return result;
+ }
+
+ bool UpdateBrTableResultTypes(std::vector<ValueType>* result_types,
+ uint32_t target, const byte* pos, int index) {
+ auto* merge = control_at(target)->br_merge();
+ int br_arity = merge->arity;
+ // First we check if the arities match.
+ if (br_arity != static_cast<int>(result_types->size())) {
+ this->errorf(pos,
+ "inconsistent arity in br_table target %u (previous was "
+ "%zu, this one is %u)",
+ index, result_types->size(), br_arity);
+ return false;
+ }
+
+ for (int i = 0; i < br_arity; ++i) {
+ if (this->enabled_.anyref) {
+ // The expected type is the biggest common sub type of all targets.
+ (*result_types)[i] =
+ ValueTypes::CommonSubType((*result_types)[i], (*merge)[i].type);
+ } else {
+ // All target must have the same signature.
+ if ((*result_types)[i] != (*merge)[i].type) {
+ this->errorf(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, ValueTypes::TypeName((*result_types)[i]),
+ ValueTypes::TypeName((*merge)[i].type));
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ bool TypeCheckBrTable(const std::vector<ValueType>& result_types) {
+ int br_arity = static_cast<int>(result_types.size());
+ if (V8_LIKELY(control_.back().reachable())) {
+ int available =
+ static_cast<int>(stack_.size()) - control_.back().stack_depth;
+ // There have to be enough values on the stack.
+ if (available < br_arity) {
+ this->errorf(this->pc_,
+ "expected %u elements on the stack for branch to "
+ "@%d, found %u",
+ br_arity, startrel(control_.back().pc), available);
+ return false;
+ }
+ Value* stack_values = &*(stack_.end() - br_arity);
+ // Type-check the topmost br_arity values on the stack.
+ for (int i = 0; i < br_arity; ++i) {
+ Value& val = stack_values[i];
+ if (!ValueTypes::IsSubType(val.type, result_types[i])) {
+ this->errorf(this->pc_,
+ "type error in merge[%u] (expected %s, got %s)", i,
+ ValueTypes::TypeName(result_types[i]),
+ ValueTypes::TypeName(val.type));
+ return false;
+ }
+ }
+ } else { // !control_.back().reachable()
+ // Pop values from the stack, accoring to the expected signature.
+ for (int i = 0; i < br_arity; ++i) Pop(i + 1, result_types[i]);
+ }
+ return this->ok();
+ }
+
uint32_t SimdExtractLane(WasmOpcode opcode, ValueType type) {
SimdLaneImmediate<validate> imm(this, this->pc_);
if (this->Validate(this->pc_, opcode, imm)) {
@@ -2570,26 +2691,45 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t DecodeSimdOpcode(WasmOpcode opcode) {
uint32_t len = 0;
switch (opcode) {
+ case kExprF64x2ExtractLane: {
+ len = SimdExtractLane(opcode, kWasmF64);
+ break;
+ }
case kExprF32x4ExtractLane: {
len = SimdExtractLane(opcode, kWasmF32);
break;
}
+ case kExprI64x2ExtractLane: {
+ len = SimdExtractLane(opcode, kWasmI64);
+ break;
+ }
case kExprI32x4ExtractLane:
case kExprI16x8ExtractLane:
case kExprI8x16ExtractLane: {
len = SimdExtractLane(opcode, kWasmI32);
break;
}
+ case kExprF64x2ReplaceLane: {
+ len = SimdReplaceLane(opcode, kWasmF64);
+ break;
+ }
case kExprF32x4ReplaceLane: {
len = SimdReplaceLane(opcode, kWasmF32);
break;
}
+ case kExprI64x2ReplaceLane: {
+ len = SimdReplaceLane(opcode, kWasmI64);
+ break;
+ }
case kExprI32x4ReplaceLane:
case kExprI16x8ReplaceLane:
case kExprI8x16ReplaceLane: {
len = SimdReplaceLane(opcode, kWasmI32);
break;
}
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU:
case kExprI32x4Shl:
case kExprI32x4ShrS:
case kExprI32x4ShrU:
@@ -2631,16 +2771,19 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t len = 0;
ValueType ret_type;
FunctionSig* sig = WasmOpcodes::Signature(opcode);
- if (sig != nullptr) {
- MachineType memtype;
- switch (opcode) {
+ if (!VALIDATE(sig != nullptr)) {
+ this->error("invalid atomic opcode");
+ return 0;
+ }
+ MachineType memtype;
+ switch (opcode) {
#define CASE_ATOMIC_STORE_OP(Name, Type) \
case kExpr##Name: { \
memtype = MachineType::Type(); \
ret_type = kWasmStmt; \
break; \
}
- ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
+ ATOMIC_STORE_OP_LIST(CASE_ATOMIC_STORE_OP)
#undef CASE_ATOMIC_OP
#define CASE_ATOMIC_OP(Name, Type) \
case kExpr##Name: { \
@@ -2648,22 +2791,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ret_type = GetReturnType(sig); \
break; \
}
- ATOMIC_OP_LIST(CASE_ATOMIC_OP)
+ ATOMIC_OP_LIST(CASE_ATOMIC_OP)
#undef CASE_ATOMIC_OP
- default:
- this->error("invalid atomic opcode");
+ case kExprAtomicFence: {
+ byte zero = this->template read_u8<validate>(this->pc_ + 2, "zero");
+ if (!VALIDATE(zero == 0)) {
+ this->error(this->pc_ + 2, "invalid atomic operand");
return 0;
+ }
+ CALL_INTERFACE_IF_REACHABLE(AtomicFence);
+ return 1;
}
- MemoryAccessImmediate<validate> imm(
- this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
- len += imm.length;
- auto args = PopArgs(sig);
- auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
- result);
- } else {
- this->error("invalid atomic opcode");
- }
+ default:
+ this->error("invalid atomic opcode");
+ return 0;
+ }
+ if (!CheckHasSharedMemory()) return 0;
+ MemoryAccessImmediate<validate> imm(
+ this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
+ len += imm.length;
+ auto args = PopArgs(sig);
+ auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
+ CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
return len;
}
@@ -2823,8 +2972,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
V8_INLINE Value Pop(int index, ValueType expected) {
auto val = Pop();
- if (!VALIDATE(ValueTypes::IsSubType(expected, val.type) ||
- val.type == kWasmVar || expected == kWasmVar)) {
+ if (!VALIDATE(ValueTypes::IsSubType(val.type, expected) ||
+ val.type == kWasmBottom || expected == kWasmBottom)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
SafeOpcodeNameAt(this->pc_), index,
ValueTypes::TypeName(expected), SafeOpcodeNameAt(val.pc),
@@ -2849,11 +2998,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return val;
}
+ // Pops values from the stack, as defined by {merge}. Thereby we type-check
+ // unreachable merges. Afterwards the values are pushed again on the stack
+ // according to the signature in {merge}. This is done so follow-up validation
+ // is possible.
+ bool TypeCheckUnreachableMerge(Merge<Value>& merge, bool conditional_branch) {
+ int arity = merge.arity;
+ // For conditional branches, stack value '0' is the condition of the branch,
+ // and the result values start at index '1'.
+ int index_offset = conditional_branch ? 1 : 0;
+ for (int i = 0; i < arity; ++i) Pop(index_offset + i, merge[i].type);
+ // Push values of the correct type back on the stack.
+ for (int i = arity - 1; i >= 0; --i) Push(merge[i].type);
+ return this->ok();
+ }
+
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
void FallThruTo(Control* c) {
DCHECK_EQ(c, &control_.back());
- if (!TypeCheckFallThru(c)) return;
+ if (!TypeCheckFallThru()) return;
if (!c->reachable()) return;
if (!c->is_loop()) CALL_INTERFACE(FallThruTo, c);
@@ -2861,6 +3025,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TypeCheckMergeValues(Control* c, Merge<Value>* merge) {
+ // This is a CHECK instead of a DCHECK because {validate} is a constexpr,
+ // and a CHECK makes the whole function unreachable.
+ static_assert(validate, "Call this function only within VALIDATE");
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
DCHECK_GE(stack_.size(), c->stack_depth + merge->arity);
// The computation of {stack_values} is only valid if {merge->arity} is >0.
@@ -2870,108 +3037,121 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
- if (ValueTypes::IsSubType(old.type, val.type)) continue;
- // If {val.type} is polymorphic, which results from unreachable, make
- // it more specific by using the merge value's expected type.
- // If it is not polymorphic, this is a type error.
- if (!VALIDATE(val.type == kWasmVar)) {
+ if (!ValueTypes::IsSubType(val.type, old.type)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
i, ValueTypes::TypeName(old.type),
ValueTypes::TypeName(val.type));
return false;
}
- val.type = old.type;
}
return true;
}
- bool TypeCheckFallThru(Control* c) {
- DCHECK_EQ(c, &control_.back());
- if (!validate) return true;
- uint32_t expected = c->end_merge.arity;
- DCHECK_GE(stack_.size(), c->stack_depth);
- uint32_t actual = static_cast<uint32_t>(stack_.size()) - c->stack_depth;
- // Fallthrus must match the arity of the control exactly.
- if (!InsertUnreachablesIfNecessary(expected, actual) || actual > expected) {
+ bool TypeCheckFallThru() {
+ Control& c = control_.back();
+ if (V8_LIKELY(c.reachable())) {
+ // We only do type-checking here. This is only needed during validation.
+ if (!validate) return true;
+
+ uint32_t expected = c.end_merge.arity;
+ DCHECK_GE(stack_.size(), c.stack_depth);
+ uint32_t actual = static_cast<uint32_t>(stack_.size()) - c.stack_depth;
+ // Fallthrus must match the arity of the control exactly.
+ if (actual != expected) {
+ this->errorf(
+ this->pc_,
+ "expected %u elements on the stack for fallthru to @%d, found %u",
+ expected, startrel(c.pc), actual);
+ return false;
+ }
+ if (expected == 0) return true; // Fast path.
+
+ return TypeCheckMergeValues(&c, &c.end_merge);
+ }
+
+ // Type-check an unreachable fallthru. First we do an arity check, then a
+ // type check. Note that type-checking may require an adjustment of the
+ // stack, if some stack values are missing to match the block signature.
+ Merge<Value>& merge = c.end_merge;
+ int arity = static_cast<int>(merge.arity);
+ int available = static_cast<int>(stack_.size()) - c.stack_depth;
+ // For fallthrus, not more than the needed values should be available.
+ if (available > arity) {
this->errorf(
this->pc_,
"expected %u elements on the stack for fallthru to @%d, found %u",
- expected, startrel(c->pc), actual);
+ arity, startrel(c.pc), available);
return false;
}
- if (expected == 0) return true; // Fast path.
-
- return TypeCheckMergeValues(c, &c->end_merge);
+ // Pop all values from the stack for type checking of existing stack
+ // values.
+ return TypeCheckUnreachableMerge(merge, false);
}
- bool TypeCheckBranch(Control* c) {
- // Branches must have at least the number of values expected; can have more.
- uint32_t expected = c->br_merge()->arity;
- if (expected == 0) return true; // Fast path.
- DCHECK_GE(stack_.size(), control_.back().stack_depth);
- uint32_t actual =
- static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
- if (!InsertUnreachablesIfNecessary(expected, actual)) {
- this->errorf(this->pc_,
- "expected %u elements on the stack for br to @%d, found %u",
- expected, startrel(c->pc), actual);
- return false;
+ enum TypeCheckBranchResult {
+ kReachableBranch,
+ kUnreachableBranch,
+ kInvalidStack,
+ };
+
+ TypeCheckBranchResult TypeCheckBranch(Control* c, bool conditional_branch) {
+ if (V8_LIKELY(control_.back().reachable())) {
+ // We only do type-checking here. This is only needed during validation.
+ if (!validate) return kReachableBranch;
+
+ // Branches must have at least the number of values expected; can have
+ // more.
+ uint32_t expected = c->br_merge()->arity;
+ if (expected == 0) return kReachableBranch; // Fast path.
+ DCHECK_GE(stack_.size(), control_.back().stack_depth);
+ uint32_t actual =
+ static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
+ if (expected > actual) {
+ this->errorf(
+ this->pc_,
+ "expected %u elements on the stack for br to @%d, found %u",
+ expected, startrel(c->pc), actual);
+ return kInvalidStack;
+ }
+ return TypeCheckMergeValues(c, c->br_merge()) ? kReachableBranch
+ : kInvalidStack;
}
- return TypeCheckMergeValues(c, c->br_merge());
+
+ return TypeCheckUnreachableMerge(*c->br_merge(), conditional_branch)
+ ? kUnreachableBranch
+ : kInvalidStack;
}
bool TypeCheckReturn() {
+ int num_returns = static_cast<int>(this->sig_->return_count());
+ // No type checking is needed if there are no returns.
+ if (num_returns == 0) return true;
+
// Returns must have at least the number of values expected; can have more.
- uint32_t num_returns = static_cast<uint32_t>(this->sig_->return_count());
- DCHECK_GE(stack_.size(), control_.back().stack_depth);
- uint32_t actual =
- static_cast<uint32_t>(stack_.size()) - control_.back().stack_depth;
- if (!InsertUnreachablesIfNecessary(num_returns, actual)) {
+ int num_available =
+ static_cast<int>(stack_.size()) - control_.back().stack_depth;
+ if (num_available < num_returns) {
this->errorf(this->pc_,
"expected %u elements on the stack for return, found %u",
- num_returns, actual);
+ num_returns, num_available);
return false;
}
// Typecheck the topmost {num_returns} values on the stack.
- if (num_returns == 0) return true;
// This line requires num_returns > 0.
Value* stack_values = &*(stack_.end() - num_returns);
- for (uint32_t i = 0; i < num_returns; ++i) {
+ for (int i = 0; i < num_returns; ++i) {
auto& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
- if (ValueTypes::IsSubType(expected_type, val.type)) continue;
- // If {val.type} is polymorphic, which results from unreachable,
- // make it more specific by using the return's expected type.
- // If it is not polymorphic, this is a type error.
- if (!VALIDATE(val.type == kWasmVar)) {
+ if (!ValueTypes::IsSubType(val.type, expected_type)) {
this->errorf(this->pc_,
"type error in return[%u] (expected %s, got %s)", i,
ValueTypes::TypeName(expected_type),
ValueTypes::TypeName(val.type));
return false;
}
- val.type = expected_type;
- }
- return true;
- }
-
- inline bool InsertUnreachablesIfNecessary(uint32_t expected,
- uint32_t actual) {
- if (V8_LIKELY(actual >= expected)) {
- return true; // enough actual values are there.
- }
- if (!VALIDATE(control_.back().unreachable())) {
- // There aren't enough values on the stack.
- return false;
}
- // A slow path. When the actual number of values on the stack is less
- // than the expected number of values and the current control is
- // unreachable, insert unreachable values below the actual values.
- // This simplifies {TypeCheckMergeValues}.
- auto pos = stack_.begin() + (stack_.size() - actual);
- stack_.insert(pos, expected - actual, UnreachableValue(this->pc_));
return true;
}
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index c1e8e541b5..0568d61f3f 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -45,7 +45,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module, WasmFeatures* detected,
- FunctionBody& body) {
+ const FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kValidate, EmptyInterface> decoder(
&zone, module, enabled, detected, body);
@@ -151,7 +151,12 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned length =
WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, i.pc());
+ unsigned offset = 1;
WasmOpcode opcode = i.current();
+ if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+ opcode = i.prefixed_opcode();
+ offset = 2;
+ }
if (line_numbers) line_numbers->push_back(i.position());
if (opcode == kExprElse || opcode == kExprCatch) {
control_depth--;
@@ -188,7 +193,7 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
}
#undef CASE_LOCAL_TYPE
} else {
- for (unsigned j = 1; j < length; ++j) {
+ for (unsigned j = offset; j < length; ++j) {
os << " 0x" << AsHex(i.pc()[j], 2) << ",";
}
}
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 16f90a41cb..eadc333dd5 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -38,7 +38,7 @@ V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module,
WasmFeatures* detected,
- FunctionBody& body);
+ const FunctionBody& body);
enum PrintLocals { kPrintLocals, kOmitLocals };
V8_EXPORT_PRIVATE
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index a5d7a08846..7df5abf5c8 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -4,9 +4,14 @@
#include "src/wasm/function-compiler.h"
+#include "src/codegen/compiler.h"
#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/diagnostics/code-tracer.h"
#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/wasm-code-manager.h"
@@ -107,12 +112,48 @@ ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier(
const WasmModule* module) {
// Liftoff does not support the special asm.js opcodes, thus always compile
// asm.js modules with TurboFan.
- if (module->origin == kAsmJsOrigin) return ExecutionTier::kTurbofan;
+ if (is_asmjs_module(module)) return ExecutionTier::kTurbofan;
if (FLAG_wasm_interpret_all) return ExecutionTier::kInterpreter;
return FLAG_liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
}
WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
+ WasmEngine* engine, CompilationEnv* env,
+ const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ Counters* counters, WasmFeatures* detected) {
+ WasmCompilationResult result;
+ if (func_index_ < static_cast<int>(env->module->num_imported_functions)) {
+ result = ExecuteImportWrapperCompilation(engine, env);
+ } else {
+ result = ExecuteFunctionCompilation(engine, env, wire_bytes_storage,
+ counters, detected);
+ }
+
+ if (result.succeeded()) {
+ counters->wasm_generated_code_size()->Increment(
+ result.code_desc.instr_size);
+ counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
+ }
+
+ result.func_index = func_index_;
+ result.requested_tier = tier_;
+
+ return result;
+}
+
+WasmCompilationResult WasmCompilationUnit::ExecuteImportWrapperCompilation(
+ WasmEngine* engine, CompilationEnv* env) {
+ FunctionSig* sig = env->module->functions[func_index_].sig;
+ // Assume the wrapper is going to be a JS function with matching arity at
+ // instantiation time.
+ auto kind = compiler::kDefaultImportCallKind;
+ bool source_positions = is_asmjs_module(env->module);
+ WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ engine, env, kind, sig, source_positions);
+ return result;
+}
+
+WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
WasmEngine* wasm_engine, CompilationEnv* env,
const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
Counters* counters, WasmFeatures* detected) {
@@ -167,17 +208,32 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
break;
}
- result.func_index = func_index_;
- result.requested_tier = tier_;
+ return result;
+}
- if (result.succeeded()) {
- counters->wasm_generated_code_size()->Increment(
- result.code_desc.instr_size);
- counters->wasm_reloc_size()->Increment(result.code_desc.reloc_size);
- }
+namespace {
+bool must_record_function_compilation(Isolate* isolate) {
+ return isolate->logger()->is_listening_to_code_events() ||
+ isolate->is_profiling();
+}
- return result;
+PRINTF_FORMAT(3, 4)
+void RecordWasmHeapStubCompilation(Isolate* isolate, Handle<Code> code,
+ const char* format, ...) {
+ DCHECK(must_record_function_compilation(isolate));
+
+ ScopedVector<char> buffer(128);
+ va_list arguments;
+ va_start(arguments, format);
+ int len = VSNPrintF(buffer, format, arguments);
+ CHECK_LT(0, len);
+ va_end(arguments);
+ Handle<String> name_str =
+ isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ AbstractCode::cast(*code), *name_str));
}
+} // namespace
// static
void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
@@ -190,6 +246,8 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
wire_bytes.start() + function->code.offset(),
wire_bytes.start() + function->code.end_offset()};
+ DCHECK_LE(native_module->num_imported_functions(), function->func_index);
+ DCHECK_LT(function->func_index, native_module->num_functions());
WasmCompilationUnit unit(function->func_index, tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = unit.ExecuteCompilation(
@@ -204,6 +262,46 @@ void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
}
}
+JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(Isolate* isolate,
+ FunctionSig* sig,
+ bool is_import)
+ : job_(compiler::NewJSToWasmCompilationJob(isolate, sig, is_import)) {}
+
+JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
+
+void JSToWasmWrapperCompilationUnit::Prepare(Isolate* isolate) {
+ CompilationJob::Status status = job_->PrepareJob(isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+}
+
+void JSToWasmWrapperCompilationUnit::Execute() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileJSToWasmWrapper");
+ DCHECK_EQ(job_->state(), CompilationJob::State::kReadyToExecute);
+ CompilationJob::Status status = job_->ExecuteJob();
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+}
+
+Handle<Code> JSToWasmWrapperCompilationUnit::Finalize(Isolate* isolate) {
+ CompilationJob::Status status = job_->FinalizeJob(isolate);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ Handle<Code> code = job_->compilation_info()->code();
+ if (must_record_function_compilation(isolate)) {
+ RecordWasmHeapStubCompilation(
+ isolate, code, "%s", job_->compilation_info()->GetDebugName().get());
+ }
+ return code;
+}
+
+// static
+Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ Isolate* isolate, FunctionSig* sig, bool is_import) {
+ // Run the compilation unit synchronously.
+ JSToWasmWrapperCompilationUnit unit(isolate, sig, is_import);
+ unit.Prepare(isolate);
+ unit.Execute();
+ return unit.Finalize(isolate);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index e7d8ff9471..d0b47b91aa 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -18,6 +18,7 @@ namespace internal {
class AssemblerBuffer;
class Counters;
+class OptimizedCompilationJob;
namespace wasm {
@@ -34,6 +35,10 @@ class WasmInstructionBuffer final {
static std::unique_ptr<WasmInstructionBuffer> New();
+ // Override {operator delete} to avoid implicit instantiation of {operator
+ // delete} with {size_t} argument. The {size_t} argument would be incorrect.
+ void operator delete(void* ptr) { ::operator delete(ptr); }
+
private:
WasmInstructionBuffer() = delete;
DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
@@ -43,6 +48,12 @@ struct WasmCompilationResult {
public:
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(WasmCompilationResult);
+ enum Kind : int8_t {
+ kFunction,
+ kWasmToJsWrapper,
+ kInterpreterEntry,
+ };
+
bool succeeded() const { return code_desc.buffer != nullptr; }
bool failed() const { return !succeeded(); }
operator bool() const { return succeeded(); }
@@ -53,9 +64,10 @@ struct WasmCompilationResult {
uint32_t tagged_parameter_slots = 0;
OwnedVector<byte> source_positions;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions;
- int func_index;
+ int func_index = static_cast<int>(kAnonymousFuncIndex);
ExecutionTier requested_tier;
ExecutionTier result_tier;
+ Kind kind = kFunction;
};
class V8_EXPORT_PRIVATE WasmCompilationUnit final {
@@ -77,6 +89,14 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
ExecutionTier);
private:
+ WasmCompilationResult ExecuteFunctionCompilation(
+ WasmEngine* wasm_engine, CompilationEnv* env,
+ const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
+ Counters* counters, WasmFeatures* detected);
+
+ WasmCompilationResult ExecuteImportWrapperCompilation(WasmEngine* engine,
+ CompilationEnv* env);
+
int func_index_;
ExecutionTier tier_;
};
@@ -86,6 +106,24 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
ASSERT_TRIVIALLY_COPYABLE(WasmCompilationUnit);
STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
+class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
+ public:
+ JSToWasmWrapperCompilationUnit(Isolate* isolate, FunctionSig* sig,
+ bool is_import);
+ ~JSToWasmWrapperCompilationUnit();
+
+ void Prepare(Isolate* isolate);
+ void Execute();
+ Handle<Code> Finalize(Isolate* isolate);
+
+ // Run a compilation unit synchronously.
+ static Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
+ bool is_import);
+
+ private:
+ std::unique_ptr<OptimizedCompilationJob> job_;
+};
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 90d8749f2c..8efac18787 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -291,14 +291,14 @@ class WasmGraphBuildingInterface {
BUILD(SetGlobal, imm.index, value.node);
}
- void GetTable(FullDecoder* decoder, const Value& index, Value* result,
+ void TableGet(FullDecoder* decoder, const Value& index, Value* result,
const TableIndexImmediate<validate>& imm) {
- result->node = BUILD(GetTable, imm.index, index.node, decoder->position());
+ result->node = BUILD(TableGet, imm.index, index.node, decoder->position());
}
- void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
+ void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
const TableIndexImmediate<validate>& imm) {
- BUILD(SetTable, imm.index, index.node, value.node, decoder->position());
+ BUILD(TableSet, imm.index, index.node, value.node, decoder->position());
}
void Unreachable(FullDecoder* decoder) {
@@ -532,6 +532,8 @@ class WasmGraphBuildingInterface {
if (result) result->node = node;
}
+ void AtomicFence(FullDecoder* decoder) { BUILD(AtomicFence); }
+
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
@@ -567,7 +569,7 @@ class WasmGraphBuildingInterface {
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
- BUILD(TableCopy, imm.table_src.index, imm.table_dst.index, args[0].node,
+ BUILD(TableCopy, imm.table_dst.index, imm.table_src.index, args[0].node,
args[1].node, args[2].node, decoder->position());
}
@@ -691,8 +693,8 @@ class WasmGraphBuildingInterface {
case kWasmS128:
return builder_->S128Zero();
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return builder_->RefNull();
default:
UNREACHABLE();
@@ -717,7 +719,7 @@ class WasmGraphBuildingInterface {
Value& val = stack_values[i];
Value& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
- DCHECK(val.type == kWasmVar ||
+ DCHECK(val.type == kWasmBottom ||
ValueTypes::MachineRepresentationFor(val.type) ==
ValueTypes::MachineRepresentationFor(old.type));
old.node = first ? val.node
diff --git a/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h b/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
deleted file mode 100644
index ba2093d2c1..0000000000
--- a/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
-#define V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
-
-#include "src/compiler/wasm-compiler.h"
-#include "src/logging/counters.h"
-#include "src/wasm/value-type.h"
-#include "src/wasm/wasm-code-manager.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-class JSToWasmWrapperCache {
- public:
- Handle<Code> GetOrCompileJSToWasmWrapper(Isolate* isolate, FunctionSig* sig,
- bool is_import) {
- std::pair<bool, FunctionSig> key(is_import, *sig);
- Handle<Code>& cached = cache_[key];
- if (cached.is_null()) {
- cached = compiler::CompileJSToWasmWrapper(isolate, sig, is_import)
- .ToHandleChecked();
- }
- return cached;
- }
-
- private:
- // We generate different code for calling imports than calling wasm functions
- // in this module. Both are cached separately.
- using CacheKey = std::pair<bool, FunctionSig>;
- std::unordered_map<CacheKey, Handle<Code>, base::hash<CacheKey>> cache_;
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 93ff8a9317..7c41c0a209 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -17,8 +17,8 @@ namespace wasm {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
// Use a push, because mov to an extended register takes 6 bytes.
- pushq(Immediate(func_index)); // max 5 bytes
- EmitJumpSlot(lazy_compile_target); // always 5 bytes
+ pushq_imm32(func_index); // 5 bytes
+ EmitJumpSlot(lazy_compile_target); // 5 bytes
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -43,7 +43,7 @@ void JumpTableAssembler::NopBytes(int bytes) {
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
mov(kWasmCompileLazyFuncIndexRegister, func_index); // 5 bytes
- jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
+ jmp(lazy_compile_target, RelocInfo::NONE); // 5 bytes
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -97,13 +97,17 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_ARM64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
- Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // max. 2 instr
- Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
+ int start = pc_offset();
+ Mov(kWasmCompileLazyFuncIndexRegister.W(), func_index); // 1-2 instr
+ Jump(lazy_compile_target, RelocInfo::NONE); // 1 instr
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK(nop_bytes == 0 || nop_bytes == kInstrSize);
+ if (nop_bytes) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
JumpToInstructionStream(builtin_target);
- CheckConstPool(true, false); // force emit of const pool
+ ForceConstantPoolEmissionWithoutJump();
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
@@ -154,10 +158,14 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
+ int start = pc_offset();
li(kWasmCompileLazyFuncIndexRegister, func_index); // max. 2 instr
// Jump produces max. 4 instructions for 32-bit platform
// and max. 6 instructions for 64-bit platform.
Jump(lazy_compile_target, RelocInfo::NONE);
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
@@ -179,12 +187,16 @@ void JumpTableAssembler::NopBytes(int bytes) {
#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
+ int start = pc_offset();
// Load function index to register. max 5 instrs
mov(kWasmCompileLazyFuncIndexRegister, Operand(func_index));
// Jump to {lazy_compile_target}. max 5 instrs
mov(r0, Operand(lazy_compile_target));
mtctr(r0);
bctr();
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
}
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index eef9fea167..379a547b55 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -17,7 +17,16 @@ namespace wasm {
// each slot containing a dispatch to the currently published {WasmCode} that
// corresponds to the function.
//
-// Note that the table is split into lines of fixed size, with lines laid out
+// Additionally to this main jump table, there exist special jump tables for
+// other purposes:
+// - the runtime stub table contains one entry per wasm runtime stub (see
+// {WasmCode::RuntimeStubId}, which jumps to the corresponding embedded
+// builtin.
+// - the lazy compile table contains one entry per wasm function which jumps to
+// the common {WasmCompileLazy} builtin and passes the function index that was
+// invoked.
+//
+// The main jump table is split into lines of fixed size, with lines laid out
// consecutively within the executable memory of the {NativeModule}. The slots
// in turn are consecutive within a line, but do not cross line boundaries.
//
@@ -27,6 +36,7 @@ namespace wasm {
//
// The above illustrates jump table lines {Li} containing slots {Si} with each
// line containing {n} slots and some padding {x} for alignment purposes.
+// Other jump tables are just consecutive.
class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
public:
// Translate an offset into the continuous jump table to a jump table index.
@@ -39,7 +49,7 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
}
// Translate a jump table index to an offset into the continuous jump table.
- static uint32_t SlotIndexToOffset(uint32_t slot_index) {
+ static uint32_t JumpSlotIndexToOffset(uint32_t slot_index) {
uint32_t line_index = slot_index / kJumpTableSlotsPerLine;
uint32_t line_offset =
(slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
@@ -60,40 +70,56 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
return slot_index * kJumpTableStubSlotSize;
}
+ // Translate a slot index to an offset into the lazy compile table.
+ static uint32_t LazyCompileSlotIndexToOffset(uint32_t slot_index) {
+ return slot_index * kLazyCompileTableSlotSize;
+ }
+
// Determine the size of a jump table containing only runtime stub slots.
static constexpr uint32_t SizeForNumberOfStubSlots(uint32_t slot_count) {
return slot_count * kJumpTableStubSlotSize;
}
- static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
- uint32_t func_index,
- Address lazy_compile_target,
- WasmCode::FlushICache flush_i_cache) {
- Address slot = base + SlotIndexToOffset(slot_index);
- JumpTableAssembler jtasm(slot);
- jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
- jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
- if (flush_i_cache) {
- FlushInstructionCache(slot, kJumpTableSlotSize);
+ // Determine the size of a lazy compile table.
+ static constexpr uint32_t SizeForNumberOfLazyFunctions(uint32_t slot_count) {
+ return slot_count * kLazyCompileTableSlotSize;
+ }
+
+ static void GenerateLazyCompileTable(Address base, uint32_t num_slots,
+ uint32_t num_imported_functions,
+ Address wasm_compile_lazy_target) {
+ uint32_t lazy_compile_table_size = num_slots * kLazyCompileTableSlotSize;
+ // Assume enough space, so the Assembler does not try to grow the buffer.
+ JumpTableAssembler jtasm(base, lazy_compile_table_size + 256);
+ for (uint32_t slot_index = 0; slot_index < num_slots; ++slot_index) {
+ DCHECK_EQ(slot_index * kLazyCompileTableSlotSize, jtasm.pc_offset());
+ jtasm.EmitLazyCompileJumpSlot(slot_index + num_imported_functions,
+ wasm_compile_lazy_target);
}
+ DCHECK_EQ(lazy_compile_table_size, jtasm.pc_offset());
+ FlushInstructionCache(base, lazy_compile_table_size);
}
- static void EmitRuntimeStubSlot(Address base, uint32_t slot_index,
- Address builtin_target,
- WasmCode::FlushICache flush_i_cache) {
- Address slot = base + StubSlotIndexToOffset(slot_index);
- JumpTableAssembler jtasm(slot);
- jtasm.EmitRuntimeStubSlot(builtin_target);
- jtasm.NopBytes(kJumpTableStubSlotSize - jtasm.pc_offset());
- if (flush_i_cache) {
- FlushInstructionCache(slot, kJumpTableStubSlotSize);
+ static void GenerateRuntimeStubTable(Address base, Address* targets,
+ int num_stubs) {
+ uint32_t table_size = num_stubs * kJumpTableStubSlotSize;
+ // Assume enough space, so the Assembler does not try to grow the buffer.
+ JumpTableAssembler jtasm(base, table_size + 256);
+ int offset = 0;
+ for (int index = 0; index < num_stubs; ++index) {
+ DCHECK_EQ(offset, StubSlotIndexToOffset(index));
+ DCHECK_EQ(offset, jtasm.pc_offset());
+ jtasm.EmitRuntimeStubSlot(targets[index]);
+ offset += kJumpTableStubSlotSize;
+ jtasm.NopBytes(offset - jtasm.pc_offset());
}
+ FlushInstructionCache(base, table_size);
}
static void PatchJumpTableSlot(Address base, uint32_t slot_index,
Address new_target,
WasmCode::FlushICache flush_i_cache) {
- Address slot = base + SlotIndexToOffset(slot_index);
+ Address slot = base + JumpSlotIndexToOffset(slot_index);
JumpTableAssembler jtasm(slot);
jtasm.EmitJumpSlot(new_target);
jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
@@ -115,44 +141,54 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
// boundaries. The jump table line size has been chosen to satisfy this.
#if V8_TARGET_ARCH_X64
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 10;
+ static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kLazyCompileTableSlotSize = 10;
static constexpr int kJumpTableStubSlotSize = 18;
#elif V8_TARGET_ARCH_IA32
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 10;
+ static constexpr int kJumpTableSlotSize = 5;
+ static constexpr int kLazyCompileTableSlotSize = 10;
static constexpr int kJumpTableStubSlotSize = 10;
#elif V8_TARGET_ARCH_ARM
- static constexpr int kJumpTableLineSize = 5 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
- static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
-#elif V8_TARGET_ARCH_ARM64
static constexpr int kJumpTableLineSize = 3 * kInstrSize;
static constexpr int kJumpTableSlotSize = 3 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 5 * kInstrSize;
+ static constexpr int kJumpTableStubSlotSize = 5 * kInstrSize;
+#elif V8_TARGET_ARCH_ARM64
+ static constexpr int kJumpTableLineSize = 1 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 1 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 3 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#elif V8_TARGET_ARCH_S390X
static constexpr int kJumpTableLineSize = 128;
- static constexpr int kJumpTableSlotSize = 20;
+ static constexpr int kJumpTableSlotSize = 14;
+ static constexpr int kLazyCompileTableSlotSize = 20;
static constexpr int kJumpTableStubSlotSize = 14;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 48;
+ static constexpr int kJumpTableSlotSize = 7 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 12 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 7 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS
static constexpr int kJumpTableLineSize = 6 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 6 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 4 * kInstrSize;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kJumpTableLineSize = 8 * kInstrSize;
- static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
static constexpr int kJumpTableStubSlotSize = 6 * kInstrSize;
#else
static constexpr int kJumpTableLineSize = 1;
static constexpr int kJumpTableSlotSize = 1;
+ static constexpr int kLazyCompileTableSlotSize = 1;
static constexpr int kJumpTableStubSlotSize = 1;
#endif
static constexpr int kJumpTableSlotsPerLine =
kJumpTableLineSize / kJumpTableSlotSize;
+ STATIC_ASSERT(kJumpTableSlotsPerLine >= 1);
// {JumpTableAssembler} is never used during snapshot generation, and its code
// must be independent of the code range of any isolate anyway. Just ensure
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 10483cf8ea..b11a557195 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -6,7 +6,7 @@
#include <cinttypes>
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -22,9 +22,9 @@ void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
#define TRACE_TYPE(rep, str, format, ctype1, ctype2) \
case MachineRepresentation::rep: \
SNPrintF(value, str ":" format, \
- ReadLittleEndianValue<ctype1>( \
+ base::ReadLittleEndianValue<ctype1>( \
reinterpret_cast<Address>(mem_start) + info->address), \
- ReadLittleEndianValue<ctype2>( \
+ base::ReadLittleEndianValue<ctype2>( \
reinterpret_cast<Address>(mem_start) + info->address)); \
break;
TRACE_TYPE(kWord8, " i8", "%d / %02x", uint8_t, uint8_t)
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 3bb6eb1e58..b5a58d4f27 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -24,7 +24,6 @@
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/identity-map.h"
-#include "src/wasm/js-to-wasm-wrapper-cache.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -34,6 +33,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -152,6 +152,9 @@ class CompilationUnitQueues {
for (int task_id = 0; task_id < max_tasks; ++task_id) {
queues_[task_id].next_steal_task_id = next_task_id(task_id);
}
+ for (auto& atomic_counter : num_units_) {
+ std::atomic_init(&atomic_counter, size_t{0});
+ }
}
base::Optional<WasmCompilationUnit> GetNextUnit(
@@ -254,15 +257,14 @@ class CompilationUnitQueues {
};
struct BigUnitsQueue {
- BigUnitsQueue() = default;
+ BigUnitsQueue() {
+ for (auto& atomic : has_units) std::atomic_init(&atomic, false);
+ }
base::Mutex mutex;
// Can be read concurrently to check whether any elements are in the queue.
- std::atomic_bool has_units[kNumTiers] = {
- ATOMIC_VAR_INIT(false),
- ATOMIC_VAR_INIT(false)
- };
+ std::atomic<bool> has_units[kNumTiers];
// Protected by {mutex}:
std::priority_queue<BigUnit> units[kNumTiers];
@@ -271,11 +273,8 @@ class CompilationUnitQueues {
std::vector<Queue> queues_;
BigUnitsQueue big_units_queue_;
- std::atomic_size_t num_units_[kNumTiers] = {
- ATOMIC_VAR_INIT(0),
- ATOMIC_VAR_INIT(0)
- };
- std::atomic_int next_queue_to_add{0};
+ std::atomic<size_t> num_units_[kNumTiers];
+ std::atomic<int> next_queue_to_add{0};
int next_task_id(int task_id) const {
int next = task_id + 1;
@@ -382,7 +381,7 @@ class CompilationStateImpl {
// Initialize compilation progress. Set compilation tiers to expect for
// baseline and top tier compilation. Must be set before {AddCompilationUnits}
// is invoked which triggers background compilation.
- void InitializeCompilationProgress(bool lazy_module);
+ void InitializeCompilationProgress(bool lazy_module, int num_import_wrappers);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@@ -411,13 +410,11 @@ class CompilationStateImpl {
bool baseline_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
- return outstanding_baseline_functions_ == 0;
+ return outstanding_baseline_units_ == 0;
}
bool top_tier_compilation_finished() const {
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
return outstanding_top_tier_functions_ == 0;
}
@@ -473,7 +470,7 @@ class CompilationStateImpl {
// Compilation error, atomically updated. This flag can be updated and read
// using relaxed semantics.
- std::atomic_bool compile_failed_{false};
+ std::atomic<bool> compile_failed_{false};
const int max_background_tasks_ = 0;
@@ -519,7 +516,7 @@ class CompilationStateImpl {
// Callback functions to be called on compilation events.
std::vector<CompilationState::callback_t> callbacks_;
- int outstanding_baseline_functions_ = 0;
+ int outstanding_baseline_units_ = 0;
int outstanding_top_tier_functions_ = 0;
std::vector<uint8_t> compilation_progress_;
@@ -701,6 +698,10 @@ class CompilationUnitBuilder {
native_module->module())) {}
void AddUnits(uint32_t func_index) {
+ if (func_index < native_module_->module()->num_imported_functions) {
+ baseline_units_.emplace_back(func_index, ExecutionTier::kNone);
+ return;
+ }
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
native_module_->enabled_features(), func_index);
@@ -823,7 +824,7 @@ void ValidateSequentially(
bool IsLazyModule(const WasmModule* module) {
return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
+ (FLAG_asm_wasm_lazy_compilation && is_asmjs_module(module));
}
} // namespace
@@ -848,6 +849,8 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
ExecutionTierPair tiers = GetRequestedExecutionTiers(
module, compilation_state->compile_mode(), enabled_features, func_index);
+ DCHECK_LE(native_module->num_imported_functions(), func_index);
+ DCHECK_LT(func_index, native_module->num_functions());
WasmCompilationUnit baseline_unit(func_index, tiers.baseline_tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
@@ -857,7 +860,7 @@ bool CompileLazy(Isolate* isolate, NativeModule* native_module,
// During lazy compilation, we can only get compilation errors when
// {--wasm-lazy-validation} is enabled. Otherwise, the module was fully
// verified before starting its execution.
- DCHECK_IMPLIES(result.failed(), FLAG_wasm_lazy_validation);
+ CHECK_IMPLIES(result.failed(), FLAG_wasm_lazy_validation);
const WasmFunction* func = &module->functions[func_index];
if (result.failed()) {
ErrorThrower thrower(isolate, nullptr);
@@ -972,6 +975,29 @@ bool ExecuteCompilationUnits(
std::vector<WasmCode*> code_vector =
compile_scope->native_module()->AddCompiledCode(
VectorOf(results_to_publish));
+
+ // For import wrapper compilation units, add result to the cache.
+ const NativeModule* native_module = compile_scope->native_module();
+ int num_imported_functions = native_module->num_imported_functions();
+ DCHECK_EQ(code_vector.size(), results_to_publish.size());
+ WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
+ for (WasmCode* code : code_vector) {
+ int func_index = code->index();
+ DCHECK_LE(0, func_index);
+ DCHECK_LT(func_index, native_module->num_functions());
+ if (func_index < num_imported_functions) {
+ FunctionSig* sig = native_module->module()->functions[func_index].sig;
+ WasmImportWrapperCache::CacheKey key(compiler::kDefaultImportCallKind,
+ sig);
+ // If two imported functions have the same key, only one of them should
+ // have been added as a compilation unit. So it is always the first time
+ // we compile a wrapper for this key here.
+ DCHECK_NULL((*cache)[key]);
+ (*cache)[key] = code;
+ code->IncRef();
+ }
+ }
+
compile_scope->compilation_state()->OnFinishedUnits(VectorOf(code_vector));
results_to_publish.clear();
};
@@ -1023,15 +1049,39 @@ bool ExecuteCompilationUnits(
return true;
}
+// Returns the number of units added.
+int AddImportWrapperUnits(NativeModule* native_module,
+ CompilationUnitBuilder* builder) {
+ std::unordered_set<WasmImportWrapperCache::CacheKey,
+ WasmImportWrapperCache::CacheKeyHash>
+ keys;
+ int num_imported_functions = native_module->num_imported_functions();
+ for (int func_index = 0; func_index < num_imported_functions; func_index++) {
+ FunctionSig* sig = native_module->module()->functions[func_index].sig;
+ bool has_bigint_feature = native_module->enabled_features().bigint;
+ if (!IsJSCompatibleSignature(sig, has_bigint_feature)) {
+ continue;
+ }
+ WasmImportWrapperCache::CacheKey key(compiler::kDefaultImportCallKind, sig);
+ auto it = keys.insert(key);
+ if (it.second) {
+ // Ensure that all keys exist in the cache, so that we can populate the
+ // cache later without locking.
+ (*native_module->import_wrapper_cache())[key] = nullptr;
+ builder->AddUnits(func_index);
+ }
+ }
+ return static_cast<int>(keys.size());
+}
+
void InitializeCompilationUnits(NativeModule* native_module) {
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
const bool lazy_module = IsLazyModule(native_module->module());
- compilation_state->InitializeCompilationProgress(lazy_module);
-
ModuleWireBytes wire_bytes(native_module->wire_bytes());
CompilationUnitBuilder builder(native_module);
auto* module = native_module->module();
+
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
@@ -1047,6 +1097,9 @@ void InitializeCompilationUnits(NativeModule* native_module) {
builder.AddUnits(func_index);
}
}
+ int num_import_wrappers = AddImportWrapperUnits(native_module, &builder);
+ compilation_state->InitializeCompilationProgress(lazy_module,
+ num_import_wrappers);
builder.Commit();
}
@@ -1111,9 +1164,12 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
NativeModule* native_module) {
ModuleWireBytes wire_bytes(native_module->wire_bytes());
const bool lazy_module = IsLazyModule(wasm_module);
- if (!FLAG_wasm_lazy_validation &&
+ if (!FLAG_wasm_lazy_validation && wasm_module->origin == kWasmOrigin &&
MayCompriseLazyFunctions(wasm_module, native_module->enabled_features(),
lazy_module)) {
+ // Validate wasm modules for lazy compilation if requested. Never validate
+ // asm.js modules as these are valid by construction (additionally a CHECK
+ // will catch this during lazy compilation).
ValidateSequentially(wasm_module, native_module, isolate->counters(),
isolate->allocator(), thrower, lazy_module,
kOnlyLazyFunctions);
@@ -1256,6 +1312,7 @@ AsyncCompileJob::AsyncCompileJob(
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
resolver_(std::move(resolver)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "new AsyncCompileJob");
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
@@ -1386,6 +1443,8 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
void AsyncCompileJob::FinishCompile() {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "AsyncCompileJob::FinishCompile");
bool is_after_deserialization = !module_object_.is_null();
if (!is_after_deserialization) {
PrepareRuntimeObjects();
@@ -1865,7 +1924,7 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
if (section_code == SectionCode::kUnknownSectionCode) {
Decoder decoder(bytes, offset);
section_code = ModuleDecoder::IdentifyUnknownSection(
- decoder, bytes.begin() + bytes.length());
+ &decoder, bytes.begin() + bytes.length());
if (section_code == SectionCode::kUnknownSectionCode) {
// Skip unknown sections that we do not know how to handle.
return true;
@@ -1902,13 +1961,19 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
const bool lazy_module = job_->wasm_lazy_compilation_;
- compilation_state->InitializeCompilationProgress(lazy_module);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.store(2);
compilation_unit_builder_.reset(
new CompilationUnitBuilder(job_->native_module_.get()));
+
+ NativeModule* native_module = job_->native_module_.get();
+
+ int num_import_wrappers =
+ AddImportWrapperUnits(native_module, compilation_unit_builder_.get());
+ compilation_state->InitializeCompilationProgress(lazy_module,
+ num_import_wrappers);
return true;
}
@@ -2079,16 +2144,16 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
-void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
+void CompilationStateImpl::InitializeCompilationProgress(
+ bool lazy_module, int num_import_wrappers) {
DCHECK(!failed());
auto enabled_features = native_module_->enabled_features();
auto* module = native_module_->module();
base::MutexGuard guard(&callbacks_mutex_);
- DCHECK_EQ(0, outstanding_baseline_functions_);
+ DCHECK_EQ(0, outstanding_baseline_units_);
DCHECK_EQ(0, outstanding_top_tier_functions_);
compilation_progress_.reserve(module->num_declared_functions);
-
int start = module->num_imported_functions;
int end = start + module->num_declared_functions;
for (int func_index = start; func_index < end; func_index++) {
@@ -2104,7 +2169,7 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
strategy == CompileStrategy::kLazyBaselineEagerTopTier);
// Count functions to complete baseline and top tier compilation.
- if (required_for_baseline) outstanding_baseline_functions_++;
+ if (required_for_baseline) outstanding_baseline_units_++;
if (required_for_top_tier) outstanding_top_tier_functions_++;
// Initialize function's compilation progress.
@@ -2120,24 +2185,25 @@ void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
RequiredTopTierField::update(function_progress, required_top_tier);
compilation_progress_.push_back(function_progress);
}
- DCHECK_IMPLIES(lazy_module, outstanding_baseline_functions_ == 0);
+ DCHECK_IMPLIES(lazy_module, outstanding_baseline_units_ == 0);
DCHECK_IMPLIES(lazy_module, outstanding_top_tier_functions_ == 0);
- DCHECK_LE(0, outstanding_baseline_functions_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+ DCHECK_LE(0, outstanding_baseline_units_);
+ DCHECK_LE(outstanding_baseline_units_, outstanding_top_tier_functions_);
+ outstanding_baseline_units_ += num_import_wrappers;
// Trigger callbacks if module needs no baseline or top tier compilation. This
// can be the case for an empty or fully lazy module.
- if (outstanding_baseline_functions_ == 0) {
+ if (outstanding_baseline_units_ == 0) {
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
- }
- if (outstanding_top_tier_functions_ == 0) {
- for (auto& callback : callbacks_) {
- callback(CompilationEvent::kFinishedTopTierCompilation);
+ if (outstanding_top_tier_functions_ == 0) {
+ for (auto& callback : callbacks_) {
+ callback(CompilationEvent::kFinishedTopTierCompilation);
+ }
+ // Clear the callbacks because no more events will be delivered.
+ callbacks_.clear();
}
- // Clear the callbacks because no more events will be delivered.
- callbacks_.clear();
}
}
@@ -2170,10 +2236,10 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
base::MutexGuard guard(&callbacks_mutex_);
- // In case of no outstanding functions we can return early.
+ // In case of no outstanding compilation units we can return early.
// This is especially important for lazy modules that were deserialized.
// Compilation progress was not set up in these cases.
- if (outstanding_baseline_functions_ == 0 &&
+ if (outstanding_baseline_units_ == 0 &&
outstanding_top_tier_functions_ == 0) {
return;
}
@@ -2190,49 +2256,61 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
for (WasmCode* code : code_vector) {
DCHECK_NOT_NULL(code);
- DCHECK_NE(code->tier(), ExecutionTier::kNone);
- native_module_->engine()->LogCode(code);
-
- // Read function's compilation progress.
- // This view on the compilation progress may differ from the actually
- // compiled code. Any lazily compiled function does not contribute to the
- // compilation progress but may publish code to the code manager.
- int slot_index =
- code->index() - native_module_->module()->num_imported_functions;
- uint8_t function_progress = compilation_progress_[slot_index];
- ExecutionTier required_baseline_tier =
- RequiredBaselineTierField::decode(function_progress);
- ExecutionTier required_top_tier =
- RequiredTopTierField::decode(function_progress);
- ExecutionTier reached_tier = ReachedTierField::decode(function_progress);
+ DCHECK_LT(code->index(), native_module_->num_functions());
bool completes_baseline_compilation = false;
bool completes_top_tier_compilation = false;
- // Check whether required baseline or top tier are reached.
- if (reached_tier < required_baseline_tier &&
- required_baseline_tier <= code->tier()) {
- DCHECK_GT(outstanding_baseline_functions_, 0);
- outstanding_baseline_functions_--;
- if (outstanding_baseline_functions_ == 0) {
+ if (code->index() < native_module_->num_imported_functions()) {
+ // Import wrapper.
+ DCHECK_EQ(code->tier(), ExecutionTier::kTurbofan);
+ outstanding_baseline_units_--;
+ if (outstanding_baseline_units_ == 0) {
completes_baseline_compilation = true;
}
- }
- if (reached_tier < required_top_tier && required_top_tier <= code->tier()) {
- DCHECK_GT(outstanding_top_tier_functions_, 0);
- outstanding_top_tier_functions_--;
- if (outstanding_top_tier_functions_ == 0) {
- completes_top_tier_compilation = true;
+ } else {
+ // Function.
+ DCHECK_NE(code->tier(), ExecutionTier::kNone);
+ native_module_->engine()->LogCode(code);
+
+ // Read function's compilation progress.
+ // This view on the compilation progress may differ from the actually
+ // compiled code. Any lazily compiled function does not contribute to the
+ // compilation progress but may publish code to the code manager.
+ int slot_index =
+ code->index() - native_module_->module()->num_imported_functions;
+ uint8_t function_progress = compilation_progress_[slot_index];
+ ExecutionTier required_baseline_tier =
+ RequiredBaselineTierField::decode(function_progress);
+ ExecutionTier required_top_tier =
+ RequiredTopTierField::decode(function_progress);
+ ExecutionTier reached_tier = ReachedTierField::decode(function_progress);
+
+ // Check whether required baseline or top tier are reached.
+ if (reached_tier < required_baseline_tier &&
+ required_baseline_tier <= code->tier()) {
+ DCHECK_GT(outstanding_baseline_units_, 0);
+ outstanding_baseline_units_--;
+ if (outstanding_baseline_units_ == 0) {
+ completes_baseline_compilation = true;
+ }
+ }
+ if (reached_tier < required_top_tier &&
+ required_top_tier <= code->tier()) {
+ DCHECK_GT(outstanding_top_tier_functions_, 0);
+ outstanding_top_tier_functions_--;
+ if (outstanding_top_tier_functions_ == 0) {
+ completes_top_tier_compilation = true;
+ }
}
- }
- // Update function's compilation progress.
- if (code->tier() > reached_tier) {
- compilation_progress_[slot_index] = ReachedTierField::update(
- compilation_progress_[slot_index], code->tier());
+ // Update function's compilation progress.
+ if (code->tier() > reached_tier) {
+ compilation_progress_[slot_index] = ReachedTierField::update(
+ compilation_progress_[slot_index], code->tier());
+ }
+ DCHECK_LE(0, outstanding_baseline_units_);
}
- DCHECK_LE(0, outstanding_baseline_functions_);
- DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
// Trigger callbacks.
if (completes_baseline_compilation) {
@@ -2240,8 +2318,11 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
+ if (outstanding_top_tier_functions_ == 0) {
+ completes_top_tier_compilation = true;
+ }
}
- if (completes_top_tier_compilation) {
+ if (outstanding_baseline_units_ == 0 && completes_top_tier_compilation) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
@@ -2335,24 +2416,83 @@ void CompilationStateImpl::SetError() {
callbacks_.clear();
}
+namespace {
+using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
+using JSToWasmWrapperQueue =
+ WrapperQueue<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>>;
+using JSToWasmWrapperUnitMap =
+ std::unordered_map<JSToWasmWrapperKey,
+ std::unique_ptr<JSToWasmWrapperCompilationUnit>,
+ base::hash<JSToWasmWrapperKey>>;
+
+class CompileJSToWasmWrapperTask final : public CancelableTask {
+ public:
+ CompileJSToWasmWrapperTask(CancelableTaskManager* task_manager,
+ JSToWasmWrapperQueue* queue,
+ JSToWasmWrapperUnitMap* compilation_units)
+ : CancelableTask(task_manager),
+ queue_(queue),
+ compilation_units_(compilation_units) {}
+
+ void RunInternal() override {
+ while (base::Optional<JSToWasmWrapperKey> key = queue_->pop()) {
+ JSToWasmWrapperCompilationUnit* unit = (*compilation_units_)[*key].get();
+ unit->Execute();
+ }
+ }
+
+ private:
+ JSToWasmWrapperQueue* const queue_;
+ JSToWasmWrapperUnitMap* const compilation_units_;
+};
+} // namespace
+
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers) {
- JSToWasmWrapperCache js_to_wasm_cache;
+ JSToWasmWrapperQueue queue;
+ JSToWasmWrapperUnitMap compilation_units;
- // TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
- // optimization we keep the code space unlocked to avoid repeated unlocking
- // because many such wrapper are allocated in sequence below.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ // Prepare compilation units in the main thread.
for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
auto& function = module->functions[exp.index];
- Handle<Code> wrapper_code = js_to_wasm_cache.GetOrCompileJSToWasmWrapper(
- isolate, function.sig, function.imported);
- int wrapper_index =
- GetExportWrapperIndex(module, function.sig, function.imported);
+ JSToWasmWrapperKey key(function.imported, *function.sig);
+ if (queue.insert(key)) {
+ auto unit = base::make_unique<JSToWasmWrapperCompilationUnit>(
+ isolate, function.sig, function.imported);
+ unit->Prepare(isolate);
+ compilation_units.emplace(key, std::move(unit));
+ }
+ }
+
+ // Execute compilation jobs in the background.
+ CancelableTaskManager task_manager;
+ const int max_background_tasks = GetMaxBackgroundTasks();
+ for (int i = 0; i < max_background_tasks; ++i) {
+ auto task = base::make_unique<CompileJSToWasmWrapperTask>(
+ &task_manager, &queue, &compilation_units);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+ }
+
+ // Work in the main thread too.
+ while (base::Optional<JSToWasmWrapperKey> key = queue.pop()) {
+ JSToWasmWrapperCompilationUnit* unit = compilation_units[*key].get();
+ unit->Execute();
+ }
+ task_manager.CancelAndWait();
- export_wrappers->set(wrapper_index, *wrapper_code);
- RecordStats(*wrapper_code, isolate->counters());
+ // Finalize compilation jobs in the main thread.
+ // TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
+ // optimization we keep the code space unlocked to avoid repeated unlocking
+ // because many such wrapper are allocated in sequence below.
+ CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ for (auto& pair : compilation_units) {
+ JSToWasmWrapperKey key = pair.first;
+ JSToWasmWrapperCompilationUnit* unit = pair.second.get();
+ Handle<Code> code = unit->Finalize(isolate);
+ int wrapper_index = GetExportWrapperIndex(module, &key.second, key.first);
+ export_wrappers->set(wrapper_index, *code);
+ RecordStats(*code, isolate->counters());
}
}
@@ -2365,17 +2505,24 @@ WasmCode* CompileImportWrapper(
// yet.
WasmImportWrapperCache::CacheKey key(kind, sig);
DCHECK_NULL((*cache_scope)[key]);
- bool source_positions = native_module->module()->origin == kAsmJsOrigin;
+ bool source_positions = is_asmjs_module(native_module->module());
// Keep the {WasmCode} alive until we explicitly call {IncRef}.
WasmCodeRefScope code_ref_scope;
- WasmCode* wasm_code = compiler::CompileWasmImportCallWrapper(
- wasm_engine, native_module, kind, sig, source_positions);
- (*cache_scope)[key] = wasm_code;
- wasm_code->IncRef();
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ wasm_engine, &env, kind, sig, source_positions);
+ std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
+ std::move(result.source_positions), GetCodeKind(result),
+ ExecutionTier::kNone);
+ WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
+ (*cache_scope)[key] = published_code;
+ published_code->IncRef();
counters->wasm_generated_code_size()->Increment(
- wasm_code->instructions().length());
- counters->wasm_reloc_size()->Increment(wasm_code->reloc_info().length());
- return wasm_code;
+ published_code->instructions().length());
+ counters->wasm_reloc_size()->Increment(published_code->reloc_info().length());
+ return published_code;
}
Handle<Script> CreateWasmScript(Isolate* isolate,
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index d465d6a322..27c7bff868 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -9,6 +9,7 @@
#include <functional>
#include <memory>
+#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/tasks/cancelable-task.h"
#include "src/wasm/compilation-environment.h"
@@ -67,6 +68,33 @@ bool CompileLazy(Isolate*, NativeModule*, int func_index);
int GetMaxBackgroundTasks();
+template <typename Key, typename Hash>
+class WrapperQueue {
+ public:
+ // Removes an arbitrary key from the queue and returns it.
+ // If the queue is empty, returns nullopt.
+ // Thread-safe.
+ base::Optional<Key> pop() {
+ base::Optional<Key> key = base::nullopt;
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ auto it = queue_.begin();
+ if (it != queue_.end()) {
+ key = *it;
+ queue_.erase(it);
+ }
+ return key;
+ }
+
+ // Add the given key to the queue and returns true iff the insert was
+ // successful.
+ // Not thread-safe.
+ bool insert(const Key& key) { return queue_.insert(key).second; }
+
+ private:
+ base::Mutex mutex_;
+ std::unordered_set<Key, Hash> queue_;
+};
+
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
// as foreground and background tasks. Any phase that touches the V8 heap or
@@ -91,6 +119,8 @@ class AsyncCompileJob {
Isolate* isolate() const { return isolate_; }
+ Handle<Context> context() const { return native_context_; }
+
private:
class CompileTask;
class CompileStep;
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 4201b1e76c..56712977b1 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -123,7 +123,7 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
case WasmInitExpr::kRefNullConst:
return kWasmNullRef;
case WasmInitExpr::kRefFuncConst:
- return kWasmAnyFunc;
+ return kWasmFuncRef;
default:
UNREACHABLE();
}
@@ -131,35 +131,35 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
-WireBytesRef consume_string(Decoder& decoder, bool validate_utf8,
+WireBytesRef consume_string(Decoder* decoder, bool validate_utf8,
const char* name) {
- uint32_t length = decoder.consume_u32v("string length");
- uint32_t offset = decoder.pc_offset();
- const byte* string_start = decoder.pc();
+ uint32_t length = decoder->consume_u32v("string length");
+ uint32_t offset = decoder->pc_offset();
+ const byte* string_start = decoder->pc();
// Consume bytes before validation to guarantee that the string is not oob.
if (length > 0) {
- decoder.consume_bytes(length, name);
- if (decoder.ok() && validate_utf8 &&
+ decoder->consume_bytes(length, name);
+ if (decoder->ok() && validate_utf8 &&
!unibrow::Utf8::ValidateEncoding(string_start, length)) {
- decoder.errorf(string_start, "%s: no valid UTF-8 string", name);
+ decoder->errorf(string_start, "%s: no valid UTF-8 string", name);
}
}
- return {offset, decoder.failed() ? 0 : length};
+ return {offset, decoder->failed() ? 0 : length};
}
// An iterator over the sections in a wasm binary module.
// Automatically skips all unknown sections.
class WasmSectionIterator {
public:
- explicit WasmSectionIterator(Decoder& decoder)
+ explicit WasmSectionIterator(Decoder* decoder)
: decoder_(decoder),
section_code_(kUnknownSectionCode),
- section_start_(decoder.pc()),
- section_end_(decoder.pc()) {
+ section_start_(decoder->pc()),
+ section_end_(decoder->pc()) {
next();
}
- inline bool more() const { return decoder_.ok() && decoder_.more(); }
+ inline bool more() const { return decoder_->ok() && decoder_->more(); }
inline SectionCode section_code() const { return section_code_; }
@@ -184,23 +184,23 @@ class WasmSectionIterator {
// Advances to the next section, checking that decoding the current section
// stopped at {section_end_}.
void advance(bool move_to_section_end = false) {
- if (move_to_section_end && decoder_.pc() < section_end_) {
- decoder_.consume_bytes(
- static_cast<uint32_t>(section_end_ - decoder_.pc()));
- }
- if (decoder_.pc() != section_end_) {
- const char* msg = decoder_.pc() < section_end_ ? "shorter" : "longer";
- decoder_.errorf(decoder_.pc(),
- "section was %s than expected size "
- "(%u bytes expected, %zu decoded)",
- msg, section_length(),
- static_cast<size_t>(decoder_.pc() - section_start_));
+ if (move_to_section_end && decoder_->pc() < section_end_) {
+ decoder_->consume_bytes(
+ static_cast<uint32_t>(section_end_ - decoder_->pc()));
+ }
+ if (decoder_->pc() != section_end_) {
+ const char* msg = decoder_->pc() < section_end_ ? "shorter" : "longer";
+ decoder_->errorf(decoder_->pc(),
+ "section was %s than expected size "
+ "(%u bytes expected, %zu decoded)",
+ msg, section_length(),
+ static_cast<size_t>(decoder_->pc() - section_start_));
}
next();
}
private:
- Decoder& decoder_;
+ Decoder* decoder_;
SectionCode section_code_;
const byte* section_start_;
const byte* payload_start_;
@@ -209,17 +209,17 @@ class WasmSectionIterator {
// Reads the section code/name at the current position and sets up
// the embedder fields.
void next() {
- if (!decoder_.more()) {
+ if (!decoder_->more()) {
section_code_ = kUnknownSectionCode;
return;
}
- section_start_ = decoder_.pc();
- uint8_t section_code = decoder_.consume_u8("section code");
+ section_start_ = decoder_->pc();
+ uint8_t section_code = decoder_->consume_u8("section code");
// Read and check the section size.
- uint32_t section_length = decoder_.consume_u32v("section length");
+ uint32_t section_length = decoder_->consume_u32v("section length");
- payload_start_ = decoder_.pc();
- if (decoder_.checkAvailable(section_length)) {
+ payload_start_ = decoder_->pc();
+ if (decoder_->checkAvailable(section_length)) {
// Get the limit of the section within the module.
section_end_ = payload_start_ + section_length;
} else {
@@ -234,19 +234,19 @@ class WasmSectionIterator {
ModuleDecoder::IdentifyUnknownSection(decoder_, section_end_);
// As a side effect, the above function will forward the decoder to after
// the identifier string.
- payload_start_ = decoder_.pc();
+ payload_start_ = decoder_->pc();
} else if (!IsValidSectionCode(section_code)) {
- decoder_.errorf(decoder_.pc(), "unknown section code #0x%02x",
- section_code);
+ decoder_->errorf(decoder_->pc(), "unknown section code #0x%02x",
+ section_code);
section_code = kUnknownSectionCode;
}
- section_code_ = decoder_.failed() ? kUnknownSectionCode
- : static_cast<SectionCode>(section_code);
+ section_code_ = decoder_->failed() ? kUnknownSectionCode
+ : static_cast<SectionCode>(section_code);
- if (section_code_ == kUnknownSectionCode && section_end_ > decoder_.pc()) {
+ if (section_code_ == kUnknownSectionCode && section_end_ > decoder_->pc()) {
// skip to the end of the unknown section.
- uint32_t remaining = static_cast<uint32_t>(section_end_ - decoder_.pc());
- decoder_.consume_bytes(remaining, "section payload");
+ uint32_t remaining = static_cast<uint32_t>(section_end_ - decoder_->pc());
+ decoder_->consume_bytes(remaining, "section payload");
}
}
};
@@ -259,13 +259,13 @@ class ModuleDecoderImpl : public Decoder {
explicit ModuleDecoderImpl(const WasmFeatures& enabled, ModuleOrigin origin)
: Decoder(nullptr, nullptr),
enabled_features_(enabled),
- origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {}
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsSloppyOrigin : origin) {}
ModuleDecoderImpl(const WasmFeatures& enabled, const byte* module_start,
const byte* module_end, ModuleOrigin origin)
: Decoder(module_start, module_end),
enabled_features_(enabled),
- origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
+ origin_(FLAG_assume_asmjs_origin ? kAsmJsSloppyOrigin : origin) {
if (end_ < start_) {
error(start_, "end is less than start");
end_ = start_;
@@ -520,8 +520,8 @@ class ModuleDecoderImpl : public Decoder {
});
WasmImport* import = &module_->import_table.back();
const byte* pos = pc_;
- import->module_name = consume_string(*this, true, "module name");
- import->field_name = consume_string(*this, true, "field name");
+ import->module_name = consume_string(this, true, "module name");
+ import->field_name = consume_string(this, true, "field name");
import->kind =
static_cast<ImportExportKindCode>(consume_u8("import kind"));
switch (import->kind) {
@@ -550,7 +550,7 @@ class ModuleDecoderImpl : public Decoder {
table->imported = true;
ValueType type = consume_reference_type();
if (!enabled_features_.anyref) {
- if (type != kWasmAnyFunc) {
+ if (type != kWasmFuncRef) {
error(pc_ - 1, "invalid table type");
break;
}
@@ -635,7 +635,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeTableSection() {
// TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the
// implementation of AnyRef landed.
- uint32_t max_count = enabled_features_.anyref ? 10 : kV8MaxWasmTables;
+ uint32_t max_count = enabled_features_.anyref ? 100000 : kV8MaxWasmTables;
uint32_t table_count = consume_count("table count", max_count);
for (uint32_t i = 0; ok() && i < table_count; i++) {
@@ -694,7 +694,7 @@ class ModuleDecoderImpl : public Decoder {
});
WasmExport* exp = &module_->export_table.back();
- exp->name = consume_string(*this, true, "field name");
+ exp->name = consume_string(this, true, "field name");
const byte* pos = pc();
exp->kind = static_cast<ImportExportKindCode>(consume_u8("export kind"));
@@ -746,7 +746,7 @@ class ModuleDecoderImpl : public Decoder {
}
}
// Check for duplicate exports (except for asm.js).
- if (ok() && origin_ != kAsmJsOrigin && module_->export_table.size() > 1) {
+ if (ok() && origin_ == kWasmOrigin && module_->export_table.size() > 1) {
std::vector<WasmExport> sorted_exports(module_->export_table);
auto cmp_less = [this](const WasmExport& a, const WasmExport& b) {
@@ -808,16 +808,16 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
- if (!ValueTypes::IsSubType(module_->tables[table_index].type,
- kWasmAnyFunc)) {
+ if (!ValueTypes::IsSubType(kWasmFuncRef,
+ module_->tables[table_index].type)) {
errorf(pos,
- "Invalid element segment. Table %u is not of type AnyFunc",
+ "Invalid element segment. Table %u is not of type FuncRef",
table_index);
break;
}
} else {
ValueType type = consume_reference_type();
- if (!ValueTypes::IsSubType(type, kWasmAnyFunc)) {
+ if (!ValueTypes::IsSubType(kWasmFuncRef, type)) {
error(pc_ - 1, "invalid element segment type");
break;
}
@@ -957,7 +957,7 @@ class ModuleDecoderImpl : public Decoder {
// Decode module name, ignore the rest.
// Function and local names will be decoded when needed.
if (name_type == NameSectionKindCode::kModule) {
- WireBytesRef name = consume_string(inner, false, "module name");
+ WireBytesRef name = consume_string(&inner, false, "module name");
if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
} else {
inner.consume_bytes(name_payload_len, "name subsection payload");
@@ -970,7 +970,7 @@ class ModuleDecoderImpl : public Decoder {
void DecodeSourceMappingURLSection() {
Decoder inner(start_, pc_, end_, buffer_offset_);
- WireBytesRef url = wasm::consume_string(inner, true, "module name");
+ WireBytesRef url = wasm::consume_string(&inner, true, "module name");
if (inner.ok() &&
!has_seen_unordered_section(kSourceMappingURLSectionCode)) {
const byte* url_start =
@@ -1128,7 +1128,7 @@ class ModuleDecoderImpl : public Decoder {
offset += 8;
Decoder decoder(start_ + offset, end_, offset);
- WasmSectionIterator section_iter(decoder);
+ WasmSectionIterator section_iter(&decoder);
while (ok() && section_iter.more()) {
// Shift the offset by the section header length
@@ -1269,7 +1269,7 @@ class ModuleDecoderImpl : public Decoder {
ValueTypes::TypeName(module->globals[other_index].type));
}
} else {
- if (!ValueTypes::IsSubType(global->type, TypeOf(module, global->init))) {
+ if (!ValueTypes::IsSubType(TypeOf(module, global->init), global->type)) {
errorf(pos, "type error in global initialization, expected %s, got %s",
ValueTypes::TypeName(global->type),
ValueTypes::TypeName(TypeOf(module, global->init)));
@@ -1373,32 +1373,33 @@ class ModuleDecoderImpl : public Decoder {
uint32_t consume_func_index(WasmModule* module, WasmFunction** func,
const char* name) {
- return consume_index(name, module->functions, func);
+ return consume_index(name, &module->functions, func);
}
uint32_t consume_global_index(WasmModule* module, WasmGlobal** global) {
- return consume_index("global index", module->globals, global);
+ return consume_index("global index", &module->globals, global);
}
uint32_t consume_table_index(WasmModule* module, WasmTable** table) {
- return consume_index("table index", module->tables, table);
+ return consume_index("table index", &module->tables, table);
}
uint32_t consume_exception_index(WasmModule* module, WasmException** except) {
- return consume_index("exception index", module->exceptions, except);
+ return consume_index("exception index", &module->exceptions, except);
}
template <typename T>
- uint32_t consume_index(const char* name, std::vector<T>& vector, T** ptr) {
+ uint32_t consume_index(const char* name, std::vector<T>* vector, T** ptr) {
const byte* pos = pc_;
uint32_t index = consume_u32v(name);
- if (index >= vector.size()) {
+ if (index >= vector->size()) {
errorf(pos, "%s %u out of bounds (%d entr%s)", name, index,
- static_cast<int>(vector.size()), vector.size() == 1 ? "y" : "ies");
+ static_cast<int>(vector->size()),
+ vector->size() == 1 ? "y" : "ies");
*ptr = nullptr;
return 0;
}
- *ptr = &vector[index];
+ *ptr = &(*vector)[index];
return index;
}
@@ -1594,14 +1595,14 @@ class ModuleDecoderImpl : public Decoder {
case kLocalS128:
if (enabled_features_.simd) return kWasmS128;
break;
- case kLocalAnyFunc:
- if (enabled_features_.anyref) return kWasmAnyFunc;
+ case kLocalFuncRef:
+ if (enabled_features_.anyref) return kWasmFuncRef;
break;
case kLocalAnyRef:
if (enabled_features_.anyref) return kWasmAnyRef;
break;
- case kLocalExceptRef:
- if (enabled_features_.eh) return kWasmExceptRef;
+ case kLocalExnRef:
+ if (enabled_features_.eh) return kWasmExnRef;
break;
default:
break;
@@ -1617,8 +1618,8 @@ class ModuleDecoderImpl : public Decoder {
byte val = consume_u8("reference type");
ValueTypeCode t = static_cast<ValueTypeCode>(val);
switch (t) {
- case kLocalAnyFunc:
- return kWasmAnyFunc;
+ case kLocalFuncRef:
+ return kWasmFuncRef;
case kLocalAnyRef:
if (!enabled_features_.anyref) {
error(pc_ - 1,
@@ -1680,45 +1681,41 @@ class ModuleDecoderImpl : public Decoder {
void consume_segment_header(const char* name, bool* is_active,
uint32_t* index, WasmInitExpr* offset) {
const byte* pos = pc();
- // In the MVP, this is a table or memory index field that must be 0, but
- // we've repurposed it as a flags field in the bulk memory proposal.
- uint32_t flags;
- if (enabled_features_.bulk_memory) {
- flags = consume_u32v("flags");
- if (failed()) return;
- } else {
- // Without the bulk memory proposal, we should still read the table
- // index. This is the same as reading the `ActiveWithIndex` flag with
- // the bulk memory proposal.
- flags = SegmentFlags::kActiveWithIndex;
+ uint32_t flag = consume_u32v("flag");
+
+ // Some flag values are only valid for specific proposals.
+ if (flag == SegmentFlags::kPassive) {
+ if (!enabled_features_.bulk_memory) {
+ error(
+ "Passive element segments require --experimental-wasm-bulk-memory");
+ return;
+ }
+ } else if (flag == SegmentFlags::kActiveWithIndex) {
+ if (!(enabled_features_.bulk_memory || enabled_features_.anyref)) {
+ error(
+ "Element segments with table indices require "
+ "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ return;
+ }
+ } else if (flag != SegmentFlags::kActiveNoIndex) {
+ errorf(pos, "illegal flag value %u. Must be 0, 1, or 2", flag);
+ return;
}
- bool read_index;
- bool read_offset;
- if (flags == SegmentFlags::kActiveNoIndex) {
+ // We know now that the flag is valid. Time to read the rest.
+ if (flag == SegmentFlags::kActiveNoIndex) {
*is_active = true;
- read_index = false;
- read_offset = true;
- } else if (flags == SegmentFlags::kPassive) {
+ *index = 0;
+ *offset = consume_init_expr(module_.get(), kWasmI32);
+ return;
+ }
+ if (flag == SegmentFlags::kPassive) {
*is_active = false;
- read_index = false;
- read_offset = false;
- } else if (flags == SegmentFlags::kActiveWithIndex) {
- *is_active = true;
- read_index = true;
- read_offset = true;
- } else {
- errorf(pos, "illegal flag value %u. Must be 0, 1, or 2", flags);
return;
}
-
- if (read_index) {
+ if (flag == SegmentFlags::kActiveWithIndex) {
+ *is_active = true;
*index = consume_u32v(name);
- } else {
- *index = 0;
- }
-
- if (read_offset) {
*offset = consume_init_expr(module_.get(), kWasmI32);
}
}
@@ -1833,17 +1830,17 @@ ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
return impl_->FinishDecoding(verify_functions);
}
-SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
+SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder* decoder,
const byte* end) {
WireBytesRef string = consume_string(decoder, true, "section name");
- if (decoder.failed() || decoder.pc() > end) {
+ if (decoder->failed() || decoder->pc() > end) {
return kUnknownSectionCode;
}
const byte* section_name_start =
- decoder.start() + decoder.GetBufferRelativeOffset(string.offset());
+ decoder->start() + decoder->GetBufferRelativeOffset(string.offset());
TRACE(" +%d section name : \"%.*s\"\n",
- static_cast<int>(section_name_start - decoder.start()),
+ static_cast<int>(section_name_start - decoder->start()),
string.length() < 20 ? string.length() : 20, section_name_start);
if (string.length() == num_chars(kNameString) &&
@@ -1989,20 +1986,20 @@ std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
namespace {
-bool FindNameSection(Decoder& decoder) {
+bool FindNameSection(Decoder* decoder) {
static constexpr int kModuleHeaderSize = 8;
- decoder.consume_bytes(kModuleHeaderSize, "module header");
+ decoder->consume_bytes(kModuleHeaderSize, "module header");
WasmSectionIterator section_iter(decoder);
- while (decoder.ok() && section_iter.more() &&
+ while (decoder->ok() && section_iter.more() &&
section_iter.section_code() != kNameSectionCode) {
section_iter.advance(true);
}
if (!section_iter.more()) return false;
// Reset the decoder to not read beyond the name section end.
- decoder.Reset(section_iter.payload(), decoder.pc_offset());
+ decoder->Reset(section_iter.payload(), decoder->pc_offset());
return true;
}
@@ -2014,7 +2011,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
DCHECK(names->empty());
Decoder decoder(module_start, module_end);
- if (!FindNameSection(decoder)) return;
+ if (!FindNameSection(&decoder)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
@@ -2031,7 +2028,7 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
for (; decoder.ok() && functions_count > 0; --functions_count) {
uint32_t function_index = decoder.consume_u32v("function index");
- WireBytesRef name = consume_string(decoder, false, "function name");
+ WireBytesRef name = consume_string(&decoder, false, "function name");
// Be lenient with errors in the name section: Ignore non-UTF8 names. You
// can even assign to the same function multiple times (last valid one
@@ -2049,7 +2046,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
DCHECK(result->names.empty());
Decoder decoder(module_start, module_end);
- if (!FindNameSection(decoder)) return;
+ if (!FindNameSection(&decoder)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
@@ -2074,7 +2071,7 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
uint32_t num_names = decoder.consume_u32v("namings count");
for (uint32_t k = 0; k < num_names; ++k) {
uint32_t local_index = decoder.consume_u32v("local index");
- WireBytesRef name = consume_string(decoder, true, "local name");
+ WireBytesRef name = consume_string(&decoder, true, "local name");
if (!decoder.ok()) break;
if (local_index > kMaxInt) continue;
func_names.max_local_index =
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 07d6e66019..8e121c9d30 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -139,12 +139,12 @@ class ModuleDecoder {
// Translates the unknown section that decoder is pointing to to an extended
// SectionCode if the unknown section is known to decoder.
- // The decoder is expected to point after the section lenght and just before
+ // The decoder is expected to point after the section length and just before
// the identifier string of the unknown section.
// If a SectionCode other than kUnknownSectionCode is returned, the decoder
// will point right after the identifier string. Otherwise, the position is
// undefined.
- static SectionCode IdentifyUnknownSection(Decoder& decoder, const byte* end);
+ static SectionCode IdentifyUnknownSection(Decoder* decoder, const byte* end);
private:
const WasmFeatures enabled_features_;
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 8293674826..a4b0139ea4 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -25,6 +25,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadLittleEndianValue;
+using base::WriteLittleEndianValue;
+
namespace {
byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
@@ -48,35 +51,8 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
}
}
-// Queue of import wrapper keys to compile for an instance.
-class ImportWrapperQueue {
- public:
- // Removes an arbitrary cache key from the queue and returns it.
- // If the queue is empty, returns nullopt.
- // Thread-safe.
- base::Optional<WasmImportWrapperCache::CacheKey> pop() {
- base::Optional<WasmImportWrapperCache::CacheKey> key = base::nullopt;
- base::LockGuard<base::Mutex> lock(&mutex_);
- auto it = queue_.begin();
- if (it != queue_.end()) {
- key = *it;
- queue_.erase(it);
- }
- return key;
- }
-
- // Add the given key to the queue.
- // Not thread-safe.
- void insert(const WasmImportWrapperCache::CacheKey& key) {
- queue_.insert(key);
- }
-
- private:
- base::Mutex mutex_;
- std::unordered_set<WasmImportWrapperCache::CacheKey,
- WasmImportWrapperCache::CacheKeyHash>
- queue_;
-};
+using ImportWrapperQueue = WrapperQueue<WasmImportWrapperCache::CacheKey,
+ WasmImportWrapperCache::CacheKeyHash>;
class CompileImportWrapperTask final : public CancelableTask {
public:
@@ -200,9 +176,9 @@ class InstanceBuilder {
Handle<String> import_name,
Handle<Object> value);
- // Initialize imported tables of type anyfunc.
+ // Initialize imported tables of type funcref.
bool InitializeImportedIndirectFunctionTable(
- Handle<WasmInstanceObject> instance, int import_index,
+ Handle<WasmInstanceObject> instance, int table_index, int import_index,
Handle<WasmTableObject> table_object);
// Process a single imported table.
@@ -255,7 +231,7 @@ class InstanceBuilder {
// and globals.
void ProcessExports(Handle<WasmInstanceObject> instance);
- void InitializeTables(Handle<WasmInstanceObject> instance);
+ void InitializeIndirectFunctionTables(Handle<WasmInstanceObject> instance);
void LoadTableSegments(Handle<WasmInstanceObject> instance);
@@ -336,8 +312,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
memory->set_is_detachable(false);
DCHECK_IMPLIES(native_module->use_trap_handler(),
- module_->origin == kAsmJsOrigin ||
- memory->is_wasm_memory() ||
+ is_asmjs_module(module_) || memory->is_wasm_memory() ||
memory->backing_store() == nullptr);
} else if (initial_pages > 0 || native_module->use_trap_handler()) {
// We need to unconditionally create a guard region if using trap handlers,
@@ -421,15 +396,34 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Set up table storage space.
//--------------------------------------------------------------------------
int table_count = static_cast<int>(module_->tables.size());
- Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
- for (int i = module_->num_imported_tables; i < table_count; i++) {
- const WasmTable& table = module_->tables[i];
- Handle<WasmTableObject> table_obj = WasmTableObject::New(
- isolate_, table.type, table.initial_size, table.has_maximum_size,
- table.maximum_size, nullptr);
- tables->set(i, *table_obj);
+ {
+ Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
+ for (int i = module_->num_imported_tables; i < table_count; i++) {
+ const WasmTable& table = module_->tables[i];
+ Handle<WasmTableObject> table_obj = WasmTableObject::New(
+ isolate_, table.type, table.initial_size, table.has_maximum_size,
+ table.maximum_size, nullptr);
+ tables->set(i, *table_obj);
+ }
+ instance->set_tables(*tables);
+ }
+
+ {
+ Handle<FixedArray> tables = isolate_->factory()->NewFixedArray(table_count);
+ // Table 0 is handled specially. See {InitializeIndirectFunctionTable} for
+ // the initilization. All generated and runtime code will use this optimized
+ // shortcut in the instance. Hence it is safe to start with table 1 in the
+ // iteration below.
+ for (int i = 1; i < table_count; ++i) {
+ const WasmTable& table = module_->tables[i];
+ if (table.type == kWasmFuncRef) {
+ Handle<WasmIndirectFunctionTable> table_obj =
+ WasmIndirectFunctionTable::New(isolate_, table.initial_size);
+ tables->set(i, *table_obj);
+ }
+ }
+ instance->set_indirect_function_tables(*tables);
}
- instance->set_tables(*tables);
//--------------------------------------------------------------------------
// Process the imports for the module.
@@ -446,7 +440,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Initialize the indirect tables.
//--------------------------------------------------------------------------
if (table_count > 0) {
- InitializeTables(instance);
+ InitializeIndirectFunctionTables(instance);
}
//--------------------------------------------------------------------------
@@ -550,9 +544,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
if (module_->start_function_index >= 0) {
int start_index = module_->start_function_index;
auto& function = module_->functions[start_index];
- Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
- isolate_, function.sig, function.imported)
- .ToHandleChecked();
+ Handle<Code> wrapper_code =
+ JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ isolate_, function.sig, function.imported);
// TODO(clemensh): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
@@ -755,8 +749,8 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
tagged_globals_->set(global.offset, *value->GetRef());
break;
}
@@ -800,7 +794,7 @@ void InstanceBuilder::SanitizeImports() {
int int_index = static_cast<int>(index);
MaybeHandle<Object> result =
- module_->origin == kAsmJsOrigin
+ is_asmjs_module(module_)
? LookupImportAsm(int_index, import_name)
: LookupImport(int_index, module_name, import_name);
if (thrower_->error()) {
@@ -842,8 +836,10 @@ bool InstanceBuilder::ProcessImportedFunction(
}
auto js_receiver = Handle<JSReceiver>::cast(value);
FunctionSig* expected_sig = module_->functions[func_index].sig;
- auto kind = compiler::GetWasmImportCallKind(js_receiver, expected_sig,
- enabled_.bigint);
+ auto resolved = compiler::ResolveWasmImportCall(js_receiver, expected_sig,
+ enabled_.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
+ js_receiver = resolved.second;
switch (kind) {
case compiler::WasmImportCallKind::kLinkError:
ReportLinkError("imported function does not match the expected type",
@@ -851,7 +847,7 @@ bool InstanceBuilder::ProcessImportedFunction(
return false;
case compiler::WasmImportCallKind::kWasmToWasm: {
// The imported function is a WASM function from another instance.
- auto imported_function = Handle<WasmExportedFunction>::cast(value);
+ auto imported_function = Handle<WasmExportedFunction>::cast(js_receiver);
Handle<WasmInstanceObject> imported_instance(
imported_function->instance(), isolate_);
// The import reference is the instance object itself.
@@ -866,7 +862,8 @@ bool InstanceBuilder::ProcessImportedFunction(
}
case compiler::WasmImportCallKind::kWasmToCapi: {
NativeModule* native_module = instance->module_object().native_module();
- Address host_address = WasmCapiFunction::cast(*value).GetHostCallTarget();
+ Address host_address =
+ WasmCapiFunction::cast(*js_receiver).GetHostCallTarget();
WasmCodeRefScope code_ref_scope;
WasmCode* wasm_code = compiler::CompileWasmCapiCallWrapper(
isolate_->wasm_engine(), native_module, expected_sig, host_address);
@@ -904,14 +901,12 @@ bool InstanceBuilder::ProcessImportedFunction(
}
bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
- Handle<WasmInstanceObject> instance, int import_index,
+ Handle<WasmInstanceObject> instance, int table_index, int import_index,
Handle<WasmTableObject> table_object) {
int imported_table_size = table_object->entries().length();
// Allocate a new dispatch table.
- if (!instance->has_indirect_function_table()) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, imported_table_size);
- }
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table_index, imported_table_size);
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
for (int i = 0; i < imported_table_size; ++i) {
@@ -919,15 +914,22 @@ bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
bool is_null;
MaybeHandle<WasmInstanceObject> maybe_target_instance;
int function_index;
+ MaybeHandle<WasmJSFunction> maybe_js_function;
WasmTableObject::GetFunctionTableEntry(isolate_, table_object, i, &is_valid,
&is_null, &maybe_target_instance,
- &function_index);
+ &function_index, &maybe_js_function);
if (!is_valid) {
thrower_->LinkError("table import %d[%d] is not a wasm function",
import_index, i);
return false;
}
if (is_null) continue;
+ Handle<WasmJSFunction> js_function;
+ if (maybe_js_function.ToHandle(&js_function)) {
+ WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ isolate_, instance, table_index, i, js_function);
+ continue;
+ }
Handle<WasmInstanceObject> target_instance =
maybe_target_instance.ToHandleChecked();
@@ -939,7 +941,7 @@ bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
// Look up the signature's canonical id. If there is no canonical
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
- IndirectFunctionTableEntry(instance, i)
+ IndirectFunctionTableEntry(instance, table_index, i)
.Set(module_->signature_map.Find(*sig), target_instance,
function_index);
}
@@ -958,7 +960,6 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
}
const WasmTable& table = module_->tables[table_index];
- instance->tables().set(table_index, *value);
auto table_object = Handle<WasmTableObject>::cast(value);
int imported_table_size = table_object->entries().length();
@@ -995,13 +996,13 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
return false;
}
- // The indirect function table only exists for table 0.
- if (table.type == kWasmAnyFunc && table_index == 0 &&
- !InitializeImportedIndirectFunctionTable(instance, import_index,
- table_object)) {
+ if (table.type == kWasmFuncRef &&
+ !InitializeImportedIndirectFunctionTable(instance, table_index,
+ import_index, table_object)) {
return false;
}
+ instance->tables().set(table_index, *value);
return true;
}
@@ -1068,7 +1069,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
return false;
}
- bool is_sub_type = ValueTypes::IsSubType(global.type, global_object->type());
+ bool is_sub_type = ValueTypes::IsSubType(global_object->type(), global.type);
bool is_same_type = global_object->type() == global.type;
bool valid_type = global.mutability ? is_same_type : is_sub_type;
@@ -1129,7 +1130,7 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
module_name, import_name);
return false;
}
- if (module_->origin == kAsmJsOrigin) {
+ if (is_asmjs_module(module_)) {
// Accepting {JSFunction} on top of just primitive values here is a
// workaround to support legacy asm.js code with broken binding. Note
// that using {NaN} (or Smi::kZero) here is what using the observable
@@ -1162,11 +1163,11 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
if (ValueTypes::IsReferenceType(global.type)) {
// There shouldn't be any null-ref globals.
DCHECK_NE(ValueType::kWasmNullRef, global.type);
- if (global.type == ValueType::kWasmAnyFunc) {
+ if (global.type == ValueType::kWasmFuncRef) {
if (!value->IsNull(isolate_) &&
!WasmExportedFunction::IsWasmExportedFunction(*value)) {
ReportLinkError(
- "imported anyfunc global must be null or an exported function",
+ "imported funcref global must be null or an exported function",
import_index, module_name, import_name);
return false;
}
@@ -1217,8 +1218,9 @@ void InstanceBuilder::CompileImportWrappers(
auto js_receiver = Handle<JSReceiver>::cast(value);
uint32_t func_index = module_->import_table[index].index;
FunctionSig* sig = module_->functions[func_index].sig;
- auto kind =
- compiler::GetWasmImportCallKind(js_receiver, sig, enabled_.bigint);
+ auto resolved =
+ compiler::ResolveWasmImportCall(js_receiver, sig, enabled_.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
if (kind == compiler::WasmImportCallKind::kWasmToWasm ||
kind == compiler::WasmImportCallKind::kLinkError ||
kind == compiler::WasmImportCallKind::kWasmToCapi) {
@@ -1431,7 +1433,7 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages,
bool InstanceBuilder::NeedsWrappers() const {
if (module_->num_exported_functions > 0) return true;
for (auto& table : module_->tables) {
- if (table.type == kWasmAnyFunc) return true;
+ if (table.type == kWasmFuncRef) return true;
}
return false;
}
@@ -1458,6 +1460,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
Handle<JSObject> exports_object;
+ MaybeHandle<String> single_function_name;
bool is_asm_js = false;
switch (module_->origin) {
case kWasmOrigin: {
@@ -1465,10 +1468,13 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
exports_object = isolate_->factory()->NewJSObjectWithNullProto();
break;
}
- case kAsmJsOrigin: {
+ case kAsmJsSloppyOrigin:
+ case kAsmJsStrictOrigin: {
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate_->native_context()->object_function(), isolate_);
exports_object = isolate_->factory()->NewJSObject(object_function);
+ single_function_name = isolate_->factory()->InternalizeUtf8String(
+ AsmJs::kSingleFunctionName);
is_asm_js = true;
break;
}
@@ -1477,9 +1483,6 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
instance->set_exports_object(*exports_object);
- Handle<String> single_function_name =
- isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
-
PropertyDescriptor desc;
desc.set_writable(is_asm_js);
desc.set_enumerable(true);
@@ -1490,14 +1493,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
isolate_, module_object_, exp.name)
.ToHandleChecked();
- Handle<JSObject> export_to;
- if (is_asm_js && exp.kind == kExternalFunction &&
- String::Equals(isolate_, name, single_function_name)) {
- export_to = instance;
- } else {
- export_to = exports_object;
- }
-
+ Handle<JSObject> export_to = exports_object;
switch (exp.kind) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
@@ -1505,8 +1501,13 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
MaybeHandle<WasmExportedFunction> wasm_exported_function =
WasmInstanceObject::GetOrCreateWasmExportedFunction(
isolate_, instance, exp.index);
-
desc.set_value(wasm_exported_function.ToHandleChecked());
+
+ if (is_asm_js &&
+ String::Equals(isolate_, name,
+ single_function_name.ToHandleChecked())) {
+ export_to = instance;
+ }
break;
}
case kExternalTable: {
@@ -1611,21 +1612,21 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
}
}
-void InstanceBuilder::InitializeTables(Handle<WasmInstanceObject> instance) {
- size_t table_count = module_->tables.size();
- for (size_t index = 0; index < table_count; ++index) {
- const WasmTable& table = module_->tables[index];
+void InstanceBuilder::InitializeIndirectFunctionTables(
+ Handle<WasmInstanceObject> instance) {
+ for (int i = 0; i < static_cast<int>(module_->tables.size()); ++i) {
+ const WasmTable& table = module_->tables[i];
- if (!instance->has_indirect_function_table() &&
- table.type == kWasmAnyFunc) {
+ if (table.type == kWasmFuncRef) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, table.initial_size);
+ instance, i, table.initial_size);
}
}
}
bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<WasmTableObject> table_object,
+ uint32_t table_index,
const WasmElemSegment& elem_segment, uint32_t dst,
uint32_t src, size_t count) {
// TODO(wasm): Move this functionality into wasm-objects, since it is used
@@ -1642,8 +1643,8 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
int entry_index = static_cast<int>(dst + i);
if (func_index == WasmElemSegment::kNullIndex) {
- if (table_object->type() == kWasmAnyFunc) {
- IndirectFunctionTableEntry(instance, entry_index).clear();
+ if (table_object->type() == kWasmFuncRef) {
+ IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
}
WasmTableObject::Set(isolate, table_object, entry_index,
isolate->factory()->null_value());
@@ -1652,13 +1653,10 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
const WasmFunction* function = &module->functions[func_index];
- // Update the local dispatch table first if necessary. We only have to
- // update the dispatch table if the first table of the instance is changed.
- // For all other tables, function calls do not use a dispatch table at
- // the moment.
- if (elem_segment.table_index == 0 && table_object->type() == kWasmAnyFunc) {
+ // Update the local dispatch table first if necessary.
+ if (table_object->type() == kWasmFuncRef) {
uint32_t sig_id = module->signature_ids[function->sig_index];
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, instance, func_index);
}
@@ -1699,6 +1697,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
// Passive segments are not copied during instantiation.
if (!elem_segment.active) continue;
+ uint32_t table_index = elem_segment.table_index;
uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
uint32_t src = 0;
size_t count = elem_segment.entries.size();
@@ -1708,7 +1707,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
handle(WasmTableObject::cast(
instance->tables().get(elem_segment.table_index)),
isolate_),
- elem_segment, dst, src, count);
+ table_index, elem_segment, dst, src, count);
if (enabled_.bulk_memory) {
if (!success) {
thrower_->LinkError("table initializer is out of bounds");
@@ -1724,7 +1723,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
int table_count = static_cast<int>(module_->tables.size());
for (int index = 0; index < table_count; ++index) {
- if (module_->tables[index].type == kWasmAnyFunc) {
+ if (module_->tables[index].type == kWasmFuncRef) {
auto table_object = handle(
WasmTableObject::cast(instance->tables().get(index)), isolate_);
@@ -1749,19 +1748,12 @@ void InstanceBuilder::InitializeExceptions(
bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
- // This code path is only used for passive element segments with the
- // table.init instruction. This instruction was introduced in the
- // bulk-memory-operations proposal. At the moment, table.init can only operate
- // on table-0. If table.init should work for tables with higher indices, then
- // we have to adjust the code in {LoadElemSegmentImpl}. The code there uses
- // {IndirectFunctionTableEntry} at the moment, which only works for table-0.
- CHECK_EQ(table_index, 0);
auto& elem_segment = instance->module()->elem_segments[segment_index];
return LoadElemSegmentImpl(
isolate, instance,
handle(WasmTableObject::cast(instance->tables().get(table_index)),
isolate),
- elem_segment, dst, src, count);
+ table_index, elem_segment, dst, src, count);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index 49fd2892eb..bca5c2b941 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -16,6 +16,16 @@ class Signature;
namespace wasm {
+// Type lattice: For any two types connected by a line, the type at the bottom
+// is a subtype of the other type.
+//
+// AnyRef
+// / \
+// FuncRef ExnRef
+// \ /
+// I32 I64 F32 F64 NullRef
+// \ \ \ \ /
+// ------------ Bottom
enum ValueType : uint8_t {
kWasmStmt,
kWasmI32,
@@ -24,10 +34,10 @@ enum ValueType : uint8_t {
kWasmF64,
kWasmS128,
kWasmAnyRef,
- kWasmAnyFunc,
+ kWasmFuncRef,
kWasmNullRef,
- kWasmExceptRef,
- kWasmVar,
+ kWasmExnRef,
+ kWasmBottom,
};
using FunctionSig = Signature<ValueType>;
@@ -178,25 +188,31 @@ class StoreType {
// A collection of ValueType-related static methods.
class V8_EXPORT_PRIVATE ValueTypes {
public:
- static inline bool IsSubType(ValueType expected, ValueType actual) {
+ static inline bool IsSubType(ValueType actual, ValueType expected) {
return (expected == actual) ||
(expected == kWasmAnyRef && actual == kWasmNullRef) ||
- (expected == kWasmAnyRef && actual == kWasmAnyFunc) ||
- (expected == kWasmAnyRef && actual == kWasmExceptRef) ||
- (expected == kWasmAnyFunc && actual == kWasmNullRef) ||
- // TODO(mstarzinger): For now we treat "null_ref" as a sub-type of
- // "except_ref", which is correct but might change. See here:
+ (expected == kWasmAnyRef && actual == kWasmFuncRef) ||
+ (expected == kWasmAnyRef && actual == kWasmExnRef) ||
+ (expected == kWasmFuncRef && actual == kWasmNullRef) ||
+ // TODO(mstarzinger): For now we treat "nullref" as a sub-type of
+ // "exnref", which is correct but might change. See here:
// https://github.com/WebAssembly/exception-handling/issues/55
- (expected == kWasmExceptRef && actual == kWasmNullRef);
+ (expected == kWasmExnRef && actual == kWasmNullRef);
}
static inline bool IsReferenceType(ValueType type) {
- // This function assumes at the moment that it is never called with
- // {kWasmNullRef}. If this assumption is wrong, it should be added to the
- // result calculation below.
- DCHECK_NE(type, kWasmNullRef);
- return type == kWasmAnyRef || type == kWasmAnyFunc ||
- type == kWasmExceptRef;
+ return type == kWasmAnyRef || type == kWasmFuncRef || type == kWasmExnRef;
+ }
+
+ static inline ValueType CommonSubType(ValueType a, ValueType b) {
+ if (a == b) return a;
+ // The only sub type of any value type is {bot}.
+ if (!IsReferenceType(a) || !IsReferenceType(b)) return kWasmBottom;
+ if (IsSubType(a, b)) return a;
+ if (IsSubType(b, a)) return b;
+ // {a} and {b} are not each other's subtype. The biggest sub-type of all
+ // reference types is {kWasmNullRef}.
+ return kWasmNullRef;
}
static byte MemSize(MachineType type) {
@@ -214,8 +230,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmS128:
return 16;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return kSystemPointerSize;
default:
UNREACHABLE();
@@ -232,6 +248,10 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 3;
case kWasmS128:
return 4;
+ case kWasmAnyRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
+ return kSystemPointerSizeLog2;
default:
UNREACHABLE();
}
@@ -253,10 +273,10 @@ class V8_EXPORT_PRIVATE ValueTypes {
return kLocalS128;
case kWasmAnyRef:
return kLocalAnyRef;
- case kWasmAnyFunc:
- return kLocalAnyFunc;
- case kWasmExceptRef:
- return kLocalExceptRef;
+ case kWasmFuncRef:
+ return kLocalFuncRef;
+ case kWasmExnRef:
+ return kLocalExnRef;
case kWasmStmt:
return kLocalVoid;
default:
@@ -275,8 +295,8 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return MachineType::Float64();
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
+ case kWasmFuncRef:
+ case kWasmExnRef:
return MachineType::TaggedPointer();
case kWasmS128:
return MachineType::Simd128();
@@ -298,9 +318,9 @@ class V8_EXPORT_PRIVATE ValueTypes {
case kWasmF64:
return MachineRepresentation::kFloat64;
case kWasmAnyRef:
- case kWasmAnyFunc:
+ case kWasmFuncRef:
case kWasmNullRef:
- case kWasmExceptRef:
+ case kWasmExnRef:
return MachineRepresentation::kTaggedPointer;
case kWasmS128:
return MachineRepresentation::kSimd128;
@@ -344,13 +364,13 @@ class V8_EXPORT_PRIVATE ValueTypes {
return 'd';
case kWasmAnyRef:
return 'r';
- case kWasmAnyFunc:
+ case kWasmFuncRef:
return 'a';
case kWasmS128:
return 's';
case kWasmStmt:
return 'v';
- case kWasmVar:
+ case kWasmBottom:
return '*';
default:
return '?';
@@ -369,18 +389,18 @@ class V8_EXPORT_PRIVATE ValueTypes {
return "f64";
case kWasmAnyRef:
return "anyref";
- case kWasmAnyFunc:
- return "anyfunc";
+ case kWasmFuncRef:
+ return "funcref";
case kWasmNullRef:
return "nullref";
- case kWasmExceptRef:
+ case kWasmExnRef:
return "exn";
case kWasmS128:
return "s128";
case kWasmStmt:
return "<stmt>";
- case kWasmVar:
- return "<var>";
+ case kWasmBottom:
+ return "<bot>";
default:
return "<unknown>";
}
diff --git a/deps/v8/src/wasm/wasm-arguments.h b/deps/v8/src/wasm/wasm-arguments.h
new file mode 100644
index 0000000000..822f46addd
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-arguments.h
@@ -0,0 +1,73 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_ARGUMENTS_H_
+#define V8_WASM_WASM_ARGUMENTS_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "src/base/memory.h"
+#include "src/codegen/signature.h"
+#include "src/common/globals.h"
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Helper class for {Push}ing Wasm value arguments onto the stack in the format
+// that the CWasmEntryStub expects, as well as for {Pop}ping return values.
+// {Reset} must be called if a packer instance used for pushing is then
+// reused for popping: it resets the internal pointer to the beginning of
+// the stack region.
+class CWasmArgumentsPacker {
+ public:
+ explicit CWasmArgumentsPacker(size_t buffer_size)
+ : heap_buffer_(buffer_size <= kMaxOnStackBuffer ? 0 : buffer_size),
+ buffer_((buffer_size <= kMaxOnStackBuffer) ? on_stack_buffer_
+ : heap_buffer_.data()) {}
+ i::Address argv() const { return reinterpret_cast<i::Address>(buffer_); }
+ void Reset() { offset_ = 0; }
+
+ template <typename T>
+ void Push(T val) {
+ Address address = reinterpret_cast<Address>(buffer_ + offset_);
+ offset_ += sizeof(val);
+ base::WriteUnalignedValue(address, val);
+ }
+
+ template <typename T>
+ T Pop() {
+ Address address = reinterpret_cast<Address>(buffer_ + offset_);
+ offset_ += sizeof(T);
+ return base::ReadUnalignedValue<T>(address);
+ }
+
+ static int TotalSize(FunctionSig* sig) {
+ int return_size = 0;
+ for (ValueType t : sig->returns()) {
+ return_size += ValueTypes::ElementSizeInBytes(t);
+ }
+ int param_size = 0;
+ for (ValueType t : sig->parameters()) {
+ param_size += ValueTypes::ElementSizeInBytes(t);
+ }
+ return std::max(return_size, param_size);
+ }
+
+ private:
+ static const size_t kMaxOnStackBuffer = 10 * i::kSystemPointerSize;
+
+ uint8_t on_stack_buffer_[kMaxOnStackBuffer];
+ std::vector<uint8_t> heap_buffer_;
+ uint8_t* buffer_;
+ size_t offset_ = 0;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_ARGUMENTS_H_
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 2eddce3d95..3d0cde0cce 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -150,7 +150,8 @@ bool WasmCode::ShouldBeLogged(Isolate* isolate) {
// The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
// to call {WasmEngine::EnableCodeLogging} if this return value would change
// for any isolate. Otherwise we might lose code events.
- return isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
+ return isolate->logger()->is_listening_to_code_events() ||
+ isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
isolate->is_profiling();
}
@@ -286,7 +287,8 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
os << "\n";
if (handler_table_size() > 0) {
- HandlerTable table(handler_table(), handler_table_size());
+ HandlerTable table(handler_table(), handler_table_size(),
+ HandlerTable::kReturnAddressBasedEncoding);
os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
<< "):\n";
table.HandlerTableReturnPrint(os);
@@ -403,12 +405,15 @@ void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
VirtualMemory code_space,
- bool can_request_more)
+ bool can_request_more,
+ std::shared_ptr<Counters> async_counters)
: code_manager_(code_manager),
free_code_space_(code_space.region()),
- can_request_more_memory_(can_request_more) {
+ can_request_more_memory_(can_request_more),
+ async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(can_request_more ? 4 : 1);
owned_code_space_.emplace_back(std::move(code_space));
+ async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
WasmCodeAllocator::~WasmCodeAllocator() {
@@ -487,6 +492,8 @@ Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
owned_code_space_.emplace_back(std::move(new_mem));
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
+ async_counters_->wasm_module_num_code_spaces()->AddSample(
+ static_cast<int>(owned_code_space_.size()));
}
const Address commit_page_size = page_allocator->CommitPageSize();
Address commit_start = RoundUp(code_space.begin(), commit_page_size);
@@ -613,7 +620,7 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this)
: code_allocator_(engine->code_manager(), std::move(code_space),
- can_request_more),
+ can_request_more, async_counters),
enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
@@ -694,12 +701,26 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
+ if (!lazy_compile_table_) {
+ uint32_t num_slots = module_->num_declared_functions;
+ WasmCodeRefScope code_ref_scope;
+ lazy_compile_table_ = CreateEmptyJumpTable(
+ JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots));
+ JumpTableAssembler::GenerateLazyCompileTable(
+ lazy_compile_table_->instruction_start(), num_slots,
+ module_->num_imported_functions,
+ runtime_stub_entry(WasmCode::kWasmCompileLazy));
+ }
+
// Add jump table entry for jump to the lazy compile stub.
uint32_t slot_index = func_index - module_->num_imported_functions;
DCHECK_NE(runtime_stub_entry(WasmCode::kWasmCompileLazy), kNullAddress);
- JumpTableAssembler::EmitLazyCompileJumpSlot(
- jump_table_->instruction_start(), slot_index, func_index,
- runtime_stub_entry(WasmCode::kWasmCompileLazy), WasmCode::kFlushICache);
+ Address lazy_compile_target =
+ lazy_compile_table_->instruction_start() +
+ JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
+ JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
+ slot_index, lazy_compile_target,
+ WasmCode::kFlushICache);
}
// TODO(mstarzinger): Remove {Isolate} parameter once {V8_EMBEDDED_BUILTINS}
@@ -713,23 +734,22 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
WasmCode::kRuntimeStubCount));
Address base = jump_table->instruction_start();
EmbeddedData embedded_data = EmbeddedData::FromBlob();
-#define RUNTIME_STUB(Name) {Builtins::k##Name, WasmCode::k##Name},
+#define RUNTIME_STUB(Name) Builtins::k##Name,
#define RUNTIME_STUB_TRAP(Name) RUNTIME_STUB(ThrowWasm##Name)
- std::pair<Builtins::Name, WasmCode::RuntimeStubId> wasm_runtime_stubs[] = {
+ Builtins::Name wasm_runtime_stubs[WasmCode::kRuntimeStubCount] = {
WASM_RUNTIME_STUB_LIST(RUNTIME_STUB, RUNTIME_STUB_TRAP)};
#undef RUNTIME_STUB
#undef RUNTIME_STUB_TRAP
- for (auto pair : wasm_runtime_stubs) {
- CHECK(embedded_data.ContainsBuiltin(pair.first));
- Address builtin = embedded_data.InstructionStartOfBuiltin(pair.first);
- JumpTableAssembler::EmitRuntimeStubSlot(base, pair.second, builtin,
- WasmCode::kNoFlushICache);
- uint32_t slot_offset =
- JumpTableAssembler::StubSlotIndexToOffset(pair.second);
- runtime_stub_entries_[pair.second] = base + slot_offset;
+ Address builtin_address[WasmCode::kRuntimeStubCount];
+ for (int i = 0; i < WasmCode::kRuntimeStubCount; ++i) {
+ Builtins::Name builtin = wasm_runtime_stubs[i];
+ CHECK(embedded_data.ContainsBuiltin(builtin));
+ builtin_address[i] = embedded_data.InstructionStartOfBuiltin(builtin);
+ runtime_stub_entries_[i] =
+ base + JumpTableAssembler::StubSlotIndexToOffset(i);
}
- FlushInstructionCache(jump_table->instructions().begin(),
- jump_table->instructions().size());
+ JumpTableAssembler::GenerateRuntimeStubTable(base, builtin_address,
+ WasmCode::kRuntimeStubCount);
DCHECK_NULL(runtime_stub_table_);
runtime_stub_table_ = jump_table;
#else // V8_EMBEDDED_BUILTINS
@@ -822,7 +842,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry);
std::unique_ptr<WasmCode> new_code{new WasmCode{
this, // native_module
- WasmCode::kAnonymousFuncIndex, // index
+ kAnonymousFuncIndex, // index
dst_code_bytes, // instructions
stack_slots, // stack_slots
0, // tagged_parameter_slots
@@ -920,8 +940,6 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
code->MaybePrint();
code->Validate();
- code->RegisterTrapHandlerData();
-
return code;
}
@@ -930,27 +948,28 @@ WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
return PublishCodeLocked(std::move(code));
}
-namespace {
-WasmCode::Kind GetCodeKindForExecutionTier(ExecutionTier tier) {
- switch (tier) {
- case ExecutionTier::kInterpreter:
+WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
+ switch (result.kind) {
+ case WasmCompilationResult::kWasmToJsWrapper:
+ return WasmCode::Kind::kWasmToJsWrapper;
+ case WasmCompilationResult::kInterpreterEntry:
return WasmCode::Kind::kInterpreterEntry;
- case ExecutionTier::kLiftoff:
- case ExecutionTier::kTurbofan:
+ case WasmCompilationResult::kFunction:
return WasmCode::Kind::kFunction;
- case ExecutionTier::kNone:
+ default:
UNREACHABLE();
}
}
-} // namespace
WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
// The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
DCHECK(!allocation_mutex_.TryLock());
- if (!code->IsAnonymous()) {
+ if (!code->IsAnonymous() &&
+ code->index() >= module_->num_imported_functions) {
DCHECK_LT(code->index(), num_functions());
- DCHECK_LE(module_->num_imported_functions, code->index());
+
+ code->RegisterTrapHandlerData();
// Assume an order of execution tiers that represents the quality of their
// generated code.
@@ -1017,8 +1036,6 @@ WasmCode* NativeModule::AddDeserializedCode(
std::move(protected_instructions), std::move(reloc_info),
std::move(source_position_table), kind, tier}};
- code->RegisterTrapHandlerData();
-
// Note: we do not flush the i-cache here, since the code needs to be
// relocated anyway. The caller is responsible for flushing the i-cache later.
@@ -1056,7 +1073,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{new WasmCode{
this, // native_module
- WasmCode::kAnonymousFuncIndex, // index
+ kAnonymousFuncIndex, // index
code_space, // instructions
0, // stack_slots
0, // tagged_parameter_slots
@@ -1112,11 +1129,16 @@ WasmCode* NativeModule::Lookup(Address pc) const {
return candidate;
}
+uint32_t NativeModule::GetJumpTableOffset(uint32_t func_index) const {
+ uint32_t slot_idx = func_index - module_->num_imported_functions;
+ DCHECK_GT(module_->num_declared_functions, slot_idx);
+ return JumpTableAssembler::JumpSlotIndexToOffset(slot_idx);
+}
+
Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
// Return the jump table slot for that function index.
DCHECK_NOT_NULL(jump_table_);
- uint32_t slot_idx = func_index - module_->num_imported_functions;
- uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx);
+ uint32_t slot_offset = GetJumpTableOffset(func_index);
DCHECK_LT(slot_offset, jump_table_->instructions().size());
return jump_table_->instruction_start() + slot_offset;
}
@@ -1416,9 +1438,8 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
generated_code.emplace_back(AddCodeWithCodeSpace(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots, std::move(result.protected_instructions),
- std::move(result.source_positions),
- GetCodeKindForExecutionTier(result.result_tier), result.result_tier,
- this_code_space));
+ std::move(result.source_positions), GetCodeKind(result),
+ result.result_tier, this_code_space));
}
DCHECK_EQ(0, code_space.size());
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 49c287df2c..db7b4f061d 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -176,7 +176,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
- static constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
STATIC_ASSERT(kAnonymousFuncIndex > kV8MaxWasmFunctions);
private:
@@ -270,6 +269,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
+WasmCode::Kind GetCodeKind(const WasmCompilationResult& result);
+
// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);
@@ -277,7 +278,8 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
class WasmCodeAllocator {
public:
WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
- bool can_request_more);
+ bool can_request_more,
+ std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
size_t committed_code_space() const {
@@ -315,7 +317,7 @@ class WasmCodeAllocator {
// Code space that was allocated for code (subset of {owned_code_space_}).
DisjointAllocationPool allocated_code_space_;
// Code space that was allocated before but is dead now. Full pages within
- // this region are discarded. It's still a subset of {owned_code_space_}).
+ // this region are discarded. It's still a subset of {owned_code_space_}.
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
@@ -329,6 +331,8 @@ class WasmCodeAllocator {
bool is_executable_ = false;
const bool can_request_more_memory_;
+
+ std::shared_ptr<Counters> async_counters_;
};
class V8_EXPORT_PRIVATE NativeModule final {
@@ -399,10 +403,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
}
- ptrdiff_t jump_table_offset(uint32_t func_index) const {
- DCHECK_GE(func_index, num_imported_functions());
- return GetCallTargetForFunction(func_index) - jump_table_start();
- }
+ uint32_t GetJumpTableOffset(uint32_t func_index) const;
bool is_jump_table_slot(Address address) const {
return jump_table_->contains(address);
@@ -558,6 +559,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Jump table used to easily redirect wasm function calls.
WasmCode* jump_table_ = nullptr;
+ // Lazy compile stub table, containing entries to jump to the
+ // {WasmCompileLazy} builtin, passing the function index.
+ WasmCode* lazy_compile_table_ = nullptr;
+
// The compilation state keeps track of compilation tasks for this module.
// Note that its destructor blocks until all tasks are finished/aborted and
// hence needs to be destructed first when this native module dies.
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index fce60cb593..fbbe19396c 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -26,9 +26,9 @@ enum ValueTypeCode : uint8_t {
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
kLocalS128 = 0x7b,
- kLocalAnyFunc = 0x70,
+ kLocalFuncRef = 0x70,
kLocalAnyRef = 0x6f,
- kLocalExceptRef = 0x68,
+ kLocalExnRef = 0x68,
};
// Binary encoding of other types.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
@@ -106,6 +106,8 @@ constexpr WasmCodePosition kNoCodePosition = -1;
constexpr uint32_t kExceptionAttribute = 0;
+constexpr uint32_t kAnonymousFuncIndex = 0xffffffff;
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 33d9a64bf4..2955bc602f 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -617,8 +617,8 @@ Handle<JSObject> WasmDebugInfo::GetLocalScopeObject(
}
// static
-Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
- Handle<WasmDebugInfo> debug_info, wasm::FunctionSig* sig) {
+Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
+ wasm::FunctionSig* sig) {
Isolate* isolate = debug_info->GetIsolate();
DCHECK_EQ(debug_info->has_c_wasm_entries(),
debug_info->has_c_wasm_entry_map());
@@ -642,24 +642,9 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
DCHECK(entries->get(index).IsUndefined(isolate));
Handle<Code> new_entry_code =
compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
- Handle<WasmExportedFunctionData> function_data =
- Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct(
- WASM_EXPORTED_FUNCTION_DATA_TYPE, AllocationType::kOld));
- function_data->set_wrapper_code(*new_entry_code);
- function_data->set_instance(debug_info->wasm_instance());
- function_data->set_jump_table_offset(-1);
- function_data->set_function_index(-1);
- Handle<String> name =
- isolate->factory()->InternalizeString(StaticCharVector("c-wasm-entry"));
- NewFunctionArgs args = NewFunctionArgs::ForWasm(
- name, function_data, isolate->sloppy_function_map());
- Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
- new_entry->set_context(debug_info->wasm_instance().native_context());
- new_entry->shared().set_internal_formal_parameter_count(
- compiler::CWasmEntryParameters::kNumParameters);
- entries->set(index, *new_entry);
+ entries->set(index, *new_entry_code);
}
- return handle(JSFunction::cast(entries->get(index)), isolate);
+ return handle(Code::cast(entries->get(index)), isolate);
}
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 83053fd71f..7b91b16b80 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -8,6 +8,7 @@
#include "src/diagnostics/code-tracer.h"
#include "src/diagnostics/compilation-statistics.h"
#include "src/execution/frames.h"
+#include "src/execution/v8threads.h"
#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
@@ -88,24 +89,24 @@ class LogCodesTask : public Task {
WasmEngine* const engine_;
};
-class WasmGCForegroundTask : public Task {
- public:
- explicit WasmGCForegroundTask(Isolate* isolate) : isolate_(isolate) {
- DCHECK_NOT_NULL(isolate);
- }
-
- ~WasmGCForegroundTask() {
- // If the isolate is already shutting down, the platform can delete this
- // task without ever executing it. For that case, we need to deregister the
- // task from the engine to avoid UAF.
- if (isolate_) {
- WasmEngine* engine = isolate_->wasm_engine();
- engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
+void CheckNoArchivedThreads(Isolate* isolate) {
+ class ArchivedThreadsVisitor : public ThreadVisitor {
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) override {
+ // Archived threads are rarely used, and not combined with Wasm at the
+ // moment. Implement this and test it properly once we have a use case for
+ // that.
+ FATAL("archived threads in combination with wasm not supported");
}
- }
+ } archived_threads_visitor;
+ isolate->thread_manager()->IterateArchivedThreads(&archived_threads_visitor);
+}
+
+class WasmGCForegroundTask : public CancelableTask {
+ public:
+ explicit WasmGCForegroundTask(Isolate* isolate)
+ : CancelableTask(isolate->cancelable_task_manager()), isolate_(isolate) {}
- void Run() final {
- if (isolate_ == nullptr) return; // cancelled.
+ void RunInternal() final {
WasmEngine* engine = isolate_->wasm_engine();
// If the foreground task is executing, there is no wasm code active. Just
// report an empty set of live wasm code.
@@ -114,13 +115,10 @@ class WasmGCForegroundTask : public Task {
DCHECK_NE(StackFrame::WASM_COMPILED, it.frame()->type());
}
#endif
+ CheckNoArchivedThreads(isolate_);
engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
- // Cancel to signal to the destructor that this task executed.
- Cancel();
}
- void Cancel() { isolate_ = nullptr; }
-
private:
Isolate* isolate_;
};
@@ -240,10 +238,13 @@ bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
- Handle<HeapNumber> uses_bitset) {
+ Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
+ ModuleOrigin origin = language_mode == LanguageMode::kSloppy
+ ? kAsmJsSloppyOrigin
+ : kAsmJsStrictOrigin;
ModuleResult result =
DecodeWasmModule(kAsmjsWasmFeatures, bytes.start(), bytes.end(), false,
- kAsmJsOrigin, isolate->counters(), allocator());
+ origin, isolate->counters(), allocator());
if (result.failed()) {
// This happens once in a while when we have missed some limit check
// in the asm parser. Output an error message to help diagnose, but crash.
@@ -465,6 +466,9 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
DCHECK_EQ(1, native_modules_.count(native_module));
native_modules_[native_module]->isolates.insert(isolate);
}
+
+ // Finish the Wasm script now and make it public to the debugger.
+ isolate->debug()->OnAfterCompile(script);
return module_object;
}
@@ -524,6 +528,24 @@ bool WasmEngine::HasRunningCompileJob(Isolate* isolate) {
return false;
}
+void WasmEngine::DeleteCompileJobsOnContext(Handle<Context> context) {
+ // Under the mutex get all jobs to delete. Then delete them without holding
+ // the mutex, such that deletion can reenter the WasmEngine.
+ std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
+ {
+ base::MutexGuard guard(&mutex_);
+ for (auto it = async_compile_jobs_.begin();
+ it != async_compile_jobs_.end();) {
+ if (!it->first->context().is_identical_to(context)) {
+ ++it;
+ continue;
+ }
+ jobs_to_delete.push_back(std::move(it->second));
+ it = async_compile_jobs_.erase(it);
+ }
+ }
+}
+
void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
// Under the mutex get all jobs to delete. Then delete them without holding
// the mutex, such that deletion can reenter the WasmEngine.
@@ -775,6 +797,8 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
live_wasm_code.insert(WasmCompiledFrame::cast(frame)->wasm_code());
}
+ CheckNoArchivedThreads(isolate);
+
ReportLiveCodeForGC(isolate,
OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
}
@@ -876,11 +900,7 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
DCHECK(!mutex_.TryLock());
DCHECK_NOT_NULL(current_gc_info_);
- auto it = current_gc_info_->outstanding_isolates.find(isolate);
- if (it == current_gc_info_->outstanding_isolates.end()) return false;
- if (auto* fg_task = it->second) fg_task->Cancel();
- current_gc_info_->outstanding_isolates.erase(it);
- return true;
+ return current_gc_info_->outstanding_isolates.erase(isolate) != 0;
}
void WasmEngine::PotentiallyFinishCurrentGC() {
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 2ae3e81368..69e6cdae6e 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -62,7 +62,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
MaybeHandle<AsmWasmData> SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
- Handle<HeapNumber> uses_bitset);
+ Handle<HeapNumber> uses_bitset, LanguageMode language_mode);
Handle<WasmModuleObject> FinalizeTranslatedAsmJs(
Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
Handle<Script> script);
@@ -140,6 +140,11 @@ class V8_EXPORT_PRIVATE WasmEngine {
// Isolate is currently running.
bool HasRunningCompileJob(Isolate* isolate);
+ // Deletes all AsyncCompileJobs that belong to the given context. All
+ // compilation is aborted, no more callbacks will be triggered. This is used
+ // when a context is disposed, e.g. because of browser navigation.
+ void DeleteCompileJobsOnContext(Handle<Context> context);
+
// Deletes all AsyncCompileJobs that belong to the given Isolate. All
// compilation is aborted, no more callbacks will be triggered. This is used
// for tearing down an isolate, or to clean it up to be reused.
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 997cf83bb7..08e6139abe 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -29,7 +29,7 @@
#include "src/trap-handler/trap-handler.h"
#endif
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/utils.h"
#include "src/wasm/wasm-external-refs.h"
@@ -37,6 +37,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadUnalignedValue;
+using base::WriteUnalignedValue;
+
void f32_trunc_wrapper(Address data) {
WriteUnalignedValue<float>(data, truncf(ReadUnalignedValue<float>(data)));
}
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
index b586d07ff4..9630fa76dd 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -18,6 +18,11 @@ WasmCode*& WasmImportWrapperCache::ModificationScope::operator[](
return cache_->entry_map_[key];
}
+WasmCode*& WasmImportWrapperCache::operator[](
+ const WasmImportWrapperCache::CacheKey& key) {
+ return entry_map_[key];
+}
+
WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
FunctionSig* sig) const {
auto it = entry_map_.find({kind, sig});
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index 62f27cd9a4..e9e60faad4 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -45,6 +45,10 @@ class WasmImportWrapperCache {
base::MutexGuard guard_;
};
+ // Not thread-safe, use ModificationScope to get exclusive write access to the
+ // cache.
+ V8_EXPORT_PRIVATE WasmCode*& operator[](const CacheKey& key);
+
// Assumes the key exists in the map.
V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
FunctionSig* sig) const;
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index f06cead069..4449439896 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -12,7 +12,6 @@
#include "src/compiler/wasm-compiler.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects-inl.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/utils/boxed-float.h"
#include "src/utils/identity-map.h"
#include "src/utils/utils.h"
@@ -21,12 +20,12 @@
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/module-compiler.h"
+#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
-
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
@@ -34,6 +33,11 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadLittleEndianValue;
+using base::ReadUnalignedValue;
+using base::WriteLittleEndianValue;
+using base::WriteUnalignedValue;
+
#define TRACE(...) \
do { \
if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
@@ -582,7 +586,7 @@ inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
}
inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
- return static_cast<float>(a);
+ return DoubleToFloat32(a);
}
inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
@@ -816,7 +820,7 @@ class SideTable : public ZoneObject {
bool is_loop = opcode == kExprLoop;
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
@@ -832,7 +836,7 @@ class SideTable : public ZoneObject {
case kExprIf: {
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
@@ -865,7 +869,7 @@ class SideTable : public ZoneObject {
case kExprTry: {
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
imm.sig = module->signatures[imm.sig_index];
}
TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
@@ -1279,8 +1283,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
@@ -1460,8 +1464,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
val = WasmValue(isolate_->factory()->null_value());
break;
}
@@ -1658,8 +1662,8 @@ class ThreadImpl {
}
template <typename ctype, typename mtype>
- bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
- MachineRepresentation rep) {
+ bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int* const len, MachineRepresentation rep) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
sizeof(ctype));
uint32_t index = Pop().to<uint32_t>();
@@ -1672,7 +1676,7 @@ class ThreadImpl {
converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
Push(result);
- len = 1 + imm.length;
+ *len = 1 + imm.length;
if (FLAG_trace_wasm_memory) {
MemoryTracingInfo info(imm.offset + index, false, rep);
@@ -1685,8 +1689,8 @@ class ThreadImpl {
}
template <typename ctype, typename mtype>
- bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
- MachineRepresentation rep) {
+ bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
+ int* const len, MachineRepresentation rep) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
sizeof(ctype));
ctype val = Pop().to<ctype>();
@@ -1698,7 +1702,7 @@ class ThreadImpl {
return false;
}
WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
- len = 1 + imm.length;
+ *len = 1 + imm.length;
if (FLAG_trace_wasm_memory) {
MemoryTracingInfo info(imm.offset + index, true, rep);
@@ -1730,24 +1734,24 @@ class ThreadImpl {
template <typename type, typename op_type>
bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
- Address& address, pc_t pc, int& len,
+ Address* address, pc_t pc, int* const len,
type* val = nullptr, type* val2 = nullptr) {
MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
sizeof(type));
if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
if (val) *val = static_cast<type>(Pop().to<op_type>());
uint32_t index = Pop().to<uint32_t>();
- address = BoundsCheckMem<type>(imm.offset, index);
+ *address = BoundsCheckMem<type>(imm.offset, index);
if (!address) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
- len = 2 + imm.length;
+ *len = 2 + imm.length;
return true;
}
bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int& len) {
+ InterpreterCode* code, pc_t pc, int* const len) {
switch (opcode) {
case kExprI32SConvertSatF32:
Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
@@ -1776,7 +1780,7 @@ class ThreadImpl {
case kExprMemoryInit: {
MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
- len += imm.length;
+ *len += imm.length;
if (!CheckDataSegmentIsPassiveAndNotDropped(imm.data_segment_index,
pc)) {
return false;
@@ -1784,6 +1788,9 @@ class ThreadImpl {
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
auto src_max =
@@ -1799,7 +1806,7 @@ class ThreadImpl {
}
case kExprDataDrop: {
DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckDataSegmentIsPassiveAndNotDropped(imm.index, pc)) {
return false;
}
@@ -1808,11 +1815,15 @@ class ThreadImpl {
}
case kExprMemoryCopy: {
MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+ *len += imm.length;
auto size = Pop().to<uint32_t>();
auto src = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
- bool copy_backward = src < dst && dst - src < size;
+ bool copy_backward = src < dst;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
// Trap without copying any bytes if we are copying backward and the
// copy is partially out-of-bounds. We only need to check that the dst
@@ -1825,25 +1836,27 @@ class ThreadImpl {
memory_copy_wrapper(dst_addr, src_addr, size);
}
if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
- len += imm.length;
return ok;
}
case kExprMemoryFill: {
MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
code->at(pc + 1));
+ *len += imm.length;
auto size = Pop().to<uint32_t>();
auto value = Pop().to<uint32_t>();
auto dst = Pop().to<uint32_t>();
+ if (size == 0) {
+ return true;
+ }
Address dst_addr;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
memory_fill_wrapper(dst_addr, value, size);
if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
- len += imm.length;
return ok;
}
case kExprTableInit: {
TableInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckElemSegmentIsPassiveAndNotDropped(imm.elem_segment_index,
pc)) {
return false;
@@ -1860,7 +1873,7 @@ class ThreadImpl {
}
case kExprElemDrop: {
ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- len += imm.length;
+ *len += imm.length;
if (!CheckElemSegmentIsPassiveAndNotDropped(imm.index, pc)) {
return false;
}
@@ -1877,9 +1890,64 @@ class ThreadImpl {
isolate_, instance_object_, imm.table_dst.index,
imm.table_src.index, dst, src, size);
if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
- len += imm.length;
+ *len += imm.length;
return ok;
}
+ case kExprTableGrow: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ auto delta = Pop().to<uint32_t>();
+ auto value = Pop().to_anyref();
+ int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
+ Push(WasmValue(result));
+ *len += imm.length;
+ return true;
+ }
+ case kExprTableSize: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ Push(WasmValue(table_size));
+ *len += imm.length;
+ return true;
+ }
+ case kExprTableFill: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
+ code->at(pc + 1));
+ HandleScope handle_scope(isolate_);
+ auto count = Pop().to<uint32_t>();
+ auto value = Pop().to_anyref();
+ auto start = Pop().to<uint32_t>();
+
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ if (start > table_size) {
+ DoTrap(kTrapTableOutOfBounds, pc);
+ return false;
+ }
+
+ // Even when table.fill goes out-of-bounds, as many entries as possible
+ // are put into the table. Only afterwards we trap.
+ uint32_t fill_count = std::min(count, table_size - start);
+ WasmTableObject::Fill(isolate_, table, start, value, fill_count);
+
+ if (fill_count < count) {
+ DoTrap(kTrapTableOutOfBounds, pc);
+ return false;
+ }
+ *len += imm.length;
+ return true;
+ }
default:
FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
OpcodeName(code->start[pc]));
@@ -1911,7 +1979,7 @@ class ThreadImpl {
}
bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int& len) {
+ InterpreterCode* code, pc_t pc, int* const len) {
#if V8_TARGET_BIG_ENDIAN
constexpr bool kBigEndian = true;
#else
@@ -1919,27 +1987,27 @@ class ThreadImpl {
#endif
WasmValue result;
switch (opcode) {
-#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- op_type result; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- if (kBigEndian) { \
- auto oplambda = [](type a, type b) { return a op b; }; \
- result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
- } else { \
- result = static_cast<op_type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
- } \
- Push(WasmValue(result)); \
- break; \
+#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ op_type result; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ if (kBigEndian) { \
+ auto oplambda = [](type a, type b) { return a op b; }; \
+ result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
+ } else { \
+ result = static_cast<op_type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
+ } \
+ Push(WasmValue(result)); \
+ break; \
}
ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
@@ -2003,24 +2071,24 @@ class ThreadImpl {
ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
atomic_exchange, =);
#undef ATOMIC_BINOP_CASE
-#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
- case kExpr##name: { \
- type old_val; \
- type new_val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &old_val, &new_val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- old_val = AdjustByteOrder<type>(old_val); \
- new_val = AdjustByteOrder<type>(new_val); \
- std::atomic_compare_exchange_strong( \
- reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
- Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
- break; \
+#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
+ case kExpr##name: { \
+ type old_val; \
+ type new_val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &old_val, &new_val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ old_val = AdjustByteOrder<type>(old_val); \
+ new_val = AdjustByteOrder<type>(new_val); \
+ std::atomic_compare_exchange_strong( \
+ reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
+ Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
+ break; \
}
ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
uint32_t);
@@ -2037,19 +2105,20 @@ class ThreadImpl {
ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
- Push(result); \
- break; \
+#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
+ case kExpr##name: { \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
+ len)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
+ Push(result); \
+ break; \
}
ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
@@ -2059,20 +2128,20 @@ class ThreadImpl {
ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
#undef ATOMIC_LOAD_CASE
-#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
- AdjustByteOrder<type>(val)); \
- break; \
+#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
+ case kExpr##name: { \
+ type val; \
+ Address addr; \
+ if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
+ &val)) { \
+ return false; \
+ } \
+ static_assert(sizeof(std::atomic<type>) == sizeof(type), \
+ "Size mismatch for types std::atomic<" #type \
+ ">, and " #type); \
+ std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
+ AdjustByteOrder<type>(val)); \
+ break; \
}
ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
@@ -2082,6 +2151,10 @@ class ThreadImpl {
ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
+ case kExprAtomicFence:
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ *len += 2;
+ break;
default:
UNREACHABLE();
return false;
@@ -2118,7 +2191,7 @@ class ThreadImpl {
}
bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
- pc_t pc, int& len) {
+ pc_t pc, int* const len) {
switch (opcode) {
#define SPLAT_CASE(format, sType, valType, num) \
case kExpr##format##Splat: { \
@@ -2129,23 +2202,27 @@ class ThreadImpl {
Push(WasmValue(Simd128(s))); \
return true; \
}
- SPLAT_CASE(I32x4, int4, int32_t, 4)
+ SPLAT_CASE(F64x2, float2, double, 2)
SPLAT_CASE(F32x4, float4, float, 4)
+ SPLAT_CASE(I64x2, int2, int64_t, 2)
+ SPLAT_CASE(I32x4, int4, int32_t, 4)
SPLAT_CASE(I16x8, int8, int32_t, 8)
SPLAT_CASE(I8x16, int16, int32_t, 16)
#undef SPLAT_CASE
#define EXTRACT_LANE_CASE(format, name) \
case kExpr##format##ExtractLane: { \
SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue val = Pop(); \
Simd128 s = val.to_s128(); \
auto ss = s.to_##name(); \
Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
return true; \
}
- EXTRACT_LANE_CASE(I32x4, i32x4)
+ EXTRACT_LANE_CASE(F64x2, f64x2)
EXTRACT_LANE_CASE(F32x4, f32x4)
+ EXTRACT_LANE_CASE(I64x2, i64x2)
+ EXTRACT_LANE_CASE(I32x4, i32x4)
EXTRACT_LANE_CASE(I16x8, i16x8)
EXTRACT_LANE_CASE(I8x16, i8x16)
#undef EXTRACT_LANE_CASE
@@ -2169,6 +2246,9 @@ class ThreadImpl {
BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
+ BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
+ BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
+ BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
@@ -2222,10 +2302,13 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
+ UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
+ UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
@@ -2246,12 +2329,32 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
+ CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
+ CMPOP_CASE(F64x2Gt, f64x2, float2, int2, 2, a > b)
+ CMPOP_CASE(F64x2Ge, f64x2, float2, int2, 2, a >= b)
+ CMPOP_CASE(F64x2Lt, f64x2, float2, int2, 2, a < b)
+ CMPOP_CASE(F64x2Le, f64x2, float2, int2, 2, a <= b)
CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
+ CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
+ CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
+ CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
+ CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
+ CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
+ CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
+ CMPOP_CASE(I64x2GtU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) > static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2GeU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) >= static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2LtU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) < static_cast<uint64_t>(b))
+ CMPOP_CASE(I64x2LeU, i64x2, int2, int2, 2,
+ static_cast<uint64_t>(a) <= static_cast<uint64_t>(b))
CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
@@ -2298,7 +2401,7 @@ class ThreadImpl {
#define REPLACE_LANE_CASE(format, name, stype, ctype) \
case kExpr##format##ReplaceLane: { \
SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue new_val = Pop(); \
WasmValue simd_val = Pop(); \
stype s = simd_val.to_s128().to_##name(); \
@@ -2306,7 +2409,9 @@ class ThreadImpl {
Push(WasmValue(Simd128(s))); \
return true; \
}
+ REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
+ REPLACE_LANE_CASE(I64x2, i64x2, int2, int64_t)
REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
@@ -2320,7 +2425,7 @@ class ThreadImpl {
#define SHIFT_CASE(op, name, stype, count, expr) \
case kExpr##op: { \
SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
- ++len; \
+ *len += 1; \
WasmValue v = Pop(); \
stype s = v.to_s128().to_##name(); \
stype res; \
@@ -2331,6 +2436,11 @@ class ThreadImpl {
Push(WasmValue(Simd128(res))); \
return true; \
}
+ SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
+ static_cast<uint64_t>(a) << imm.shift)
+ SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> imm.shift)
+ SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
+ static_cast<uint64_t>(a) >> imm.shift)
SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
static_cast<uint32_t>(a) << imm.shift)
SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
@@ -2452,7 +2562,7 @@ class ThreadImpl {
case kExprS8x16Shuffle: {
Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(decoder,
code->at(pc));
- len += 16;
+ *len += 16;
int16 v2 = Pop().to_s128().to_i8x16();
int16 v1 = Pop().to_s128().to_i8x16();
int16 res;
@@ -2465,6 +2575,7 @@ class ThreadImpl {
Push(WasmValue(Simd128(res)));
return true;
}
+ case kExprS1x2AnyTrue:
case kExprS1x4AnyTrue:
case kExprS1x8AnyTrue:
case kExprS1x16AnyTrue: {
@@ -2483,6 +2594,7 @@ class ThreadImpl {
Push(WasmValue(res)); \
return true; \
}
+ REDUCTION_CASE(S1x2AllTrue, i64x2, int2, 2, &)
REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
@@ -2583,8 +2695,8 @@ class ThreadImpl {
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
Handle<Object> anyref = value.to_anyref();
encoded_values->set(encoded_index++, *anyref);
break;
@@ -2683,8 +2795,8 @@ class ThreadImpl {
break;
}
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
Handle<Object> anyref(encoded_values->get(encoded_index++), isolate_);
value = WasmValue(anyref);
break;
@@ -3005,11 +3117,9 @@ class ThreadImpl {
CallIndirectImmediate<Decoder::kNoValidate> imm(
kAllWasmFeatures, &decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
- // Assume only one table for now.
- DCHECK_LE(module()->tables.size(), 1u);
CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, imm.sig_index);
+ CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL:
// The import is a function of this instance. Call it directly.
@@ -3077,14 +3187,12 @@ class ThreadImpl {
CallIndirectImmediate<Decoder::kNoValidate> imm(
kAllWasmFeatures, &decoder, code->at(pc));
uint32_t entry_index = Pop().to<uint32_t>();
- // Assume only one table for now.
- DCHECK_LE(module()->tables.size(), 1u);
CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
// TODO(wasm): Calling functions needs some refactoring to avoid
// multi-exit code like this.
ExternalCallResult result =
- CallIndirectFunction(0, entry_index, imm.sig_index);
+ CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
switch (result.type) {
case ExternalCallResult::INTERNAL: {
InterpreterCode* target = result.interpreter_code;
@@ -3141,8 +3249,8 @@ class ThreadImpl {
WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
+ case kWasmFuncRef:
+ case kWasmExnRef: {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
@@ -3156,10 +3264,42 @@ class ThreadImpl {
len = 1 + imm.length;
break;
}
-
+ case kExprTableGet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ uint32_t entry_index = Pop().to<uint32_t>();
+ if (entry_index >= table_size) {
+ return DoTrap(kTrapTableOutOfBounds, pc);
+ }
+ Handle<Object> value =
+ WasmTableObject::Get(isolate_, table, entry_index);
+ Push(WasmValue(value));
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprTableSet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ HandleScope handle_scope(isolate_);
+ auto table = handle(
+ WasmTableObject::cast(instance_object_->tables().get(imm.index)),
+ isolate_);
+ uint32_t table_size = table->current_length();
+ Handle<Object> value = Pop().to_anyref();
+ uint32_t entry_index = Pop().to<uint32_t>();
+ if (entry_index >= table_size) {
+ return DoTrap(kTrapTableOutOfBounds, pc);
+ }
+ WasmTableObject::Set(isolate_, table, entry_index, value);
+ len = 1 + imm.length;
+ break;
+ }
#define LOAD_CASE(name, ctype, mtype, rep) \
case kExpr##name: { \
- if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len, \
+ if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, &len, \
MachineRepresentation::rep)) \
return; \
break; \
@@ -3183,7 +3323,7 @@ class ThreadImpl {
#define STORE_CASE(name, ctype, mtype, rep) \
case kExpr##name: { \
- if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len, \
+ if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, &len, \
MachineRepresentation::rep)) \
return; \
break; \
@@ -3300,16 +3440,16 @@ class ThreadImpl {
}
case kNumericPrefix: {
++len;
- if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteNumericOp(opcode, &decoder, code, pc, &len)) return;
break;
}
case kAtomicPrefix: {
- if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteAtomicOp(opcode, &decoder, code, pc, &len)) return;
break;
}
case kSimdPrefix: {
++len;
- if (!ExecuteSimdOp(opcode, &decoder, code, pc, len)) return;
+ if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len)) return;
break;
}
@@ -3547,118 +3687,71 @@ class ThreadImpl {
}
Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
- Handle<JSFunction> wasm_entry =
- WasmDebugInfo::GetCWasmEntry(debug_info, sig);
+ Handle<Code> wasm_entry = WasmDebugInfo::GetCWasmEntry(debug_info, sig);
TRACE(" => Calling external wasm function\n");
// Copy the arguments to one buffer.
- // TODO(clemensh): Introduce a helper for all argument buffer
- // con-/destruction.
- std::vector<uint8_t> arg_buffer(num_args * 8);
- size_t offset = 0;
+ CWasmArgumentsPacker packer(CWasmArgumentsPacker::TotalSize(sig));
sp_t base_index = StackHeight() - num_args;
for (int i = 0; i < num_args; ++i) {
- int param_size = ValueTypes::ElementSizeInBytes(sig->GetParam(i));
- if (arg_buffer.size() < offset + param_size) {
- arg_buffer.resize(std::max(2 * arg_buffer.size(), offset + param_size));
- }
- Address address = reinterpret_cast<Address>(arg_buffer.data()) + offset;
WasmValue arg = GetStackValue(base_index + i);
switch (sig->GetParam(i)) {
case kWasmI32:
- WriteUnalignedValue(address, arg.to<uint32_t>());
+ packer.Push(arg.to<uint32_t>());
break;
case kWasmI64:
- WriteUnalignedValue(address, arg.to<uint64_t>());
+ packer.Push(arg.to<uint64_t>());
break;
case kWasmF32:
- WriteUnalignedValue(address, arg.to<float>());
+ packer.Push(arg.to<float>());
break;
case kWasmF64:
- WriteUnalignedValue(address, arg.to<double>());
+ packer.Push(arg.to<double>());
break;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef:
- DCHECK_EQ(kSystemPointerSize, param_size);
- WriteUnalignedValue<Object>(address, *arg.to_anyref());
+ case kWasmFuncRef:
+ case kWasmExnRef:
+ packer.Push(arg.to_anyref()->ptr());
break;
default:
UNIMPLEMENTED();
}
- offset += param_size;
- }
-
- // Ensure that there is enough space in the arg_buffer to hold the return
- // value(s).
- size_t return_size = 0;
- for (ValueType t : sig->returns()) {
- return_size += ValueTypes::ElementSizeInBytes(t);
- }
- if (arg_buffer.size() < return_size) {
- arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer and the code target data pointers in handles. As
- // these are aligned pointers, to the GC it will look like Smis.
- Handle<Object> arg_buffer_obj(
- Object(reinterpret_cast<Address>(arg_buffer.data())), isolate);
- DCHECK(!arg_buffer_obj->IsHeapObject());
- Handle<Object> code_entry_obj(Object(code->instruction_start()), isolate);
- DCHECK(!code_entry_obj->IsHeapObject());
-
- static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
- "code below needs adaption");
- Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
- args[compiler::CWasmEntryParameters::kCodeEntry] = code_entry_obj;
- args[compiler::CWasmEntryParameters::kObjectRef] = object_ref;
- args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
-
- Handle<Object> receiver = isolate->factory()->undefined_value();
- trap_handler::SetThreadInWasm();
- MaybeHandle<Object> maybe_retval =
- Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
+ Address call_target = code->instruction_start();
+ Execution::CallWasm(isolate, wasm_entry, call_target, object_ref,
+ packer.argv());
TRACE(" => External wasm function returned%s\n",
- maybe_retval.is_null() ? " with exception" : "");
+ isolate->has_pending_exception() ? " with exception" : "");
// Pop arguments off the stack.
Drop(num_args);
- if (maybe_retval.is_null()) {
- // JSEntry may throw a stack overflow before we actually get to wasm code
- // or back to the interpreter, meaning the thread-in-wasm flag won't be
- // cleared.
- if (trap_handler::IsThreadInWasm()) {
- trap_handler::ClearThreadInWasm();
- }
+ if (isolate->has_pending_exception()) {
return TryHandleException(isolate);
}
- trap_handler::ClearThreadInWasm();
-
// Push return values.
- if (sig->return_count() > 0) {
- // TODO(wasm): Handle multiple returns.
- DCHECK_EQ(1, sig->return_count());
- Address address = reinterpret_cast<Address>(arg_buffer.data());
- switch (sig->GetReturn()) {
+ packer.Reset();
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ switch (sig->GetReturn(i)) {
case kWasmI32:
- Push(WasmValue(ReadUnalignedValue<uint32_t>(address)));
+ Push(WasmValue(packer.Pop<uint32_t>()));
break;
case kWasmI64:
- Push(WasmValue(ReadUnalignedValue<uint64_t>(address)));
+ Push(WasmValue(packer.Pop<uint64_t>()));
break;
case kWasmF32:
- Push(WasmValue(ReadUnalignedValue<float>(address)));
+ Push(WasmValue(packer.Pop<float>()));
break;
case kWasmF64:
- Push(WasmValue(ReadUnalignedValue<double>(address)));
+ Push(WasmValue(packer.Pop<double>()));
break;
case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
- Handle<Object> ref(ReadUnalignedValue<Object>(address), isolate);
+ case kWasmFuncRef:
+ case kWasmExnRef: {
+ Handle<Object> ref(Object(packer.Pop<Address>()), isolate);
Push(WasmValue(ref));
break;
}
@@ -3710,25 +3803,24 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
uint32_t expected_sig_id = module()->signature_ids[sig_index];
DCHECK_EQ(expected_sig_id,
module()->signature_map.Find(*module()->signatures[sig_index]));
-
- // The function table is stored in the instance.
- // TODO(wasm): the wasm interpreter currently supports only one table.
- CHECK_EQ(0, table_index);
// Bounds check against table size.
- if (entry_index >= instance_object_->indirect_function_table_size()) {
+ if (entry_index >=
+ static_cast<uint32_t>(WasmInstanceObject::IndirectFunctionTableSize(
+ isolate_, instance_object_, table_index))) {
return {ExternalCallResult::INVALID_FUNC};
}
- IndirectFunctionTableEntry entry(instance_object_, entry_index);
+ IndirectFunctionTableEntry entry(instance_object_, table_index,
+ entry_index);
// Signature check.
if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
FunctionSig* signature = module()->signatures[sig_index];
Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
WasmCode* code = GetTargetCode(isolate_, entry.target());
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index fb633c6c26..1ee76fc11d 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -1094,7 +1094,7 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!value->ToString(context).ToLocal(&string)) return;
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
- type = i::wasm::kWasmAnyFunc;
+ type = i::wasm::kWasmFuncRef;
} else if (enabled_features.anyref &&
string->StringEquals(v8_str(isolate, "anyref"))) {
type = i::wasm::kWasmAnyRef;
@@ -1222,7 +1222,7 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
*type = i::wasm::kWasmAnyRef;
} else if (enabled_features.anyref &&
string->StringEquals(v8_str(isolate, "anyfunc"))) {
- *type = i::wasm::kWasmAnyFunc;
+ *type = i::wasm::kWasmFuncRef;
} else {
// Unrecognized type.
*type = i::wasm::kWasmStmt;
@@ -1322,7 +1322,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Number> number_value;
if (!value->ToNumber(context).ToLocal(&number_value)) return;
if (!number_value->NumberValue(context).To(&f64_value)) return;
- f32_value = static_cast<float>(f64_value);
+ f32_value = i::DoubleToFloat32(f64_value);
}
global_obj->SetF32(f32_value);
break;
@@ -1347,15 +1347,15 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetAnyRef(Utils::OpenHandle(*value));
break;
}
- case i::wasm::kWasmAnyFunc: {
+ case i::wasm::kWasmFuncRef: {
if (args.Length() < 2) {
// When no inital value is provided, we have to use the WebAssembly
// default value 'null', and not the JS default value 'undefined'.
- global_obj->SetAnyFunc(i_isolate, i_isolate->factory()->null_value());
+ global_obj->SetFuncRef(i_isolate, i_isolate->factory()->null_value());
break;
}
- if (!global_obj->SetAnyFunc(i_isolate, Utils::OpenHandle(*value))) {
+ if (!global_obj->SetFuncRef(i_isolate, Utils::OpenHandle(*value))) {
thrower.TypeError(
"The value of anyfunc globals must be null or an "
"exported function");
@@ -1437,7 +1437,7 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Decode the function type and construct a signature.
i::Zone zone(i_isolate->allocator(), ZONE_NAME);
- i::wasm::FunctionSig::Builder builder(&zone, parameters_len, results_len);
+ i::wasm::FunctionSig::Builder builder(&zone, results_len, parameters_len);
for (uint32_t i = 0; i < parameters_len; ++i) {
i::wasm::ValueType type;
MaybeLocal<Value> maybe = parameters->Get(context, i);
@@ -1513,13 +1513,12 @@ void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Function.type()");
i::wasm::FunctionSig* sig;
+ i::Zone zone(i_isolate->allocator(), ZONE_NAME);
i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
if (i::WasmExportedFunction::IsWasmExportedFunction(*arg0)) {
sig = i::Handle<i::WasmExportedFunction>::cast(arg0)->sig();
} else if (i::WasmJSFunction::IsWasmJSFunction(*arg0)) {
- // TODO(7742): Implement deserialization of signature.
- sig = nullptr;
- UNIMPLEMENTED();
+ sig = i::Handle<i::WasmJSFunction>::cast(arg0)->GetSignature(&zone);
} else {
thrower.TypeError("Argument 0 must be a WebAssembly.Function");
return;
@@ -1686,7 +1685,7 @@ void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<String> element;
auto enabled_features = i::wasm::WasmFeaturesFromFlags();
- if (table->type() == i::wasm::ValueType::kWasmAnyFunc) {
+ if (table->type() == i::wasm::ValueType::kWasmFuncRef) {
element = v8_str(isolate, "anyfunc");
} else if (enabled_features.anyref &&
table->type() == i::wasm::ValueType::kWasmAnyRef) {
@@ -1694,7 +1693,6 @@ void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
} else {
UNREACHABLE();
}
- // TODO(aseemgarg): update anyfunc to funcref
if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
v8_str(isolate, "element"), element)
.IsJust()) {
@@ -1865,8 +1863,8 @@ void WebAssemblyGlobalGetValueCommon(
return_value.Set(receiver->GetF64());
break;
case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmAnyFunc:
- case i::wasm::kWasmExceptRef:
+ case i::wasm::kWasmFuncRef:
+ case i::wasm::kWasmExnRef:
return_value.Set(Utils::ToLocal(receiver->GetRef()));
break;
default:
@@ -1925,7 +1923,7 @@ void WebAssemblyGlobalSetValue(
case i::wasm::kWasmF32: {
double f64_value = 0;
if (!args[0]->NumberValue(context).To(&f64_value)) return;
- receiver->SetF32(static_cast<float>(f64_value));
+ receiver->SetF32(i::DoubleToFloat32(f64_value));
break;
}
case i::wasm::kWasmF64: {
@@ -1935,12 +1933,12 @@ void WebAssemblyGlobalSetValue(
break;
}
case i::wasm::kWasmAnyRef:
- case i::wasm::kWasmExceptRef: {
+ case i::wasm::kWasmExnRef: {
receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
break;
}
- case i::wasm::kWasmAnyFunc: {
- if (!receiver->SetAnyFunc(i_isolate, Utils::OpenHandle(*args[0]))) {
+ case i::wasm::kWasmFuncRef: {
+ if (!receiver->SetFuncRef(i_isolate, Utils::OpenHandle(*args[0]))) {
thrower.TypeError(
"value of an anyfunc reference must be either null or an "
"exported function");
@@ -2245,7 +2243,6 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
if (enabled_features.type_reflection) {
Handle<JSFunction> function_constructor = InstallConstructorFunc(
isolate, webassembly, "Function", WebAssemblyFunction);
- context->set_wasm_function_constructor(*function_constructor);
SetDummyInstanceTemplate(isolate, function_constructor);
JSFunction::EnsureHasInitialMap(function_constructor);
Handle<JSObject> function_proto(
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 8633a61504..f203649542 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -40,6 +40,9 @@ bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
*did_retry = true;
if (trial == kAllocationRetries) return false;
// Otherwise, collect garbage and retry.
+ // TODO(wasm): Since reservation limits are engine-wide, we should do an
+ // engine-wide GC here (i.e. trigger a GC in each isolate using the engine,
+ // and wait for them all to finish). See https://crbug.com/v8/9405.
heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
}
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index eb253219ad..7dd6b1c7b2 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -16,7 +16,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
namespace v8 {
namespace internal {
@@ -26,18 +26,18 @@ namespace {
// Emit a section code and the size as a padded varint that can be patched
// later.
-size_t EmitSection(SectionCode code, ZoneBuffer& buffer) {
+size_t EmitSection(SectionCode code, ZoneBuffer* buffer) {
// Emit the section code.
- buffer.write_u8(code);
+ buffer->write_u8(code);
// Emit a placeholder for the length.
- return buffer.reserve_u32v();
+ return buffer->reserve_u32v();
}
// Patch the size of a section after it's finished.
-void FixupSection(ZoneBuffer& buffer, size_t start) {
- buffer.patch_u32v(start, static_cast<uint32_t>(buffer.offset() - start -
- kPaddedVarInt32Size));
+void FixupSection(ZoneBuffer* buffer, size_t start) {
+ buffer->patch_u32v(start, static_cast<uint32_t>(buffer->offset() - start -
+ kPaddedVarInt32Size));
}
} // namespace
@@ -186,22 +186,22 @@ void WasmFunctionBuilder::DeleteCodeAfter(size_t position) {
body_.Truncate(position);
}
-void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
- buffer.write_u32v(signature_index_);
+void WasmFunctionBuilder::WriteSignature(ZoneBuffer* buffer) const {
+ buffer->write_u32v(signature_index_);
}
-void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
+void WasmFunctionBuilder::WriteBody(ZoneBuffer* buffer) const {
size_t locals_size = locals_.Size();
- buffer.write_size(locals_size + body_.size());
- buffer.EnsureSpace(locals_size);
- byte** ptr = buffer.pos_ptr();
+ buffer->write_size(locals_size + body_.size());
+ buffer->EnsureSpace(locals_size);
+ byte** ptr = buffer->pos_ptr();
locals_.Emit(*ptr);
(*ptr) += locals_size; // UGLY: manual bump of position pointer
if (body_.size() > 0) {
- size_t base = buffer.offset();
- buffer.write(body_.begin(), body_.size());
+ size_t base = buffer->offset();
+ buffer->write(body_.begin(), body_.size());
for (DirectCallIndex call : direct_calls_) {
- buffer.patch_u32v(
+ buffer->patch_u32v(
base + call.offset,
call.direct_index +
static_cast<uint32_t>(builder_->function_imports_.size()));
@@ -209,29 +209,29 @@ void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
}
}
-void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const {
+void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer* buffer) const {
if (asm_func_start_source_position_ == 0 && asm_offsets_.size() == 0) {
- buffer.write_size(0);
+ buffer->write_size(0);
return;
}
size_t locals_enc_size = LEBHelper::sizeof_u32v(locals_.Size());
size_t func_start_size =
LEBHelper::sizeof_u32v(asm_func_start_source_position_);
- buffer.write_size(asm_offsets_.size() + locals_enc_size + func_start_size);
+ buffer->write_size(asm_offsets_.size() + locals_enc_size + func_start_size);
// Offset of the recorded byte offsets.
DCHECK_GE(kMaxUInt32, locals_.Size());
- buffer.write_u32v(static_cast<uint32_t>(locals_.Size()));
+ buffer->write_u32v(static_cast<uint32_t>(locals_.Size()));
// Start position of the function.
- buffer.write_u32v(asm_func_start_source_position_);
- buffer.write(asm_offsets_.begin(), asm_offsets_.size());
+ buffer->write_u32v(asm_func_start_source_position_);
+ buffer->write(asm_offsets_.begin(), asm_offsets_.size());
}
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
function_imports_(zone),
- function_exports_(zone),
global_imports_(zone),
+ exports_(zone),
functions_(zone),
data_segments_(zone),
indirect_functions_(zone),
@@ -274,7 +274,10 @@ uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
if (count > FLAG_wasm_max_table_size - index) {
return std::numeric_limits<uint32_t>::max();
}
- indirect_functions_.resize(indirect_functions_.size() + count);
+ DCHECK(max_table_size_ == 0 ||
+ indirect_functions_.size() + count <= max_table_size_);
+ indirect_functions_.resize(indirect_functions_.size() + count,
+ WasmElemSegment::kNullIndex);
return index;
}
@@ -283,15 +286,23 @@ void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
indirect_functions_[indirect] = direct;
}
+void WasmModuleBuilder::SetMaxTableSize(uint32_t max) {
+ DCHECK_GE(FLAG_wasm_max_table_size, max);
+ DCHECK_GE(max, indirect_functions_.size());
+ max_table_size_ = max;
+}
+
uint32_t WasmModuleBuilder::AddImport(Vector<const char> name,
FunctionSig* sig) {
+ DCHECK(adding_imports_allowed_);
function_imports_.push_back({name, AddSignature(sig)});
return static_cast<uint32_t>(function_imports_.size() - 1);
}
uint32_t WasmModuleBuilder::AddGlobalImport(Vector<const char> name,
- ValueType type) {
- global_imports_.push_back({name, ValueTypes::ValueTypeCodeFor(type)});
+ ValueType type, bool mutability) {
+ global_imports_.push_back(
+ {name, ValueTypes::ValueTypeCodeFor(type), mutability});
return static_cast<uint32_t>(global_imports_.size() - 1);
}
@@ -300,14 +311,33 @@ void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
}
void WasmModuleBuilder::AddExport(Vector<const char> name,
- WasmFunctionBuilder* function) {
- function_exports_.push_back({name, function->func_index()});
+ ImportExportKindCode kind, uint32_t index) {
+ DCHECK_LE(index, std::numeric_limits<int>::max());
+ exports_.push_back({name, kind, static_cast<int>(index)});
+}
+
+uint32_t WasmModuleBuilder::AddExportedGlobal(ValueType type, bool mutability,
+ const WasmInitExpr& init,
+ Vector<const char> name) {
+ uint32_t index = AddGlobal(type, mutability, init);
+ AddExport(name, kExternalGlobal, index);
+ return index;
+}
+
+void WasmModuleBuilder::ExportImportedFunction(Vector<const char> name,
+ int import_index) {
+#if DEBUG
+ // The size of function_imports_ must not change any more.
+ adding_imports_allowed_ = false;
+#endif
+ exports_.push_back(
+ {name, kExternalFunction,
+ import_index - static_cast<int>(function_imports_.size())});
}
-uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool exported,
- bool mutability,
+uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool mutability,
const WasmInitExpr& init) {
- globals_.push_back({type, exported, mutability, init});
+ globals_.push_back({type, mutability, init});
return static_cast<uint32_t>(globals_.size() - 1);
}
@@ -322,25 +352,25 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
-void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
+void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
// == Emit magic =============================================================
- buffer.write_u32(kWasmMagic);
- buffer.write_u32(kWasmVersion);
+ buffer->write_u32(kWasmMagic);
+ buffer->write_u32(kWasmVersion);
// == Emit signatures ========================================================
if (signatures_.size() > 0) {
size_t start = EmitSection(kTypeSectionCode, buffer);
- buffer.write_size(signatures_.size());
+ buffer->write_size(signatures_.size());
for (FunctionSig* sig : signatures_) {
- buffer.write_u8(kWasmFunctionTypeCode);
- buffer.write_size(sig->parameter_count());
+ buffer->write_u8(kWasmFunctionTypeCode);
+ buffer->write_size(sig->parameter_count());
for (auto param : sig->parameters()) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(param));
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(param));
}
- buffer.write_size(sig->return_count());
+ buffer->write_size(sig->return_count());
for (auto ret : sig->returns()) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(ret));
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(ret));
}
}
FixupSection(buffer, start);
@@ -349,19 +379,19 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit imports ===========================================================
if (global_imports_.size() + function_imports_.size() > 0) {
size_t start = EmitSection(kImportSectionCode, buffer);
- buffer.write_size(global_imports_.size() + function_imports_.size());
+ buffer->write_size(global_imports_.size() + function_imports_.size());
for (auto import : global_imports_) {
- buffer.write_u32v(0); // module name (length)
- buffer.write_string(import.name); // field name
- buffer.write_u8(kExternalGlobal);
- buffer.write_u8(import.type_code);
- buffer.write_u8(0); // immutable
+ buffer->write_u32v(0); // module name (length)
+ buffer->write_string(import.name); // field name
+ buffer->write_u8(kExternalGlobal);
+ buffer->write_u8(import.type_code);
+ buffer->write_u8(import.mutability ? 1 : 0);
}
for (auto import : function_imports_) {
- buffer.write_u32v(0); // module name (length)
- buffer.write_string(import.name); // field name
- buffer.write_u8(kExternalFunction);
- buffer.write_u32v(import.sig_index);
+ buffer->write_u32v(0); // module name (length)
+ buffer->write_string(import.name); // field name
+ buffer->write_u8(kExternalFunction);
+ buffer->write_u32v(import.sig_index);
}
FixupSection(buffer, start);
}
@@ -370,7 +400,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
uint32_t num_function_names = 0;
if (functions_.size() > 0) {
size_t start = EmitSection(kFunctionSectionCode, buffer);
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
for (auto* function : functions_) {
function->WriteSignature(buffer);
if (!function->name_.empty()) ++num_function_names;
@@ -381,28 +411,31 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit function table ====================================================
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kTableSectionCode, buffer);
- buffer.write_u8(1); // table count
- buffer.write_u8(kLocalAnyFunc);
- buffer.write_u8(kHasMaximumFlag);
- buffer.write_size(indirect_functions_.size());
- buffer.write_size(indirect_functions_.size());
+ buffer->write_u8(1); // table count
+ buffer->write_u8(kLocalFuncRef);
+ buffer->write_u8(kHasMaximumFlag);
+ buffer->write_size(indirect_functions_.size());
+ size_t max =
+ max_table_size_ > 0 ? max_table_size_ : indirect_functions_.size();
+ DCHECK_GE(max, indirect_functions_.size());
+ buffer->write_size(max);
FixupSection(buffer, start);
}
// == emit memory declaration ================================================
{
size_t start = EmitSection(kMemorySectionCode, buffer);
- buffer.write_u8(1); // memory count
+ buffer->write_u8(1); // memory count
if (has_shared_memory_) {
- buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kSharedAndMaximum
- : MemoryFlags::kSharedNoMaximum);
+ buffer->write_u8(has_max_memory_size_ ? MemoryFlags::kSharedAndMaximum
+ : MemoryFlags::kSharedNoMaximum);
} else {
- buffer.write_u8(has_max_memory_size_ ? MemoryFlags::kMaximum
- : MemoryFlags::kNoMaximum);
+ buffer->write_u8(has_max_memory_size_ ? MemoryFlags::kMaximum
+ : MemoryFlags::kNoMaximum);
}
- buffer.write_u32v(min_memory_size_);
+ buffer->write_u32v(min_memory_size_);
if (has_max_memory_size_) {
- buffer.write_u32v(max_memory_size_);
+ buffer->write_u32v(max_memory_size_);
}
FixupSection(buffer, start);
}
@@ -410,76 +443,90 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit globals ===========================================================
if (globals_.size() > 0) {
size_t start = EmitSection(kGlobalSectionCode, buffer);
- buffer.write_size(globals_.size());
+ buffer->write_size(globals_.size());
for (auto global : globals_) {
- buffer.write_u8(ValueTypes::ValueTypeCodeFor(global.type));
- buffer.write_u8(global.mutability ? 1 : 0);
+ buffer->write_u8(ValueTypes::ValueTypeCodeFor(global.type));
+ buffer->write_u8(global.mutability ? 1 : 0);
switch (global.init.kind) {
case WasmInitExpr::kI32Const:
DCHECK_EQ(kWasmI32, global.type);
- buffer.write_u8(kExprI32Const);
- buffer.write_i32v(global.init.val.i32_const);
+ buffer->write_u8(kExprI32Const);
+ buffer->write_i32v(global.init.val.i32_const);
break;
case WasmInitExpr::kI64Const:
DCHECK_EQ(kWasmI64, global.type);
- buffer.write_u8(kExprI64Const);
- buffer.write_i64v(global.init.val.i64_const);
+ buffer->write_u8(kExprI64Const);
+ buffer->write_i64v(global.init.val.i64_const);
break;
case WasmInitExpr::kF32Const:
DCHECK_EQ(kWasmF32, global.type);
- buffer.write_u8(kExprF32Const);
- buffer.write_f32(global.init.val.f32_const);
+ buffer->write_u8(kExprF32Const);
+ buffer->write_f32(global.init.val.f32_const);
break;
case WasmInitExpr::kF64Const:
DCHECK_EQ(kWasmF64, global.type);
- buffer.write_u8(kExprF64Const);
- buffer.write_f64(global.init.val.f64_const);
+ buffer->write_u8(kExprF64Const);
+ buffer->write_f64(global.init.val.f64_const);
break;
case WasmInitExpr::kGlobalIndex:
- buffer.write_u8(kExprGetGlobal);
- buffer.write_u32v(global.init.val.global_index);
+ buffer->write_u8(kExprGetGlobal);
+ buffer->write_u32v(global.init.val.global_index);
break;
default: {
// No initializer, emit a default value.
switch (global.type) {
case kWasmI32:
- buffer.write_u8(kExprI32Const);
+ buffer->write_u8(kExprI32Const);
// LEB encoding of 0.
- buffer.write_u8(0);
+ buffer->write_u8(0);
break;
case kWasmI64:
- buffer.write_u8(kExprI64Const);
+ buffer->write_u8(kExprI64Const);
// LEB encoding of 0.
- buffer.write_u8(0);
+ buffer->write_u8(0);
break;
case kWasmF32:
- buffer.write_u8(kExprF32Const);
- buffer.write_f32(0.f);
+ buffer->write_u8(kExprF32Const);
+ buffer->write_f32(0.f);
break;
case kWasmF64:
- buffer.write_u8(kExprF64Const);
- buffer.write_f64(0.);
+ buffer->write_u8(kExprF64Const);
+ buffer->write_f64(0.);
break;
default:
UNREACHABLE();
}
}
}
- buffer.write_u8(kExprEnd);
+ buffer->write_u8(kExprEnd);
}
FixupSection(buffer, start);
}
// == emit exports ===========================================================
- if (!function_exports_.empty()) {
+ if (exports_.size() > 0) {
size_t start = EmitSection(kExportSectionCode, buffer);
- buffer.write_size(function_exports_.size());
- for (auto function_export : function_exports_) {
- buffer.write_string(function_export.name);
- buffer.write_u8(kExternalFunction);
- buffer.write_size(function_export.function_index +
- function_imports_.size());
+ buffer->write_size(exports_.size());
+ for (auto ex : exports_) {
+ buffer->write_string(ex.name);
+ buffer->write_u8(ex.kind);
+ switch (ex.kind) {
+ case kExternalFunction:
+ buffer->write_size(ex.index + function_imports_.size());
+ break;
+ case kExternalGlobal:
+ buffer->write_size(ex.index + global_imports_.size());
+ break;
+ case kExternalMemory:
+ case kExternalTable:
+ // The WasmModuleBuilder doesn't support importing tables or memories
+ // yet, so there is no index offset to add.
+ buffer->write_size(ex.index);
+ break;
+ case kExternalException:
+ UNREACHABLE();
+ }
}
FixupSection(buffer, start);
}
@@ -487,22 +534,33 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit start function index ==============================================
if (start_function_index_ >= 0) {
size_t start = EmitSection(kStartSectionCode, buffer);
- buffer.write_size(start_function_index_ + function_imports_.size());
+ buffer->write_size(start_function_index_ + function_imports_.size());
FixupSection(buffer, start);
}
// == emit function table elements ===========================================
if (indirect_functions_.size() > 0) {
size_t start = EmitSection(kElementSectionCode, buffer);
- buffer.write_u8(1); // count of entries
- buffer.write_u8(0); // table index
- buffer.write_u8(kExprI32Const); // offset
- buffer.write_u32v(0);
- buffer.write_u8(kExprEnd);
- buffer.write_size(indirect_functions_.size()); // element count
-
- for (auto index : indirect_functions_) {
- buffer.write_size(index + function_imports_.size());
+ buffer->write_u8(1); // count of entries
+ buffer->write_u8(0); // table index
+ uint32_t first_element = 0;
+ while (first_element < indirect_functions_.size() &&
+ indirect_functions_[first_element] == WasmElemSegment::kNullIndex) {
+ first_element++;
+ }
+ uint32_t last_element =
+ static_cast<uint32_t>(indirect_functions_.size() - 1);
+ while (last_element >= first_element &&
+ indirect_functions_[last_element] == WasmElemSegment::kNullIndex) {
+ last_element--;
+ }
+ buffer->write_u8(kExprI32Const); // offset
+ buffer->write_u32v(first_element);
+ buffer->write_u8(kExprEnd);
+ uint32_t element_count = last_element - first_element + 1;
+ buffer->write_size(element_count);
+ for (uint32_t i = first_element; i <= last_element; i++) {
+ buffer->write_size(indirect_functions_[i] + function_imports_.size());
}
FixupSection(buffer, start);
@@ -518,18 +576,18 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
if (emit_compilation_hints) {
// Emit the section code.
- buffer.write_u8(kUnknownSectionCode);
+ buffer->write_u8(kUnknownSectionCode);
// Emit a placeholder for section length.
- size_t start = buffer.reserve_u32v();
+ size_t start = buffer->reserve_u32v();
// Emit custom section name.
- buffer.write_string(CStrVector("compilationHints"));
+ buffer->write_string(CStrVector("compilationHints"));
// Emit hint count.
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
// Emit hint bytes.
for (auto* fn : functions_) {
uint8_t hint_byte =
fn->hint_ != kNoCompilationHint ? fn->hint_ : kDefaultCompilationHint;
- buffer.write_u8(hint_byte);
+ buffer->write_u8(hint_byte);
}
FixupSection(buffer, start);
}
@@ -537,7 +595,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit code ==============================================================
if (functions_.size() > 0) {
size_t start = EmitSection(kCodeSectionCode, buffer);
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
for (auto* function : functions_) {
function->WriteBody(buffer);
}
@@ -547,15 +605,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == emit data segments =====================================================
if (data_segments_.size() > 0) {
size_t start = EmitSection(kDataSectionCode, buffer);
- buffer.write_size(data_segments_.size());
+ buffer->write_size(data_segments_.size());
for (auto segment : data_segments_) {
- buffer.write_u8(0); // linear memory segment
- buffer.write_u8(kExprI32Const); // initializer expression for dest
- buffer.write_u32v(segment.dest);
- buffer.write_u8(kExprEnd);
- buffer.write_u32v(static_cast<uint32_t>(segment.data.size()));
- buffer.write(&segment.data[0], segment.data.size());
+ buffer->write_u8(0); // linear memory segment
+ buffer->write_u8(kExprI32Const); // initializer expression for dest
+ buffer->write_u32v(segment.dest);
+ buffer->write_u8(kExprEnd);
+ buffer->write_u32v(static_cast<uint32_t>(segment.data.size()));
+ buffer->write(&segment.data[0], segment.data.size());
}
FixupSection(buffer, start);
}
@@ -563,33 +621,33 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// == Emit names =============================================================
if (num_function_names > 0 || !function_imports_.empty()) {
// Emit the section code.
- buffer.write_u8(kUnknownSectionCode);
+ buffer->write_u8(kUnknownSectionCode);
// Emit a placeholder for the length.
- size_t start = buffer.reserve_u32v();
+ size_t start = buffer->reserve_u32v();
// Emit the section string.
- buffer.write_string(CStrVector("name"));
+ buffer->write_string(CStrVector("name"));
// Emit a subsection for the function names.
- buffer.write_u8(NameSectionKindCode::kFunction);
+ buffer->write_u8(NameSectionKindCode::kFunction);
// Emit a placeholder for the subsection length.
- size_t functions_start = buffer.reserve_u32v();
+ size_t functions_start = buffer->reserve_u32v();
// Emit the function names.
// Imports are always named.
uint32_t num_imports = static_cast<uint32_t>(function_imports_.size());
- buffer.write_size(num_imports + num_function_names);
+ buffer->write_size(num_imports + num_function_names);
uint32_t function_index = 0;
for (; function_index < num_imports; ++function_index) {
const WasmFunctionImport* import = &function_imports_[function_index];
DCHECK(!import->name.empty());
- buffer.write_u32v(function_index);
- buffer.write_string(import->name);
+ buffer->write_u32v(function_index);
+ buffer->write_string(import->name);
}
if (num_function_names > 0) {
for (auto* function : functions_) {
DCHECK_EQ(function_index,
function->func_index() + function_imports_.size());
if (!function->name_.empty()) {
- buffer.write_u32v(function_index);
- buffer.write_string(function->name_);
+ buffer->write_u32v(function_index);
+ buffer->write_string(function->name_);
}
++function_index;
}
@@ -599,15 +657,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
}
-void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer& buffer) const {
+void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer* buffer) const {
// == Emit asm.js offset table ===============================================
- buffer.write_size(functions_.size());
+ buffer->write_size(functions_.size());
// Emit the offset table per function.
for (auto* function : functions_) {
function->WriteAsmWasmOffsetTable(buffer);
}
// Append a 0 to indicate that this is an encoded table.
- buffer.write_u8(0);
+ buffer->write_u8(0);
}
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 750dafa227..9e6a8933e2 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -8,7 +8,7 @@
#include "src/codegen/signature.h"
#include "src/zone/zone-containers.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/utils/vector.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
@@ -36,19 +36,19 @@ class ZoneBuffer : public ZoneObject {
void write_u16(uint16_t x) {
EnsureSpace(2);
- WriteLittleEndianValue<uint16_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint16_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 2;
}
void write_u32(uint32_t x) {
EnsureSpace(4);
- WriteLittleEndianValue<uint32_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint32_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 4;
}
void write_u64(uint64_t x) {
EnsureSpace(8);
- WriteLittleEndianValue<uint64_t>(reinterpret_cast<Address>(pos_), x);
+ base::WriteLittleEndianValue<uint64_t>(reinterpret_cast<Address>(pos_), x);
pos_ += 8;
}
@@ -187,9 +187,9 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
}
void DeleteCodeAfter(size_t position);
- void WriteSignature(ZoneBuffer& buffer) const;
- void WriteBody(ZoneBuffer& buffer) const;
- void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
+ void WriteSignature(ZoneBuffer* buffer) const;
+ void WriteBody(ZoneBuffer* buffer) const;
+ void WriteAsmWasmOffsetTable(ZoneBuffer* buffer) const;
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
@@ -231,22 +231,34 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Building methods.
uint32_t AddImport(Vector<const char> name, FunctionSig* sig);
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
- uint32_t AddGlobal(ValueType type, bool exported, bool mutability = true,
+ uint32_t AddGlobal(ValueType type, bool mutability = true,
const WasmInitExpr& init = WasmInitExpr());
- uint32_t AddGlobalImport(Vector<const char> name, ValueType type);
+ uint32_t AddGlobalImport(Vector<const char> name, ValueType type,
+ bool mutability);
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
+ // In the current implementation, it's supported to have uninitialized slots
+ // at the beginning and/or end of the indirect function table, as long as
+ // the filled slots form a contiguous block in the middle.
uint32_t AllocateIndirectFunctions(uint32_t count);
void SetIndirectFunction(uint32_t indirect, uint32_t direct);
+ void SetMaxTableSize(uint32_t max);
void MarkStartFunction(WasmFunctionBuilder* builder);
- void AddExport(Vector<const char> name, WasmFunctionBuilder* builder);
+ void AddExport(Vector<const char> name, ImportExportKindCode kind,
+ uint32_t index);
+ void AddExport(Vector<const char> name, WasmFunctionBuilder* builder) {
+ AddExport(name, kExternalFunction, builder->func_index());
+ }
+ uint32_t AddExportedGlobal(ValueType type, bool mutability,
+ const WasmInitExpr& init, Vector<const char> name);
+ void ExportImportedFunction(Vector<const char> name, int import_index);
void SetMinMemorySize(uint32_t value);
void SetMaxMemorySize(uint32_t value);
void SetHasSharedMemory();
// Writing methods.
- void WriteTo(ZoneBuffer& buffer) const;
- void WriteAsmJsOffsetTable(ZoneBuffer& buffer) const;
+ void WriteTo(ZoneBuffer* buffer) const;
+ void WriteAsmJsOffsetTable(ZoneBuffer* buffer) const;
Zone* zone() { return zone_; }
@@ -258,19 +270,20 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
uint32_t sig_index;
};
- struct WasmFunctionExport {
+ struct WasmGlobalImport {
Vector<const char> name;
- uint32_t function_index;
+ ValueTypeCode type_code;
+ bool mutability;
};
- struct WasmGlobalImport {
+ struct WasmExport {
Vector<const char> name;
- ValueTypeCode type_code;
+ ImportExportKindCode kind;
+ int index; // Can be negative for re-exported imports.
};
struct WasmGlobal {
ValueType type;
- bool exported;
bool mutability;
WasmInitExpr init;
};
@@ -284,18 +297,23 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone_;
ZoneVector<FunctionSig*> signatures_;
ZoneVector<WasmFunctionImport> function_imports_;
- ZoneVector<WasmFunctionExport> function_exports_;
ZoneVector<WasmGlobalImport> global_imports_;
+ ZoneVector<WasmExport> exports_;
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmDataSegment> data_segments_;
ZoneVector<uint32_t> indirect_functions_;
ZoneVector<WasmGlobal> globals_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
+ uint32_t max_table_size_ = 0;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
bool has_shared_memory_;
+#if DEBUG
+ // Once AddExportedImport is called, no more imports can be added.
+ bool adding_imports_allowed_ = true;
+#endif
};
inline FunctionSig* WasmFunctionBuilder::signature() {
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index eb40c51dd3..7dea208d8e 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -164,7 +164,11 @@ struct WasmCompilationHint {
WasmCompilationHintTier top_tier;
};
-enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
+enum ModuleOrigin : uint8_t {
+ kWasmOrigin,
+ kAsmJsSloppyOrigin,
+ kAsmJsStrictOrigin
+};
#define SELECT_WASM_COUNTER(counters, origin, prefix, suffix) \
((origin) == kWasmOrigin ? (counters)->prefix##_wasm_##suffix() \
@@ -221,6 +225,10 @@ struct V8_EXPORT_PRIVATE WasmModule {
void AddFunctionNameForTesting(int function_index, WireBytesRef name);
};
+inline bool is_asmjs_module(const WasmModule* module) {
+ return module->origin != kWasmOrigin;
+}
+
size_t EstimateStoredSize(const WasmModule* module);
// Returns the number of possible export wrappers for a given module.
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index e1fc2d2410..7a80b7ea2b 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -7,7 +7,7 @@
#include "src/wasm/wasm-objects.h"
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/contexts-inl.h"
#include "src/objects/foreign-inl.h"
@@ -51,10 +51,11 @@ CAST_ACCESSOR(WasmModuleObject)
CAST_ACCESSOR(WasmTableObject)
CAST_ACCESSOR(AsmWasmData)
-#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
- bool holder::has_##name() { \
- return !READ_FIELD(*this, offset).IsUndefined(); \
- } \
+#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
+ DEF_GETTER(holder, has_##name, bool) { \
+ Object value = TaggedField<Object, offset>::load(isolate, *this); \
+ return !value.IsUndefined(GetReadOnlyRoots(isolate)); \
+ } \
ACCESSORS(holder, name, type, offset)
#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
@@ -65,7 +66,7 @@ CAST_ACCESSOR(AsmWasmData)
/* kTaggedSize aligned so we have to use unaligned pointer friendly */ \
/* way of accessing them in order to avoid undefined behavior in C++ */ \
/* code. */ \
- return ReadUnalignedValue<type>(FIELD_ADDR(*this, offset)); \
+ return base::ReadUnalignedValue<type>(FIELD_ADDR(*this, offset)); \
} else { \
return *reinterpret_cast<type const*>(FIELD_ADDR(*this, offset)); \
} \
@@ -77,7 +78,7 @@ CAST_ACCESSOR(AsmWasmData)
/* kTaggedSize aligned so we have to use unaligned pointer friendly */ \
/* way of accessing them in order to avoid undefined behavior in C++ */ \
/* code. */ \
- WriteUnalignedValue<type>(FIELD_ADDR(*this, offset), value); \
+ base::WriteUnalignedValue<type>(FIELD_ADDR(*this, offset), value); \
} else { \
*reinterpret_cast<type*>(FIELD_ADDR(*this, offset)) = value; \
} \
@@ -110,7 +111,7 @@ void WasmModuleObject::reset_breakpoint_infos() {
GetReadOnlyRoots().undefined_value());
}
bool WasmModuleObject::is_asm_js() {
- bool asm_js = module()->origin == wasm::kAsmJsOrigin;
+ bool asm_js = is_asmjs_module(module());
DCHECK_EQ(asm_js, script().IsUserJavaScript());
DCHECK_EQ(asm_js, has_asm_js_offset_table());
return asm_js;
@@ -148,53 +149,54 @@ Address WasmGlobalObject::address() const {
}
int32_t WasmGlobalObject::GetI32() {
- return ReadLittleEndianValue<int32_t>(address());
+ return base::ReadLittleEndianValue<int32_t>(address());
}
int64_t WasmGlobalObject::GetI64() {
- return ReadLittleEndianValue<int64_t>(address());
+ return base::ReadLittleEndianValue<int64_t>(address());
}
float WasmGlobalObject::GetF32() {
- return ReadLittleEndianValue<float>(address());
+ return base::ReadLittleEndianValue<float>(address());
}
double WasmGlobalObject::GetF64() {
- return ReadLittleEndianValue<double>(address());
+ return base::ReadLittleEndianValue<double>(address());
}
Handle<Object> WasmGlobalObject::GetRef() {
- // We use this getter for anyref, anyfunc, and except_ref.
+ // We use this getter for anyref, funcref, and exnref.
DCHECK(wasm::ValueTypes::IsReferenceType(type()));
return handle(tagged_buffer().get(offset()), GetIsolate());
}
void WasmGlobalObject::SetI32(int32_t value) {
- WriteLittleEndianValue<int32_t>(address(), value);
+ base::WriteLittleEndianValue<int32_t>(address(), value);
}
void WasmGlobalObject::SetI64(int64_t value) {
- WriteLittleEndianValue<int64_t>(address(), value);
+ base::WriteLittleEndianValue<int64_t>(address(), value);
}
void WasmGlobalObject::SetF32(float value) {
- WriteLittleEndianValue<float>(address(), value);
+ base::WriteLittleEndianValue<float>(address(), value);
}
void WasmGlobalObject::SetF64(double value) {
- WriteLittleEndianValue<double>(address(), value);
+ base::WriteLittleEndianValue<double>(address(), value);
}
void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
- // We use this getter anyref and except_ref.
- DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExceptRef);
+ // We use this getter anyref and exnref.
+ DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExnRef);
tagged_buffer().set(offset(), *value);
}
-bool WasmGlobalObject::SetAnyFunc(Isolate* isolate, Handle<Object> value) {
- DCHECK_EQ(type(), wasm::kWasmAnyFunc);
+bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
+ DCHECK_EQ(type(), wasm::kWasmFuncRef);
if (!value->IsNull(isolate) &&
- !WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ !WasmExportedFunction::IsWasmExportedFunction(*value) &&
+ !WasmCapiFunction::IsWasmCapiFunction(*value)) {
return false;
}
tagged_buffer().set(offset(), *value);
@@ -249,6 +251,8 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, imported_mutable_globals_buffers,
OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tables, FixedArray, kTablesOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_tables, FixedArray,
+ kIndirectFunctionTablesOffset)
ACCESSORS(WasmInstanceObject, imported_function_refs, FixedArray,
kImportedFunctionRefsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_refs, FixedArray,
@@ -257,16 +261,10 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
kManagedNativeAllocationsOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, exceptions_table, FixedArray,
kExceptionsTableOffset)
-ACCESSORS(WasmInstanceObject, undefined_value, Oddball, kUndefinedValueOffset)
-ACCESSORS(WasmInstanceObject, null_value, Oddball, kNullValueOffset)
ACCESSORS(WasmInstanceObject, centry_stub, Code, kCEntryStubOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, wasm_exported_functions, FixedArray,
kWasmExportedFunctionsOffset)
-inline bool WasmInstanceObject::has_indirect_function_table() {
- return indirect_function_table_sig_ids() != nullptr;
-}
-
void WasmInstanceObject::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
@@ -276,10 +274,29 @@ void WasmInstanceObject::clear_padding() {
}
IndirectFunctionTableEntry::IndirectFunctionTableEntry(
- Handle<WasmInstanceObject> instance, int index)
- : instance_(instance), index_(index) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, instance->indirect_function_table_size());
+ Handle<WasmInstanceObject> instance, int table_index, int entry_index)
+ : instance_(table_index == 0 ? instance
+ : Handle<WasmInstanceObject>::null()),
+ table_(table_index != 0
+ ? handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(
+ table_index)),
+ instance->GetIsolate())
+ : Handle<WasmIndirectFunctionTable>::null()),
+ index_(entry_index) {
+ DCHECK_GE(entry_index, 0);
+ DCHECK_LT(entry_index, table_index == 0
+ ? instance->indirect_function_table_size()
+ : table_->size());
+}
+
+IndirectFunctionTableEntry::IndirectFunctionTableEntry(
+ Handle<WasmIndirectFunctionTable> table, int entry_index)
+ : instance_(Handle<WasmInstanceObject>::null()),
+ table_(table),
+ index_(entry_index) {
+ DCHECK_GE(entry_index, 0);
+ DCHECK_LT(entry_index, table_->size());
}
ImportedFunctionEntry::ImportedFunctionEntry(
@@ -307,6 +324,10 @@ ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
+ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
+ACCESSORS(WasmExportedFunctionData, wasm_call_target, Smi,
+ kWasmCallTargetOffset)
+SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
// WasmJSFunction
WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
@@ -317,6 +338,13 @@ CAST_ACCESSOR(WasmJSFunction)
// WasmJSFunctionData
OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, Struct)
CAST_ACCESSOR(WasmJSFunctionData)
+SMI_ACCESSORS(WasmJSFunctionData, serialized_return_count,
+ kSerializedReturnCountOffset)
+SMI_ACCESSORS(WasmJSFunctionData, serialized_parameter_count,
+ kSerializedParameterCountOffset)
+ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
+ kSerializedSignatureOffset)
+ACCESSORS(WasmJSFunctionData, callable, JSReceiver, kCallableOffset)
ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
// WasmCapiFunction
@@ -336,6 +364,18 @@ ACCESSORS(WasmCapiFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmCapiFunctionData, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
+// WasmIndirectFunctionTable
+OBJECT_CONSTRUCTORS_IMPL(WasmIndirectFunctionTable, Struct)
+CAST_ACCESSOR(WasmIndirectFunctionTable)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, size, uint32_t, kSizeOffset)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, sig_ids, uint32_t*,
+ kSigIdsOffset)
+PRIMITIVE_ACCESSORS(WasmIndirectFunctionTable, targets, Address*,
+ kTargetsOffset)
+OPTIONAL_ACCESSORS(WasmIndirectFunctionTable, managed_native_allocations,
+ Foreign, kManagedNativeAllocationsOffset)
+ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
+
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 27a56695c2..f44f8326ad 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -139,7 +139,9 @@ class WasmInstanceNativeAllocations {
instance->set_indirect_function_table_refs(*new_refs);
for (uint32_t j = old_size; j < new_size; j++) {
- IndirectFunctionTableEntry(instance, static_cast<int>(j)).clear();
+ // {WasmInstanceNativeAllocations} only manages the memory of table 0.
+ // Therefore we pass the {table_index} as a constant here.
+ IndirectFunctionTableEntry(instance, 0, static_cast<int>(j)).clear();
}
}
uint32_t* indirect_function_table_sig_ids_ = nullptr;
@@ -509,7 +511,7 @@ int WasmModuleObject::GetSourcePosition(Handle<WasmModuleObject> module_object,
Isolate* isolate = module_object->GetIsolate();
const WasmModule* module = module_object->module();
- if (module->origin != wasm::kAsmJsOrigin) {
+ if (module->origin == wasm::kWasmOrigin) {
// for non-asm.js modules, we just add the function's start offset
// to make a module-relative position.
return byte_offset + module_object->GetFunctionOffset(func_index);
@@ -789,19 +791,21 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
backing_store->set(i, null);
}
+ Handle<Object> max;
+ if (has_maximum) {
+ max = isolate->factory()->NewNumberFromUint(maximum);
+ } else {
+ max = isolate->factory()->undefined_value();
+ }
+
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor(), isolate);
auto table_obj = Handle<WasmTableObject>::cast(
isolate->factory()->NewJSObject(table_ctor));
+ DisallowHeapAllocation no_gc;
table_obj->set_raw_type(static_cast<int>(type));
table_obj->set_entries(*backing_store);
- Handle<Object> max;
- if (has_maximum) {
- max = isolate->factory()->NewNumberFromUint(maximum);
- } else {
- max = isolate->factory()->undefined_value();
- }
table_obj->set_maximum_length(*max);
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
@@ -865,15 +869,14 @@ int WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- continue;
- }
- // For Table 0 we have to update the indirect function table.
+
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- DCHECK_EQ(old_size, instance->indirect_function_table_size());
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(instance,
- new_size);
+
+ DCHECK_EQ(old_size, WasmInstanceObject::IndirectFunctionTableSize(
+ isolate, instance, table_index));
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table_index, new_size);
}
for (uint32_t entry = old_size; entry < new_size; ++entry) {
@@ -895,10 +898,11 @@ bool WasmTableObject::IsValidElement(Isolate* isolate,
Handle<Object> entry) {
// Anyref tables take everything.
if (table->type() == wasm::kWasmAnyRef) return true;
- // Anyfunc tables can store {null} or {WasmExportedFunction} or
- // {WasmCapiFunction} objects.
+ // FuncRef tables can store {null}, {WasmExportedFunction}, {WasmJSFunction},
+ // or {WasmCapiFunction} objects.
if (entry->IsNull(isolate)) return true;
return WasmExportedFunction::IsWasmExportedFunction(*entry) ||
+ WasmJSFunction::IsWasmJSFunction(*entry) ||
WasmCapiFunction::IsWasmCapiFunction(*entry);
}
@@ -932,6 +936,9 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
DCHECK_NOT_NULL(wasm_function->sig);
UpdateDispatchTables(isolate, table, entry_index, wasm_function->sig,
target_instance, func_index);
+ } else if (WasmJSFunction::IsWasmJSFunction(*entry)) {
+ UpdateDispatchTables(isolate, table, entry_index,
+ Handle<WasmJSFunction>::cast(entry));
} else {
DCHECK(WasmCapiFunction::IsWasmCapiFunction(*entry));
UpdateDispatchTables(isolate, table, entry_index,
@@ -955,7 +962,7 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
// First we handle the easy anyref table case.
if (table->type() == wasm::kWasmAnyRef) return entry;
- // Now we handle the anyfunc case.
+ // Now we handle the funcref case.
if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
WasmCapiFunction::IsWasmCapiFunction(*entry)) {
return entry;
@@ -1005,11 +1012,6 @@ void WasmTableObject::UpdateDispatchTables(
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- // TODO(ahaas): Introduce dispatch tables for the other tables as well.
- continue;
- }
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
@@ -1017,11 +1019,33 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(*sig);
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, target_instance, target_func_index);
}
}
+void WasmTableObject::UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index,
+ Handle<WasmJSFunction> function) {
+ // We simply need to update the IFTs for each instance that imports
+ // this table.
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset)),
+ isolate);
+ WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ isolate, instance, table_index, entry_index, function);
+ }
+}
+
void WasmTableObject::UpdateDispatchTables(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
Handle<WasmCapiFunction> capi_function) {
@@ -1052,11 +1076,6 @@ void WasmTableObject::UpdateDispatchTables(
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- // TODO(ahaas): Introduce dispatch tables for the other tables as well.
- continue;
- }
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
@@ -1077,7 +1096,7 @@ void WasmTableObject::UpdateDispatchTables(
// Note that {SignatureMap::Find} may return {-1} if the signature is
// not found; it will simply never match any check.
auto sig_id = instance->module()->signature_map.Find(sig);
- IndirectFunctionTableEntry(instance, entry_index)
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, wasm_code->instruction_start(), *tuple);
}
}
@@ -1091,16 +1110,13 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
i += kDispatchTableNumElements) {
int table_index =
Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
- if (table_index > 0) {
- // Only table 0 has a dispatch table in the instance at the moment.
- continue;
- }
Handle<WasmInstanceObject> target_instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
isolate);
- DCHECK_LT(index, target_instance->indirect_function_table_size());
- IndirectFunctionTableEntry(target_instance, index).clear();
+ DCHECK_LT(index, WasmInstanceObject::IndirectFunctionTableSize(
+ isolate, target_instance, table_index));
+ IndirectFunctionTableEntry(target_instance, table_index, index).clear();
}
}
@@ -1118,8 +1134,8 @@ void WasmTableObject::SetFunctionTablePlaceholder(
void WasmTableObject::GetFunctionTableEntry(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
- int* function_index) {
- DCHECK_EQ(table->type(), wasm::kWasmAnyFunc);
+ int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function) {
+ DCHECK_EQ(table->type(), wasm::kWasmFuncRef);
DCHECK_LT(entry_index, table->entries().length());
// We initialize {is_valid} with {true}. We may change it later.
*is_valid = true;
@@ -1132,17 +1148,91 @@ void WasmTableObject::GetFunctionTableEntry(
auto target_func = Handle<WasmExportedFunction>::cast(element);
*instance = handle(target_func->instance(), isolate);
*function_index = target_func->function_index();
+ *maybe_js_function = MaybeHandle<WasmJSFunction>();
+ return;
+ }
+ if (WasmJSFunction::IsWasmJSFunction(*element)) {
+ *instance = MaybeHandle<WasmInstanceObject>();
+ *maybe_js_function = Handle<WasmJSFunction>::cast(element);
return;
- } else if (element->IsTuple2()) {
+ }
+ if (element->IsTuple2()) {
auto tuple = Handle<Tuple2>::cast(element);
*instance = handle(WasmInstanceObject::cast(tuple->value1()), isolate);
*function_index = Smi::cast(tuple->value2()).value();
+ *maybe_js_function = MaybeHandle<WasmJSFunction>();
return;
}
*is_valid = false;
}
namespace {
+class IftNativeAllocations {
+ public:
+ IftNativeAllocations(Handle<WasmIndirectFunctionTable> table, uint32_t size)
+ : sig_ids_(size), targets_(size) {
+ table->set_sig_ids(sig_ids_.data());
+ table->set_targets(targets_.data());
+ }
+
+ static size_t SizeInMemory(uint32_t size) {
+ return size * (sizeof(Address) + sizeof(uint32_t));
+ }
+
+ void resize(Handle<WasmIndirectFunctionTable> table, uint32_t new_size) {
+ DCHECK_GE(new_size, sig_ids_.size());
+ DCHECK_EQ(this, Managed<IftNativeAllocations>::cast(
+ table->managed_native_allocations())
+ .raw());
+ sig_ids_.resize(new_size);
+ targets_.resize(new_size);
+ table->set_sig_ids(sig_ids_.data());
+ table->set_targets(targets_.data());
+ }
+
+ private:
+ std::vector<uint32_t> sig_ids_;
+ std::vector<Address> targets_;
+};
+} // namespace
+
+Handle<WasmIndirectFunctionTable> WasmIndirectFunctionTable::New(
+ Isolate* isolate, uint32_t size) {
+ auto refs = isolate->factory()->NewFixedArray(static_cast<int>(size));
+ auto table = Handle<WasmIndirectFunctionTable>::cast(
+ isolate->factory()->NewStruct(WASM_INDIRECT_FUNCTION_TABLE_TYPE));
+ table->set_size(size);
+ table->set_refs(*refs);
+ auto native_allocations = Managed<IftNativeAllocations>::Allocate(
+ isolate, IftNativeAllocations::SizeInMemory(size), table, size);
+ table->set_managed_native_allocations(*native_allocations);
+ for (uint32_t i = 0; i < size; ++i) {
+ IndirectFunctionTableEntry(table, static_cast<int>(i)).clear();
+ }
+ return table;
+}
+
+void WasmIndirectFunctionTable::Resize(Isolate* isolate,
+ Handle<WasmIndirectFunctionTable> table,
+ uint32_t new_size) {
+ uint32_t old_size = table->size();
+ if (old_size >= new_size) return; // Nothing to do.
+
+ Managed<IftNativeAllocations>::cast(table->managed_native_allocations())
+ .raw()
+ ->resize(table, new_size);
+
+ Handle<FixedArray> old_refs(table->refs(), isolate);
+ Handle<FixedArray> new_refs = isolate->factory()->CopyFixedArrayAndGrow(
+ old_refs, static_cast<int>(new_size - old_size));
+ table->set_refs(*new_refs);
+ table->set_size(new_size);
+ for (uint32_t i = old_size; i < new_size; ++i) {
+ IndirectFunctionTableEntry(table, static_cast<int>(i)).clear();
+ }
+}
+
+namespace {
bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer,
size_t new_size) {
if (new_size > old_buffer->allocation_length()) return false;
@@ -1380,6 +1470,15 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
isolate->native_context()->wasm_global_constructor(), isolate);
auto global_obj = Handle<WasmGlobalObject>::cast(
isolate->factory()->NewJSObject(global_ctor));
+ {
+ // Disallow GC until all fields have acceptable types.
+ DisallowHeapAllocation no_gc;
+
+ global_obj->set_flags(0);
+ global_obj->set_type(type);
+ global_obj->set_offset(offset);
+ global_obj->set_is_mutable(is_mutable);
+ }
if (wasm::ValueTypes::IsReferenceType(type)) {
DCHECK(maybe_untagged_buffer.is_null());
@@ -1412,19 +1511,24 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_untagged_buffer(*untagged_buffer);
}
- global_obj->set_flags(0);
- global_obj->set_type(type);
- global_obj->set_offset(offset);
- global_obj->set_is_mutable(is_mutable);
return global_obj;
}
void IndirectFunctionTableEntry::clear() {
- instance_->indirect_function_table_sig_ids()[index_] = -1;
- instance_->indirect_function_table_targets()[index_] = 0;
- instance_->indirect_function_table_refs().set(
- index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
+ if (!instance_.is_null()) {
+ instance_->indirect_function_table_sig_ids()[index_] = -1;
+ instance_->indirect_function_table_targets()[index_] = 0;
+ instance_->indirect_function_table_refs().set(
+ index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
+ } else {
+ DCHECK(!table_.is_null());
+ table_->sig_ids()[index_] = -1;
+ table_->targets()[index_] = 0;
+ table_->refs().set(
+ index_,
+ ReadOnlyRoots(GetIsolateFromWritableObject(*table_)).undefined_value());
+ }
}
void IndirectFunctionTableEntry::Set(int sig_id,
@@ -1455,31 +1559,34 @@ void IndirectFunctionTableEntry::Set(int sig_id,
void IndirectFunctionTableEntry::Set(int sig_id, Address call_target,
Object ref) {
- instance_->indirect_function_table_sig_ids()[index_] = sig_id;
- instance_->indirect_function_table_targets()[index_] = call_target;
- instance_->indirect_function_table_refs().set(index_, ref);
-}
-
-Object IndirectFunctionTableEntry::object_ref() {
- return instance_->indirect_function_table_refs().get(index_);
+ if (!instance_.is_null()) {
+ instance_->indirect_function_table_sig_ids()[index_] = sig_id;
+ instance_->indirect_function_table_targets()[index_] = call_target;
+ instance_->indirect_function_table_refs().set(index_, ref);
+ } else {
+ DCHECK(!table_.is_null());
+ table_->sig_ids()[index_] = sig_id;
+ table_->targets()[index_] = call_target;
+ table_->refs().set(index_, ref);
+ }
}
-int IndirectFunctionTableEntry::sig_id() {
- return instance_->indirect_function_table_sig_ids()[index_];
+Object IndirectFunctionTableEntry::object_ref() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_refs().get(index_)
+ : table_->refs().get(index_);
}
-Address IndirectFunctionTableEntry::target() {
- return instance_->indirect_function_table_targets()[index_];
+int IndirectFunctionTableEntry::sig_id() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_sig_ids()[index_]
+ : table_->sig_ids()[index_];
}
-void IndirectFunctionTableEntry::CopyFrom(
- const IndirectFunctionTableEntry& that) {
- instance_->indirect_function_table_sig_ids()[index_] =
- that.instance_->indirect_function_table_sig_ids()[that.index_];
- instance_->indirect_function_table_targets()[index_] =
- that.instance_->indirect_function_table_targets()[that.index_];
- instance_->indirect_function_table_refs().set(
- index_, that.instance_->indirect_function_table_refs().get(that.index_));
+Address IndirectFunctionTableEntry::target() const {
+ return !instance_.is_null()
+ ? instance_->indirect_function_table_targets()[index_]
+ : table_->targets()[index_];
}
void ImportedFunctionEntry::SetWasmToJs(
@@ -1535,11 +1642,21 @@ constexpr uint16_t WasmInstanceObject::kTaggedFieldOffsets[];
// static
bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- Handle<WasmInstanceObject> instance, uint32_t minimum_size) {
+ Handle<WasmInstanceObject> instance, int table_index,
+ uint32_t minimum_size) {
+ Isolate* isolate = instance->GetIsolate();
+ if (table_index > 0) {
+ DCHECK_LT(table_index, instance->indirect_function_tables().length());
+ auto table =
+ handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(table_index)),
+ isolate);
+ WasmIndirectFunctionTable::Resize(isolate, table, minimum_size);
+ return true;
+ }
uint32_t old_size = instance->indirect_function_table_size();
if (old_size >= minimum_size) return false; // Nothing to do.
- Isolate* isolate = instance->GetIsolate();
HandleScope scope(isolate);
auto native_allocations = GetNativeAllocations(*instance);
native_allocations->resize_indirect_function_table(isolate, instance,
@@ -1624,8 +1741,6 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
instance->set_indirect_function_table_targets(nullptr);
instance->set_native_context(*isolate->native_context());
instance->set_module_object(*module_object);
- instance->set_undefined_value(ReadOnlyRoots(isolate).undefined_value());
- instance->set_null_value(ReadOnlyRoots(isolate).null_value());
instance->set_jump_table_start(
module_object->native_module()->jump_table_start());
@@ -1695,83 +1810,55 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
return native_module->GetCallTargetForFunction(func_index);
}
-namespace {
-void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
- uint32_t src, uint32_t count, bool copy_backward) {
- DCHECK(IsInBounds(dst, count, instance->indirect_function_table_size()));
- if (copy_backward) {
- for (uint32_t i = count; i > 0; i--) {
- auto to_entry = IndirectFunctionTableEntry(instance, dst + i - 1);
- auto from_entry = IndirectFunctionTableEntry(instance, src + i - 1);
- to_entry.CopyFrom(from_entry);
- }
- } else {
- for (uint32_t i = 0; i < count; i++) {
- auto to_entry = IndirectFunctionTableEntry(instance, dst + i);
- auto from_entry = IndirectFunctionTableEntry(instance, src + i);
- to_entry.CopyFrom(from_entry);
- }
+int WasmInstanceObject::IndirectFunctionTableSize(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t table_index) {
+ if (table_index == 0) {
+ return instance->indirect_function_table_size();
}
+ auto table =
+ handle(WasmIndirectFunctionTable::cast(
+ instance->indirect_function_tables().get(table_index)),
+ isolate);
+ return table->size();
}
-} // namespace
// static
bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- uint32_t table_src_index,
uint32_t table_dst_index,
+ uint32_t table_src_index,
uint32_t dst, uint32_t src,
uint32_t count) {
- if (static_cast<int>(table_dst_index) >= instance->tables().length()) {
- return false;
- }
- if (static_cast<int>(table_src_index) >= instance->tables().length()) {
- return false;
- }
-
- // TODO(titzer): multiple tables in TableCopy
- CHECK_EQ(0, table_src_index);
- CHECK_EQ(0, table_dst_index);
- auto max = instance->indirect_function_table_size();
- bool copy_backward = src < dst && dst - src < count;
- bool ok = ClampToBounds(dst, &count, max);
+ // Copying 0 elements is a no-op.
+ if (count == 0) return true;
+ CHECK_LT(table_dst_index, instance->tables().length());
+ CHECK_LT(table_src_index, instance->tables().length());
+ auto table_dst = handle(
+ WasmTableObject::cast(instance->tables().get(table_dst_index)), isolate);
+ auto table_src = handle(
+ WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
+ uint32_t max_dst = static_cast<uint32_t>(table_dst->entries().length());
+ uint32_t max_src = static_cast<uint32_t>(table_src->entries().length());
+ bool copy_backward = src < dst;
+ bool ok = ClampToBounds(dst, &count, max_dst);
// Use & instead of && so the clamp is not short-circuited.
- ok &= ClampToBounds(src, &count, max);
+ ok &= ClampToBounds(src, &count, max_src);
// If performing a partial copy when copying backward, then the first access
// will be out-of-bounds, so no entries should be copied.
if (copy_backward && !ok) return ok;
- if (dst == src || count == 0) return ok; // no-op
-
- // TODO(titzer): multiple tables in TableCopy
- auto table = handle(
- WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
- // Broadcast table copy operation to all instances that import this table.
- Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
- for (int i = 0; i < dispatch_tables->length();
- i += kDispatchTableNumElements) {
- Handle<WasmInstanceObject> target_instance(
- WasmInstanceObject::cast(
- dispatch_tables->get(i + kDispatchTableInstanceOffset)),
- isolate);
- CopyTableEntriesImpl(target_instance, dst, src, count, copy_backward);
+ // no-op
+ if ((dst == src && table_dst_index == table_src_index) || count == 0) {
+ return ok;
}
- // Copy the function entries.
- auto dst_table = handle(
- WasmTableObject::cast(instance->tables().get(table_dst_index)), isolate);
- auto src_table = handle(
- WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
- if (copy_backward) {
- for (uint32_t i = count; i > 0; i--) {
- dst_table->entries().set(dst + i - 1,
- src_table->entries().get(src + i - 1));
- }
- } else {
- for (uint32_t i = 0; i < count; i++) {
- dst_table->entries().set(dst + i, src_table->entries().get(src + i));
- }
+ for (uint32_t i = 0; i < count; ++i) {
+ uint32_t src_index = copy_backward ? (src + count - i - 1) : src + i;
+ uint32_t dst_index = copy_backward ? (dst + count - i - 1) : dst + i;
+ auto value = WasmTableObject::Get(isolate, table_src, src_index);
+ WasmTableObject::Set(isolate, table_dst, dst_index, value);
}
return ok;
}
@@ -1782,6 +1869,8 @@ bool WasmInstanceObject::InitTableEntries(Isolate* isolate,
uint32_t table_index,
uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
+ // Copying 0 elements is a no-op.
+ if (count == 0) return true;
// Note that this implementation just calls through to module instantiation.
// This is intentional, so that the runtime only depends on the object
// methods, and not the module instantiation logic.
@@ -1830,9 +1919,8 @@ WasmInstanceObject::GetOrCreateWasmExportedFunction(
// The wrapper may not exist yet if no function in the exports section has
// this signature. We compile it and store the wrapper in the module for
// later use.
- wrapper = compiler::CompileJSToWasmWrapper(isolate, function.sig,
- function.imported)
- .ToHandleChecked();
+ wrapper = wasm::JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
+ isolate, function.sig, function.imported);
module_object->export_wrappers().set(wrapper_index, *wrapper);
}
result = WasmExportedFunction::New(
@@ -1861,6 +1949,55 @@ void WasmInstanceObject::SetWasmExportedFunction(
}
// static
+void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int table_index,
+ int entry_index, Handle<WasmJSFunction> js_function) {
+ // Deserialize the signature encapsulated with the {WasmJSFunction}.
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ wasm::FunctionSig* sig = js_function->GetSignature(&zone);
+ auto sig_id = instance->module()->signature_map.Find(*sig);
+
+ // Compile a wrapper for the target callable.
+ Handle<JSReceiver> callable(js_function->GetCallable(), isolate);
+ wasm::WasmCodeRefScope code_ref_scope;
+ Address call_target = kNullAddress;
+ if (sig_id >= 0) {
+ wasm::NativeModule* native_module =
+ instance->module_object().native_module();
+ // TODO(mstarzinger): Cache and reuse wrapper code.
+ const wasm::WasmFeatures enabled = native_module->enabled_features();
+ auto resolved =
+ compiler::ResolveWasmImportCall(callable, sig, enabled.bigint);
+ compiler::WasmImportCallKind kind = resolved.first;
+ callable = resolved.second; // Update to ultimate target.
+ DCHECK_NE(compiler::WasmImportCallKind::kLinkError, kind);
+ wasm::CompilationEnv env = native_module->CreateCompilationEnv();
+ wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
+ isolate->wasm_engine(), &env, kind, sig, false);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
+ std::move(result.source_positions), GetCodeKind(result),
+ wasm::ExecutionTier::kNone);
+ wasm::WasmCode* published_code =
+ native_module->PublishCode(std::move(wasm_code));
+ isolate->counters()->wasm_generated_code_size()->Increment(
+ published_code->instructions().length());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ published_code->reloc_info().length());
+ call_target = published_code->instruction_start();
+ }
+
+ // Update the dispatch table.
+ Handle<Tuple2> tuple =
+ isolate->factory()->NewTuple2(instance, callable, AllocationType::kOld);
+ IndirectFunctionTableEntry(instance, table_index, entry_index)
+ .Set(sig_id, call_target, *tuple);
+}
+
+// static
Handle<WasmExceptionObject> WasmExceptionObject::New(
Isolate* isolate, const wasm::FunctionSig* sig,
Handle<HeapObject> exception_tag) {
@@ -2013,8 +2150,8 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
encoded_size += 8;
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
encoded_size += 1;
break;
default:
@@ -2080,10 +2217,10 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
int num_imported_functions = instance->module()->num_imported_functions;
int jump_table_offset = -1;
if (func_index >= num_imported_functions) {
- ptrdiff_t jump_table_diff =
- instance->module_object().native_module()->jump_table_offset(
+ uint32_t jump_table_diff =
+ instance->module_object().native_module()->GetJumpTableOffset(
func_index);
- DCHECK(jump_table_diff >= 0 && jump_table_diff <= INT_MAX);
+ DCHECK_GE(kMaxInt, jump_table_diff);
jump_table_offset = static_cast<int>(jump_table_diff);
}
Handle<WasmExportedFunctionData> function_data =
@@ -2093,9 +2230,13 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
function_data->set_instance(*instance);
function_data->set_jump_table_offset(jump_table_offset);
function_data->set_function_index(func_index);
+ function_data->set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
+ function_data->set_wasm_call_target(Smi::zero(), SKIP_WRITE_BARRIER);
+ function_data->set_packed_args_size(0);
MaybeHandle<String> maybe_name;
- if (instance->module()->origin == wasm::kAsmJsOrigin) {
+ bool is_asm_js_module = instance->module_object().is_asm_js();
+ if (is_asm_js_module) {
// We can use the function name only for asm.js. For WebAssembly, the
// function name is specified as the function_index.toString().
maybe_name = WasmModuleObject::GetFunctionNameOrNull(
@@ -2110,10 +2251,18 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
}
- bool is_asm_js_module = instance->module_object().is_asm_js();
- Handle<Map> function_map = is_asm_js_module
- ? isolate->sloppy_function_map()
- : isolate->wasm_exported_function_map();
+ Handle<Map> function_map;
+ switch (instance->module()->origin) {
+ case wasm::kWasmOrigin:
+ function_map = isolate->wasm_exported_function_map();
+ break;
+ case wasm::kAsmJsSloppyOrigin:
+ function_map = isolate->sloppy_function_map();
+ break;
+ case wasm::kAsmJsStrictOrigin:
+ function_map = isolate->strict_function_map();
+ break;
+ }
NewFunctionArgs args =
NewFunctionArgs::ForWasm(name, function_data, function_map);
Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
@@ -2143,9 +2292,22 @@ bool WasmJSFunction::IsWasmJSFunction(Object object) {
Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
wasm::FunctionSig* sig,
Handle<JSReceiver> callable) {
+ DCHECK_LE(sig->all().size(), kMaxInt);
+ int sig_size = static_cast<int>(sig->all().size());
+ int return_count = static_cast<int>(sig->return_count());
+ int parameter_count = static_cast<int>(sig->parameter_count());
+ Handle<PodArray<wasm::ValueType>> serialized_sig =
+ PodArray<wasm::ValueType>::New(isolate, sig_size, AllocationType::kOld);
+ if (sig_size > 0) {
+ serialized_sig->copy_in(0, sig->all().begin(), sig_size);
+ }
Handle<WasmJSFunctionData> function_data =
Handle<WasmJSFunctionData>::cast(isolate->factory()->NewStruct(
WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
+ function_data->set_serialized_return_count(return_count);
+ function_data->set_serialized_parameter_count(parameter_count);
+ function_data->set_serialized_signature(*serialized_sig);
+ function_data->set_callable(*callable);
// TODO(7742): Make this callable by using a proper wrapper code.
function_data->set_wrapper_code(
isolate->builtins()->builtin(Builtins::kIllegal));
@@ -2160,6 +2322,37 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
return Handle<WasmJSFunction>::cast(js_function);
}
+JSReceiver WasmJSFunction::GetCallable() const {
+ return shared().wasm_js_function_data().callable();
+}
+
+wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
+ WasmJSFunctionData function_data = shared().wasm_js_function_data();
+ int sig_size = function_data.serialized_signature().length();
+ wasm::ValueType* types = zone->NewArray<wasm::ValueType>(sig_size);
+ if (sig_size > 0) {
+ function_data.serialized_signature().copy_out(0, types, sig_size);
+ }
+ int return_count = function_data.serialized_return_count();
+ int parameter_count = function_data.serialized_parameter_count();
+ return new (zone) wasm::FunctionSig(return_count, parameter_count, types);
+}
+
+bool WasmJSFunction::MatchesSignature(wasm::FunctionSig* sig) {
+ DCHECK_LE(sig->all().size(), kMaxInt);
+ int sig_size = static_cast<int>(sig->all().size());
+ int return_count = static_cast<int>(sig->return_count());
+ int parameter_count = static_cast<int>(sig->parameter_count());
+ WasmJSFunctionData function_data = shared().wasm_js_function_data();
+ if (return_count != function_data.serialized_return_count() ||
+ parameter_count != function_data.serialized_parameter_count()) {
+ return false;
+ }
+ if (sig_size == 0) return true; // Prevent undefined behavior.
+ const wasm::ValueType* expected = sig->all().begin();
+ return function_data.serialized_signature().matches(expected, sig_size);
+}
+
Address WasmCapiFunction::GetHostCallTarget() const {
return shared().wasm_capi_function_data().call_target();
}
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 1e6ced0b76..1200f7040a 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -40,15 +40,17 @@ class SeqOneByteString;
class WasmCapiFunction;
class WasmDebugInfo;
class WasmExceptionTag;
+class WasmExportedFunction;
class WasmInstanceObject;
+class WasmJSFunction;
class WasmModuleObject;
-class WasmExportedFunction;
+class WasmIndirectFunctionTable;
template <class CppType>
class Managed;
#define DECL_OPTIONAL_ACCESSORS(name, type) \
- V8_INLINE bool has_##name(); \
+ DECL_GETTER(has_##name, bool) \
DECL_ACCESSORS(name, type)
// A helper for an entry in an indirect function table (IFT).
@@ -60,7 +62,11 @@ class Managed;
// - target = entrypoint to Wasm code or import wrapper code
class IndirectFunctionTableEntry {
public:
- inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int index);
+ inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int table_index,
+ int entry_index);
+
+ inline IndirectFunctionTableEntry(Handle<WasmIndirectFunctionTable> table,
+ int entry_index);
void clear();
V8_EXPORT_PRIVATE void Set(int sig_id,
@@ -68,14 +74,13 @@ class IndirectFunctionTableEntry {
int target_func_index);
void Set(int sig_id, Address call_target, Object ref);
- void CopyFrom(const IndirectFunctionTableEntry& that);
-
- Object object_ref();
- int sig_id();
- Address target();
+ Object object_ref() const;
+ int sig_id() const;
+ Address target() const;
private:
Handle<WasmInstanceObject> const instance_;
+ Handle<WasmIndirectFunctionTable> const table_;
int const index_;
};
@@ -292,6 +297,7 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
static void Fill(Isolate* isolate, Handle<WasmTableObject> table,
uint32_t start, Handle<Object> entry, uint32_t count);
+ // TODO(mstarzinger): Unify these three methods into one.
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int entry_index, wasm::FunctionSig* sig,
@@ -300,6 +306,10 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int entry_index,
+ Handle<WasmJSFunction> function);
+ static void UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index,
Handle<WasmCapiFunction> capi_function);
static void ClearDispatchTables(Isolate* isolate,
@@ -312,14 +322,12 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
int func_index);
// This function reads the content of a function table entry and returns it
- // through the out parameters {is_valid}, {is_null}, {instance}, and
- // {function_index}.
- static void GetFunctionTableEntry(Isolate* isolate,
- Handle<WasmTableObject> table,
- int entry_index, bool* is_valid,
- bool* is_null,
- MaybeHandle<WasmInstanceObject>* instance,
- int* function_index);
+ // through the out parameters {is_valid}, {is_null}, {instance},
+ // {function_index}, and {maybe_js_function}.
+ static void GetFunctionTableEntry(
+ Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
+ bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
+ int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function);
OBJECT_CONSTRUCTORS(WasmTableObject, JSObject);
};
@@ -406,7 +414,7 @@ class WasmGlobalObject : public JSObject {
inline void SetF32(float value);
inline void SetF64(double value);
inline void SetAnyRef(Handle<Object> value);
- inline bool SetAnyFunc(Isolate* isolate, Handle<Object> value);
+ inline bool SetFuncRef(Isolate* isolate, Handle<Object> value);
private:
// This function returns the address of the global's data in the
@@ -431,12 +439,11 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(tables, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(indirect_function_tables, FixedArray)
DECL_ACCESSORS(imported_function_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_table_refs, FixedArray)
DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
DECL_OPTIONAL_ACCESSORS(exceptions_table, FixedArray)
- DECL_ACCESSORS(undefined_value, Oddball)
- DECL_ACCESSORS(null_value, Oddball)
DECL_ACCESSORS(centry_stub, Code)
DECL_OPTIONAL_ACCESSORS(wasm_exported_functions, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
@@ -482,7 +489,6 @@ class WasmInstanceObject : public JSObject {
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kGlobalsStartOffset, kSystemPointerSize) \
V(kImportedMutableGlobalsOffset, kSystemPointerSize) \
- V(kUndefinedValueOffset, kTaggedSize) \
V(kIsolateRootOffset, kSystemPointerSize) \
V(kJumpTableStartOffset, kSystemPointerSize) \
/* End of often-accessed fields. */ \
@@ -495,9 +501,9 @@ class WasmInstanceObject : public JSObject {
V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
V(kDebugInfoOffset, kTaggedSize) \
V(kTablesOffset, kTaggedSize) \
+ V(kIndirectFunctionTablesOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
V(kExceptionsTableOffset, kTaggedSize) \
- V(kNullValueOffset, kTaggedSize) \
V(kCEntryStubOffset, kTaggedSize) \
V(kWasmExportedFunctionsOffset, kTaggedSize) \
V(kRealStackLimitAddressOffset, kSystemPointerSize) \
@@ -526,7 +532,6 @@ class WasmInstanceObject : public JSObject {
static constexpr uint16_t kTaggedFieldOffsets[] = {
kImportedFunctionRefsOffset,
kIndirectFunctionTableRefsOffset,
- kUndefinedValueOffset,
kModuleObjectOffset,
kExportsObjectOffset,
kNativeContextOffset,
@@ -536,18 +541,17 @@ class WasmInstanceObject : public JSObject {
kImportedMutableGlobalsBuffersOffset,
kDebugInfoOffset,
kTablesOffset,
+ kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
kExceptionsTableOffset,
- kNullValueOffset,
kCEntryStubOffset,
kWasmExportedFunctionsOffset};
V8_EXPORT_PRIVATE const wasm::WasmModule* module();
V8_EXPORT_PRIVATE static bool EnsureIndirectFunctionTableWithMinimumSize(
- Handle<WasmInstanceObject> instance, uint32_t minimum_size);
-
- bool has_indirect_function_table();
+ Handle<WasmInstanceObject> instance, int table_index,
+ uint32_t minimum_size);
V8_EXPORT_PRIVATE void SetRawMemory(byte* mem_start, size_t mem_size);
@@ -561,11 +565,15 @@ class WasmInstanceObject : public JSObject {
Address GetCallTarget(uint32_t func_index);
+ static int IndirectFunctionTableSize(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t table_index);
+
// Copies table entries. Returns {false} if the ranges are out-of-bounds.
static bool CopyTableEntries(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- uint32_t table_src_index,
- uint32_t table_dst_index, uint32_t dst,
+ uint32_t table_dst_index,
+ uint32_t table_src_index, uint32_t dst,
uint32_t src,
uint32_t count) V8_WARN_UNUSED_RESULT;
@@ -597,6 +605,14 @@ class WasmInstanceObject : public JSObject {
int index,
Handle<WasmExportedFunction> val);
+ // Imports a constructed {WasmJSFunction} into the indirect function table of
+ // this instance. Note that this might trigger wrapper compilation, since a
+ // {WasmJSFunction} is instance-independent and just wraps a JS callable.
+ static void ImportWasmJSFunctionIntoTable(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ int table_index, int entry_index,
+ Handle<WasmJSFunction> js_function);
+
OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
@@ -681,6 +697,12 @@ class WasmJSFunction : public JSFunction {
static Handle<WasmJSFunction> New(Isolate* isolate, wasm::FunctionSig* sig,
Handle<JSReceiver> callable);
+ JSReceiver GetCallable() const;
+ // Deserializes the signature of this function using the provided zone. Note
+ // that lifetime of the signature is hence directly coupled to the zone.
+ wasm::FunctionSig* GetSignature(Zone* zone);
+ bool MatchesSignature(wasm::FunctionSig* sig);
+
DECL_CAST(WasmJSFunction)
OBJECT_CONSTRUCTORS(WasmJSFunction, JSFunction);
};
@@ -704,6 +726,34 @@ class WasmCapiFunction : public JSFunction {
OBJECT_CONSTRUCTORS(WasmCapiFunction, JSFunction);
};
+class WasmIndirectFunctionTable : public Struct {
+ public:
+ DECL_PRIMITIVE_ACCESSORS(size, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(sig_ids, uint32_t*)
+ DECL_PRIMITIVE_ACCESSORS(targets, Address*)
+ DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
+ DECL_ACCESSORS(refs, FixedArray)
+
+ V8_EXPORT_PRIVATE static Handle<WasmIndirectFunctionTable> New(
+ Isolate* isolate, uint32_t size);
+ static void Resize(Isolate* isolate, Handle<WasmIndirectFunctionTable> table,
+ uint32_t new_size);
+
+ DECL_CAST(WasmIndirectFunctionTable)
+
+ DECL_PRINTER(WasmIndirectFunctionTable)
+ DECL_VERIFIER(WasmIndirectFunctionTable)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WASM_INDIRECT_FUNCTION_TABLE_FIELDS)
+
+ STATIC_ASSERT(kStartOfStrongFieldsOffset == kManagedNativeAllocationsOffset);
+ using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
+
+ OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
+};
+
class WasmCapiFunctionData : public Struct {
public:
DECL_PRIMITIVE_ACCESSORS(call_target, Address)
@@ -734,6 +784,9 @@ class WasmExportedFunctionData : public Struct {
DECL_ACCESSORS(instance, WasmInstanceObject)
DECL_INT_ACCESSORS(jump_table_offset)
DECL_INT_ACCESSORS(function_index)
+ DECL_ACCESSORS(c_wrapper_code, Object)
+ DECL_ACCESSORS(wasm_call_target, Smi)
+ DECL_INT_ACCESSORS(packed_args_size)
DECL_CAST(WasmExportedFunctionData)
@@ -754,6 +807,10 @@ class WasmExportedFunctionData : public Struct {
// {SharedFunctionInfo::HasWasmJSFunctionData} predicate.
class WasmJSFunctionData : public Struct {
public:
+ DECL_INT_ACCESSORS(serialized_return_count)
+ DECL_INT_ACCESSORS(serialized_parameter_count)
+ DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
+ DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(wrapper_code, Code)
DECL_CAST(WasmJSFunctionData)
@@ -847,8 +904,8 @@ class WasmDebugInfo : public Struct {
Address frame_pointer,
int frame_index);
- V8_EXPORT_PRIVATE static Handle<JSFunction> GetCWasmEntry(
- Handle<WasmDebugInfo>, wasm::FunctionSig*);
+ V8_EXPORT_PRIVATE static Handle<Code> GetCWasmEntry(Handle<WasmDebugInfo>,
+ wasm::FunctionSig*);
OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct);
};
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 88b9e90381..d3fb4c42cf 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -23,7 +23,9 @@ namespace wasm {
#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
+#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
+#define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
@@ -31,6 +33,7 @@ namespace wasm {
#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
+#define CASE_S1x2_OP(name, str) CASE_OP(S1x2##name, "s1x2." str)
#define CASE_S1x4_OP(name, str) CASE_OP(S1x4##name, "s1x4." str)
#define CASE_S1x8_OP(name, str) CASE_OP(S1x8##name, "s1x8." str)
#define CASE_S1x16_OP(name, str) CASE_OP(S1x16##name, "s1x16." str)
@@ -148,8 +151,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(TeeLocal, "local.tee")
CASE_OP(GetGlobal, "global.get")
CASE_OP(SetGlobal, "global.set")
- CASE_OP(GetTable, "table.get")
- CASE_OP(SetTable, "table.set")
+ CASE_OP(TableGet, "table.get")
+ CASE_OP(TableSet, "table.set")
CASE_ALL_OP(Const, "const")
CASE_OP(MemorySize, "memory.size")
CASE_OP(MemoryGrow, "memory.grow")
@@ -217,11 +220,26 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// SIMD opcodes.
CASE_SIMD_OP(Splat, "splat")
CASE_SIMD_OP(Neg, "neg")
+ CASE_F64x2_OP(Neg, "neg")
+ CASE_I64x2_OP(Neg, "neg")
CASE_SIMD_OP(Eq, "eq")
+ CASE_F64x2_OP(Eq, "eq")
+ CASE_I64x2_OP(Eq, "eq")
CASE_SIMD_OP(Ne, "ne")
+ CASE_F64x2_OP(Ne, "ne")
+ CASE_I64x2_OP(Ne, "ne")
CASE_SIMD_OP(Add, "add")
+ CASE_I64x2_OP(Add, "add")
CASE_SIMD_OP(Sub, "sub")
+ CASE_I64x2_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
+ CASE_I64x2_OP(Mul, "mul")
+ CASE_F64x2_OP(Splat, "splat")
+ CASE_F64x2_OP(Lt, "lt")
+ CASE_F64x2_OP(Le, "le")
+ CASE_F64x2_OP(Gt, "gt")
+ CASE_F64x2_OP(Ge, "ge")
+ CASE_F64x2_OP(Abs, "abs")
CASE_F32x4_OP(Abs, "abs")
CASE_F32x4_OP(AddHoriz, "add_horizontal")
CASE_F32x4_OP(RecipApprox, "recip_approx")
@@ -240,18 +258,29 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
+ CASE_F64x2_OP(ExtractLane, "extract_lane")
+ CASE_F64x2_OP(ReplaceLane, "replace_lane")
CASE_F32x4_OP(ExtractLane, "extract_lane")
CASE_F32x4_OP(ReplaceLane, "replace_lane")
+ CASE_I64x2_OP(ExtractLane, "extract_lane")
+ CASE_I64x2_OP(ReplaceLane, "replace_lane")
CASE_SIMDI_OP(ExtractLane, "extract_lane")
CASE_SIMDI_OP(ReplaceLane, "replace_lane")
CASE_SIGN_OP(SIMDI, Min, "min")
CASE_SIGN_OP(SIMDI, Max, "max")
CASE_SIGN_OP(SIMDI, Lt, "lt")
+ CASE_SIGN_OP(I64x2, Lt, "lt")
CASE_SIGN_OP(SIMDI, Le, "le")
+ CASE_SIGN_OP(I64x2, Le, "le")
CASE_SIGN_OP(SIMDI, Gt, "gt")
+ CASE_SIGN_OP(I64x2, Gt, "gt")
CASE_SIGN_OP(SIMDI, Ge, "ge")
+ CASE_SIGN_OP(I64x2, Ge, "ge")
CASE_SIGN_OP(SIMDI, Shr, "shr")
+ CASE_SIGN_OP(I64x2, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
+ CASE_I64x2_OP(Shl, "shl")
+ CASE_I64x2_OP(Splat, "splat")
CASE_I32x4_OP(AddHoriz, "add_horizontal")
CASE_I16x8_OP(AddHoriz, "add_horizontal")
CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
@@ -264,6 +293,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Not, "not")
CASE_S128_OP(Select, "select")
CASE_S8x16_OP(Shuffle, "shuffle")
+ CASE_S1x2_OP(AnyTrue, "any_true")
+ CASE_S1x2_OP(AllTrue, "all_true")
CASE_S1x4_OP(AnyTrue, "any_true")
CASE_S1x4_OP(AllTrue, "all_true")
CASE_S1x8_OP(AnyTrue, "any_true")
@@ -274,6 +305,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
// Atomic operations.
CASE_OP(AtomicNotify, "atomic.notify")
CASE_INT_OP(AtomicWait, "atomic.wait")
+ CASE_OP(AtomicFence, "atomic.fence")
CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
@@ -295,7 +327,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_F32_OP
#undef CASE_F64_OP
#undef CASE_REF_OP
+#undef CASE_F64x2_OP
#undef CASE_F32x4_OP
+#undef CASE_I64x2_OP
#undef CASE_I32x4_OP
#undef CASE_I16x8_OP
#undef CASE_I8x16_OP
@@ -303,6 +337,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_S32x4_OP
#undef CASE_S16x8_OP
#undef CASE_S8x16_OP
+#undef CASE_S1x2_OP
#undef CASE_S1x4_OP
#undef CASE_S1x8_OP
#undef CASE_S1x16_OP
@@ -474,7 +509,8 @@ struct GetSimdOpcodeSigIndex {
struct GetAtomicOpcodeSigIndex {
constexpr WasmOpcodeSig operator()(byte opcode) const {
#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_ATOMIC_OPCODE(CASE) kSigEnum_None;
+ return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
+ kSigEnum_None;
#undef CASE
}
};
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 6f9cb70141..22bd47d54b 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -6,7 +6,7 @@
#define V8_WASM_WASM_OPCODES_H_
#include "src/common/globals.h"
-#include "src/execution/message-template.h"
+#include "src/common/message-template.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-constants.h"
@@ -51,8 +51,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(TeeLocal, 0x22, _) \
V(GetGlobal, 0x23, _) \
V(SetGlobal, 0x24, _) \
- V(GetTable, 0x25, _) \
- V(SetTable, 0x26, _) \
+ V(TableGet, 0x25, _) \
+ V(TableSet, 0x26, _) \
V(I32Const, 0x41, _) \
V(I64Const, 0x42, _) \
V(F32Const, 0x43, _) \
@@ -272,7 +272,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I8x16Splat, 0xfd04, s_i) \
V(I16x8Splat, 0xfd08, s_i) \
V(I32x4Splat, 0xfd0c, s_i) \
+ V(I64x2Splat, 0xfd0f, s_l) \
V(F32x4Splat, 0xfd12, s_f) \
+ V(F64x2Splat, 0xfd15, s_d) \
V(I8x16Eq, 0xfd18, s_ss) \
V(I8x16Ne, 0xfd19, s_ss) \
V(I8x16LtS, 0xfd1a, s_ss) \
@@ -303,12 +305,28 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I32x4LeU, 0xfd33, s_ss) \
V(I32x4GeS, 0xfd34, s_ss) \
V(I32x4GeU, 0xfd35, s_ss) \
+ V(I64x2Eq, 0xfd36, s_ss) \
+ V(I64x2Ne, 0xfd37, s_ss) \
+ V(I64x2LtS, 0xfd38, s_ss) \
+ V(I64x2LtU, 0xfd39, s_ss) \
+ V(I64x2GtS, 0xfd3a, s_ss) \
+ V(I64x2GtU, 0xfd3b, s_ss) \
+ V(I64x2LeS, 0xfd3c, s_ss) \
+ V(I64x2LeU, 0xfd3d, s_ss) \
+ V(I64x2GeS, 0xfd3e, s_ss) \
+ V(I64x2GeU, 0xfd3f, s_ss) \
V(F32x4Eq, 0xfd40, s_ss) \
V(F32x4Ne, 0xfd41, s_ss) \
V(F32x4Lt, 0xfd42, s_ss) \
V(F32x4Gt, 0xfd43, s_ss) \
V(F32x4Le, 0xfd44, s_ss) \
V(F32x4Ge, 0xfd45, s_ss) \
+ V(F64x2Eq, 0xfd46, s_ss) \
+ V(F64x2Ne, 0xfd47, s_ss) \
+ V(F64x2Lt, 0xfd48, s_ss) \
+ V(F64x2Gt, 0xfd49, s_ss) \
+ V(F64x2Le, 0xfd4a, s_ss) \
+ V(F64x2Ge, 0xfd4b, s_ss) \
V(S128Not, 0xfd4c, s_s) \
V(S128And, 0xfd4d, s_ss) \
V(S128Or, 0xfd4e, s_ss) \
@@ -352,6 +370,12 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I32x4MinU, 0xfd81, s_ss) \
V(I32x4MaxS, 0xfd82, s_ss) \
V(I32x4MaxU, 0xfd83, s_ss) \
+ V(I64x2Neg, 0xfd84, s_s) \
+ V(S1x2AnyTrue, 0xfd85, i_s) \
+ V(S1x2AllTrue, 0xfd86, i_s) \
+ V(I64x2Add, 0xfd8a, s_ss) \
+ V(I64x2Sub, 0xfd8d, s_ss) \
+ V(I64x2Mul, 0xfd8c, s_ss) \
V(F32x4Abs, 0xfd95, s_s) \
V(F32x4Neg, 0xfd96, s_s) \
V(F32x4RecipApprox, 0xfd98, s_s) \
@@ -361,6 +385,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(F32x4Mul, 0xfd9c, s_ss) \
V(F32x4Min, 0xfd9e, s_ss) \
V(F32x4Max, 0xfd9f, s_ss) \
+ V(F64x2Abs, 0xfda0, s_s) \
+ V(F64x2Neg, 0xfda1, s_s) \
V(I32x4SConvertF32x4, 0xfdab, s_s) \
V(I32x4UConvertF32x4, 0xfdac, s_s) \
V(F32x4SConvertI32x4, 0xfdaf, s_s) \
@@ -385,7 +411,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I8x16ExtractLane, 0xfd05, _) \
V(I16x8ExtractLane, 0xfd09, _) \
V(I32x4ExtractLane, 0xfd0d, _) \
+ V(I64x2ExtractLane, 0xfd10, _) \
V(F32x4ExtractLane, 0xfd13, _) \
+ V(F64x2ExtractLane, 0xfd16, _) \
V(I8x16Shl, 0xfd54, _) \
V(I8x16ShrS, 0xfd55, _) \
V(I8x16ShrU, 0xfd56, _) \
@@ -394,13 +422,18 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I16x8ShrU, 0xfd67, _) \
V(I32x4Shl, 0xfd76, _) \
V(I32x4ShrS, 0xfd77, _) \
- V(I32x4ShrU, 0xfd78, _)
+ V(I32x4ShrU, 0xfd78, _) \
+ V(I64x2Shl, 0xfd87, _) \
+ V(I64x2ShrS, 0xfd88, _) \
+ V(I64x2ShrU, 0xfd89, _)
#define FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V) \
V(I8x16ReplaceLane, 0xfd07, _) \
V(I16x8ReplaceLane, 0xfd0b, _) \
V(I32x4ReplaceLane, 0xfd0e, _) \
- V(F32x4ReplaceLane, 0xfd14, _)
+ V(I64x2ReplaceLane, 0xfd11, _) \
+ V(F32x4ReplaceLane, 0xfd14, _) \
+ V(F64x2ReplaceLane, 0xfd17, _)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
@@ -424,7 +457,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(TableCopy, 0xfc0e, v_iii) \
V(TableGrow, 0xfc0f, i_ai) \
V(TableSize, 0xfc10, i_v) \
- /*TableFill is polymorph in the second parameter. It's anyref or anyfunc.*/ \
+ /*TableFill is polymorph in the second parameter. It's anyref or funcref.*/ \
V(TableFill, 0xfc11, v_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
@@ -495,6 +528,10 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
+#define FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
+ /* AtomicFence does not target a particular linear memory. */ \
+ V(AtomicFence, 0xfe03, v_v)
+
// All opcodes.
#define FOREACH_OPCODE(V) \
FOREACH_CONTROL_OPCODE(V) \
@@ -510,6 +547,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
FOREACH_SIMD_MEM_OPCODE(V) \
FOREACH_ATOMIC_OPCODE(V) \
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(V) \
FOREACH_NUMERIC_OPCODE(V)
// All signatures.
@@ -553,13 +591,15 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
V(i_r, kWasmI32, kWasmAnyRef) \
- V(i_ai, kWasmI32, kWasmAnyFunc, kWasmI32)
+ V(i_ai, kWasmI32, kWasmFuncRef, kWasmI32)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
V(s_f, kWasmS128, kWasmF32) \
+ V(s_d, kWasmS128, kWasmF64) \
V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
V(s_i, kWasmS128, kWasmI32) \
+ V(s_l, kWasmS128, kWasmI64) \
V(s_si, kWasmS128, kWasmS128, kWasmI32) \
V(i_s, kWasmI32, kWasmS128) \
V(v_is, kWasmStmt, kWasmI32, kWasmS128) \
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 4688bcf8e1..42eee037d5 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -18,28 +18,28 @@ namespace wasm {
namespace {
PRINTF_FORMAT(3, 0)
-void VPrintFToString(std::string& str, size_t str_offset, const char* format,
+void VPrintFToString(std::string* str, size_t str_offset, const char* format,
va_list args) {
- DCHECK_LE(str_offset, str.size());
+ DCHECK_LE(str_offset, str->size());
size_t len = str_offset + strlen(format);
// Allocate increasingly large buffers until the message fits.
for (;; len = base::bits::RoundUpToPowerOfTwo64(len + 1)) {
DCHECK_GE(kMaxInt, len);
- str.resize(len);
+ str->resize(len);
va_list args_copy;
va_copy(args_copy, args);
- int written = VSNPrintF(Vector<char>(&str.front() + str_offset,
+ int written = VSNPrintF(Vector<char>(&str->front() + str_offset,
static_cast<int>(len - str_offset)),
format, args_copy);
va_end(args_copy);
if (written < 0) continue; // not enough space.
- str.resize(str_offset + written);
+ str->resize(str_offset + written);
return;
}
}
PRINTF_FORMAT(3, 4)
-void PrintFToString(std::string& str, size_t str_offset, const char* format,
+void PrintFToString(std::string* str, size_t str_offset, const char* format,
...) {
va_list args;
va_start(args, format);
@@ -52,7 +52,7 @@ void PrintFToString(std::string& str, size_t str_offset, const char* format,
// static
std::string WasmError::FormatError(const char* format, va_list args) {
std::string result;
- VPrintFToString(result, 0, format, args);
+ VPrintFToString(&result, 0, format, args);
return result;
}
@@ -63,10 +63,10 @@ void ErrorThrower::Format(ErrorType type, const char* format, va_list args) {
size_t context_len = 0;
if (context_) {
- PrintFToString(error_msg_, 0, "%s: ", context_);
+ PrintFToString(&error_msg_, 0, "%s: ", context_);
context_len = error_msg_.size();
}
- VPrintFToString(error_msg_, context_len, format, args);
+ VPrintFToString(&error_msg_, context_len, format, args);
error_type_ = type;
}
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 1cea08943b..a20b2f115a 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -645,6 +645,8 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
// Log the code within the generated module for profiling.
native_module->LogWasmCodes(isolate);
+ // Finish the Wasm script now and make it public to the debugger.
+ isolate->debug()->OnAfterCompile(script);
return module_object;
}
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index a79ae02fe2..e17d34e36f 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -105,7 +105,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
os << WasmOpcodes::OpcodeName(opcode);
- if (imm.type == kWasmVar) {
+ if (imm.type == kWasmBottom) {
os << " (type " << imm.sig_index << ")";
} else if (imm.out_arity() > 0) {
os << " " << ValueTypes::TypeName(imm.out_type(0));
@@ -140,16 +140,18 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
while (iterator.has_next()) os << ' ' << iterator.next();
break;
}
- case kExprCallIndirect: {
+ case kExprCallIndirect:
+ case kExprReturnCallIndirect: {
CallIndirectImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
i.pc());
DCHECK_EQ(0, imm.table_index);
- os << "call_indirect " << imm.sig_index;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.sig_index;
break;
}
- case kExprCallFunction: {
+ case kExprCallFunction:
+ case kExprReturnCall: {
CallFunctionImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- os << "call " << imm.index;
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
case kExprGetLocal:
@@ -170,6 +172,18 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
break;
}
+ case kExprTableGet:
+ case kExprTableSet: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprSelectWithType: {
+ SelectTypeImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << ValueTypes::TypeName(imm.type);
+ break;
+ }
#define CASE_CONST(type, str, cast_type) \
case kExpr##type##Const: { \
Imm##type##Immediate<Decoder::kNoValidate> imm(&i, i.pc()); \
@@ -182,6 +196,12 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
CASE_CONST(F64, f64, double)
#undef CASE_CONST
+ case kExprRefFunc: {
+ FunctionIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+
#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
FOREACH_STORE_MEM_OPCODE(CASE_OPCODE) {
@@ -193,6 +213,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
}
FOREACH_SIMPLE_OPCODE(CASE_OPCODE)
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE_OPCODE)
case kExprUnreachable:
case kExprNop:
case kExprReturn:
@@ -200,19 +221,150 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
case kExprMemoryGrow:
case kExprDrop:
case kExprSelect:
+ case kExprRethrow:
+ case kExprRefNull:
os << WasmOpcodes::OpcodeName(opcode);
break;
+
+ case kNumericPrefix: {
+ WasmOpcode numeric_opcode = i.prefixed_opcode();
+ switch (numeric_opcode) {
+ case kExprI32SConvertSatF32:
+ case kExprI32UConvertSatF32:
+ case kExprI32SConvertSatF64:
+ case kExprI32UConvertSatF64:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
+ case kExprMemoryCopy:
+ case kExprMemoryFill:
+ os << WasmOpcodes::OpcodeName(opcode);
+ break;
+ case kExprMemoryInit: {
+ MemoryInitImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << imm.data_segment_index;
+ break;
+ }
+ case kExprDataDrop: {
+ DataDropImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprTableInit: {
+ TableInitImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' '
+ << imm.elem_segment_index << ' ' << imm.table.index;
+ break;
+ }
+ case kExprElemDrop: {
+ ElemDropImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ case kExprTableCopy: {
+ TableCopyImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.table_src.index
+ << ' ' << imm.table_dst.index;
+ break;
+ }
+ case kExprTableGrow:
+ case kExprTableSize:
+ case kExprTableFill: {
+ TableIndexImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.index;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+
+ case kSimdPrefix: {
+ WasmOpcode simd_opcode = i.prefixed_opcode();
+ switch (simd_opcode) {
+ case kExprS128LoadMem:
+ case kExprS128StoreMem: {
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc(),
+ kMaxUInt32);
+ os << WasmOpcodes::OpcodeName(opcode) << " offset=" << imm.offset
+ << " align=" << (1ULL << imm.alignment);
+ break;
+ }
+
+ case kExprS8x16Shuffle: {
+ Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode);
+ for (uint8_t v : imm.shuffle) {
+ os << ' ' << v;
+ }
+ break;
+ }
+
+ case kExprI8x16ExtractLane:
+ case kExprI16x8ExtractLane:
+ case kExprI32x4ExtractLane:
+ case kExprI64x2ExtractLane:
+ case kExprF32x4ExtractLane:
+ case kExprF64x2ExtractLane:
+ case kExprI8x16ReplaceLane:
+ case kExprI16x8ReplaceLane:
+ case kExprI32x4ReplaceLane:
+ case kExprI64x2ReplaceLane:
+ case kExprF32x4ReplaceLane:
+ case kExprF64x2ReplaceLane: {
+ SimdLaneImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.lane;
+ break;
+ }
+
+ case kExprI8x16Shl:
+ case kExprI8x16ShrS:
+ case kExprI8x16ShrU:
+ case kExprI16x8Shl:
+ case kExprI16x8ShrS:
+ case kExprI16x8ShrU:
+ case kExprI32x4Shl:
+ case kExprI32x4ShrS:
+ case kExprI32x4ShrU:
+ case kExprI64x2Shl:
+ case kExprI64x2ShrS:
+ case kExprI64x2ShrU: {
+ SimdShiftImmediate<Decoder::kNoValidate> imm(&i, i.pc());
+ os << WasmOpcodes::OpcodeName(opcode) << ' ' << imm.shift;
+ break;
+ }
+
+ FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(opcode);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+
case kAtomicPrefix: {
WasmOpcode atomic_opcode = i.prefixed_opcode();
switch (atomic_opcode) {
FOREACH_ATOMIC_OPCODE(CASE_OPCODE) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc(),
+ MemoryAccessImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1,
kMaxUInt32);
os << WasmOpcodes::OpcodeName(atomic_opcode)
<< " offset=" << imm.offset
<< " align=" << (1ULL << imm.alignment);
break;
}
+ FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(atomic_opcode);
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -222,14 +374,9 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
// This group is just printed by their internal opcode name, as they
// should never be shown to end-users.
- FOREACH_ASMJS_COMPAT_OPCODE(CASE_OPCODE)
- // TODO(wasm): Add correct printing for SIMD and atomic opcodes once
- // they are publicly available.
- FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_MASK_OPERAND_OPCODE(CASE_OPCODE)
- FOREACH_SIMD_MEM_OPCODE(CASE_OPCODE)
- os << WasmOpcodes::OpcodeName(opcode);
+ FOREACH_ASMJS_COMPAT_OPCODE(CASE_OPCODE) {
+ os << WasmOpcodes::OpcodeName(opcode);
+ }
break;
#undef CASE_OPCODE
diff --git a/deps/v8/src/wasm/wasm-text.h b/deps/v8/src/wasm/wasm-text.h
index 60957966ab..205df5e6fd 100644
--- a/deps/v8/src/wasm/wasm-text.h
+++ b/deps/v8/src/wasm/wasm-text.h
@@ -7,9 +7,10 @@
#include <cstdint>
#include <ostream>
-#include <tuple>
#include <vector>
+#include "src/common/globals.h"
+
namespace v8 {
namespace debug {
@@ -26,10 +27,10 @@ struct ModuleWireBytes;
// Generate disassembly according to official text format.
// Output disassembly to the given output stream, and optionally return an
// offset table of <byte offset, line, column> via the given pointer.
-void PrintWasmText(
- const WasmModule *module, const ModuleWireBytes &wire_bytes,
- uint32_t func_index, std::ostream &os,
- std::vector<debug::WasmDisassemblyOffsetTableEntry> *offset_table);
+V8_EXPORT_PRIVATE void PrintWasmText(
+ const WasmModule* module, const ModuleWireBytes& wire_bytes,
+ uint32_t func_index, std::ostream& os,
+ std::vector<debug::WasmDisassemblyOffsetTableEntry>* offset_table);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 23f1aed7f0..8de53b96cf 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_WASM_VALUE_H_
#define V8_WASM_WASM_VALUE_H_
-#include "src/common/v8memory.h"
+#include "src/base/memory.h"
#include "src/handles/handles.h"
#include "src/utils/boxed-float.h"
#include "src/wasm/wasm-opcodes.h"
@@ -15,10 +15,12 @@ namespace v8 {
namespace internal {
namespace wasm {
-#define FOREACH_SIMD_TYPE(V) \
- V(float, float4, f32x4, 4) \
- V(int32_t, int4, i32x4, 4) \
- V(int16_t, int8, i16x8, 8) \
+#define FOREACH_SIMD_TYPE(V) \
+ V(double, float2, f64x2, 2) \
+ V(float, float4, f32x4, 4) \
+ V(int64_t, int2, i64x2, 2) \
+ V(int32_t, int4, i32x4, 4) \
+ V(int16_t, int8, i16x8, 8) \
V(int8_t, int16, i8x16, 16)
#define DEFINE_SIMD_TYPE(cType, sType, name, kSize) \
@@ -35,12 +37,12 @@ class Simd128 {
val_[i] = 0;
}
}
-#define DEFINE_SIMD_TYPE_SPECIFIC_METHODS(cType, sType, name, size) \
- explicit Simd128(sType val) { \
- WriteUnalignedValue<sType>(reinterpret_cast<Address>(val_), val); \
- } \
- sType to_##name() { \
- return ReadUnalignedValue<sType>(reinterpret_cast<Address>(val_)); \
+#define DEFINE_SIMD_TYPE_SPECIFIC_METHODS(cType, sType, name, size) \
+ explicit Simd128(sType val) { \
+ base::WriteUnalignedValue<sType>(reinterpret_cast<Address>(val_), val); \
+ } \
+ sType to_##name() { \
+ return base::ReadUnalignedValue<sType>(reinterpret_cast<Address>(val_)); \
}
FOREACH_SIMD_TYPE(DEFINE_SIMD_TYPE_SPECIFIC_METHODS)
#undef DEFINE_SIMD_TYPE_SPECIFIC_METHODS
@@ -73,18 +75,20 @@ class WasmValue {
public:
WasmValue() : type_(kWasmStmt), bit_pattern_{} {}
-#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
- explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
- static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
- "size too big for WasmValue"); \
- WriteUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_), v); \
- } \
- ctype to_##name() const { \
- DCHECK_EQ(localtype, type_); \
- return to_##name##_unchecked(); \
- } \
- ctype to_##name##_unchecked() const { \
- return ReadUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_)); \
+#define DEFINE_TYPE_SPECIFIC_METHODS(name, localtype, ctype) \
+ explicit WasmValue(ctype v) : type_(localtype), bit_pattern_{} { \
+ static_assert(sizeof(ctype) <= sizeof(bit_pattern_), \
+ "size too big for WasmValue"); \
+ base::WriteUnalignedValue<ctype>(reinterpret_cast<Address>(bit_pattern_), \
+ v); \
+ } \
+ ctype to_##name() const { \
+ DCHECK_EQ(localtype, type_); \
+ return to_##name##_unchecked(); \
+ } \
+ ctype to_##name##_unchecked() const { \
+ return base::ReadUnalignedValue<ctype>( \
+ reinterpret_cast<Address>(bit_pattern_)); \
}
FOREACH_WASMVAL_TYPE(DEFINE_TYPE_SPECIFIC_METHODS)
#undef DEFINE_TYPE_SPECIFIC_METHODS
diff --git a/deps/v8/src/zone/OWNERS b/deps/v8/src/zone/OWNERS
new file mode 100644
index 0000000000..01c515ab90
--- /dev/null
+++ b/deps/v8/src/zone/OWNERS
@@ -0,0 +1,3 @@
+clemensh@chromium.org
+sigurds@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/src/zone/zone-allocator.h b/deps/v8/src/zone/zone-allocator.h
index fe62d4bb4c..69928d5925 100644
--- a/deps/v8/src/zone/zone-allocator.h
+++ b/deps/v8/src/zone/zone-allocator.h
@@ -26,8 +26,18 @@ class ZoneAllocator {
using other = ZoneAllocator<O>;
};
-#ifdef V8_CC_MSVC
- // MSVS unfortunately requires the default constructor to be defined.
+#ifdef V8_OS_WIN
+ // The exported class ParallelMove derives from ZoneVector, which derives
+ // from std::vector. On Windows, the semantics of dllexport mean that
+ // a class's superclasses that are not explicitly exported themselves get
+ // implicitly exported together with the subclass, and exporting a class
+ // exports all its functions -- including the std::vector() constructors
+ // that don't take an explicit allocator argument, which in turn reference
+ // the vector allocator's default constructor. So this constructor needs
+ // to exist for linking purposes, even if it's never called.
+ // Other fixes would be to disallow subclasses of ZoneVector (etc) to be
+ // exported, or using composition instead of inheritance for either
+ // ZoneVector and friends or for ParallelMove.
ZoneAllocator() : ZoneAllocator(nullptr) { UNREACHABLE(); }
#endif
explicit ZoneAllocator(Zone* zone) : zone_(zone) {}
@@ -37,14 +47,8 @@ class ZoneAllocator {
template <typename U>
friend class ZoneAllocator;
- T* address(T& x) const { return &x; }
- const T* address(const T& x) const { return &x; }
-
- T* allocate(size_t n, const void* hint = nullptr) {
- return static_cast<T*>(zone_->NewArray<T>(static_cast<int>(n)));
- }
- void deallocate(T* p, size_t) { /* noop for Zones */
- }
+ T* allocate(size_t n) { return zone_->NewArray<T>(n); }
+ void deallocate(T* p, size_t) {} // noop for zones
size_t max_size() const {
return std::numeric_limits<int>::max() / sizeof(T);
@@ -84,13 +88,6 @@ class RecyclingZoneAllocator : public ZoneAllocator<T> {
using other = RecyclingZoneAllocator<O>;
};
-#ifdef V8_CC_MSVC
- // MSVS unfortunately requires the default constructor to be defined.
- RecyclingZoneAllocator()
- : ZoneAllocator(nullptr, nullptr), free_list_(nullptr) {
- UNREACHABLE();
- }
-#endif
explicit RecyclingZoneAllocator(Zone* zone)
: ZoneAllocator<T>(zone), free_list_(nullptr) {}
template <typename U>
@@ -100,16 +97,15 @@ class RecyclingZoneAllocator : public ZoneAllocator<T> {
template <typename U>
friend class RecyclingZoneAllocator;
- T* allocate(size_t n, const void* hint = nullptr) {
+ T* allocate(size_t n) {
// Only check top block in free list, since this will be equal to or larger
// than the other blocks in the free list.
if (free_list_ && free_list_->size >= n) {
T* return_val = reinterpret_cast<T*>(free_list_);
free_list_ = free_list_->next;
return return_val;
- } else {
- return ZoneAllocator<T>::allocate(n, hint);
}
+ return ZoneAllocator<T>::allocate(n);
}
void deallocate(T* p, size_t n) {
diff --git a/deps/v8/src/zone/zone-splay-tree.h b/deps/v8/src/zone/zone-splay-tree.h
deleted file mode 100644
index c28df38fda..0000000000
--- a/deps/v8/src/zone/zone-splay-tree.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ZONE_ZONE_SPLAY_TREE_H_
-#define V8_ZONE_ZONE_SPLAY_TREE_H_
-
-#include "src/utils/splay-tree.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// A zone splay tree. The config type parameter encapsulates the
-// different configurations of a concrete splay tree (see splay-tree.h).
-// The tree itself and all its elements are allocated in the Zone.
-template <typename Config>
-class ZoneSplayTree final : public SplayTree<Config, ZoneAllocationPolicy> {
- public:
- explicit ZoneSplayTree(Zone* zone)
- : SplayTree<Config, ZoneAllocationPolicy>(ZoneAllocationPolicy(zone)) {}
- ~ZoneSplayTree() {
- // Reset the root to avoid unneeded iteration over all tree nodes
- // in the destructor. For a zone-allocated tree, nodes will be
- // freed by the Zone.
- SplayTree<Config, ZoneAllocationPolicy>::ResetRoot();
- }
-
- void* operator new(size_t size, Zone* zone) { return zone->New(size); }
-
- void operator delete(void* pointer) { UNREACHABLE(); }
- void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_ZONE_ZONE_SPLAY_TREE_H_
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index a6f45fad54..81fc9c7d8b 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -27,8 +27,7 @@ constexpr size_t kASanRedzoneBytes = 0;
} // namespace
-Zone::Zone(AccountingAllocator* allocator, const char* name,
- SegmentSize segment_size)
+Zone::Zone(AccountingAllocator* allocator, const char* name)
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
@@ -36,8 +35,7 @@ Zone::Zone(AccountingAllocator* allocator, const char* name,
allocator_(allocator),
segment_head_(nullptr),
name_(name),
- sealed_(false),
- segment_size_(segment_size) {
+ sealed_(false) {
allocator_->ZoneCreation(this);
}
@@ -137,12 +135,9 @@ Address Zone::NewExpand(size_t size) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
- if (segment_size_ == SegmentSize::kLarge) {
- new_size = kMaximumSegmentSize;
- }
if (new_size < kMinimumSegmentSize) {
new_size = kMinimumSegmentSize;
- } else if (new_size > kMaximumSegmentSize) {
+ } else if (new_size >= kMaximumSegmentSize) {
// Limit the size of new segments to avoid growing the segment size
// exponentially, thus putting pressure on contiguous virtual address space.
// All the while making sure to allocate a segment large enough to hold the
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index b113f49585..e2b66253f5 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -37,12 +37,9 @@ namespace internal {
// Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code.
-enum class SegmentSize { kLarge, kDefault };
-
class V8_EXPORT_PRIVATE Zone final {
public:
- Zone(AccountingAllocator* allocator, const char* name,
- SegmentSize segment_size = SegmentSize::kDefault);
+ Zone(AccountingAllocator* allocator, const char* name);
~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -102,7 +99,7 @@ class V8_EXPORT_PRIVATE Zone final {
static const size_t kMinimumSegmentSize = 8 * KB;
// Never allocate segments larger than this size in bytes.
- static const size_t kMaximumSegmentSize = 1 * MB;
+ static const size_t kMaximumSegmentSize = 32 * KB;
// Report zone excess when allocation exceeds this limit.
static const size_t kExcessLimit = 256 * MB;
@@ -136,7 +133,6 @@ class V8_EXPORT_PRIVATE Zone final {
Segment* segment_head_;
const char* name_;
bool sealed_;
- SegmentSize segment_size_;
};
// ZoneObject is an abstraction that helps define classes of objects
diff --git a/deps/v8/test/OWNERS b/deps/v8/test/OWNERS
index bdb1d555a4..852d438bb0 100644
--- a/deps/v8/test/OWNERS
+++ b/deps/v8/test/OWNERS
@@ -1 +1 @@
-file://INFRA_OWNERS
+file://COMMON_OWNERS
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 42396087ee..32a766736f 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -32,6 +32,10 @@ v8_executable("cctest") {
ldflags = []
+ if (v8_use_perfetto) {
+ deps += [ "//third_party/perfetto/include/perfetto/tracing" ]
+ }
+
# TODO(machenbach): Translate from gyp.
#["OS=="aix"", {
# "ldflags": [ "-Wl,-bbigtoc" ],
@@ -55,8 +59,6 @@ v8_source_set("cctest_sources") {
testonly = true
sources = [
- "$target_gen_dir/resources.cc",
-
### gcmole(all) ###
"../common/assembler-tester.h",
"../common/wasm/flag-utils.h",
@@ -111,6 +113,8 @@ v8_source_set("cctest_sources") {
"compiler/test-run-variables.cc",
"compiler/value-helper.cc",
"compiler/value-helper.h",
+ "disasm-regex-helper.cc",
+ "disasm-regex-helper.h",
"expression-type-collector-macros.h",
"gay-fixed.cc",
"gay-fixed.h",
@@ -165,8 +169,10 @@ v8_source_set("cctest_sources") {
"test-accessors.cc",
"test-allocation.cc",
"test-api-accessors.cc",
+ "test-api-array-buffer.cc",
"test-api-interceptors.cc",
"test-api-stack-traces.cc",
+ "test-api-typed-array.cc",
"test-api.cc",
"test-api.h",
"test-array-list.cc",
@@ -306,6 +312,8 @@ v8_source_set("cctest_sources") {
"test-fuzz-arm64.cc",
"test-javascript-arm64.cc",
"test-js-arm64-variables.cc",
+ "test-macro-assembler-arm64.cc",
+ "test-poison-disasm-arm64.cc",
"test-sync-primitives-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
@@ -369,7 +377,6 @@ v8_source_set("cctest_sources") {
public_deps = [
":cctest_headers",
- ":resources",
"..:common_test_headers",
"../..:v8_for_testing",
"../..:v8_libbase",
@@ -414,39 +421,14 @@ v8_source_set("cctest_sources") {
}
if (v8_use_perfetto) {
- deps += [ "//third_party/perfetto/protos/perfetto/trace/chrome:minimal_complete_lite" ]
+ deps += [
+ "//third_party/perfetto/include/perfetto/tracing",
+ "//third_party/perfetto/protos/perfetto/trace/chrome:lite",
+ "//third_party/perfetto/protos/perfetto/trace/chrome:zero",
+ ]
}
}
-action("resources") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- script = "../../tools/js2c.py"
-
- # NOSORT
- sources = [
- "../../tools/splaytree.js",
- "../../tools/codemap.js",
- "../../tools/csvparser.js",
- "../../tools/consarray.js",
- "../../tools/profile.js",
- "../../tools/profile_view.js",
- "../../tools/arguments.js",
- "../../tools/logreader.js",
- "log-eq-of-logging-and-traversal.js",
- ]
-
- outputs = [
- "$target_gen_dir/resources.cc",
- ]
-
- args = [
- rebase_path("$target_gen_dir/resources.cc", root_build_dir),
- "TEST",
- ]
- args += rebase_path(sources, root_build_dir)
-}
-
v8_executable("generate-bytecode-expectations") {
testonly = true
@@ -468,13 +450,8 @@ v8_executable("generate-bytecode-expectations") {
"../..:v8_libplatform",
"//build/win:default_exe_manifest",
]
-}
-#Target to generate all .cc files.
-group("v8_generated_cc_files") {
- testonly = true
-
- deps = [
- ":resources",
+ data = [
+ "interpreter/bytecode_expectations/",
]
}
diff --git a/deps/v8/test/cctest/DEPS b/deps/v8/test/cctest/DEPS
index 7210fb5317..909e60372e 100644
--- a/deps/v8/test/cctest/DEPS
+++ b/deps/v8/test/cctest/DEPS
@@ -1,4 +1,5 @@
include_rules = [
"+src",
- "+torque-generated"
+ "+torque-generated",
+ "+perfetto/tracing.h"
]
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 92be1567b0..c11f42d6c9 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,13 +1 @@
-per-file *-mips*=xwafish@gmail.com
-per-file *-ppc*=dstence@us.ibm.com
-per-file *-ppc*=joransiu@ca.ibm.com
-per-file *-ppc*=jyan@ca.ibm.com
-per-file *-ppc*=mbrandy@us.ibm.com
-per-file *-ppc*=michael_dawson@ca.ibm.com
-per-file *-s390*=dstence@us.ibm.com
-per-file *-s390*=joransiu@ca.ibm.com
-per-file *-s390*=jyan@ca.ibm.com
-per-file *-s390*=mbrandy@us.ibm.com
-per-file *-s390*=michael_dawson@ca.ibm.com
per-file *profile*=alph@chromium.org
-
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 353f7f5c76..dcfca2b2df 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -39,6 +39,10 @@
#include "test/cctest/profiler-extension.h"
#include "test/cctest/trace-extension.h"
+#ifdef V8_USE_PERFETTO
+#include "perfetto/tracing.h"
+#endif // V8_USE_PERFETTO
+
#if V8_OS_WIN
#include <windows.h> // NOLINT
#if V8_CC_MSVC
@@ -121,7 +125,9 @@ void CcTest::Run() {
}
i::Heap* CcTest::heap() { return i_isolate()->heap(); }
-i::ReadOnlyHeap* CcTest::read_only_heap() { return heap()->read_only_heap(); }
+i::ReadOnlyHeap* CcTest::read_only_heap() {
+ return i_isolate()->read_only_heap();
+}
void CcTest::CollectGarbage(i::AllocationSpace space) {
heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
@@ -268,7 +274,6 @@ static void SuggestTestHarness(int tests) {
"bogus failure. Consider using tools/run-tests.py instead.\n");
}
-
int main(int argc, char* argv[]) {
#if V8_OS_WIN
UINT new_flags =
@@ -300,6 +305,13 @@ int main(int argc, char* argv[]) {
}
}
+#ifdef V8_USE_PERFETTO
+ // Set up the in-process backend that the tracing controller will connect to.
+ perfetto::TracingInitArgs init_args;
+ init_args.backends = perfetto::BackendType::kInProcessBackend;
+ perfetto::Tracing::Initialize(init_args);
+#endif // V8_USE_PERFETTO
+
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
v8::V8::InitializePlatform(platform.get());
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 274a8bf28a..17d0096140 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -48,9 +48,6 @@
# works as intended.
'test-api/DisallowJavascriptExecutionScope': [FAIL],
- # TODO(gc): Temporarily disabled in the GC branch.
- 'test-log/EquivalenceOfLoggingAndTraversal': [PASS, FAIL],
-
# We do not yet shrink weak maps after they have been emptied by the GC
'test-weakmaps/Shrinking': [FAIL],
'test-weaksets/WeakSet_Shrinking': [FAIL],
@@ -84,9 +81,6 @@
'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
'test-sampler/LibSamplerCollectSample': [SKIP],
- # BUG(v8:9057). Flaky, maybe only on UBSan
- 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [SKIP],
-
# BUG(7202). The test is flaky.
'test-cpu-profiler/NativeFrameStackTrace': [SKIP],
@@ -204,6 +198,12 @@
}], # 'no_snap == True'
##############################################################################
+['no_snap == True and system != windows and mode == debug', {
+ # BUG(v8:9400).
+ 'test-heap/Regress615489': [SKIP],
+}], # 'no_snap == True and system != windows and mode == debug'
+
+##############################################################################
# TODO(machenbach): Fix application of '*'. Nosnap windows needs a separate
# section to not overwrite the expectations for TestThatAlwaysFails.
['no_snap == True and system == windows', {
@@ -376,14 +376,6 @@
}], # 'system != android and arch in [arm, arm64] and not simulator_run'
##############################################################################
-['system == aix and arch == ppc64', {
-
- # BUG 2857
- 'test-log/EquivalenceOfLoggingAndTraversal' : [SKIP],
-
-}], # 'system == aix and arch == ppc64'
-
-##############################################################################
['system == aix or (arch == ppc64 and byteorder == big)', {
# TODO(ppc): Fix for platforms with function desciptors.
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.cc b/deps/v8/test/cctest/compiler/serializer-tester.cc
index 45f4a1fb9c..2b56e07d24 100644
--- a/deps/v8/test/cctest/compiler/serializer-tester.cc
+++ b/deps/v8/test/cctest/compiler/serializer-tester.cc
@@ -264,7 +264,7 @@ TEST(SerializeUnconditionalJump) {
"function f() {"
" function p() {};"
" function q() {};"
- " if (a) g(q);"
+ " if (a) q();"
" else g(p);"
" return p;"
"};"
@@ -274,6 +274,20 @@ TEST(SerializeUnconditionalJump) {
"f(); return f;");
}
+TEST(MergeJumpTargetEnvironment) {
+ CheckForSerializedInlinee(
+ "function f() {"
+ " let g;"
+ " while (true) {"
+ " if (g === undefined) {g = ()=>1; break;} else {g = ()=>2; break};"
+ " };"
+ " g(); return g;"
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "%EnsureFeedbackVectorForFunction(f());"
+ "f(); return f;"); // Two calls to f to make g() megamorhpic.
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index 4dc4ac03e1..375c6586f3 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -21,7 +21,8 @@ namespace {
using Label = CodeAssemblerLabel;
using Variable = CodeAssemblerVariable;
-Node* SmiTag(CodeAssembler& m, Node* value) {
+Node* SmiTag(CodeAssembler& m, // NOLINT(runtime/references)
+ Node* value) {
int32_t constant_value;
if (m.ToInt32Constant(value, constant_value) &&
Smi::IsValid(constant_value)) {
@@ -30,22 +31,25 @@ Node* SmiTag(CodeAssembler& m, Node* value) {
return m.WordShl(value, m.IntPtrConstant(kSmiShiftSize + kSmiTagSize));
}
-Node* UndefinedConstant(CodeAssembler& m) {
+Node* UndefinedConstant(CodeAssembler& m) { // NOLINT(runtime/references)
return m.LoadRoot(RootIndex::kUndefinedValue);
}
-Node* SmiFromInt32(CodeAssembler& m, Node* value) {
+Node* SmiFromInt32(CodeAssembler& m, // NOLINT(runtime/references)
+ Node* value) {
value = m.ChangeInt32ToIntPtr(value);
return m.BitcastWordToTaggedSigned(
m.WordShl(value, kSmiShiftSize + kSmiTagSize));
}
-Node* LoadObjectField(CodeAssembler& m, Node* object, int offset,
+Node* LoadObjectField(CodeAssembler& m, // NOLINT(runtime/references)
+ Node* object, int offset,
MachineType type = MachineType::AnyTagged()) {
return m.Load(type, object, m.IntPtrConstant(offset - kHeapObjectTag));
}
-Node* LoadMap(CodeAssembler& m, Node* object) {
+Node* LoadMap(CodeAssembler& m, // NOLINT(runtime/references)
+ Node* object) {
return LoadObjectField(m, object, JSObject::kMapOffset);
}
@@ -131,7 +135,8 @@ TEST(SimpleTailCallRuntime2Arg) {
namespace {
-Handle<JSFunction> CreateSumAllArgumentsFunction(FunctionTester& ft) {
+Handle<JSFunction> CreateSumAllArgumentsFunction(
+ FunctionTester& ft) { // NOLINT(runtime/references)
const char* source =
"(function() {\n"
" var sum = 0 + this;\n"
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index bb686e8e70..30cd7da7b5 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -962,7 +962,8 @@ class CodeGeneratorTester {
: zone_(environment->main_zone()),
info_(ArrayVector("test"), environment->main_zone(), Code::STUB),
linkage_(environment->test_descriptor()),
- frame_(environment->test_descriptor()->CalculateFixedFrameSize()) {
+ frame_(environment->test_descriptor()->CalculateFixedFrameSize(
+ Code::STUB)) {
// Pick half of the stack parameters at random and move them into spill
// slots, separated by `extra_stack_space` bytes.
// When testing a move with stack slots using CheckAssembleMove or
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 458b1e521b..e6703dbbbe 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/codegen/tick-counter.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -30,8 +32,8 @@ class ContextSpecializationTester : public HandleAndZoneScope {
simplified_(main_zone()),
jsgraph_(main_isolate(), graph(), common(), &javascript_, &simplified_,
&machine_),
- reducer_(main_zone(), graph()),
- js_heap_broker_(main_isolate(), main_zone()),
+ reducer_(main_zone(), graph(), &tick_counter_),
+ js_heap_broker_(main_isolate(), main_zone(), FLAG_trace_heap_broker),
spec_(&reducer_, jsgraph(), &js_heap_broker_, context,
MaybeHandle<JSFunction>()) {}
@@ -51,6 +53,7 @@ class ContextSpecializationTester : public HandleAndZoneScope {
size_t expected_new_depth);
private:
+ TickCounter tick_counter_;
CanonicalHandleScope canonical_;
Graph* graph_;
CommonOperatorBuilder common_;
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index cc717c618e..fbe3892af1 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/codegen/tick-counter.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
@@ -24,7 +26,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
explicit JSTypedLoweringTester(int num_parameters = 0)
: isolate(main_isolate()),
canonical(isolate),
- js_heap_broker(isolate, main_zone()),
+ js_heap_broker(isolate, main_zone(), FLAG_trace_heap_broker),
binop(nullptr),
unop(nullptr),
javascript(main_zone()),
@@ -32,7 +34,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
simplified(main_zone()),
common(main_zone()),
graph(main_zone()),
- typer(&js_heap_broker, Typer::kNoFlags, &graph),
+ typer(&js_heap_broker, Typer::kNoFlags, &graph, &tick_counter),
context_node(nullptr) {
graph.SetStart(graph.NewNode(common.Start(num_parameters)));
graph.SetEnd(graph.NewNode(common.End(1), graph.start()));
@@ -40,6 +42,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
}
Isolate* isolate;
+ TickCounter tick_counter;
CanonicalHandleScope canonical;
JSHeapBroker js_heap_broker;
const Operator* binop;
@@ -89,7 +92,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
JSGraph jsgraph(main_isolate(), &graph, &common, &javascript, &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(main_zone(), &graph);
+ GraphReducer graph_reducer(main_zone(), &graph, &tick_counter);
JSTypedLowering reducer(&graph_reducer, &jsgraph, &js_heap_broker,
main_zone());
Reduction reduction = reducer.Reduce(node);
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 9a149f67f4..44bee022b3 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -109,8 +109,8 @@ class TestCode : public HandleAndZoneScope {
}
};
-
-void VerifyForwarding(TestCode& code, int count, int* expected) {
+void VerifyForwarding(TestCode& code, // NOLINT(runtime/references)
+ int count, int* expected) {
v8::internal::AccountingAllocator allocator;
Zone local_zone(&allocator, ZONE_NAME);
ZoneVector<RpoNumber> result(&local_zone);
@@ -122,7 +122,6 @@ void VerifyForwarding(TestCode& code, int count, int* expected) {
}
}
-
TEST(FwEmpty1) {
TestCode code;
@@ -611,8 +610,8 @@ void RunPermutedDiamond(int* permutation, int size) {
TEST(FwPermuted_diamond) { RunAllPermutations<4>(RunPermutedDiamond); }
-
-void ApplyForwarding(TestCode& code, int size, int* forward) {
+void ApplyForwarding(TestCode& code, // NOLINT(runtime/references)
+ int size, int* forward) {
code.sequence_.RecomputeAssemblyOrderForTesting();
ZoneVector<RpoNumber> vector(code.main_zone());
for (int i = 0; i < size; i++) {
@@ -621,8 +620,8 @@ void ApplyForwarding(TestCode& code, int size, int* forward) {
JumpThreading::ApplyForwarding(code.main_zone(), vector, &code.sequence_);
}
-
-void CheckJump(TestCode& code, int pos, int target) {
+void CheckJump(TestCode& code, // NOLINT(runtime/references)
+ int pos, int target) {
Instruction* instr = code.sequence_.InstructionAt(pos);
CHECK_EQ(kArchJmp, instr->arch_opcode());
CHECK_EQ(1, static_cast<int>(instr->InputCount()));
@@ -631,8 +630,8 @@ void CheckJump(TestCode& code, int pos, int target) {
CHECK_EQ(target, code.sequence_.InputRpo(instr, 0).ToInt());
}
-
-void CheckNop(TestCode& code, int pos) {
+void CheckNop(TestCode& code, // NOLINT(runtime/references)
+ int pos) {
Instruction* instr = code.sequence_.InstructionAt(pos);
CHECK_EQ(kArchNop, instr->arch_opcode());
CHECK_EQ(0, static_cast<int>(instr->InputCount()));
@@ -640,8 +639,8 @@ void CheckNop(TestCode& code, int pos) {
CHECK_EQ(0, static_cast<int>(instr->TempCount()));
}
-
-void CheckBranch(TestCode& code, int pos, int t1, int t2) {
+void CheckBranch(TestCode& code, // NOLINT(runtime/references)
+ int pos, int t1, int t2) {
Instruction* instr = code.sequence_.InstructionAt(pos);
CHECK_EQ(2, static_cast<int>(instr->InputCount()));
CHECK_EQ(0, static_cast<int>(instr->OutputCount()));
@@ -650,15 +649,14 @@ void CheckBranch(TestCode& code, int pos, int t1, int t2) {
CHECK_EQ(t2, code.sequence_.InputRpo(instr, 1).ToInt());
}
-
-void CheckAssemblyOrder(TestCode& code, int size, int* expected) {
+void CheckAssemblyOrder(TestCode& code, // NOLINT(runtime/references)
+ int size, int* expected) {
int i = 0;
for (auto const block : code.sequence_.instruction_blocks()) {
CHECK_EQ(expected[i++], block->ao_number().ToInt());
}
}
-
TEST(Rewire1) {
TestCode code;
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index 531d43038e..231a3ada5a 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/loop-analysis.h"
@@ -57,6 +58,7 @@ class LoopFinderTester : HandleAndZoneScope {
}
Isolate* isolate;
+ TickCounter tick_counter;
CommonOperatorBuilder common;
Graph graph;
JSGraph jsgraph;
@@ -128,7 +130,7 @@ class LoopFinderTester : HandleAndZoneScope {
StdoutStream{} << AsRPO(graph);
}
Zone zone(main_isolate()->allocator(), ZONE_NAME);
- loop_tree = LoopFinder::BuildLoopTree(&graph, &zone);
+ loop_tree = LoopFinder::BuildLoopTree(&graph, &tick_counter, &zone);
}
return loop_tree;
}
@@ -199,7 +201,7 @@ struct While {
}
void chain(Node* control) { loop->ReplaceInput(0, control); }
- void nest(While& that) {
+ void nest(While& that) { // NOLINT(runtime/references)
that.loop->ReplaceInput(1, exit);
this->loop->ReplaceInput(0, that.if_true);
}
@@ -212,7 +214,8 @@ struct Counter {
Node* phi;
Node* add;
- Counter(While& w, int32_t b, int32_t k)
+ Counter(While& w, // NOLINT(runtime/references)
+ int32_t b, int32_t k)
: base(w.t.jsgraph.Int32Constant(b)), inc(w.t.jsgraph.Int32Constant(k)) {
Build(w);
}
@@ -233,7 +236,7 @@ struct StoreLoop {
Node* phi;
Node* store;
- explicit StoreLoop(While& w)
+ explicit StoreLoop(While& w) // NOLINT(runtime/references)
: base(w.t.graph.start()), val(w.t.jsgraph.Int32Constant(13)) {
Build(w);
}
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index 1376823657..f2f0003ad8 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -4,6 +4,7 @@
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/operator-properties.h"
@@ -86,12 +87,13 @@ class ReducerTester : public HandleAndZoneScope {
javascript(main_zone()),
jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine),
maxuint32(Constant<int32_t>(kMaxUInt32)),
- graph_reducer(main_zone(), &graph, jsgraph.Dead()) {
+ graph_reducer(main_zone(), &graph, &tick_counter, jsgraph.Dead()) {
Node* s = graph.NewNode(common.Start(num_parameters));
graph.SetStart(s);
}
Isolate* isolate;
+ TickCounter tick_counter;
const Operator* binop;
const Operator* unop;
MachineOperatorBuilder machine;
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 4a81ec2691..a34b1e14e5 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -43,7 +43,8 @@ CallDescriptor* CreateCallDescriptor(Zone* zone, int return_count,
return compiler::GetWasmCallDescriptor(zone, builder.Build());
}
-Node* MakeConstant(RawMachineAssembler& m, MachineType type, int value) {
+Node* MakeConstant(RawMachineAssembler& m, // NOLINT(runtime/references)
+ MachineType type, int value) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Constant(static_cast<int32_t>(value));
@@ -58,7 +59,8 @@ Node* MakeConstant(RawMachineAssembler& m, MachineType type, int value) {
}
}
-Node* Add(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
+Node* Add(RawMachineAssembler& m, // NOLINT(runtime/references)
+ MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Add(a, b);
@@ -73,7 +75,8 @@ Node* Add(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
}
}
-Node* Sub(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
+Node* Sub(RawMachineAssembler& m, // NOLINT(runtime/references)
+ MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Sub(a, b);
@@ -88,7 +91,8 @@ Node* Sub(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
}
}
-Node* Mul(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
+Node* Mul(RawMachineAssembler& m, // NOLINT(runtime/references)
+ MachineType type, Node* a, Node* b) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Mul(a, b);
@@ -103,7 +107,8 @@ Node* Mul(RawMachineAssembler& m, MachineType type, Node* a, Node* b) {
}
}
-Node* ToInt32(RawMachineAssembler& m, MachineType type, Node* a) {
+Node* ToInt32(RawMachineAssembler& m, // NOLINT(runtime/references)
+ MachineType type, Node* a) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return a;
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 33e5cf1548..dac6f61932 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -4,6 +4,8 @@
#include <limits>
+#include "src/compiler/access-info.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/type-cache.h"
@@ -25,13 +27,15 @@ class RepresentationChangerTester : public HandleAndZoneScope,
javascript_(main_zone()),
jsgraph_(main_isolate(), main_graph_, &main_common_, &javascript_,
&main_simplified_, &main_machine_),
- changer_(&jsgraph_, main_isolate()) {
+ broker_{main_isolate(), main_zone(), FLAG_trace_heap_broker},
+ changer_(&jsgraph_, &broker_) {
Node* s = graph()->NewNode(common()->Start(num_parameters));
graph()->SetStart(s);
}
JSOperatorBuilder javascript_;
JSGraph jsgraph_;
+ JSHeapBroker broker_;
RepresentationChanger changer_;
Isolate* isolate() { return main_isolate(); }
@@ -376,8 +380,16 @@ TEST(Word64) {
TypeCache::Get()->kUint16, MachineRepresentation::kWord64);
CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord32,
Type::Signed32(), MachineRepresentation::kWord64);
+ CheckChange(
+ IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord32,
+ Type::Signed32OrMinusZero(), MachineRepresentation::kWord64,
+ UseInfo(MachineRepresentation::kWord64, Truncation::Any(kIdentifyZeros)));
CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord32,
Type::Unsigned32(), MachineRepresentation::kWord64);
+ CheckChange(
+ IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord32,
+ Type::Unsigned32OrMinusZero(), MachineRepresentation::kWord64,
+ UseInfo(MachineRepresentation::kWord64, Truncation::Any(kIdentifyZeros)));
CheckChange(IrOpcode::kTruncateInt64ToInt32, MachineRepresentation::kWord64,
Type::Signed32(), MachineRepresentation::kWord32);
@@ -514,6 +526,9 @@ TEST(SingleChanges) {
CheckChange(IrOpcode::kChangeBitToTagged, MachineRepresentation::kBit,
Type::Boolean(), MachineRepresentation::kTagged);
+ CheckChange(IrOpcode::kChangeInt31ToCompressedSigned,
+ MachineRepresentation::kWord32, Type::Signed31(),
+ MachineRepresentation::kCompressedSigned);
CheckChange(IrOpcode::kChangeInt31ToTaggedSigned,
MachineRepresentation::kWord32, Type::Signed31(),
MachineRepresentation::kTagged);
@@ -544,7 +559,8 @@ TEST(SingleChanges) {
Type::Number(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kTruncateTaggedToFloat64,
MachineRepresentation::kTagged, Type::NumberOrUndefined(),
- UseInfo(MachineRepresentation::kFloat64, Truncation::Float64()));
+ UseInfo(MachineRepresentation::kFloat64,
+ Truncation::OddballAndBigIntToNumber()));
CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
Type::Signed31(), MachineRepresentation::kFloat64);
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 2432ec3afe..eed6cf1e59 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -327,28 +327,34 @@ class ArgsBuffer {
return kTypes;
}
- Node* MakeConstant(RawMachineAssembler& raw, int32_t value) {
+ Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
+ int32_t value) {
return raw.Int32Constant(value);
}
- Node* MakeConstant(RawMachineAssembler& raw, int64_t value) {
+ Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
+ int64_t value) {
return raw.Int64Constant(value);
}
- Node* MakeConstant(RawMachineAssembler& raw, float32 value) {
+ Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
+ float32 value) {
return raw.Float32Constant(value);
}
- Node* MakeConstant(RawMachineAssembler& raw, float64 value) {
+ Node* MakeConstant(RawMachineAssembler& raw, // NOLINT(runtime/references)
+ float64 value) {
return raw.Float64Constant(value);
}
- Node* LoadInput(RawMachineAssembler& raw, Node* base, int index) {
+ Node* LoadInput(RawMachineAssembler& raw, // NOLINT(runtime/references)
+ Node* base, int index) {
Node* offset = raw.Int32Constant(index * sizeof(CType));
return raw.Load(MachineTypeForC<CType>(), base, offset);
}
- Node* StoreOutput(RawMachineAssembler& raw, Node* value) {
+ Node* StoreOutput(RawMachineAssembler& raw, // NOLINT(runtime/references)
+ Node* value) {
Node* base = raw.PointerConstant(&output);
Node* offset = raw.Int32Constant(0);
return raw.Store(MachineTypeForC<CType>().representation(), base, offset,
@@ -710,9 +716,9 @@ static uint32_t coeff[] = {1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
79, 83, 89, 97, 101, 103, 107, 109, 113};
-
-static void Build_Int32_WeightedSum(CallDescriptor* desc,
- RawMachineAssembler& raw) {
+static void Build_Int32_WeightedSum(
+ CallDescriptor* desc,
+ RawMachineAssembler& raw) { // NOLINT(runtime/references)
Node* result = raw.Int32Constant(0);
for (int i = 0; i < ParamCount(desc); i++) {
Node* term = raw.Int32Mul(raw.Parameter(i), raw.Int32Constant(coeff[i]));
@@ -721,7 +727,6 @@ static void Build_Int32_WeightedSum(CallDescriptor* desc,
raw.Return(result);
}
-
static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) {
uint32_t result = 0;
for (int i = 0; i < ParamCount(desc); i++) {
@@ -767,13 +772,13 @@ TEST_INT32_WEIGHTEDSUM(11)
TEST_INT32_WEIGHTEDSUM(17)
TEST_INT32_WEIGHTEDSUM(19)
-
template <int which>
-static void Build_Select(CallDescriptor* desc, RawMachineAssembler& raw) {
+static void Build_Select(
+ CallDescriptor* desc,
+ RawMachineAssembler& raw) { // NOLINT(runtime/references)
raw.Return(raw.Parameter(which));
}
-
template <typename CType, int which>
static CType Compute_Select(CallDescriptor* desc, CType* inputs) {
return inputs[which];
@@ -943,10 +948,10 @@ TEST(Float64Select_stack_params_return_reg) {
}
}
-
template <typename CType, int which>
-static void Build_Select_With_Call(CallDescriptor* desc,
- RawMachineAssembler& raw) {
+static void Build_Select_With_Call(
+ CallDescriptor* desc,
+ RawMachineAssembler& raw) { // NOLINT(runtime/references)
Handle<Code> inner = Handle<Code>::null();
int num_params = ParamCount(desc);
CHECK_LE(num_params, kMaxParamCount);
@@ -977,7 +982,6 @@ static void Build_Select_With_Call(CallDescriptor* desc,
}
}
-
TEST(Float64StackParamsToStackParams) {
int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
Allocator params(nullptr, 0, nullptr, 0);
diff --git a/deps/v8/test/cctest/disasm-regex-helper.cc b/deps/v8/test/cctest/disasm-regex-helper.cc
new file mode 100644
index 0000000000..2b3b4f97a5
--- /dev/null
+++ b/deps/v8/test/cctest/disasm-regex-helper.cc
@@ -0,0 +1,291 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/disasm-regex-helper.h"
+
+#include "src/api/api-inl.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/objects/objects-inl.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+std::string DisassembleFunction(const char* function) {
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CcTest::global()->Get(context, v8_str(function)).ToLocalChecked())));
+
+ Address begin = f->code().raw_instruction_start();
+ Address end = f->code().raw_instruction_end();
+ Isolate* isolate = CcTest::i_isolate();
+ std::ostringstream os;
+ Disassembler::Decode(isolate, &os, reinterpret_cast<byte*>(begin),
+ reinterpret_cast<byte*>(end),
+ CodeReference(handle(f->code(), isolate)));
+ return os.str();
+}
+
+} // namespace
+
+bool CheckDisassemblyRegexPatterns(
+ const char* function_name, const std::vector<std::string>& patterns_array) {
+ std::istringstream reader(DisassembleFunction(function_name));
+ size_t size = patterns_array.size();
+ DCHECK_GT(size, 0);
+
+ std::smatch match;
+ std::string line;
+ RegexParser parser;
+ const std::string& first_pattern = patterns_array[0];
+ while (std::getline(reader, line)) {
+ RegexParser::Status status = parser.ProcessPattern(line, first_pattern);
+ if (status == RegexParser::Status::kSuccess) {
+ CHECK(std::getline(reader, line));
+ for (size_t i = 1; i < size; i++) {
+ const std::string& pattern = patterns_array[i];
+ status = parser.ProcessPattern(line, pattern);
+ if (status != RegexParser::Status::kSuccess) {
+ std::cout << "Pattern \"" << pattern << "\" not found" << std::endl;
+ std::cout << "Line: \"" << line << "\":" << std::endl;
+ parser.PrintSymbols(std::cout);
+ return false;
+ }
+ CHECK(std::getline(reader, line));
+ }
+
+ return true;
+ }
+ }
+ return false;
+}
+
+namespace {
+void RegexCheck(
+ const std::vector<std::string>& inputs,
+ const std::vector<std::string>& patterns,
+ RegexParser::Status expected_status,
+ std::function<void(const RegexParser&)> func = [](const RegexParser&) {}) {
+ size_t size = patterns.size();
+ CHECK_EQ(size, inputs.size());
+ RegexParser parser;
+ RegexParser::Status status;
+ size_t i = 0;
+ for (; i < size - 1; i++) {
+ const std::string& line = inputs[i];
+ const std::string& pattern = patterns[i];
+ status = parser.ProcessPattern(line, pattern);
+ CHECK_EQ(status, RegexParser::Status::kSuccess);
+ }
+ const std::string& line = inputs[i];
+ const std::string& pattern = patterns[i];
+ status = parser.ProcessPattern(line, pattern);
+
+ if (status != expected_status) {
+ parser.PrintSymbols(std::cout);
+ }
+ CHECK_EQ(status, expected_status);
+ func(parser);
+}
+
+// Check a line against a pattern.
+void RegexCheckOne(
+ const std::string& line, const std::string& pattern,
+ RegexParser::Status expected_status,
+ std::function<void(const RegexParser&)> func = [](const RegexParser&) {}) {
+ RegexParser parser;
+ RegexParser::Status status = parser.ProcessPattern(line, pattern);
+ CHECK_EQ(status, expected_status);
+ func(parser);
+}
+
+void TestSymbolValue(const std::string& sym_name, const std::string& value,
+ const RegexParser& p) {
+ CHECK(p.IsSymbolDefined(sym_name));
+ CHECK_EQ(p.GetSymbolMatchedValue(sym_name).compare(value), 0);
+}
+
+} // namespace
+
+// clang-format off
+TEST(RegexParserSingleLines) {
+ //
+ // Simple one-liners for found/not found.
+ //
+ RegexCheckOne(" a b a b c a",
+ "a b c",
+ RegexParser::Status::kSuccess);
+
+ RegexCheckOne(" a b a bc a",
+ "a b c",
+ RegexParser::Status::kNotMatched);
+
+ RegexCheckOne("aaabbaaa",
+ "ab.*?a",
+ RegexParser::Status::kSuccess);
+
+ RegexCheckOne("aaabbaa",
+ "^(?:aa+|b)+$",
+ RegexParser::Status::kSuccess);
+
+ RegexCheckOne("aaabba",
+ "^(?:aa+|b)+$",
+ RegexParser::Status::kNotMatched);
+
+ RegexCheckOne("(aaa)",
+ "\\(a+\\)",
+ RegexParser::Status::kSuccess);
+
+ RegexCheckOne("r19 qwerty",
+ "r<<Def:[0-9]+>>",
+ RegexParser::Status::kSuccess,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def", "19", p);
+ });
+
+ RegexCheckOne("r19 qwerty",
+ "r<<Def:[a-z]+>>",
+ RegexParser::Status::kSuccess,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def", "ty", p);
+ });
+
+ // Backreference/submatch groups are forbidden.
+ RegexCheckOne("aaabba",
+ "((aa+)|b)+?",
+ RegexParser::Status::kWrongPattern);
+
+ // Using passive groups.
+ RegexCheckOne("aaabba",
+ "(?:(?:aa+)|b)+?",
+ RegexParser::Status::kSuccess);
+
+ //
+ // Symbol definitions.
+ //
+ RegexCheckOne("r19 r20",
+ "r<<Def:19>>",
+ RegexParser::Status::kSuccess,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def", "19", p);
+ });
+
+ RegexCheckOne("r19 r20",
+ "r<<Def:[0-9]+>>",
+ RegexParser::Status::kSuccess,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def", "19", p);
+ });
+
+ RegexCheckOne("r19 r20",
+ "r<<Def0:[0-9]+>>.*?r<<Def1:[0-9]+>>",
+ RegexParser::Status::kSuccess,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def0", "19", p);
+ TestSymbolValue("Def1", "20", p);
+ });
+
+ RegexCheckOne("r19 r20",
+ "r<<Def0:[0-9]+>>.*?r[0-9]",
+ RegexParser::Status::kSuccess,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def0", "19", p);
+ });
+
+ // Checks that definitions are not committed unless the pattern is matched.
+ RegexCheckOne("r19",
+ "r<<Def0:[0-9]+>>.*?r<<Def1:[0-9]+>>",
+ RegexParser::Status::kNotMatched,
+ [] (const RegexParser& p) {
+ CHECK(!p.IsSymbolDefined("Def0"));
+ CHECK(!p.IsSymbolDefined("Def1"));
+ });
+
+ RegexCheckOne("r19 r19 r1",
+ "r<<Def0:[0-9]+>>.*?r<<Def0:[0-9]+>> r<<Def1:[0-9]+>>",
+ RegexParser::Status::kRedefinition,
+ [] (const RegexParser& p) {
+ CHECK(!p.IsSymbolDefined("Def0"));
+ CHECK(!p.IsSymbolDefined("Def1"));
+ });
+
+ RegexCheckOne("r19 r1",
+ "r<<Def0:[0-9]+>> (r1)",
+ RegexParser::Status::kWrongPattern,
+ [] (const RegexParser& p) {
+ CHECK(!p.IsSymbolDefined("Def0"));
+ });
+
+ //
+ // Undefined symbol references.
+ //
+ RegexCheckOne("r19 r1",
+ "r[0-9].*?r<<Undef>>",
+ RegexParser::Status::kDefNotFound,
+ [] (const RegexParser& p) {
+ CHECK(!p.IsSymbolDefined("Undef"));
+ });
+
+ RegexCheckOne("r19 r1",
+ "r<<Def0:[0-9]+>>.*?<<Undef>>",
+ RegexParser::Status::kDefNotFound,
+ [] (const RegexParser& p) {
+ CHECK(!p.IsSymbolDefined("Undef"));
+ CHECK(!p.IsSymbolDefined("Def0"));
+ });
+
+ RegexCheckOne("r19 r19",
+ "r<<Def0:[0-9]+>>.*?<<Def0>>",
+ RegexParser::Status::kDefNotFound,
+ [] (const RegexParser& p) {
+ CHECK(!p.IsSymbolDefined("Def0"));
+ });
+}
+
+TEST(RegexParserMultiLines) {
+ RegexCheck({ " a b a b c a",
+ " a b a b c a" },
+ { "a b c",
+ "a b c" },
+ RegexParser::Status::kSuccess);
+
+ RegexCheck({ "r16 = r15",
+ "r17 = r16" },
+ { "<<Def:r[0-9]+>> = r[0-9]+",
+ "[0-9]+ = <<Def>>" },
+ RegexParser::Status::kSuccess,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def", "r16", p);
+ });
+
+ RegexCheck({ "r16 = r15 + r13",
+ "r17 = r16 + r14",
+ "r19 = r14" },
+ { "<<Def0:r[0-9]+>> = r[0-9]+",
+ "<<Def1:r[0-9]+>> = <<Def0>> \\+ <<Def2:r[0-9]+>>",
+ "<<Def3:r[0-9]+>> = <<Def2>>" },
+ RegexParser::Status::kSuccess,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def0", "r16", p);
+ TestSymbolValue("Def1", "r17", p);
+ TestSymbolValue("Def2", "r14", p);
+ TestSymbolValue("Def3", "r19", p);
+ });
+
+ // Constraint is not met for Def (r19 != r16).
+ RegexCheck({ "r16 = r15",
+ "r17 = r19" },
+ { "<<Def:r[0-9]+>> = r[0-9]+",
+ "[0-9]+ = <<Def>>" },
+ RegexParser::Status::kNotMatched,
+ [] (const RegexParser& p) {
+ TestSymbolValue("Def", "r16", p);
+ });
+}
+// clang-format on
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/disasm-regex-helper.h b/deps/v8/test/cctest/disasm-regex-helper.h
new file mode 100644
index 0000000000..c50b27a36b
--- /dev/null
+++ b/deps/v8/test/cctest/disasm-regex-helper.h
@@ -0,0 +1,318 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_DISASM_REGEX_HELPER_H_
+#define V8_CCTEST_DISASM_REGEX_HELPER_H_
+
+#include <iostream>
+#include <map>
+#include <regex> // NOLINT(build/c++11)
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// This class provides methods for regular expression matching with an extra
+// feature of user defined named capture groups which are alive across
+// regex search calls.
+//
+// The main use case for the class is to test multiple-line assembly
+// output with an ability to express dataflow or dependencies by allowing single
+// definition / multiple use symbols. When processing output lines and trying to
+// match them against the set of patterns a user can define a named group - a
+// symbol - and a regex for matching it. If the regex with the definitions is
+// matched then whenever this symbol appears again (no redefinitions though) in
+// the following patterns the parser will replace the symbol reference in the
+// pattern by an actual literal value matched during processing symbol
+// definition. This effectively checks that all of the output lines have
+// the same literal for the described symbol. To track the symbols this class
+// implements a simple single-definition symbol table.
+//
+// Example: Lets consider a case when we want to test that the assembly
+// output consists of two instructions - a load and a store; we also want
+// to check that the loaded value is used as store value for the store,
+// like here:
+//
+// ldr x3, [x4]
+// str x3, [x5]
+//
+// Using special syntax for symbol definitions and uses one could write the
+// following regex making sure that the load register is used by the store:
+//
+// 'ldr <<NamedReg:x[0-9]+>>, [x[0-9]+]'
+// 'str <<NamedReg>>, [x[0-9]+]'
+//
+// See 'ProcessPattern' for more details.
+class RegexParser {
+ public:
+ RegexParser()
+ // Regex to parse symbol references: definitions or uses.
+ // <<SymbolName[:'def regex']>>
+ : symbol_ref_regex_("<<([a-zA-Z_][a-zA-Z0-9_]*)(?::(.*?))?>>") {}
+
+ // Status codes used for return values and error diagnostics.
+ enum class Status {
+ kSuccess = 0,
+ kNotMatched,
+ kWrongPattern,
+ kDefNotFound,
+ kRedefinition,
+ };
+
+ // This class holds info on a symbol definition.
+ class SymbolInfo {
+ public:
+ explicit SymbolInfo(const std::string& matched_value)
+ : matched_value_(matched_value) {}
+
+ // Returns an actual matched value for the symbol.
+ const std::string& matched_value() const { return matched_value_; }
+
+ private:
+ std::string matched_value_;
+ };
+
+ // This class holds temporary info on a symbol while processing an input line.
+ class SymbolVectorElem {
+ public:
+ SymbolVectorElem(bool is_def, const std::string& symbol_name)
+ : is_def_(is_def), symbol_name_(symbol_name) {}
+
+ bool is_def() const { return is_def_; }
+ const std::string& symbol_name() const { return symbol_name_; }
+
+ private:
+ bool is_def_;
+ std::string symbol_name_;
+ };
+
+ using SymbolMap = std::map<std::string, SymbolInfo>;
+ using MatchVector = std::vector<SymbolVectorElem>;
+
+ // Tries to match (actually search, similar to std::regex_serach) the line
+ // against the pattern (possibly containing symbols references) and if
+ // matched commits symbols definitions from the pattern to the symbol table.
+ //
+ // Returns: status of the matching attempt.
+ //
+ // Important: the format of pattern regexs is based on std::ECMAScript syntax
+ // (http://www.cplusplus.com/reference/regex/ECMAScript/) with a few extra
+ // restrictions:
+ // * no backreference (or submatch) groups
+ // - when a group (e.g. "(a|b)+") is needed use a passive group
+ // (e.g. "(?:a|b)+").
+ // * special syntax for symbol definitions: <<Name:regex>>
+ // - 'Name' must be c-ctyle variable name ([a-zA-Z_][a-zA-Z0-9_]*).
+ // - 'regex' - is a regex for the actual literal expected in the symbol
+ // definition line. It must not contain any symbol references.
+ // * special syntax for symbol uses <<Name>>
+ //
+ // Semantical restrictions on symbols references:
+ // * symbols mustn't be referenced before they are defined.
+ // - a pattern R1 which uses symbol 'A' mustn't be processed if a pattern
+ // R2 with the symbol 'A' definition hasn't been yet matched (R1!=R2).
+ // - A pattern mustn't define a symbol and use it inside the same regex.
+ // * symbols mustn't be redefined.
+ // - if a line has been matched against a pattern R1 with symbol 'A'
+ // then other patterns mustn't define symbol 'A'.
+ // * symbols defininitions are only committed and registered if the whole
+ // pattern is successfully matched.
+ //
+ // Notes:
+ // * A pattern may contain uses of the same or different symbols and
+ // definitions of different symbols however if a symbol is defined in the
+ // pattern it can't be used in the same pattern.
+ //
+ // Pattern example: "<<A:[0-9]+>> <<B>>, <<B> <<C:[a-z]+>>" (assuming 'B' is
+ // defined and matched).
+ Status ProcessPattern(const std::string& line, const std::string& pattern) {
+ // Processed pattern which is going to be used for std::regex_search; symbol
+ // references are replaced accordingly to the reference type - def or use.
+ std::string final_pattern;
+ // A vector of records for symbols references in the pattern. The format is
+ // {is_definition, symbol_name}.
+ MatchVector symbols_refs;
+ Status status =
+ ParseSymbolsInPattern(pattern, &final_pattern, &symbols_refs);
+ if (status != Status::kSuccess) {
+ return status;
+ }
+
+ std::smatch match;
+ if (!std::regex_search(line, match, std::regex(final_pattern))) {
+ return Status::kNotMatched;
+ }
+
+ // This checks that no backreference groups were used in the pattern except
+ // for those added by ParseSymbolsInPattern.
+ if (symbols_refs.size() != (match.size() - 1)) {
+ return Status::kWrongPattern;
+ }
+
+ status = CheckSymbolsMatchedValues(symbols_refs, match);
+ if (status != Status::kSuccess) {
+ return status;
+ }
+
+ CommitSymbolsDefinitions(symbols_refs, match);
+
+ return Status::kSuccess;
+ }
+
+ // Returns whether a symbol is defined in the symbol name.
+ bool IsSymbolDefined(const std::string& symbol_name) const {
+ auto symbol_map_iter = map_.find(symbol_name);
+ return symbol_map_iter != std::end(map_);
+ }
+
+ // Returns the matched value for a symbol.
+ std::string GetSymbolMatchedValue(const std::string& symbol_name) const {
+ DCHECK(IsSymbolDefined(symbol_name));
+ return map_.find(symbol_name)->second.matched_value();
+ }
+
+ // Prints the symbol table.
+ void PrintSymbols(std::ostream& os) const {
+ os << "Printing symbol table..." << std::endl;
+ for (const auto& t : map_) {
+ const std::string& sym_name = t.first;
+ const SymbolInfo& sym_info = t.second;
+ os << "<<" << sym_name << ">>: \"" << sym_info.matched_value() << "\""
+ << std::endl;
+ }
+ }
+
+ protected:
+ // Fixed layout for the symbol reference match.
+ enum SymbolMatchIndex {
+ kFullSubmatch = 0,
+ kName = 1,
+ kDefRegex = 2,
+ kSize = kDefRegex + 1,
+ };
+
+ // Processes a symbol reference: for definitions it adds the symbol regex, for
+ // uses it adds actual literal from a previously matched definition. Also
+ // fills the symbol references vector.
+ Status ProcessSymbol(const std::smatch& match, MatchVector* symbols_refs,
+ std::string* new_pattern) const {
+ bool is_def = match[SymbolMatchIndex::kDefRegex].length() != 0;
+ const std::string& symbol_name = match[SymbolMatchIndex::kName];
+
+ if (is_def) {
+ // Make sure the symbol isn't already defined.
+ auto symbol_iter =
+ std::find_if(symbols_refs->begin(), symbols_refs->end(),
+ [symbol_name](const SymbolVectorElem& ref) -> bool {
+ return ref.symbol_name() == symbol_name;
+ });
+ if (symbol_iter != std::end(*symbols_refs)) {
+ return Status::kRedefinition;
+ }
+
+ symbols_refs->emplace_back(true, symbol_name);
+ new_pattern->append("(");
+ new_pattern->append(match[SymbolMatchIndex::kDefRegex]);
+ new_pattern->append(")");
+ } else {
+ auto symbol_map_iter = map_.find(symbol_name);
+ if (symbol_map_iter == std::end(map_)) {
+ return Status::kDefNotFound;
+ }
+
+ const SymbolInfo& sym_info = symbol_map_iter->second;
+ new_pattern->append("(");
+ new_pattern->append(sym_info.matched_value());
+ new_pattern->append(")");
+
+ symbols_refs->emplace_back(false, symbol_name);
+ }
+ return Status::kSuccess;
+ }
+
+ // Parses the input pattern regex, processes symbols defs and uses inside
+ // it, fills a raw pattern used for std::regex_search.
+ Status ParseSymbolsInPattern(const std::string& pattern,
+ std::string* raw_pattern,
+ MatchVector* symbols_refs) const {
+ std::string::const_iterator low = pattern.cbegin();
+ std::string::const_iterator high = pattern.cend();
+ std::smatch match;
+
+ while (low != high) {
+ // Search for a symbol reference.
+ if (!std::regex_search(low, high, match, symbol_ref_regex_)) {
+ raw_pattern->append(low, high);
+ break;
+ }
+
+ if (match.size() != SymbolMatchIndex::kSize) {
+ return Status::kWrongPattern;
+ }
+
+ raw_pattern->append(match.prefix());
+
+ Status status = ProcessSymbol(match, symbols_refs, raw_pattern);
+ if (status != Status::kSuccess) {
+ return status;
+ }
+ low = match[SymbolMatchIndex::kFullSubmatch].second;
+ }
+ return Status::kSuccess;
+ }
+
+ // Checks that there are no symbol redefinitions and the symbols uses matched
+ // literal values are equal to corresponding matched definitions.
+ Status CheckSymbolsMatchedValues(const MatchVector& symbols_refs,
+ const std::smatch& match) const {
+ // There is a one-to-one correspondence between matched subexpressions and
+ // symbols refences in the vector (by construction).
+ for (size_t vec_pos = 0, size = symbols_refs.size(); vec_pos < size;
+ vec_pos++) {
+ auto elem = symbols_refs[vec_pos];
+ auto map_iter = map_.find(elem.symbol_name());
+ if (elem.is_def()) {
+ if (map_iter != std::end(map_)) {
+ return Status::kRedefinition;
+ }
+ } else {
+ DCHECK(map_iter != std::end(map_));
+ // We replaced use with matched definition value literal.
+ DCHECK_EQ(map_iter->second.matched_value().compare(match[vec_pos + 1]),
+ 0);
+ }
+ }
+ return Status::kSuccess;
+ }
+
+ // Commits symbols definitions and their matched values to the symbol table.
+ void CommitSymbolsDefinitions(const MatchVector& groups_vector,
+ const std::smatch& match) {
+ for (size_t vec_pos = 0, size = groups_vector.size(); vec_pos < size;
+ vec_pos++) {
+ size_t match_pos = vec_pos + 1;
+ auto elem = groups_vector[vec_pos];
+ if (elem.is_def()) {
+ auto emplace_res =
+ map_.emplace(elem.symbol_name(), SymbolInfo(match[match_pos]));
+ USE(emplace_res); // Silence warning about unused variable.
+ DCHECK(emplace_res.second == true);
+ }
+ }
+ }
+
+ const std::regex symbol_ref_regex_;
+ SymbolMap map_;
+};
+
+bool CheckDisassemblyRegexPatterns(
+ const char* function_name, const std::vector<std::string>& patterns_array);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CCTEST_DISASM_REGEX_HELPER_H_
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 24298d685c..8b53dab9c5 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -213,6 +213,7 @@ void ForceEvacuationCandidate(Page* page) {
CHECK(FLAG_manual_evacuation_candidates_selection);
page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
PagedSpace* space = static_cast<PagedSpace*>(page->owner());
+ DCHECK_NOT_NULL(space);
Address top = space->top();
Address limit = space->limit();
if (top < limit && Page::FromAllocationAreaAddress(top) == page) {
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 114a4639bd..35bd9225ea 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -31,8 +31,9 @@ void CheckInvariantsOfAbortedPage(Page* page) {
CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
-void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
- Page* page) {
+void CheckAllObjectsOnPage(
+ std::vector<Handle<FixedArray>>& handles, // NOLINT(runtime/references)
+ Page* page) {
for (Handle<FixedArray> fixed_array : handles) {
CHECK(Page::FromHeapObject(*fixed_array) == page);
}
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 445853bf9c..e4dbee2210 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -56,7 +56,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/transitions.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
@@ -604,8 +604,8 @@ TEST(BytecodeArray) {
if (FLAG_never_compact) return;
static const uint8_t kRawBytes[] = {0xC3, 0x7E, 0xA5, 0x5A};
static const int kRawBytesSize = sizeof(kRawBytes);
- static const int kFrameSize = 32;
- static const int kParameterCount = 2;
+ static const int32_t kFrameSize = 32;
+ static const int32_t kParameterCount = 2;
ManualGCScope manual_gc_scope;
FLAG_manual_evacuation_candidates_selection = true;
@@ -666,8 +666,8 @@ TEST(BytecodeArray) {
TEST(BytecodeArrayAging) {
static const uint8_t kRawBytes[] = {0xC3, 0x7E, 0xA5, 0x5A};
static const int kRawBytesSize = sizeof(kRawBytes);
- static const int kFrameSize = 32;
- static const int kParameterCount = 2;
+ static const int32_t kFrameSize = 32;
+ static const int32_t kParameterCount = 2;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
@@ -1068,9 +1068,9 @@ TEST(StringAllocation) {
static int ObjectsFoundInHeap(Heap* heap, Handle<Object> objs[], int size) {
// Count the number of objects found in the heap.
int found_count = 0;
- HeapIterator iterator(heap);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(heap);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
for (int i = 0; i < size; i++) {
if (*objs[i] == obj) {
found_count++;
@@ -1510,8 +1510,8 @@ TEST(TestSizeOfRegExpCode) {
LocalContext context;
// Adjust source below and this check to match
- // RegExpImple::kRegExpTooLargeToOptimize.
- CHECK_EQ(i::RegExpImpl::kRegExpTooLargeToOptimize, 20 * KB);
+ // RegExp::kRegExpTooLargeToOptimize.
+ CHECK_EQ(i::RegExp::kRegExpTooLargeToOptimize, 20 * KB);
// Compile a regexp that is much larger if we are using regexp optimizations.
CompileRun(
@@ -1829,13 +1829,13 @@ TEST(MutableHeapNumberAlignment) {
}
}
-TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
+TEST(TestSizeOfObjectsVsHeapObjectIteratorPrecision) {
CcTest::InitializeVM();
- HeapIterator iterator(CcTest::heap());
+ HeapObjectIterator iterator(CcTest::heap());
intptr_t size_of_objects_1 = CcTest::heap()->SizeOfObjects();
intptr_t size_of_objects_2 = 0;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsFreeSpace()) {
size_of_objects_2 += obj.Size();
}
@@ -1948,9 +1948,9 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
static int NumberOfGlobalObjects() {
int count = 0;
- HeapIterator iterator(CcTest::heap());
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(CcTest::heap());
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsJSGlobalObject()) count++;
}
return count;
@@ -3444,10 +3444,10 @@ void DetailedErrorStackTraceTest(const char* src,
Isolate* isolate = CcTest::i_isolate();
Handle<Name> key = isolate->factory()->stack_trace_symbol();
- Handle<FrameArray> stack_trace(Handle<FrameArray>::cast(
+ Handle<FixedArray> stack_trace(Handle<FixedArray>::cast(
Object::GetProperty(isolate, exception, key).ToHandleChecked()));
- test(stack_trace);
+ test(GetFrameArrayFromStackTrace(isolate, stack_trace));
}
// * Test interpreted function error
@@ -4796,11 +4796,11 @@ HEAP_TEST(Regress538257) {
FLAG_manual_evacuation_candidates_selection = true;
v8::Isolate::CreateParams create_params;
// Set heap limits.
- create_params.constraints.set_max_semi_space_size_in_kb(1024);
+ create_params.constraints.set_max_young_generation_size_in_bytes(3 * MB);
#ifdef DEBUG
- create_params.constraints.set_max_old_space_size(20);
+ create_params.constraints.set_max_old_generation_size_in_bytes(20 * MB);
#else
- create_params.constraints.set_max_old_space_size(6);
+ create_params.constraints.set_max_old_generation_size_in_bytes(6 * MB);
#endif
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -4864,13 +4864,14 @@ TEST(Regress507979) {
CHECK(Heap::InYoungGeneration(*o1));
CHECK(Heap::InYoungGeneration(*o2));
- HeapIterator it(isolate->heap(), i::HeapIterator::kFilterUnreachable);
+ HeapObjectIterator it(isolate->heap(),
+ i::HeapObjectIterator::kFilterUnreachable);
// Replace parts of an object placed before a live object with a filler. This
// way the filler object shares the mark bits with the following live object.
o1->Shrink(isolate, kFixedArrayLen - 1);
- for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
// Let's not optimize the loop away.
CHECK_NE(obj.address(), kNullAddress);
}
@@ -4924,7 +4925,7 @@ TEST(Regress388880) {
// Now everything is set up for crashing in JSObject::MigrateFastToFast()
// when it calls heap->AdjustLiveBytes(...).
- JSObject::MigrateToMap(o, map2);
+ JSObject::MigrateToMap(isolate, o, map2);
}
@@ -5281,8 +5282,8 @@ TEST(ScriptIterator) {
int script_count = 0;
{
- HeapIterator it(heap);
- for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
+ HeapObjectIterator it(heap);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (obj.IsScript()) script_count++;
}
}
@@ -5311,8 +5312,8 @@ TEST(SharedFunctionInfoIterator) {
int sfi_count = 0;
{
- HeapIterator it(heap);
- for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
+ HeapObjectIterator it(heap);
+ for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (!obj.IsSharedFunctionInfo()) continue;
sfi_count++;
}
@@ -5924,7 +5925,7 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
// TODO(hpayer): Update the test as soon as we have a tenure limit for LO.
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
- CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
+ CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
CHECK(chunk->IsFlagSet(MemoryChunk::LARGE_PAGE));
CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
@@ -5936,7 +5937,7 @@ TEST(YoungGenerationLargeObjectAllocationScavenge) {
// After the first young generation GC array_small will be in the old
// generation large object space.
chunk = MemoryChunk::FromHeapObject(*array_small);
- CHECK_EQ(LO_SPACE, chunk->owner()->identity());
+ CHECK_EQ(LO_SPACE, chunk->owner_identity());
CHECK(!chunk->InYoungGeneration());
CcTest::CollectAllAvailableGarbage();
@@ -5954,7 +5955,7 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
// TODO(hpayer): Update the test as soon as we have a tenure limit for LO.
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(200000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
- CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
+ CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
CHECK(chunk->IsFlagSet(MemoryChunk::LARGE_PAGE));
CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
@@ -5966,7 +5967,7 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
// After the first full GC array_small will be in the old generation
// large object space.
chunk = MemoryChunk::FromHeapObject(*array_small);
- CHECK_EQ(LO_SPACE, chunk->owner()->identity());
+ CHECK_EQ(LO_SPACE, chunk->owner_identity());
CHECK(!chunk->InYoungGeneration());
CcTest::CollectAllAvailableGarbage();
@@ -5986,7 +5987,7 @@ TEST(YoungGenerationLargeObjectAllocationReleaseScavenger) {
for (int i = 0; i < 10; i++) {
Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array_small);
- CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
+ CHECK_EQ(NEW_LO_SPACE, chunk->owner_identity());
CHECK(chunk->IsFlagSet(MemoryChunk::TO_PAGE));
}
}
@@ -6009,7 +6010,7 @@ TEST(UncommitUnusedLargeObjectMemory) {
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(200000, AllocationType::kOld);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array);
- CHECK(chunk->owner()->identity() == LO_SPACE);
+ CHECK(chunk->owner_identity() == LO_SPACE);
intptr_t size_before = array->Size();
size_t committed_memory_before = chunk->CommittedPhysicalMemory();
@@ -6033,7 +6034,7 @@ TEST(RememberedSetRemoveRange) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(
Page::kPageSize / kTaggedSize, AllocationType::kOld);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*array);
- CHECK(chunk->owner()->identity() == LO_SPACE);
+ CHECK(chunk->owner_identity() == LO_SPACE);
Address start = array->address();
// Maps slot to boolean indicator of whether the slot should be in the set.
std::map<Address, bool> slots;
@@ -6189,7 +6190,7 @@ HEAP_TEST(Regress5831) {
// Ensure it's not in large object space.
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*code);
- CHECK(chunk->owner()->identity() != LO_SPACE);
+ CHECK(chunk->owner_identity() != LO_SPACE);
CHECK(chunk->NeverEvacuate());
}
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index bac98c8a26..c88cf1f3ba 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -345,7 +345,7 @@ HEAP_TEST(InvalidatedSlotsFastToSlow) {
Handle<Map> map(obj->map(), isolate);
Handle<Map> normalized_map =
Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES, "testing");
- JSObject::MigrateToMap(obj, normalized_map);
+ JSObject::MigrateToMap(isolate, obj, normalized_map);
}
CcTest::CollectGarbage(i::NEW_SPACE);
CcTest::CollectGarbage(i::OLD_SPACE);
diff --git a/deps/v8/test/cctest/heap/test-iterators.cc b/deps/v8/test/cctest/heap/test-iterators.cc
index 9e39f7ca47..f2c1f17187 100644
--- a/deps/v8/test/cctest/heap/test-iterators.cc
+++ b/deps/v8/test/cctest/heap/test-iterators.cc
@@ -17,17 +17,17 @@ namespace v8 {
namespace internal {
namespace heap {
-TEST(HeapIteratorNullPastEnd) {
- HeapIterator iterator(CcTest::heap());
- while (!iterator.next().is_null()) {
+TEST(HeapObjectIteratorNullPastEnd) {
+ HeapObjectIterator iterator(CcTest::heap());
+ while (!iterator.Next().is_null()) {
}
for (int i = 0; i < 20; i++) {
- CHECK(iterator.next().is_null());
+ CHECK(iterator.Next().is_null());
}
}
-TEST(ReadOnlyHeapIteratorNullPastEnd) {
- ReadOnlyHeapIterator iterator(CcTest::heap()->read_only_heap());
+TEST(ReadOnlyHeapObjectIteratorNullPastEnd) {
+ ReadOnlyHeapObjectIterator iterator(CcTest::read_only_heap());
while (!iterator.Next().is_null()) {
}
for (int i = 0; i < 20; i++) {
@@ -35,8 +35,8 @@ TEST(ReadOnlyHeapIteratorNullPastEnd) {
}
}
-TEST(CombinedHeapIteratorNullPastEnd) {
- CombinedHeapIterator iterator(CcTest::heap());
+TEST(CombinedHeapObjectIteratorNullPastEnd) {
+ CombinedHeapObjectIterator iterator(CcTest::heap());
while (!iterator.Next().is_null()) {
}
for (int i = 0; i < 20; i++) {
@@ -51,11 +51,11 @@ Object CreateWritableObject() {
}
} // namespace
-TEST(ReadOnlyHeapIterator) {
+TEST(ReadOnlyHeapObjectIterator) {
CcTest::InitializeVM();
HandleScope handle_scope(CcTest::i_isolate());
const Object sample_object = CreateWritableObject();
- ReadOnlyHeapIterator iterator(CcTest::read_only_heap());
+ ReadOnlyHeapObjectIterator iterator(CcTest::read_only_heap());
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
@@ -65,15 +65,15 @@ TEST(ReadOnlyHeapIterator) {
}
}
-TEST(HeapIterator) {
+TEST(HeapObjectIterator) {
CcTest::InitializeVM();
HandleScope handle_scope(CcTest::i_isolate());
const Object sample_object = CreateWritableObject();
- HeapIterator iterator(CcTest::heap());
+ HeapObjectIterator iterator(CcTest::heap());
bool seen_sample_object = false;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
CHECK(!ReadOnlyHeap::Contains(obj));
CHECK(CcTest::heap()->Contains(obj));
if (sample_object == obj) seen_sample_object = true;
@@ -81,11 +81,11 @@ TEST(HeapIterator) {
CHECK(seen_sample_object);
}
-TEST(CombinedHeapIterator) {
+TEST(CombinedHeapObjectIterator) {
CcTest::InitializeVM();
HandleScope handle_scope(CcTest::i_isolate());
const Object sample_object = CreateWritableObject();
- CombinedHeapIterator iterator(CcTest::heap());
+ CombinedHeapObjectIterator iterator(CcTest::heap());
bool seen_sample_object = false;
for (HeapObject obj = iterator.Next(); !obj.is_null();
@@ -96,6 +96,24 @@ TEST(CombinedHeapIterator) {
CHECK(seen_sample_object);
}
+TEST(PagedSpaceIterator) {
+ Heap* const heap = CcTest::heap();
+ PagedSpaceIterator iterator(heap);
+ CHECK_EQ(iterator.Next(), reinterpret_cast<PagedSpace*>(heap->old_space()));
+ CHECK_EQ(iterator.Next(), reinterpret_cast<PagedSpace*>(heap->code_space()));
+ CHECK_EQ(iterator.Next(), reinterpret_cast<PagedSpace*>(heap->map_space()));
+ for (int i = 0; i < 20; i++) {
+ CHECK_NULL(iterator.Next());
+ }
+}
+
+TEST(SpaceIterator) {
+ auto* const read_only_space = CcTest::read_only_heap()->read_only_space();
+ for (SpaceIterator it(CcTest::heap()); it.HasNext();) {
+ CHECK_NE(it.Next(), reinterpret_cast<Space*>(read_only_space));
+ }
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index f629bc1171..df6211826e 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -43,7 +43,8 @@ v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
return isolate;
}
-Page* FindLastPageInNewSpace(std::vector<Handle<FixedArray>>& handles) {
+Page* FindLastPageInNewSpace(
+ std::vector<Handle<FixedArray>>& handles) { // NOLINT(runtime/references)
for (auto rit = handles.rbegin(); rit != handles.rend(); ++rit) {
// One deref gets the Handle, the second deref gets the FixedArray.
Page* candidate = Page::FromHeapObject(**rit);
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 5e6392bf59..a99f7abbb5 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -31,6 +31,7 @@
#include "src/base/platform/platform.h"
#include "src/heap/factory.h"
#include "src/heap/spaces-inl.h"
+#include "src/heap/spaces.h"
#include "src/objects/free-space.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot.h"
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index 18dca8edb8..b3ec8cbf5a 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -16,24 +16,13 @@ namespace v8 {
namespace internal {
namespace heap {
-Handle<FeedbackVector> CreateFeedbackVectorForTest(
- v8::Isolate* isolate, Factory* factory,
- AllocationType allocation = AllocationType::kYoung) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- v8::Local<v8::Script> script =
- v8::Script::Compile(isolate->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate, "function foo() {}",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .ToLocalChecked();
- Handle<Object> obj = v8::Utils::OpenHandle(*script);
- Handle<SharedFunctionInfo> shared_function =
- Handle<SharedFunctionInfo>(JSFunction::cast(*obj).shared(), i_isolate);
- Handle<ClosureFeedbackCellArray> closure_cell_array =
- ClosureFeedbackCellArray::New(i_isolate, shared_function);
- Handle<FeedbackVector> fv = factory->NewFeedbackVector(
- shared_function, closure_cell_array, allocation);
- return fv;
+Handle<LoadHandler> CreateLoadHandlerForTest(
+ Factory* factory, AllocationType allocation = AllocationType::kYoung) {
+ Handle<LoadHandler> result = factory->NewLoadHandler(1, allocation);
+ result->set_smi_handler(Smi::kZero);
+ result->set_validity_cell(Smi::kZero);
+ result->set_data1(MaybeObject::FromSmi(Smi::zero()));
+ return result;
}
TEST(WeakReferencesBasic) {
@@ -42,15 +31,14 @@ TEST(WeakReferencesBasic) {
Factory* factory = isolate->factory();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv =
- CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InYoungGeneration(*fv));
+ Handle<LoadHandler> lh = CreateLoadHandlerForTest(factory);
+ CHECK(Heap::InYoungGeneration(*lh));
- MaybeObject code_object = fv->optimized_code_weak_or_smi();
+ MaybeObject code_object = lh->data1();
CHECK(code_object->IsSmi());
CcTest::CollectAllGarbage();
- CHECK(Heap::InYoungGeneration(*fv));
- CHECK_EQ(code_object, fv->optimized_code_weak_or_smi());
+ CHECK(Heap::InYoungGeneration(*lh));
+ CHECK_EQ(code_object, lh->data1());
{
HandleScope inner_scope(isolate);
@@ -63,21 +51,19 @@ TEST(WeakReferencesBasic) {
Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
CHECK(code->IsCode());
- fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code));
+ lh->set_data1(HeapObjectReference::Weak(*code));
HeapObject code_heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(
- &code_heap_object));
+ CHECK(lh->data1()->GetHeapObjectIfWeak(&code_heap_object));
CHECK_EQ(*code, code_heap_object);
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(
- &code_heap_object));
+ CHECK(lh->data1()->GetHeapObjectIfWeak(&code_heap_object));
CHECK_EQ(*code, code_heap_object);
} // code will go out of scope.
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
+ CHECK(lh->data1()->IsCleared());
}
TEST(WeakReferencesOldToOld) {
@@ -91,15 +77,15 @@ TEST(WeakReferencesOldToOld) {
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv = CreateFeedbackVectorForTest(
- CcTest::isolate(), factory, AllocationType::kOld);
- CHECK(heap->InOldSpace(*fv));
+ Handle<LoadHandler> lh =
+ CreateLoadHandlerForTest(factory, AllocationType::kOld);
+ CHECK(heap->InOldSpace(*lh));
- // Create a new FixedArray which the FeedbackVector will point to.
+ // Create a new FixedArray which the LoadHandler will point to.
Handle<FixedArray> fixed_array =
factory->NewFixedArray(1, AllocationType::kOld);
CHECK(heap->InOldSpace(*fixed_array));
- fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
+ lh->set_data1(HeapObjectReference::Weak(*fixed_array));
Page* page_before_gc = Page::FromHeapObject(*fixed_array);
heap::ForceEvacuationCandidate(page_before_gc);
@@ -107,7 +93,7 @@ TEST(WeakReferencesOldToOld) {
CHECK(heap->InOldSpace(*fixed_array));
HeapObject heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
+ CHECK(lh->data1()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -120,19 +106,19 @@ TEST(WeakReferencesOldToNew) {
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv = CreateFeedbackVectorForTest(
- CcTest::isolate(), factory, AllocationType::kOld);
- CHECK(heap->InOldSpace(*fv));
+ Handle<LoadHandler> lh =
+ CreateLoadHandlerForTest(factory, AllocationType::kOld);
+ CHECK(heap->InOldSpace(*lh));
- // Create a new FixedArray which the FeedbackVector will point to.
+ // Create a new FixedArray which the LoadHandler will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
CHECK(Heap::InYoungGeneration(*fixed_array));
- fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
+ lh->set_data1(HeapObjectReference::Weak(*fixed_array));
CcTest::CollectAllGarbage();
HeapObject heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
+ CHECK(lh->data1()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -145,19 +131,19 @@ TEST(WeakReferencesOldToNewScavenged) {
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv = CreateFeedbackVectorForTest(
- CcTest::isolate(), factory, AllocationType::kOld);
- CHECK(heap->InOldSpace(*fv));
+ Handle<LoadHandler> lh =
+ CreateLoadHandlerForTest(factory, AllocationType::kOld);
+ CHECK(heap->InOldSpace(*lh));
- // Create a new FixedArray which the FeedbackVector will point to.
+ // Create a new FixedArray which the LoadHandler will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
CHECK(Heap::InYoungGeneration(*fixed_array));
- fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
+ lh->set_data1(HeapObjectReference::Weak(*fixed_array));
CcTest::CollectGarbage(NEW_SPACE);
HeapObject heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
+ CHECK(lh->data1()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -172,14 +158,13 @@ TEST(WeakReferencesOldToCleared) {
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv = CreateFeedbackVectorForTest(
- CcTest::isolate(), factory, AllocationType::kOld);
- CHECK(heap->InOldSpace(*fv));
- fv->set_optimized_code_weak_or_smi(
- HeapObjectReference::ClearedValue(isolate));
+ Handle<LoadHandler> lh =
+ CreateLoadHandlerForTest(factory, AllocationType::kOld);
+ CHECK(heap->InOldSpace(*lh));
+ lh->set_data1(HeapObjectReference::ClearedValue(isolate));
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
+ CHECK(lh->data1()->IsCleared());
}
TEST(ObjectMovesBeforeClearingWeakField) {
@@ -193,33 +178,32 @@ TEST(ObjectMovesBeforeClearingWeakField) {
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv =
- CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InYoungGeneration(*fv));
- FeedbackVector fv_location = *fv;
+ Handle<LoadHandler> lh = CreateLoadHandlerForTest(factory);
+ CHECK(Heap::InYoungGeneration(*lh));
+ LoadHandler lh_location = *lh;
{
HandleScope inner_scope(isolate);
- // Create a new FixedArray which the FeedbackVector will point to.
+ // Create a new FixedArray which the LoadHandler will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
CHECK(Heap::InYoungGeneration(*fixed_array));
- fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
+ lh->set_data1(HeapObjectReference::Weak(*fixed_array));
// inner_scope will go out of scope, so when marking the next time,
// *fixed_array will stay white.
}
- // Do marking steps; this will store *fv into the list for later processing
+ // Do marking steps; this will store *lh into the list for later processing
// (since it points to a white object).
SimulateIncrementalMarking(heap, true);
- // Scavenger will move *fv.
+ // Scavenger will move *lh.
CcTest::CollectGarbage(NEW_SPACE);
- FeedbackVector new_fv_location = *fv;
- CHECK_NE(fv_location, new_fv_location);
- CHECK(fv->optimized_code_weak_or_smi()->IsWeak());
+ LoadHandler new_lh_location = *lh;
+ CHECK_NE(lh_location, new_lh_location);
+ CHECK(lh->data1()->IsWeak());
- // Now we try to clear *fv.
+ // Now we try to clear *lh.
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
+ CHECK(lh->data1()->IsCleared());
}
TEST(ObjectWithWeakFieldDies) {
@@ -234,26 +218,24 @@ TEST(ObjectWithWeakFieldDies) {
{
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv =
- CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InYoungGeneration(*fv));
+ Handle<LoadHandler> lh = CreateLoadHandlerForTest(factory);
+ CHECK(Heap::InYoungGeneration(*lh));
{
HandleScope inner_scope(isolate);
- // Create a new FixedArray which the FeedbackVector will point to.
+ // Create a new FixedArray which the LoadHandler will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
CHECK(Heap::InYoungGeneration(*fixed_array));
- fv->set_optimized_code_weak_or_smi(
- HeapObjectReference::Weak(*fixed_array));
+ lh->set_data1(HeapObjectReference::Weak(*fixed_array));
// inner_scope will go out of scope, so when marking the next time,
// *fixed_array will stay white.
}
- // Do marking steps; this will store *fv into the list for later processing
+ // Do marking steps; this will store *lh into the list for later processing
// (since it points to a white object).
SimulateIncrementalMarking(heap, true);
} // outer_scope goes out of scope
- // fv will die
+ // lh will die
CcTest::CollectGarbage(NEW_SPACE);
// This used to crash when processing the dead weak reference.
@@ -267,22 +249,21 @@ TEST(ObjectWithWeakReferencePromoted) {
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv =
- CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InYoungGeneration(*fv));
+ Handle<LoadHandler> lh = CreateLoadHandlerForTest(factory);
+ CHECK(Heap::InYoungGeneration(*lh));
- // Create a new FixedArray which the FeedbackVector will point to.
+ // Create a new FixedArray which the LoadHandler will point to.
Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
CHECK(Heap::InYoungGeneration(*fixed_array));
- fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*fixed_array));
+ lh->set_data1(HeapObjectReference::Weak(*fixed_array));
CcTest::CollectGarbage(NEW_SPACE);
CcTest::CollectGarbage(NEW_SPACE);
- CHECK(heap->InOldSpace(*fv));
+ CHECK(heap->InOldSpace(*lh));
CHECK(heap->InOldSpace(*fixed_array));
HeapObject heap_object;
- CHECK(fv->optimized_code_weak_or_smi()->GetHeapObjectIfWeak(&heap_object));
+ CHECK(lh->data1()->GetHeapObjectIfWeak(&heap_object));
CHECK_EQ(heap_object, *fixed_array);
}
@@ -293,23 +274,21 @@ TEST(ObjectWithClearedWeakReferencePromoted) {
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv =
- CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InYoungGeneration(*fv));
+ Handle<LoadHandler> lh = CreateLoadHandlerForTest(factory);
+ CHECK(Heap::InYoungGeneration(*lh));
- fv->set_optimized_code_weak_or_smi(
- HeapObjectReference::ClearedValue(isolate));
+ lh->set_data1(HeapObjectReference::ClearedValue(isolate));
CcTest::CollectGarbage(NEW_SPACE);
- CHECK(Heap::InYoungGeneration(*fv));
- CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
+ CHECK(Heap::InYoungGeneration(*lh));
+ CHECK(lh->data1()->IsCleared());
CcTest::CollectGarbage(NEW_SPACE);
- CHECK(heap->InOldSpace(*fv));
- CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
+ CHECK(heap->InOldSpace(*lh));
+ CHECK(lh->data1()->IsCleared());
CcTest::CollectAllGarbage();
- CHECK(fv->optimized_code_weak_or_smi()->IsCleared());
+ CHECK(lh->data1()->IsCleared());
}
TEST(WeakReferenceWriteBarrier) {
@@ -324,32 +303,29 @@ TEST(WeakReferenceWriteBarrier) {
Heap* heap = isolate->heap();
HandleScope outer_scope(isolate);
- Handle<FeedbackVector> fv =
- CreateFeedbackVectorForTest(CcTest::isolate(), factory);
- CHECK(Heap::InYoungGeneration(*fv));
+ Handle<LoadHandler> lh = CreateLoadHandlerForTest(factory);
+ CHECK(Heap::InYoungGeneration(*lh));
{
HandleScope inner_scope(isolate);
- // Create a new FixedArray which the FeedbackVector will point to.
+ // Create a new FixedArray which the LoadHandler will point to.
Handle<FixedArray> fixed_array1 = factory->NewFixedArray(1);
CHECK(Heap::InYoungGeneration(*fixed_array1));
- fv->set_optimized_code_weak_or_smi(
- HeapObjectReference::Weak(*fixed_array1));
+ lh->set_data1(HeapObjectReference::Weak(*fixed_array1));
SimulateIncrementalMarking(heap, true);
Handle<FixedArray> fixed_array2 = factory->NewFixedArray(1);
CHECK(Heap::InYoungGeneration(*fixed_array2));
// This write will trigger the write barrier.
- fv->set_optimized_code_weak_or_smi(
- HeapObjectReference::Weak(*fixed_array2));
+ lh->set_data1(HeapObjectReference::Weak(*fixed_array2));
}
CcTest::CollectAllGarbage();
// Check that the write barrier treated the weak reference as strong.
- CHECK(fv->optimized_code_weak_or_smi()->IsWeak());
+ CHECK(lh->data1()->IsWeak());
}
TEST(EmptyWeakArray) {
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 957bcff1db..370c5d8131 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -107,9 +107,10 @@ i::Handle<i::BytecodeArray>
BytecodeExpectationsPrinter::GetBytecodeArrayForModule(
v8::Local<v8::Module> module) const {
i::Handle<i::Module> i_module = v8::Utils::OpenHandle(*module);
- return i::handle(
- SharedFunctionInfo::cast(i_module->code()).GetBytecodeArray(),
- i_isolate());
+ return i::handle(SharedFunctionInfo::cast(
+ Handle<i::SourceTextModule>::cast(i_module)->code())
+ .GetBytecodeArray(),
+ i_isolate());
}
i::Handle<i::BytecodeArray>
@@ -306,7 +307,7 @@ void BytecodeExpectationsPrinter::PrintConstant(
void BytecodeExpectationsPrinter::PrintFrameSize(
std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
- int frame_size = bytecode_array->frame_size();
+ int32_t frame_size = bytecode_array->frame_size();
DCHECK(IsAligned(frame_size, kSystemPointerSize));
stream << "frame size: " << frame_size / kSystemPointerSize
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index 06329940ff..dc51e5fb7a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -70,7 +70,8 @@ class BytecodeExpectationsPrinter final {
const BytecodeArrayIterator& bytecode_iterator,
int parameter_count) const;
void PrintSourcePosition(std::ostream& stream, // NOLINT
- SourcePositionTableIterator& source_iterator,
+ SourcePositionTableIterator&
+ source_iterator, // NOLINT(runtime/references)
int bytecode_offset) const;
void PrintV8String(std::ostream& stream, // NOLINT
i::String string) const;
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index adbe5c9b92..ce579699d8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -212,7 +212,7 @@ snippet: "
async function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 20
+frame size: 21
parameter count: 1
bytecode array length: 372
bytecodes: [
@@ -298,15 +298,15 @@ bytecodes: [
B(Star), R(17),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(18),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(18),
- B(LdaConstant), U8(12),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(19),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
+ B(LdaConstant), U8(12),
+ B(Star), R(20),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(19), U8(2),
B(Throw),
- B(Mov), R(context), R(18),
B(CallProperty0), R(17), R(9), U8(15),
B(JumpIfJSReceiver), U8(21),
B(Star), R(19),
@@ -401,7 +401,7 @@ handlers: [
[20, 318, 326],
[23, 282, 284],
[93, 180, 188],
- [234, 247, 249],
+ [214, 247, 249],
]
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index 43515711db..2d44b972a0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -10,7 +10,7 @@ snippet: "
var x, a = [0,1,2,3];
[x] = a;
"
-frame size: 15
+frame size: 16
parameter count: 1
bytecode array length: 178
bytecodes: [
@@ -64,15 +64,15 @@ bytecodes: [
B(Star), R(12),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(13),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(13),
- B(LdaConstant), U8(6),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
- B(Mov), R(context), R(13),
B(CallProperty0), R(12), R(4), U8(15),
B(JumpIfJSReceiver), U8(21),
B(Star), R(14),
@@ -105,7 +105,7 @@ constant pool: [
]
handlers: [
[44, 86, 94],
- [140, 153, 155],
+ [120, 153, 155],
]
---
@@ -113,7 +113,7 @@ snippet: "
var x, y, a = [0,1,2,3];
[,x,...y] = a;
"
-frame size: 16
+frame size: 17
parameter count: 1
bytecode array length: 264
bytecodes: [
@@ -201,15 +201,15 @@ bytecodes: [
B(Star), R(13),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(14),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(14),
- B(LdaConstant), U8(6),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(14),
B(CallProperty0), R(13), R(5), U8(25),
B(JumpIfJSReceiver), U8(21),
B(Star), R(15),
@@ -242,7 +242,7 @@ constant pool: [
]
handlers: [
[44, 172, 180],
- [226, 239, 241],
+ [206, 239, 241],
]
---
@@ -250,7 +250,7 @@ snippet: "
var x={}, y, a = [0];
[x.foo,y=4] = a;
"
-frame size: 17
+frame size: 18
parameter count: 1
bytecode array length: 229
bytecodes: [
@@ -325,15 +325,15 @@ bytecodes: [
B(Star), R(14),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(15),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(15),
- B(LdaConstant), U8(7),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(16),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+ B(LdaConstant), U8(7),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
- B(Mov), R(context), R(15),
B(CallProperty0), R(14), R(5), U8(19),
B(JumpIfJSReceiver), U8(21),
B(Star), R(16),
@@ -367,7 +367,7 @@ constant pool: [
]
handlers: [
[47, 137, 145],
- [191, 204, 206],
+ [171, 204, 206],
]
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 82d51820bb..1cafe42d28 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -92,15 +92,15 @@ bytecodes: [
B(Star), R(16),
B(JumpIfUndefined), U8(88),
B(JumpIfNull), U8(86),
+ B(Mov), R(context), R(17),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(17),
- B(LdaConstant), U8(9),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(18),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+ B(LdaConstant), U8(9),
+ B(Star), R(19),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
B(Throw),
- B(Mov), R(context), R(17),
B(CallProperty0), R(16), R(6), U8(19),
B(Star), R(19),
B(Mov), R(0), R(18),
@@ -171,7 +171,7 @@ constant pool: [
handlers: [
[20, 297, 297],
[77, 157, 165],
- [211, 260, 262],
+ [191, 260, 262],
]
---
@@ -261,15 +261,15 @@ bytecodes: [
B(Star), R(16),
B(JumpIfUndefined), U8(88),
B(JumpIfNull), U8(86),
+ B(Mov), R(context), R(17),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(17),
- B(LdaConstant), U8(9),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(18),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+ B(LdaConstant), U8(9),
+ B(Star), R(19),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
B(Throw),
- B(Mov), R(context), R(17),
B(CallProperty0), R(16), R(6), U8(19),
B(Star), R(19),
B(Mov), R(0), R(18),
@@ -348,7 +348,7 @@ constant pool: [
handlers: [
[20, 318, 318],
[77, 161, 169],
- [215, 264, 266],
+ [195, 264, 266],
]
---
@@ -446,15 +446,15 @@ bytecodes: [
B(Star), R(16),
B(JumpIfUndefined), U8(88),
B(JumpIfNull), U8(86),
+ B(Mov), R(context), R(17),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(17),
- B(LdaConstant), U8(9),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(18),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+ B(LdaConstant), U8(9),
+ B(Star), R(19),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
B(Throw),
- B(Mov), R(context), R(17),
B(CallProperty0), R(16), R(6), U8(21),
B(Star), R(19),
B(Mov), R(0), R(18),
@@ -525,7 +525,7 @@ constant pool: [
handlers: [
[20, 313, 313],
[77, 173, 181],
- [227, 276, 278],
+ [207, 276, 278],
]
---
@@ -536,7 +536,7 @@ snippet: "
}
f();
"
-frame size: 16
+frame size: 17
parameter count: 1
bytecode array length: 261
bytecodes: [
@@ -598,15 +598,15 @@ bytecodes: [
B(Star), R(13),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(14),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(14),
- B(LdaConstant), U8(8),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+ B(LdaConstant), U8(8),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(14),
B(CallProperty0), R(13), R(4), U8(20),
B(JumpIfJSReceiver), U8(21),
B(Star), R(15),
@@ -670,6 +670,6 @@ constant pool: [
handlers: [
[16, 233, 233],
[59, 112, 120],
- [166, 179, 181],
+ [146, 179, 181],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index dffa8f577b..17e1d59343 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -9,7 +9,7 @@ wrap: yes
snippet: "
for (var p of [0, 1, 2]) {}
"
-frame size: 14
+frame size: 15
parameter count: 1
bytecode array length: 173
bytecodes: [
@@ -60,15 +60,15 @@ bytecodes: [
B(Star), R(11),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(12),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(12),
- B(LdaConstant), U8(6),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
B(CallProperty0), R(11), R(3), U8(15),
B(JumpIfJSReceiver), U8(21),
B(Star), R(13),
@@ -101,7 +101,7 @@ constant pool: [
]
handlers: [
[38, 81, 89],
- [135, 148, 150],
+ [115, 148, 150],
]
---
@@ -109,7 +109,7 @@ snippet: "
var x = 'potatoes';
for (var p of x) { return p; }
"
-frame size: 15
+frame size: 16
parameter count: 1
bytecode array length: 184
bytecodes: [
@@ -163,15 +163,15 @@ bytecodes: [
B(Star), R(12),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(13),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(13),
- B(LdaConstant), U8(6),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
- B(Mov), R(context), R(13),
B(CallProperty0), R(12), R(4), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(14),
@@ -208,7 +208,7 @@ constant pool: [
]
handlers: [
[39, 86, 94],
- [140, 153, 155],
+ [120, 153, 155],
]
---
@@ -218,7 +218,7 @@ snippet: "
if (x == 20) break;
}
"
-frame size: 14
+frame size: 15
parameter count: 1
bytecode array length: 189
bytecodes: [
@@ -276,15 +276,15 @@ bytecodes: [
B(Star), R(11),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(12),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(12),
- B(LdaConstant), U8(6),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
B(CallProperty0), R(11), R(3), U8(17),
B(JumpIfJSReceiver), U8(21),
B(Star), R(13),
@@ -317,7 +317,7 @@ constant pool: [
]
handlers: [
[38, 97, 105],
- [151, 164, 166],
+ [131, 164, 166],
]
---
@@ -325,7 +325,7 @@ snippet: "
var x = { 'a': 1, 'b': 2 };
for (x['a'] of [1,2,3]) { return x['a']; }
"
-frame size: 14
+frame size: 15
parameter count: 1
bytecode array length: 195
bytecodes: [
@@ -382,15 +382,15 @@ bytecodes: [
B(Star), R(11),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(12),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(12),
- B(LdaConstant), U8(8),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(LdaConstant), U8(8),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
B(CallProperty0), R(11), R(2), U8(20),
B(JumpIfJSReceiver), U8(21),
B(Star), R(13),
@@ -429,6 +429,6 @@ constant pool: [
]
handlers: [
[44, 97, 105],
- [151, 164, 166],
+ [131, 164, 166],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index 1752a3124e..1b10e1bf6e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -13,7 +13,7 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 16
+frame size: 17
parameter count: 2
bytecode array length: 173
bytecodes: [
@@ -64,15 +64,15 @@ bytecodes: [
B(Star), R(13),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(14),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(14),
- B(LdaConstant), U8(5),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+ B(LdaConstant), U8(5),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(14),
B(CallProperty0), R(13), R(5), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(15),
@@ -104,7 +104,7 @@ constant pool: [
]
handlers: [
[35, 81, 89],
- [135, 148, 150],
+ [115, 148, 150],
]
---
@@ -201,15 +201,15 @@ bytecodes: [
B(Star), R(14),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(15),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(15),
- B(LdaConstant), U8(10),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(16),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+ B(LdaConstant), U8(10),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
- B(Mov), R(context), R(15),
B(CallProperty0), R(14), R(5), U8(18),
B(JumpIfJSReceiver), U8(21),
B(Star), R(16),
@@ -247,7 +247,7 @@ constant pool: [
]
handlers: [
[65, 160, 168],
- [214, 227, 229],
+ [194, 227, 229],
]
---
@@ -257,7 +257,7 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 15
+frame size: 16
parameter count: 2
bytecode array length: 190
bytecodes: [
@@ -316,15 +316,15 @@ bytecodes: [
B(Star), R(12),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(13),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(13),
- B(LdaConstant), U8(7),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(LdaConstant), U8(7),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
B(Throw),
- B(Mov), R(context), R(13),
B(CallProperty0), R(12), R(3), U8(16),
B(JumpIfJSReceiver), U8(21),
B(Star), R(14),
@@ -358,7 +358,7 @@ constant pool: [
]
handlers: [
[35, 98, 106],
- [152, 165, 167],
+ [132, 165, 167],
]
---
@@ -368,7 +368,7 @@ snippet: "
}
f([{ x: 0, y: 3 }, { x: 1, y: 9 }, { x: -12, y: 17 }]);
"
-frame size: 18
+frame size: 19
parameter count: 2
bytecode array length: 197
bytecodes: [
@@ -428,15 +428,15 @@ bytecodes: [
B(Star), R(15),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(16),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(16),
- B(LdaConstant), U8(7),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+ B(LdaConstant), U8(7),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
- B(Mov), R(context), R(16),
B(CallProperty0), R(15), R(7), U8(19),
B(JumpIfJSReceiver), U8(21),
B(Star), R(17),
@@ -470,7 +470,7 @@ constant pool: [
]
handlers: [
[35, 105, 113],
- [159, 172, 174],
+ [139, 172, 174],
]
---
@@ -480,7 +480,7 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 17
+frame size: 18
parameter count: 2
bytecode array length: 214
bytecodes: [
@@ -545,15 +545,15 @@ bytecodes: [
B(Star), R(14),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(15),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(15),
- B(LdaConstant), U8(8),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(16),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+ B(LdaConstant), U8(8),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
- B(Mov), R(context), R(15),
B(CallProperty0), R(14), R(6), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(16),
@@ -588,7 +588,7 @@ constant pool: [
]
handlers: [
[76, 122, 130],
- [176, 189, 191],
+ [156, 189, 191],
]
---
@@ -598,7 +598,7 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 16
+frame size: 17
parameter count: 2
bytecode array length: 258
bytecodes: [
@@ -677,15 +677,15 @@ bytecodes: [
B(Star), R(13),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(14),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(14),
- B(LdaConstant), U8(11),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+ B(LdaConstant), U8(11),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(14),
B(CallProperty0), R(13), R(5), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(15),
@@ -727,7 +727,7 @@ constant pool: [
]
handlers: [
[76, 160, 168],
- [214, 227, 229],
+ [194, 227, 229],
]
---
@@ -737,7 +737,7 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 18
+frame size: 19
parameter count: 2
bytecode array length: 228
bytecodes: [
@@ -793,15 +793,15 @@ bytecodes: [
B(Star), R(15),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(16),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(16),
- B(LdaConstant), U8(5),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(17),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+ B(LdaConstant), U8(5),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
B(Throw),
- B(Mov), R(context), R(16),
B(CallProperty0), R(15), R(7), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(17),
@@ -854,7 +854,7 @@ constant pool: [
handlers: [
[16, 200, 200],
[50, 96, 104],
- [150, 163, 165],
+ [130, 163, 165],
]
---
@@ -864,7 +864,7 @@ snippet: "
}
f([1, 2, 3]);
"
-frame size: 17
+frame size: 18
parameter count: 2
bytecode array length: 264
bytecodes: [
@@ -933,15 +933,15 @@ bytecodes: [
B(Star), R(14),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(15),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(15),
- B(LdaConstant), U8(6),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(16),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
+ B(LdaConstant), U8(6),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
B(Throw),
- B(Mov), R(context), R(15),
B(CallProperty0), R(14), R(6), U8(14),
B(JumpIfJSReceiver), U8(21),
B(Star), R(16),
@@ -995,6 +995,6 @@ constant pool: [
handlers: [
[20, 236, 236],
[54, 132, 140],
- [186, 199, 201],
+ [166, 199, 201],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index 996c15d2af..bfbd05cd31 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -98,7 +98,7 @@ snippet: "
function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 16
+frame size: 17
parameter count: 1
bytecode array length: 261
bytecodes: [
@@ -178,15 +178,15 @@ bytecodes: [
B(Star), R(13),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
+ B(Mov), R(context), R(14),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(158),
- B(Star), R(14),
- B(LdaConstant), U8(12),
+ B(Wide), B(LdaSmi), I16(159),
B(Star), R(15),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(14), U8(2),
+ B(LdaConstant), U8(12),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(15), U8(2),
B(Throw),
- B(Mov), R(context), R(14),
B(CallProperty0), R(13), R(5), U8(15),
B(JumpIfJSReceiver), U8(21),
B(Star), R(15),
@@ -229,7 +229,7 @@ constant pool: [
]
handlers: [
[79, 163, 171],
- [217, 230, 232],
+ [197, 230, 232],
]
---
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden
index e783d81376..5821a20069 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden
@@ -11,47 +11,57 @@ snippet: "
{
class A {
#a() { return 1; }
+ callA() { return this.#a(); }
}
- new A;
+ const a = new A;
+ a.callA();
}
"
-frame size: 7
+frame size: 9
parameter count: 1
-bytecode array length: 62
+bytecode array length: 80
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(2),
+ B(PushContext), R(3),
B(LdaTheHole),
- B(Star), R(6),
+ B(Star), R(7),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(3),
- B(LdaConstant), U8(1),
B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(5),
B(CreateClosure), U8(3), U8(1), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
- B(Star), R(4),
- B(Mov), R(5), R(1),
- B(LdaConstant), U8(4),
+ B(CreateClosure), U8(4), U8(2), U8(2),
+ B(Star), R(8),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+ B(Mov), R(6), R(2),
+ B(LdaConstant), U8(5),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(6), U8(1),
B(StaCurrentContextSlot), U8(5),
- B(PopContext), R(2),
- B(Mov), R(1), R(0),
- /* 78 S> */ B(Ldar), R(0),
- /* 78 E> */ B(Construct), R(0), R(0), U8(0), U8(0),
+ B(PopContext), R(3),
+ B(Mov), R(2), R(0),
+ /* 122 S> */ B(Ldar), R(0),
+ /* 122 E> */ B(Construct), R(0), R(0), U8(0), U8(0),
+ B(Star), R(1),
+ /* 133 S> */ B(LdaNamedProperty), R(1), U8(6), U8(2),
+ B(Star), R(3),
+ /* 133 E> */ B(CallProperty0), R(3), R(1), U8(4),
B(LdaUndefined),
- /* 87 S> */ B(Return),
+ /* 144 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callA"],
]
handlers: [
]
@@ -60,79 +70,90 @@ handlers: [
snippet: "
{
class D {
- #d() {}
+ #d() { return 1; }
+ callD() { return this.#d(); }
}
class E extends D {
- #e() {}
+ #e() { return 2; }
+ callE() { return this.callD() + this.#e(); }
}
- new D;
- new E;
+ const e = new E;
+ e.callE();
}
"
-frame size: 9
+frame size: 11
parameter count: 1
-bytecode array length: 121
+bytecode array length: 138
bytecodes: [
/* 30 E> */ B(StackCheck),
B(CreateBlockContext), U8(0),
- B(PushContext), R(4),
+ B(PushContext), R(5),
B(LdaTheHole),
- B(Star), R(8),
+ B(Star), R(9),
B(CreateClosure), U8(2), U8(0), U8(2),
- B(Star), R(5),
- B(LdaConstant), U8(1),
B(Star), R(6),
+ B(LdaConstant), U8(1),
+ B(Star), R(7),
B(CreateClosure), U8(3), U8(1), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(5), R(7),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(3),
- B(LdaConstant), U8(4),
+ B(CreateClosure), U8(4), U8(2), U8(2),
+ B(Star), R(10),
+ B(Mov), R(6), R(8),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(7), U8(4),
B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+ B(Mov), R(8), R(4),
+ B(LdaConstant), U8(5),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(8), U8(1),
B(StaCurrentContextSlot), U8(5),
- B(PopContext), R(4),
- B(Mov), R(3), R(0),
- /* 38 E> */ B(CreateBlockContext), U8(5),
- B(PushContext), R(4),
- /* 83 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
- B(Star), R(5),
- B(LdaConstant), U8(6),
+ B(PopContext), R(5),
+ B(Mov), R(4), R(0),
+ /* 38 E> */ B(CreateBlockContext), U8(6),
+ B(PushContext), R(5),
+ /* 128 E> */ B(CreateClosure), U8(8), U8(3), U8(2),
B(Star), R(6),
- B(CreateClosure), U8(8), U8(3), U8(2),
+ B(LdaConstant), U8(7),
+ B(Star), R(7),
+ B(CreateClosure), U8(9), U8(4), U8(2),
B(StaCurrentContextSlot), U8(4),
- B(Mov), R(5), R(7),
- B(Mov), R(3), R(8),
- B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
- B(Star), R(6),
- B(Mov), R(7), R(2),
- B(LdaConstant), U8(9),
+ B(CreateClosure), U8(10), U8(5), U8(2),
+ B(Star), R(10),
+ B(Mov), R(6), R(8),
+ B(Mov), R(4), R(9),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(7), U8(4),
B(Star), R(7),
- B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+ B(Mov), R(8), R(3),
+ B(LdaConstant), U8(11),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(8), U8(1),
B(StaCurrentContextSlot), U8(5),
- B(PopContext), R(4),
- B(Mov), R(2), R(1),
- /* 106 S> */ B(Ldar), R(3),
- /* 106 E> */ B(Construct), R(3), R(0), U8(0), U8(0),
- /* 115 S> */ B(Ldar), R(2),
- /* 115 E> */ B(Construct), R(2), R(0), U8(0), U8(2),
+ B(PopContext), R(5),
+ B(Mov), R(3), R(1),
+ /* 221 S> */ B(Ldar), R(1),
+ /* 221 E> */ B(Construct), R(1), R(0), U8(0), U8(0),
+ B(Star), R(2),
+ /* 232 S> */ B(LdaNamedProperty), R(2), U8(12), U8(2),
+ B(Star), R(5),
+ /* 232 E> */ B(CallProperty0), R(5), R(2), U8(4),
B(LdaUndefined),
- /* 124 S> */ B(Return),
+ /* 243 S> */ B(Return),
]
constant pool: [
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["D"],
SCOPE_INFO_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["E"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["callE"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index af18097284..746c554087 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
#include <cstring>
#include <fstream>
#include <memory>
+#include <sstream>
#include <vector>
#include "test/cctest/interpreter/bytecode-expectations-printer.h"
@@ -37,6 +39,7 @@ class ProgramOptions final {
read_raw_js_snippet_(false),
read_from_stdin_(false),
rebaseline_(false),
+ check_baseline_(false),
wrap_(true),
module_(false),
top_level_(false),
@@ -58,6 +61,8 @@ class ProgramOptions final {
return output_filename_.empty() && !rebaseline_;
}
bool rebaseline() const { return rebaseline_; }
+ bool check_baseline() const { return check_baseline_; }
+ bool baseline() const { return rebaseline_ || check_baseline_; }
bool wrap() const { return wrap_; }
bool module() const { return module_; }
bool top_level() const { return top_level_; }
@@ -66,7 +71,7 @@ class ProgramOptions final {
bool async_iteration() const { return async_iteration_; }
bool private_methods() const { return private_methods_; }
bool verbose() const { return verbose_; }
- bool suppress_runtime_errors() const { return rebaseline_ && !verbose_; }
+ bool suppress_runtime_errors() const { return baseline() && !verbose_; }
std::vector<std::string> input_filenames() const { return input_filenames_; }
std::string output_filename() const { return output_filename_; }
std::string test_function_name() const { return test_function_name_; }
@@ -77,6 +82,7 @@ class ProgramOptions final {
bool read_raw_js_snippet_;
bool read_from_stdin_;
bool rebaseline_;
+ bool check_baseline_;
bool wrap_;
bool module_;
bool top_level_;
@@ -174,6 +180,8 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.read_from_stdin_ = true;
} else if (strcmp(argv[i], "--rebaseline") == 0) {
options.rebaseline_ = true;
+ } else if (strcmp(argv[i], "--check-baseline") == 0) {
+ options.check_baseline_ = true;
} else if (strcmp(argv[i], "--no-wrap") == 0) {
options.wrap_ = false;
} else if (strcmp(argv[i], "--module") == 0) {
@@ -203,7 +211,13 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
}
}
- if (options.rebaseline_ && options.input_filenames_.empty()) {
+ if (options.rebaseline() && options.check_baseline()) {
+ REPORT_ERROR("Can't check baseline and rebaseline at the same time.");
+ std::exit(1);
+ }
+
+ if ((options.check_baseline_ || options.rebaseline_) &&
+ options.input_filenames_.empty()) {
#if defined(V8_OS_POSIX) || defined(V8_OS_WIN)
if (options.verbose_) {
std::cout << "Looking for golden files in " << kGoldenFilesPath << '\n';
@@ -236,24 +250,29 @@ bool ProgramOptions::Validate() const {
return false;
}
- if (rebaseline_ && read_raw_js_snippet_) {
- REPORT_ERROR("Cannot use --rebaseline on a raw JS snippet.");
+ if (baseline() && read_raw_js_snippet_) {
+ REPORT_ERROR(
+ "Cannot use --rebaseline or --check-baseline on a raw JS snippet.");
return false;
}
- if (rebaseline_ && !output_filename_.empty()) {
- REPORT_ERROR("Output file cannot be specified together with --rebaseline.");
+ if (baseline() && !output_filename_.empty()) {
+ REPORT_ERROR(
+ "Output file cannot be specified together with --rebaseline or "
+ "--check-baseline.");
return false;
}
- if (rebaseline_ && read_from_stdin_) {
- REPORT_ERROR("Cannot --rebaseline when input is --stdin.");
+ if (baseline() && read_from_stdin_) {
+ REPORT_ERROR(
+ "Cannot --rebaseline or --check-baseline when input is --stdin.");
return false;
}
- if (input_filenames_.size() > 1 && !rebaseline_ && !read_raw_js_snippet()) {
+ if (input_filenames_.size() > 1 && !baseline() && !read_raw_js_snippet()) {
REPORT_ERROR(
- "Multiple input files, but no --rebaseline or --raw-js specified.");
+ "Multiple input files, but no --rebaseline, --check-baseline or "
+ "--raw-js specified.");
return false;
}
@@ -297,8 +316,8 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
oneshot_opt_ = ParseBoolean(line.c_str() + strlen(kOneshotOpt));
} else if (line.compare(0, 17, "async iteration: ") == 0) {
async_iteration_ = ParseBoolean(line.c_str() + 17);
- } else if (line.compare(0, 16, "private methods: ") == 0) {
- private_methods_ = ParseBoolean(line.c_str() + 16);
+ } else if (line.compare(0, 17, "private methods: ") == 0) {
+ private_methods_ = ParseBoolean(line.c_str() + 17);
} else if (line == "---") {
break;
} else if (line.empty()) {
@@ -331,6 +350,7 @@ V8InitializationScope::V8InitializationScope(const char* exec_path)
: platform_(v8::platform::NewDefaultPlatform()) {
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_enable_lazy_source_positions = false;
v8::V8::InitializeICUDefaultLocation(exec_path);
v8::V8::InitializeExternalStartupData(exec_path);
@@ -462,9 +482,19 @@ bool WriteExpectationsFile(const std::vector<std::string>& snippet_list,
return true;
}
+std::string WriteExpectationsToString(
+ const std::vector<std::string>& snippet_list,
+ const V8InitializationScope& platform, const ProgramOptions& options) {
+ std::stringstream output_string;
+
+ GenerateExpectationsFile(output_string, snippet_list, platform, options);
+
+ return output_string.str();
+}
+
void PrintMessage(v8::Local<v8::Message> message, v8::Local<v8::Value>) {
std::cerr << "INFO: "
- << *v8::String::Utf8Value(v8::Isolate::GetCurrent(), message->Get())
+ << *v8::String::Utf8Value(message->GetIsolate(), message->Get())
<< '\n';
}
@@ -475,11 +505,12 @@ void PrintUsage(const char* exec_path) {
<< "\nUsage: " << exec_path
<< " [OPTIONS]... [INPUT FILES]...\n\n"
"Options:\n"
- " --help Print this help message.\n"
- " --verbose Emit messages about the progress of the tool.\n"
- " --raw-js Read raw JavaScript, instead of the output format.\n"
- " --stdin Read from standard input instead of file.\n"
+ " --help Print this help message.\n"
+ " --verbose Emit messages about the progress of the tool.\n"
+ " --raw-js Read raw JavaScript, instead of the output format.\n"
+ " --stdin Read from standard input instead of file.\n"
" --rebaseline Rebaseline input snippet file.\n"
+ " --check-baseline Checks the current baseline is valid.\n"
" --no-wrap Do not wrap the snippet in a function.\n"
" --disable-oneshot-opt Disable Oneshot Optimization.\n"
" --print-callee Print bytecode of callee, function should "
@@ -496,9 +527,9 @@ void PrintUsage(const char* exec_path) {
" Specify the type of the entries in the constant pool "
"(default: mixed).\n"
"\n"
- "When using --rebaseline, flags --no-wrap, --test-function-name \n"
- "and --pool-type will be overridden by the options specified in \n"
- "the input file header.\n\n"
+ "When using --rebaseline or --check-baseline, flags --no-wrap,\n"
+ "--test-function-name and --pool-type will be overridden by the\n"
+ "options specified in the input file header.\n\n"
"Each raw JavaScript file is interpreted as a single snippet.\n\n"
"This tool is intended as a help in writing tests.\n"
"Please, DO NOT blindly copy and paste the output "
@@ -507,6 +538,62 @@ void PrintUsage(const char* exec_path) {
} // namespace
+bool CheckBaselineExpectations(const std::string& input_filename,
+ const std::vector<std::string>& snippet_list,
+ const V8InitializationScope& platform,
+ const ProgramOptions& options) {
+ std::string actual =
+ WriteExpectationsToString(snippet_list, platform, options);
+
+ std::ifstream input_stream(input_filename);
+ if (!input_stream.is_open()) {
+ REPORT_ERROR("Could not open " << input_filename << " for reading.");
+ std::exit(2);
+ }
+
+ bool check_failed = false;
+ std::string expected((std::istreambuf_iterator<char>(input_stream)),
+ std::istreambuf_iterator<char>());
+ if (expected != actual) {
+ REPORT_ERROR("Mismatch: " << input_filename);
+ check_failed = true;
+ if (expected.size() != actual.size()) {
+ REPORT_ERROR(" Expected size (" << expected.size()
+ << ") != actual size (" << actual.size()
+ << ")");
+ }
+
+ int line = 1;
+ for (size_t i = 0; i < std::min(expected.size(), actual.size()); ++i) {
+ if (expected[i] != actual[i]) {
+ // Find the start of the line that has the mismatch carefully
+ // handling the case where it's the first line that mismatches.
+ size_t start = expected[i] != '\n' ? expected.rfind("\n", i)
+ : actual.rfind("\n", i);
+ if (start == std::string::npos) {
+ start = 0;
+ } else {
+ ++start;
+ }
+
+ // If there is no new line, then these two lines will consume the
+ // remaining characters in the string, because npos - start will
+ // always be longer than the string itself.
+ std::string expected_line =
+ expected.substr(start, expected.find("\n", i) - start);
+ std::string actual_line =
+ actual.substr(start, actual.find("\n", i) - start);
+ REPORT_ERROR(" First mismatch on line " << line << ")");
+ REPORT_ERROR(" Expected : '" << expected_line << "'");
+ REPORT_ERROR(" Actual : '" << actual_line << "'");
+ break;
+ }
+ if (expected[i] == '\n') line++;
+ }
+ }
+ return check_failed;
+}
+
int main(int argc, char** argv) {
ProgramOptions options = ProgramOptions::FromCommandLine(argc, argv);
@@ -524,9 +611,10 @@ int main(int argc, char** argv) {
if (options.read_from_stdin()) {
// Rebaseline will never get here, so we will always take the
// GenerateExpectationsFile at the end of this function.
- DCHECK(!options.rebaseline());
+ DCHECK(!options.rebaseline() && !options.check_baseline());
ExtractSnippets(&snippet_list, std::cin, options.read_raw_js_snippet());
} else {
+ bool check_failed = false;
for (const std::string& input_filename : options.input_filenames()) {
if (options.verbose()) {
std::cerr << "Processing " << input_filename << '\n';
@@ -539,25 +627,35 @@ int main(int argc, char** argv) {
}
ProgramOptions updated_options = options;
- if (options.rebaseline()) {
+ if (options.baseline()) {
updated_options.UpdateFromHeader(input_stream);
CHECK(updated_options.Validate());
}
ExtractSnippets(&snippet_list, input_stream,
options.read_raw_js_snippet());
+ input_stream.close();
if (options.rebaseline()) {
if (!WriteExpectationsFile(snippet_list, platform, updated_options,
input_filename)) {
return 3;
}
+ } else if (options.check_baseline()) {
+ check_failed |= CheckBaselineExpectations(input_filename, snippet_list,
+ platform, updated_options);
+ }
+
+ if (options.baseline()) {
snippet_list.clear();
}
}
+ if (check_failed) {
+ return 4;
+ }
}
- if (!options.rebaseline()) {
+ if (!options.baseline()) {
if (!WriteExpectationsFile(snippet_list, platform, options,
options.output_filename())) {
return 3;
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 3a4d089786..e1601d4642 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -131,21 +131,22 @@ std::string BuildActual(const BytecodeExpectationsPrinter& printer,
}
// inplace left trim
-static inline void ltrim(std::string& str) {
+static inline void ltrim(std::string& str) { // NOLINT(runtime/references)
str.erase(str.begin(),
std::find_if(str.begin(), str.end(),
[](unsigned char ch) { return !std::isspace(ch); }));
}
// inplace right trim
-static inline void rtrim(std::string& str) {
+static inline void rtrim(std::string& str) { // NOLINT(runtime/references)
str.erase(std::find_if(str.rbegin(), str.rend(),
[](unsigned char ch) { return !std::isspace(ch); })
.base(),
str.end());
}
-static inline std::string trim(std::string& str) {
+static inline std::string trim(
+ std::string& str) { // NOLINT(runtime/references)
ltrim(str);
rtrim(str);
return str;
@@ -2767,23 +2768,28 @@ TEST(PrivateMethods) {
"{\n"
" class A {\n"
" #a() { return 1; }\n"
+ " callA() { return this.#a(); }\n"
" }\n"
"\n"
- " new A;\n"
+ " const a = new A;\n"
+ " a.callA();\n"
"}\n",
"{\n"
" class D {\n"
- " #d() {}\n"
+ " #d() { return 1; }\n"
+ " callD() { return this.#d(); }\n"
" }\n"
"\n"
" class E extends D {\n"
- " #e() {}\n"
+ " #e() { return 2; }\n"
+ " callE() { return this.callD() + this.#e(); }\n"
" }\n"
"\n"
- " new D;\n"
- " new E;\n"
+ " const e = new E;\n"
+ " e.callE();\n"
"}\n"};
+
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PrivateMethods.golden")));
i::FLAG_harmony_private_methods = old_methods_flag;
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 9b907588ef..466e768d7d 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -1485,19 +1485,18 @@ TEST(InterpreterCall) {
}
}
-static BytecodeArrayBuilder& SetRegister(BytecodeArrayBuilder& builder,
- Register reg, int value,
- Register scratch) {
+static BytecodeArrayBuilder& SetRegister(
+ BytecodeArrayBuilder& builder, // NOLINT(runtime/references)
+ Register reg, int value, Register scratch) {
return builder.StoreAccumulatorInRegister(scratch)
.LoadLiteral(Smi::FromInt(value))
.StoreAccumulatorInRegister(reg)
.LoadAccumulatorWithRegister(scratch);
}
-static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder& builder,
- Register reg, int value,
- Register scratch,
- int slot_index) {
+static BytecodeArrayBuilder& IncrementRegister(
+ BytecodeArrayBuilder& builder, // NOLINT(runtime/references)
+ Register reg, int value, Register scratch, int slot_index) {
return builder.StoreAccumulatorInRegister(scratch)
.LoadLiteral(Smi::FromInt(value))
.BinaryOperation(Token::Value::ADD, reg, slot_index)
@@ -5064,6 +5063,7 @@ TEST(InterpreterGetBytecodeHandler) {
TEST(InterpreterCollectSourcePositions) {
FLAG_enable_lazy_source_positions = true;
+ FLAG_stress_lazy_source_positions = false;
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -5089,6 +5089,7 @@ TEST(InterpreterCollectSourcePositions) {
TEST(InterpreterCollectSourcePositions_StackOverflow) {
FLAG_enable_lazy_source_positions = true;
+ FLAG_stress_lazy_source_positions = false;
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -5125,6 +5126,7 @@ TEST(InterpreterCollectSourcePositions_StackOverflow) {
TEST(InterpreterCollectSourcePositions_ThrowFrom1stFrame) {
FLAG_enable_lazy_source_positions = true;
+ FLAG_stress_lazy_source_positions = false;
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -5160,6 +5162,7 @@ TEST(InterpreterCollectSourcePositions_ThrowFrom1stFrame) {
TEST(InterpreterCollectSourcePositions_ThrowFrom2ndFrame) {
FLAG_enable_lazy_source_positions = true;
+ FLAG_stress_lazy_source_positions = false;
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -5197,19 +5200,26 @@ TEST(InterpreterCollectSourcePositions_ThrowFrom2ndFrame) {
namespace {
+void CheckStringEqual(const char* expected_ptr, const char* actual_ptr) {
+ CHECK_NOT_NULL(expected_ptr);
+ CHECK_NOT_NULL(actual_ptr);
+ std::string expected(expected_ptr);
+ std::string actual(actual_ptr);
+ CHECK_EQ(expected, actual);
+}
+
void CheckStringEqual(const char* expected_ptr, Handle<Object> actual_handle) {
v8::String::Utf8Value utf8(
v8::Isolate::GetCurrent(),
v8::Utils::ToLocal(Handle<String>::cast(actual_handle)));
- std::string expected(expected_ptr);
- std::string actual(*utf8);
- CHECK_EQ(expected, actual);
+ CheckStringEqual(expected_ptr, *utf8);
}
} // namespace
TEST(InterpreterCollectSourcePositions_GenerateStackTrace) {
FLAG_enable_lazy_source_positions = true;
+ FLAG_stress_lazy_source_positions = false;
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -5246,6 +5256,23 @@ TEST(InterpreterCollectSourcePositions_GenerateStackTrace) {
CHECK_GT(source_position_table.length(), 0);
}
+TEST(InterpreterLookupNameOfBytecodeHandler) {
+ Interpreter* interpreter = CcTest::i_isolate()->interpreter();
+ Code ldaLookupSlot = interpreter->GetBytecodeHandler(Bytecode::kLdaLookupSlot,
+ OperandScale::kSingle);
+ CheckStringEqual("LdaLookupSlotHandler",
+ interpreter->LookupNameOfBytecodeHandler(ldaLookupSlot));
+ Code wideLdaLookupSlot = interpreter->GetBytecodeHandler(
+ Bytecode::kLdaLookupSlot, OperandScale::kDouble);
+ CheckStringEqual("LdaLookupSlotWideHandler",
+ interpreter->LookupNameOfBytecodeHandler(wideLdaLookupSlot));
+ Code extraWideLdaLookupSlot = interpreter->GetBytecodeHandler(
+ Bytecode::kLdaLookupSlot, OperandScale::kQuadruple);
+ CheckStringEqual(
+ "LdaLookupSlotExtraWideHandler",
+ interpreter->LookupNameOfBytecodeHandler(extraWideLdaLookupSlot));
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index 0d6de10f2a..d20dfff652 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -11,9 +11,28 @@
#ifdef V8_USE_PERFETTO
#include "perfetto/trace/chrome/chrome_trace_event.pb.h"
+#include "perfetto/trace/chrome/chrome_trace_event.pbzero.h"
#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
+#include "perfetto/trace/trace.pb.h"
+#include "perfetto/tracing.h"
+#include "src/libplatform/tracing/json-trace-event-listener.h"
#include "src/libplatform/tracing/trace-event-listener.h"
-#endif
+#endif // V8_USE_PERFETTO
+
+#ifdef V8_USE_PERFETTO
+class TestDataSource : public perfetto::DataSource<TestDataSource> {
+ public:
+ void OnSetup(const SetupArgs&) override {}
+ void OnStart(const StartArgs&) override { started_.Signal(); }
+ void OnStop(const StopArgs&) override {}
+
+ static v8::base::Semaphore started_;
+};
+
+v8::base::Semaphore TestDataSource::started_{0};
+
+PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(TestDataSource);
+#endif // V8_USE_PERFETTO
namespace v8 {
namespace platform {
@@ -206,6 +225,21 @@ TEST(TestJSONTraceWriterWithCustomtag) {
CHECK_EQ(expected_trace_str, trace_str);
}
+void GetJSONStrings(std::vector<std::string>* ret, const std::string& str,
+ const std::string& param, const std::string& start_delim,
+ const std::string& end_delim) {
+ size_t pos = str.find(param);
+ while (pos != std::string::npos) {
+ size_t start_pos = str.find(start_delim, pos + param.length());
+ size_t end_pos = str.find(end_delim, start_pos + 1);
+ CHECK_NE(start_pos, std::string::npos);
+ CHECK_NE(end_pos, std::string::npos);
+ ret->push_back(str.substr(start_pos + 1, end_pos - start_pos - 1));
+ pos = str.find(param, pos + 1);
+ }
+}
+
+#ifndef V8_USE_PERFETTO
TEST(TestTracingController) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
std::unique_ptr<v8::Platform> default_platform(
@@ -242,20 +276,6 @@ TEST(TestTracingController) {
i::V8::SetPlatformForTesting(old_platform);
}
-void GetJSONStrings(std::vector<std::string>& ret, std::string str,
- std::string param, std::string start_delim,
- std::string end_delim) {
- size_t pos = str.find(param);
- while (pos != std::string::npos) {
- size_t start_pos = str.find(start_delim, pos + param.length());
- size_t end_pos = str.find(end_delim, start_pos + 1);
- CHECK_NE(start_pos, std::string::npos);
- CHECK_NE(end_pos, std::string::npos);
- ret.push_back(str.substr(start_pos + 1, end_pos - start_pos - 1));
- pos = str.find(param, pos + 1);
- }
-}
-
TEST(TestTracingControllerMultipleArgsAndCopy) {
std::ostringstream stream, perfetto_stream;
uint64_t aa = 11;
@@ -351,9 +371,9 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
std::string trace_str = stream.str();
std::vector<std::string> all_args, all_names, all_cats;
- GetJSONStrings(all_args, trace_str, "\"args\"", "{", "}");
- GetJSONStrings(all_names, trace_str, "\"name\"", "\"", "\"");
- GetJSONStrings(all_cats, trace_str, "\"cat\"", "\"", "\"");
+ GetJSONStrings(&all_args, trace_str, "\"args\"", "{", "}");
+ GetJSONStrings(&all_names, trace_str, "\"name\"", "\"", "\"");
+ GetJSONStrings(&all_cats, trace_str, "\"cat\"", "\"", "\"");
CHECK_EQ(all_args.size(), 24u);
CHECK_EQ(all_args[0], "\"aa\":11");
@@ -387,6 +407,7 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
CHECK_EQ(all_args[22], "\"a1\":[42,42]");
CHECK_EQ(all_args[23], "\"a1\":[42,42],\"a2\":[123,123]");
}
+#endif // !V8_USE_PERFETTO
namespace {
@@ -533,36 +554,14 @@ TEST(AddTraceEventMultiThreaded) {
#ifdef V8_USE_PERFETTO
-struct TraceEvent {
- std::string name;
- int64_t timestamp;
- int32_t phase;
- int32_t thread_id;
- int64_t duration;
- int64_t thread_duration;
- std::string scope;
- uint64_t id;
- uint32_t flags;
- std::string category_group_name;
- int32_t process_id;
- int64_t thread_timestamp;
- uint64_t bind_id;
-};
+using TraceEvent = ::perfetto::protos::ChromeTraceEvent;
class TestListener : public TraceEventListener {
public:
- void ProcessPacket(
- const ::perfetto::protos::ChromeTracePacket& packet) override {
+ void ProcessPacket(const ::perfetto::protos::TracePacket& packet) {
for (const ::perfetto::protos::ChromeTraceEvent& event :
packet.chrome_events().trace_events()) {
- TraceEvent trace_event{event.name(), event.timestamp(),
- event.phase(), event.thread_id(),
- event.duration(), event.thread_duration(),
- event.scope(), event.id(),
- event.flags(), event.category_group_name(),
- event.process_id(), event.thread_timestamp(),
- event.bind_id()};
- events_.push_back(trace_event);
+ events_.push_back(event);
}
}
@@ -587,10 +586,6 @@ class TracingTestHarness {
static_cast<v8::platform::DefaultPlatform*>(default_platform_.get())
->SetTracingController(std::move(tracing));
- MockTraceWriter* writer = new MockTraceWriter();
- TraceBuffer* ring_buffer =
- TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
- tracing_controller_->Initialize(ring_buffer);
tracing_controller_->InitializeForPerfetto(&perfetto_json_stream_);
tracing_controller_->SetTraceEventListenerForTesting(&listener_);
}
@@ -636,52 +631,332 @@ TEST(Perfetto) {
harness.StopTracing();
TraceEvent* event = harness.get_event(0);
- int32_t thread_id = event->thread_id;
- int32_t process_id = event->process_id;
- CHECK_EQ("test1", event->name);
- CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase);
- int64_t timestamp = event->timestamp;
+ int32_t thread_id = event->thread_id();
+ int32_t process_id = event->process_id();
+ CHECK_EQ("test1", event->name());
+ CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase());
+ int64_t timestamp = event->timestamp();
event = harness.get_event(1);
- CHECK_EQ("test2", event->name);
- CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase);
- CHECK_EQ(thread_id, event->thread_id);
- CHECK_EQ(process_id, event->process_id);
- CHECK_GE(event->timestamp, timestamp);
- timestamp = event->timestamp;
+ CHECK_EQ("test2", event->name());
+ CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase());
+ CHECK_EQ(thread_id, event->thread_id());
+ CHECK_EQ(process_id, event->process_id());
+ CHECK_GE(event->timestamp(), timestamp);
+ timestamp = event->timestamp();
event = harness.get_event(2);
- CHECK_EQ("test3", event->name);
- CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase);
- CHECK_EQ(thread_id, event->thread_id);
- CHECK_EQ(process_id, event->process_id);
- CHECK_GE(event->timestamp, timestamp);
- timestamp = event->timestamp;
+ CHECK_EQ("test3", event->name());
+ CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase());
+ CHECK_EQ(thread_id, event->thread_id());
+ CHECK_EQ(process_id, event->process_id());
+ CHECK_GE(event->timestamp(), timestamp);
+ timestamp = event->timestamp();
event = harness.get_event(3);
- CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase);
- CHECK_EQ(thread_id, event->thread_id);
- CHECK_EQ(process_id, event->process_id);
- CHECK_GE(event->timestamp, timestamp);
- timestamp = event->timestamp;
+ CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase());
+ CHECK_EQ(thread_id, event->thread_id());
+ CHECK_EQ(process_id, event->process_id());
+ CHECK_GE(event->timestamp(), timestamp);
+ timestamp = event->timestamp();
event = harness.get_event(4);
- CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase);
- CHECK_EQ(thread_id, event->thread_id);
- CHECK_EQ(process_id, event->process_id);
- CHECK_GE(event->timestamp, timestamp);
- timestamp = event->timestamp;
+ CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase());
+ CHECK_EQ(thread_id, event->thread_id());
+ CHECK_EQ(process_id, event->process_id());
+ CHECK_GE(event->timestamp(), timestamp);
+ timestamp = event->timestamp();
event = harness.get_event(5);
- CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase);
- CHECK_EQ(thread_id, event->thread_id);
- CHECK_EQ(process_id, event->process_id);
- CHECK_GE(event->timestamp, timestamp);
- timestamp = event->timestamp;
+ CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase());
+ CHECK_EQ(thread_id, event->thread_id());
+ CHECK_EQ(process_id, event->process_id());
+ CHECK_GE(event->timestamp(), timestamp);
+ timestamp = event->timestamp();
CHECK_EQ(6, harness.events_size());
}
+// Replacement for 'TestTracingController'
+TEST(Categories) {
+ TracingTestHarness harness;
+ harness.StartTracing();
+
+ {
+ TRACE_EVENT0("v8", "v8.Test");
+ // cat category is not included in default config
+ TRACE_EVENT0("cat", "v8.Test2");
+ TRACE_EVENT0("v8", "v8.Test3");
+ }
+ TRACE_EVENT_INSTANT0("v8", "final event not captured",
+ TRACE_EVENT_SCOPE_THREAD);
+
+ harness.StopTracing();
+
+ CHECK_EQ(4, harness.events_size());
+ TraceEvent* event = harness.get_event(0);
+ CHECK_EQ("v8.Test", event->name());
+ event = harness.get_event(1);
+ CHECK_EQ("v8.Test3", event->name());
+}
+
+// Replacement for 'TestTracingControllerMultipleArgsAndCopy'
+TEST(MultipleArgsAndCopy) {
+ uint64_t aa = 11;
+ unsigned int bb = 22;
+ uint16_t cc = 33;
+ unsigned char dd = 44;
+ int64_t ee = -55;
+ int ff = -66;
+ int16_t gg = -77;
+ signed char hh = -88;
+ bool ii1 = true;
+ bool ii2 = false;
+ double jj1 = 99.0;
+ double jj2 = 1e100;
+ double jj3 = std::numeric_limits<double>::quiet_NaN();
+ double jj4 = std::numeric_limits<double>::infinity();
+ double jj5 = -std::numeric_limits<double>::infinity();
+ void* kk = &aa;
+ const char* ll = "100";
+ std::string mm = "INIT";
+ std::string mmm = "\"INIT\"";
+
+ TracingTestHarness harness;
+ harness.StartTracing();
+
+ // Create a scope for the tracing controller to terminate the trace writer.
+ {
+ TRACE_EVENT1("v8", "v8.Test.aa", "aa", aa);
+ TRACE_EVENT1("v8", "v8.Test.bb", "bb", bb);
+ TRACE_EVENT1("v8", "v8.Test.cc", "cc", cc);
+ TRACE_EVENT1("v8", "v8.Test.dd", "dd", dd);
+ TRACE_EVENT1("v8", "v8.Test.ee", "ee", ee);
+ TRACE_EVENT1("v8", "v8.Test.ff", "ff", ff);
+ TRACE_EVENT1("v8", "v8.Test.gg", "gg", gg);
+ TRACE_EVENT1("v8", "v8.Test.hh", "hh", hh);
+ TRACE_EVENT1("v8", "v8.Test.ii", "ii1", ii1);
+ TRACE_EVENT1("v8", "v8.Test.ii", "ii2", ii2);
+ TRACE_EVENT1("v8", "v8.Test.jj1", "jj1", jj1);
+ TRACE_EVENT1("v8", "v8.Test.jj2", "jj2", jj2);
+ TRACE_EVENT1("v8", "v8.Test.jj3", "jj3", jj3);
+ TRACE_EVENT1("v8", "v8.Test.jj4", "jj4", jj4);
+ TRACE_EVENT1("v8", "v8.Test.jj5", "jj5", jj5);
+ TRACE_EVENT1("v8", "v8.Test.kk", "kk", kk);
+ TRACE_EVENT1("v8", "v8.Test.ll", "ll", ll);
+ TRACE_EVENT1("v8", "v8.Test.mm", "mm", TRACE_STR_COPY(mmm.c_str()));
+
+ TRACE_EVENT2("v8", "v8.Test2.1", "aa", aa, "ll", ll);
+ TRACE_EVENT2("v8", "v8.Test2.2", "mm1", TRACE_STR_COPY(mm.c_str()), "mm2",
+ TRACE_STR_COPY(mmm.c_str()));
+
+ // Check copies are correct.
+ TRACE_EVENT_COPY_INSTANT0("v8", mm.c_str(), TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_COPY_INSTANT2("v8", mm.c_str(), TRACE_EVENT_SCOPE_THREAD, "mm1",
+ mm.c_str(), "mm2", mmm.c_str());
+ mm = "CHANGED";
+ mmm = "CHANGED";
+
+ TRACE_EVENT_INSTANT1("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
+ new ConvertableToTraceFormatMock(42));
+ std::unique_ptr<ConvertableToTraceFormatMock> trace_event_arg(
+ new ConvertableToTraceFormatMock(42));
+ TRACE_EVENT_INSTANT2("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
+ std::move(trace_event_arg), "a2",
+ new ConvertableToTraceFormatMock(123));
+ }
+ TRACE_EVENT_INSTANT0("v8", "final event not captured",
+ TRACE_EVENT_SCOPE_THREAD);
+
+ harness.StopTracing();
+
+ // 20 START/END events, 4 INSTANT events.
+ CHECK_EQ(44, harness.events_size());
+ TraceEvent* event = harness.get_event(0);
+ CHECK_EQ("aa", event->args()[0].name());
+ CHECK_EQ(aa, event->args()[0].uint_value());
+
+ event = harness.get_event(1);
+ CHECK_EQ("bb", event->args()[0].name());
+ CHECK_EQ(bb, event->args()[0].uint_value());
+
+ event = harness.get_event(2);
+ CHECK_EQ("cc", event->args()[0].name());
+ CHECK_EQ(cc, event->args()[0].uint_value());
+
+ event = harness.get_event(3);
+ CHECK_EQ("dd", event->args()[0].name());
+ CHECK_EQ(dd, event->args()[0].uint_value());
+
+ event = harness.get_event(4);
+ CHECK_EQ("ee", event->args()[0].name());
+ CHECK_EQ(ee, event->args()[0].int_value());
+
+ event = harness.get_event(5);
+ CHECK_EQ("ff", event->args()[0].name());
+ CHECK_EQ(ff, event->args()[0].int_value());
+
+ event = harness.get_event(6);
+ CHECK_EQ("gg", event->args()[0].name());
+ CHECK_EQ(gg, event->args()[0].int_value());
+
+ event = harness.get_event(7);
+ CHECK_EQ("hh", event->args()[0].name());
+ CHECK_EQ(hh, event->args()[0].int_value());
+
+ event = harness.get_event(8);
+ CHECK_EQ("ii1", event->args()[0].name());
+ CHECK_EQ(ii1, event->args()[0].bool_value());
+
+ event = harness.get_event(9);
+ CHECK_EQ("ii2", event->args()[0].name());
+ CHECK_EQ(ii2, event->args()[0].bool_value());
+
+ event = harness.get_event(10);
+ CHECK_EQ("jj1", event->args()[0].name());
+ CHECK_EQ(jj1, event->args()[0].double_value());
+
+ event = harness.get_event(11);
+ CHECK_EQ("jj2", event->args()[0].name());
+ CHECK_EQ(jj2, event->args()[0].double_value());
+
+ event = harness.get_event(12);
+ CHECK_EQ("jj3", event->args()[0].name());
+ CHECK(std::isnan(event->args()[0].double_value()));
+
+ event = harness.get_event(13);
+ CHECK_EQ("jj4", event->args()[0].name());
+ CHECK_EQ(jj4, event->args()[0].double_value());
+
+ event = harness.get_event(14);
+ CHECK_EQ("jj5", event->args()[0].name());
+ CHECK_EQ(jj5, event->args()[0].double_value());
+
+ event = harness.get_event(15);
+ CHECK_EQ("kk", event->args()[0].name());
+ CHECK_EQ(kk, reinterpret_cast<void*>(event->args()[0].pointer_value()));
+
+ event = harness.get_event(16);
+ CHECK_EQ("ll", event->args()[0].name());
+ CHECK_EQ(ll, event->args()[0].string_value());
+
+ event = harness.get_event(17);
+ CHECK_EQ("mm", event->args()[0].name());
+ CHECK_EQ("\"INIT\"", event->args()[0].string_value());
+
+ event = harness.get_event(18);
+ CHECK_EQ("v8.Test2.1", event->name());
+ CHECK_EQ("aa", event->args()[0].name());
+ CHECK_EQ(aa, event->args()[0].uint_value());
+ CHECK_EQ("ll", event->args()[1].name());
+ CHECK_EQ(ll, event->args()[1].string_value());
+
+ event = harness.get_event(19);
+ CHECK_EQ("mm1", event->args()[0].name());
+ CHECK_EQ("INIT", event->args()[0].string_value());
+ CHECK_EQ("mm2", event->args()[1].name());
+ CHECK_EQ("\"INIT\"", event->args()[1].string_value());
+
+ event = harness.get_event(20);
+ CHECK_EQ("INIT", event->name());
+
+ event = harness.get_event(21);
+ CHECK_EQ("INIT", event->name());
+ CHECK_EQ("mm1", event->args()[0].name());
+ CHECK_EQ("INIT", event->args()[0].string_value());
+ CHECK_EQ("mm2", event->args()[1].name());
+ CHECK_EQ("\"INIT\"", event->args()[1].string_value());
+
+ event = harness.get_event(22);
+ CHECK_EQ("a1", event->args()[0].name());
+ CHECK_EQ("[42,42]", event->args()[0].json_value());
+
+ event = harness.get_event(23);
+ CHECK_EQ("a1", event->args()[0].name());
+ CHECK_EQ("[42,42]", event->args()[0].json_value());
+ CHECK_EQ("a2", event->args()[1].name());
+ CHECK_EQ("[123,123]", event->args()[1].json_value());
+}
+
+TEST(JsonIntegrationTest) {
+ // Check that tricky values are rendered correctly in the JSON output.
+ double big_num = 1e100;
+ double nan_num = std::numeric_limits<double>::quiet_NaN();
+ double inf_num = std::numeric_limits<double>::infinity();
+ double neg_inf_num = -std::numeric_limits<double>::infinity();
+
+ TracingTestHarness harness;
+ harness.StartTracing();
+
+ {
+ TRACE_EVENT1("v8", "v8.Test.1", "1", big_num);
+ TRACE_EVENT1("v8", "v8.Test.2", "2", nan_num);
+ TRACE_EVENT1("v8", "v8.Test.3", "3", inf_num);
+ TRACE_EVENT1("v8", "v8.Test.4", "4", neg_inf_num);
+ }
+ TRACE_EVENT_INSTANT0("v8", "final event not captured",
+ TRACE_EVENT_SCOPE_THREAD);
+
+ harness.StopTracing();
+ std::string json = harness.perfetto_json_stream();
+
+ std::vector<std::string> all_args;
+ GetJSONStrings(&all_args, json, "\"args\"", "{", "}");
+
+ CHECK_EQ("\"1\":1e+100", all_args[0]);
+ CHECK_EQ("\"2\":\"NaN\"", all_args[1]);
+ CHECK_EQ("\"3\":\"Infinity\"", all_args[2]);
+ CHECK_EQ("\"4\":\"-Infinity\"", all_args[3]);
+}
+
+TEST(TracingPerfetto) {
+ ::perfetto::TraceConfig perfetto_trace_config;
+ perfetto_trace_config.add_buffers()->set_size_kb(4096);
+ auto* ds_config = perfetto_trace_config.add_data_sources()->mutable_config();
+ ds_config->set_name("v8.trace_events");
+
+ perfetto::DataSourceDescriptor dsd;
+ dsd.set_name("v8.trace_events");
+ TestDataSource::Register(dsd);
+
+ auto tracing_session_ =
+ perfetto::Tracing::NewTrace(perfetto::BackendType::kInProcessBackend);
+ tracing_session_->Setup(perfetto_trace_config);
+ tracing_session_->Start();
+ TestDataSource::started_.Wait();
+
+ for (int i = 0; i < 15; i++) {
+ TestDataSource::Trace([&](TestDataSource::TraceContext ctx) {
+ auto packet = ctx.NewTracePacket();
+ auto* trace_event_bundle = packet->set_chrome_events();
+ auto* trace_event = trace_event_bundle->add_trace_events();
+
+ trace_event->set_phase('c');
+ trace_event->set_thread_id(v8::base::OS::GetCurrentThreadId());
+ trace_event->set_timestamp(123);
+ trace_event->set_process_id(v8::base::OS::GetCurrentProcessId());
+ trace_event->set_thread_timestamp(123);
+ });
+ }
+ v8::base::Semaphore stopped_{0};
+ tracing_session_->SetOnStopCallback([&stopped_]() { stopped_.Signal(); });
+ tracing_session_->Stop();
+ stopped_.Wait();
+
+ std::ostringstream perfetto_json_stream_;
+
+ {
+ v8::platform::tracing::JSONTraceEventListener json_listener_(
+ &perfetto_json_stream_);
+
+ std::vector<char> trace = tracing_session_->ReadTraceBlocking();
+ json_listener_.ParseFromArray(trace);
+ }
+
+ printf("%s\n", perfetto_json_stream_.str().c_str());
+ CHECK_GT(perfetto_json_stream_.str().length(), 0);
+}
+
#endif // V8_USE_PERFETTO
} // namespace tracing
diff --git a/deps/v8/test/cctest/log-eq-of-logging-and-traversal.js b/deps/v8/test/cctest/log-eq-of-logging-and-traversal.js
deleted file mode 100644
index a3831ad24c..0000000000
--- a/deps/v8/test/cctest/log-eq-of-logging-and-traversal.js
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This is a supplementary file for test-log/EquivalenceOfLoggingAndTraversal.
-
-function parseState(s) {
- switch (s) {
- case "": return Profile.CodeState.COMPILED;
- case "~": return Profile.CodeState.OPTIMIZABLE;
- case "*": return Profile.CodeState.OPTIMIZED;
- }
- throw new Error("unknown code state: " + s);
-}
-
-function LogProcessor() {
- LogReader.call(this, {
- 'code-creation': {
- parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
- parseString, parseVarArgs],
- processor: this.processCodeCreation },
- 'code-move': { parsers: [parseInt, parseInt],
- processor: this.processCodeMove },
- 'code-delete': parseString,
- 'sfi-move': { parsers: [parseInt, parseInt],
- processor: this.processFunctionMove },
- 'shared-library': parseString,
- 'profiler': parseString,
- 'tick': parseString });
- this.profile = new Profile();
-
-}
-LogProcessor.prototype.__proto__ = LogReader.prototype;
-
-LogProcessor.prototype.processCodeCreation = function(
- type, kind, timestamp, start, size, name, maybe_func) {
- if (type != "LazyCompile" && type != "Script" && type != "Function") {
- return;
- }
- // Scripts will compile into anonymous functions starting at 1:1. Adjust the
- // name here so that it matches corrsponding function's name during the heap
- // traversal.
- if (type == "Script") name = " :1:1";
- // Discard types to avoid discrepancies in "LazyCompile" vs. "Function".
- type = "";
- if (maybe_func.length) {
- var funcAddr = parseInt(maybe_func[0]);
- var state = parseState(maybe_func[1]);
- this.profile.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
- } else {
- this.profile.addCode(type, name, timestamp, start, size);
- }
-};
-
-LogProcessor.prototype.processCodeMove = function(from, to) {
- this.profile.moveCode(from, to);
-};
-
-LogProcessor.prototype.processFunctionMove = function(from, to) {
- this.profile.moveFunc(from, to);
-};
-
-function RunTest() {
- // _log must be provided externally.
- var log_lines = _log.split("\n");
- var line, pos = 0, log_lines_length = log_lines.length;
- if (log_lines_length < 2)
- return "log_lines_length < 2";
- var logging_processor = new LogProcessor();
- for ( ; pos < log_lines_length; ++pos) {
- line = log_lines[pos];
- if (line === "test-logging-done,\"\"") {
- ++pos;
- break;
- }
- logging_processor.processLogLine(line);
- }
- logging_processor.profile.cleanUpFuncEntries();
- var logging_entries =
- logging_processor.profile.codeMap_.getAllDynamicEntriesWithAddresses();
- if (logging_entries.length === 0)
- return "logging_entries.length === 0";
-
- var traversal_processor = new LogProcessor();
- for ( ; pos < log_lines_length; ++pos) {
- line = log_lines[pos];
- if (line === "test-traversal-done,\"\"") break;
- traversal_processor.processLogLine(line);
- }
- var traversal_entries =
- traversal_processor.profile.codeMap_.getAllDynamicEntriesWithAddresses();
- if (traversal_entries.length === 0)
- return "traversal_entries.length === 0";
-
- function addressComparator(entryA, entryB) {
- return entryA[0] < entryB[0] ? -1 : (entryA[0] > entryB[0] ? 1 : 0);
- }
-
- logging_entries.sort(addressComparator);
- traversal_entries.sort(addressComparator);
-
- function entityNamesEqual(entityA, entityB) {
- if ("getRawName" in entityB &&
- entityNamesEqual.builtins.indexOf(entityB.getRawName()) !== -1) {
- return true;
- }
- if (entityNamesEqual.builtins.indexOf(entityB.getName()) !== -1) return true;
- return entityA.getName() === entityB.getName();
- }
- entityNamesEqual.builtins =
- ["Boolean", "Function", "Number", "Object",
- "Script", "String", "RegExp", "Date", "Error"];
-
- function entitiesEqual(entityA, entityB) {
- if ((entityA === null && entityB !== null) ||
- (entityA !== null && entityB === null)) return true;
- return entityA.size === entityB.size && entityNamesEqual(entityA, entityB);
- }
-
- var l_pos = 0, t_pos = 0;
- var l_len = logging_entries.length, t_len = traversal_entries.length;
- var comparison = [];
- var equal = true;
- // Do a merge-like comparison of entries. At the same address we expect to
- // find the same entries. We skip builtins during log parsing, but compiled
- // functions traversal may erroneously recognize them as functions, so we are
- // expecting more functions in traversal vs. logging.
- // Since we don't track code deletions, logging can also report more entries
- // than traversal.
- while (l_pos < l_len && t_pos < t_len) {
- var entryA = logging_entries[l_pos];
- var entryB = traversal_entries[t_pos];
- var cmp = addressComparator(entryA, entryB);
- var entityA = entryA[1], entityB = entryB[1];
- var address = entryA[0];
- if (cmp < 0) {
- ++l_pos;
- entityB = null;
- } else if (cmp > 0) {
- ++t_pos;
- entityA = null;
- address = entryB[0];
- } else {
- ++l_pos;
- ++t_pos;
- }
- var entities_equal = entitiesEqual(entityA, entityB);
- if (!entities_equal) equal = false;
- comparison.push([entities_equal, address, entityA, entityB]);
- }
- return [equal, comparison];
-}
-
-
-var result = RunTest();
-if (typeof result !== "string") {
- var out = [];
- if (!result[0]) {
- var comparison = result[1];
- for (var i = 0, l = comparison.length; i < l; ++i) {
- var c = comparison[i];
- out.push((c[0] ? " " : "* ") +
- c[1].toString(16) + " " +
- (c[2] ? c[2] : "---") + " " +
- (c[3] ? c[3] : "---"));
- }
- out.push("================================================")
- out.push("MAKE SURE TO USE A CLEAN ISOLATiE!");
- out.push("Use tools/test.py");
- out.push("================================================")
- out.push("* Lines are the same");
- out.push("--- Line is missing"
- out.push("================================================")
- }
- result[0] ? true : out.join("\n");
-} else {
- result;
-}
diff --git a/deps/v8/test/cctest/test-allocation.cc b/deps/v8/test/cctest/test-allocation.cc
index e416c554ef..e0b4f83473 100644
--- a/deps/v8/test/cctest/test-allocation.cc
+++ b/deps/v8/test/cctest/test-allocation.cc
@@ -139,7 +139,7 @@ TEST(MallocedOperatorNewOOM) {
CcTest::isolate()->SetFatalErrorHandler(OnMallocedOperatorNewOOM);
// On failure, this won't return, since a Malloced::New failure is fatal.
// In that case, behavior is checked in OnMallocedOperatorNewOOM before exit.
- void* result = v8::internal::Malloced::New(GetHugeMemoryAmount());
+ void* result = v8::internal::Malloced::operator new(GetHugeMemoryAmount());
// On a few systems, allocation somehow succeeds.
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
diff --git a/deps/v8/test/cctest/test-api-array-buffer.cc b/deps/v8/test/cctest/test-api-array-buffer.cc
new file mode 100644
index 0000000000..5b8433a6a2
--- /dev/null
+++ b/deps/v8/test/cctest/test-api-array-buffer.cc
@@ -0,0 +1,529 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/test-api.h"
+
+#include "src/api/api-inl.h"
+
+using ::v8::Array;
+using ::v8::Context;
+using ::v8::Local;
+using ::v8::Value;
+
+namespace {
+
+class ScopedArrayBufferContents {
+ public:
+ explicit ScopedArrayBufferContents(const v8::ArrayBuffer::Contents& contents)
+ : contents_(contents) {}
+ ~ScopedArrayBufferContents() { free(contents_.AllocationBase()); }
+ void* Data() const { return contents_.Data(); }
+ size_t ByteLength() const { return contents_.ByteLength(); }
+
+ void* AllocationBase() const { return contents_.AllocationBase(); }
+ size_t AllocationLength() const { return contents_.AllocationLength(); }
+ v8::ArrayBuffer::Allocator::AllocationMode AllocationMode() const {
+ return contents_.AllocationMode();
+ }
+
+ private:
+ const v8::ArrayBuffer::Contents contents_;
+};
+
+class ScopedSharedArrayBufferContents {
+ public:
+ explicit ScopedSharedArrayBufferContents(
+ const v8::SharedArrayBuffer::Contents& contents)
+ : contents_(contents) {}
+ ~ScopedSharedArrayBufferContents() { free(contents_.AllocationBase()); }
+ void* Data() const { return contents_.Data(); }
+ size_t ByteLength() const { return contents_.ByteLength(); }
+
+ void* AllocationBase() const { return contents_.AllocationBase(); }
+ size_t AllocationLength() const { return contents_.AllocationLength(); }
+ v8::ArrayBuffer::Allocator::AllocationMode AllocationMode() const {
+ return contents_.AllocationMode();
+ }
+
+ private:
+ const v8::SharedArrayBuffer::Contents contents_;
+};
+
+void CheckDataViewIsDetached(v8::Local<v8::DataView> dv) {
+ CHECK_EQ(0, static_cast<int>(dv->ByteLength()));
+ CHECK_EQ(0, static_cast<int>(dv->ByteOffset()));
+}
+
+void CheckIsDetached(v8::Local<v8::TypedArray> ta) {
+ CHECK_EQ(0, static_cast<int>(ta->ByteLength()));
+ CHECK_EQ(0, static_cast<int>(ta->Length()));
+ CHECK_EQ(0, static_cast<int>(ta->ByteOffset()));
+}
+
+void CheckIsTypedArrayVarDetached(const char* name) {
+ i::ScopedVector<char> source(1024);
+ i::SNPrintF(source,
+ "%s.byteLength == 0 && %s.byteOffset == 0 && %s.length == 0",
+ name, name, name);
+ CHECK(CompileRun(source.begin())->IsTrue());
+ v8::Local<v8::TypedArray> ta =
+ v8::Local<v8::TypedArray>::Cast(CompileRun(name));
+ CheckIsDetached(ta);
+}
+
+template <typename TypedArray, int kElementSize>
+Local<TypedArray> CreateAndCheck(Local<v8::ArrayBuffer> ab, int byteOffset,
+ int length) {
+ v8::Local<TypedArray> ta = TypedArray::New(ab, byteOffset, length);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
+ CHECK_EQ(byteOffset, static_cast<int>(ta->ByteOffset()));
+ CHECK_EQ(length, static_cast<int>(ta->Length()));
+ CHECK_EQ(length * kElementSize, static_cast<int>(ta->ByteLength()));
+ return ta;
+}
+
+} // namespace
+
+THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);
+ CheckInternalFieldsAreZero(ab);
+ CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
+ CHECK(!ab->IsExternal());
+ CcTest::CollectAllGarbage();
+
+ ScopedArrayBufferContents ab_contents(ab->Externalize());
+ CHECK(ab->IsExternal());
+
+ CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
+ uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
+ CHECK_NOT_NULL(data);
+ CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
+
+ v8::Local<v8::Value> result = CompileRun("ab.byteLength");
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
+
+ result = CompileRun(
+ "var u8 = new Uint8Array(ab);"
+ "u8[0] = 0xFF;"
+ "u8[1] = 0xAA;"
+ "u8.length");
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0xFF, data[0]);
+ CHECK_EQ(0xAA, data[1]);
+ data[0] = 0xCC;
+ data[1] = 0x11;
+ result = CompileRun("u8[0] + u8[1]");
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
+}
+
+THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ v8::Local<v8::Value> result = CompileRun(
+ "var ab1 = new ArrayBuffer(2);"
+ "var u8_a = new Uint8Array(ab1);"
+ "u8_a[0] = 0xAA;"
+ "u8_a[1] = 0xFF; u8_a.buffer");
+ Local<v8::ArrayBuffer> ab1 = Local<v8::ArrayBuffer>::Cast(result);
+ CheckInternalFieldsAreZero(ab1);
+ CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
+ CHECK(!ab1->IsExternal());
+ ScopedArrayBufferContents ab1_contents(ab1->Externalize());
+ CHECK(ab1->IsExternal());
+
+ result = CompileRun("ab1.byteLength");
+ CHECK_EQ(2, result->Int32Value(env.local()).FromJust());
+ result = CompileRun("u8_a[0]");
+ CHECK_EQ(0xAA, result->Int32Value(env.local()).FromJust());
+ result = CompileRun("u8_a[1]");
+ CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
+ result = CompileRun(
+ "var u8_b = new Uint8Array(ab1);"
+ "u8_b[0] = 0xBB;"
+ "u8_a[0]");
+ CHECK_EQ(0xBB, result->Int32Value(env.local()).FromJust());
+ result = CompileRun("u8_b[1]");
+ CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
+
+ CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
+ uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
+ CHECK_EQ(0xBB, ab1_data[0]);
+ CHECK_EQ(0xFF, ab1_data[1]);
+ ab1_data[0] = 0xCC;
+ ab1_data[1] = 0x11;
+ result = CompileRun("u8_a[0] + u8_a[1]");
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
+}
+
+THREADED_TEST(ArrayBuffer_External) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ i::ScopedVector<uint8_t> my_data(100);
+ memset(my_data.begin(), 0, 100);
+ Local<v8::ArrayBuffer> ab3 =
+ v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
+ CheckInternalFieldsAreZero(ab3);
+ CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
+ CHECK(ab3->IsExternal());
+
+ CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
+
+ v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
+ CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
+
+ result = CompileRun(
+ "var u8_b = new Uint8Array(ab3);"
+ "u8_b[0] = 0xBB;"
+ "u8_b[1] = 0xCC;"
+ "u8_b.length");
+ CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0xBB, my_data[0]);
+ CHECK_EQ(0xCC, my_data[1]);
+ my_data[0] = 0xCC;
+ my_data[1] = 0x11;
+ result = CompileRun("u8_b[0] + u8_b[1]");
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
+}
+
+THREADED_TEST(ArrayBuffer_DisableDetach) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ i::ScopedVector<uint8_t> my_data(100);
+ memset(my_data.begin(), 0, 100);
+ Local<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
+ CHECK(ab->IsDetachable());
+
+ i::Handle<i::JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
+ buf->set_is_detachable(false);
+
+ CHECK(!ab->IsDetachable());
+}
+
+THREADED_TEST(ArrayBuffer_DetachingApi) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
+
+ v8::Local<v8::Uint8Array> u8a =
+ CreateAndCheck<v8::Uint8Array, 1>(buffer, 1, 1023);
+ v8::Local<v8::Uint8ClampedArray> u8c =
+ CreateAndCheck<v8::Uint8ClampedArray, 1>(buffer, 1, 1023);
+ v8::Local<v8::Int8Array> i8a =
+ CreateAndCheck<v8::Int8Array, 1>(buffer, 1, 1023);
+
+ v8::Local<v8::Uint16Array> u16a =
+ CreateAndCheck<v8::Uint16Array, 2>(buffer, 2, 511);
+ v8::Local<v8::Int16Array> i16a =
+ CreateAndCheck<v8::Int16Array, 2>(buffer, 2, 511);
+
+ v8::Local<v8::Uint32Array> u32a =
+ CreateAndCheck<v8::Uint32Array, 4>(buffer, 4, 255);
+ v8::Local<v8::Int32Array> i32a =
+ CreateAndCheck<v8::Int32Array, 4>(buffer, 4, 255);
+
+ v8::Local<v8::Float32Array> f32a =
+ CreateAndCheck<v8::Float32Array, 4>(buffer, 4, 255);
+ v8::Local<v8::Float64Array> f64a =
+ CreateAndCheck<v8::Float64Array, 8>(buffer, 8, 127);
+
+ v8::Local<v8::DataView> dv = v8::DataView::New(buffer, 1, 1023);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
+ CHECK_EQ(1, static_cast<int>(dv->ByteOffset()));
+ CHECK_EQ(1023, static_cast<int>(dv->ByteLength()));
+
+ ScopedArrayBufferContents contents(buffer->Externalize());
+ buffer->Detach();
+ CHECK_EQ(0, static_cast<int>(buffer->ByteLength()));
+ CheckIsDetached(u8a);
+ CheckIsDetached(u8c);
+ CheckIsDetached(i8a);
+ CheckIsDetached(u16a);
+ CheckIsDetached(i16a);
+ CheckIsDetached(u32a);
+ CheckIsDetached(i32a);
+ CheckIsDetached(f32a);
+ CheckIsDetached(f64a);
+ CheckDataViewIsDetached(dv);
+}
+
+THREADED_TEST(ArrayBuffer_DetachingScript) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ CompileRun(
+ "var ab = new ArrayBuffer(1024);"
+ "var u8a = new Uint8Array(ab, 1, 1023);"
+ "var u8c = new Uint8ClampedArray(ab, 1, 1023);"
+ "var i8a = new Int8Array(ab, 1, 1023);"
+ "var u16a = new Uint16Array(ab, 2, 511);"
+ "var i16a = new Int16Array(ab, 2, 511);"
+ "var u32a = new Uint32Array(ab, 4, 255);"
+ "var i32a = new Int32Array(ab, 4, 255);"
+ "var f32a = new Float32Array(ab, 4, 255);"
+ "var f64a = new Float64Array(ab, 8, 127);"
+ "var dv = new DataView(ab, 1, 1023);");
+
+ v8::Local<v8::ArrayBuffer> ab =
+ Local<v8::ArrayBuffer>::Cast(CompileRun("ab"));
+
+ v8::Local<v8::DataView> dv = v8::Local<v8::DataView>::Cast(CompileRun("dv"));
+
+ ScopedArrayBufferContents contents(ab->Externalize());
+ ab->Detach();
+ CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
+ CHECK_EQ(0, v8_run_int32value(v8_compile("ab.byteLength")));
+
+ CheckIsTypedArrayVarDetached("u8a");
+ CheckIsTypedArrayVarDetached("u8c");
+ CheckIsTypedArrayVarDetached("i8a");
+ CheckIsTypedArrayVarDetached("u16a");
+ CheckIsTypedArrayVarDetached("i16a");
+ CheckIsTypedArrayVarDetached("u32a");
+ CheckIsTypedArrayVarDetached("i32a");
+ CheckIsTypedArrayVarDetached("f32a");
+ CheckIsTypedArrayVarDetached("f64a");
+
+ CHECK(CompileRun("dv.byteLength == 0 && dv.byteOffset == 0")->IsTrue());
+ CheckDataViewIsDetached(dv);
+}
+
+THREADED_TEST(ArrayBuffer_AllocationInformation) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ const size_t ab_size = 1024;
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, ab_size);
+ ScopedArrayBufferContents contents(ab->Externalize());
+
+ // Array buffers should have normal allocation mode.
+ CHECK_EQ(contents.AllocationMode(),
+ v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
+ // The allocation must contain the buffer (normally they will be equal, but
+ // this is not required by the contract).
+ CHECK_NOT_NULL(contents.AllocationBase());
+ const uintptr_t alloc =
+ reinterpret_cast<uintptr_t>(contents.AllocationBase());
+ const uintptr_t data = reinterpret_cast<uintptr_t>(contents.Data());
+ CHECK_LE(alloc, data);
+ CHECK_LE(data + contents.ByteLength(), alloc + contents.AllocationLength());
+}
+
+THREADED_TEST(ArrayBuffer_ExternalizeEmpty) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 0);
+ CheckInternalFieldsAreZero(ab);
+ CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
+ CHECK(!ab->IsExternal());
+
+ // Externalize the buffer (taking ownership of the backing store memory).
+ ScopedArrayBufferContents ab_contents(ab->Externalize());
+
+ Local<v8::Uint8Array> u8a = v8::Uint8Array::New(ab, 0, 0);
+ // Calling Buffer() will materialize the ArrayBuffer (transitioning it from
+ // on-heap to off-heap if need be). This should not affect whether it is
+ // marked as is_external or not.
+ USE(u8a->Buffer());
+
+ CHECK(ab->IsExternal());
+}
+
+THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::SharedArrayBuffer> ab = v8::SharedArrayBuffer::New(isolate, 1024);
+ CheckInternalFieldsAreZero(ab);
+ CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
+ CHECK(!ab->IsExternal());
+ CcTest::CollectAllGarbage();
+
+ ScopedSharedArrayBufferContents ab_contents(ab->Externalize());
+ CHECK(ab->IsExternal());
+
+ CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
+ uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
+ CHECK_NOT_NULL(data);
+ CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
+
+ v8::Local<v8::Value> result = CompileRun("ab.byteLength");
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
+
+ result = CompileRun(
+ "var u8 = new Uint8Array(ab);"
+ "u8[0] = 0xFF;"
+ "u8[1] = 0xAA;"
+ "u8.length");
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0xFF, data[0]);
+ CHECK_EQ(0xAA, data[1]);
+ data[0] = 0xCC;
+ data[1] = 0x11;
+ result = CompileRun("u8[0] + u8[1]");
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
+}
+
+THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ v8::Local<v8::Value> result = CompileRun(
+ "var ab1 = new SharedArrayBuffer(2);"
+ "var u8_a = new Uint8Array(ab1);"
+ "u8_a[0] = 0xAA;"
+ "u8_a[1] = 0xFF; u8_a.buffer");
+ Local<v8::SharedArrayBuffer> ab1 = Local<v8::SharedArrayBuffer>::Cast(result);
+ CheckInternalFieldsAreZero(ab1);
+ CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
+ CHECK(!ab1->IsExternal());
+ ScopedSharedArrayBufferContents ab1_contents(ab1->Externalize());
+ CHECK(ab1->IsExternal());
+
+ result = CompileRun("ab1.byteLength");
+ CHECK_EQ(2, result->Int32Value(env.local()).FromJust());
+ result = CompileRun("u8_a[0]");
+ CHECK_EQ(0xAA, result->Int32Value(env.local()).FromJust());
+ result = CompileRun("u8_a[1]");
+ CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
+ result = CompileRun(
+ "var u8_b = new Uint8Array(ab1);"
+ "u8_b[0] = 0xBB;"
+ "u8_a[0]");
+ CHECK_EQ(0xBB, result->Int32Value(env.local()).FromJust());
+ result = CompileRun("u8_b[1]");
+ CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
+
+ CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
+ uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
+ CHECK_EQ(0xBB, ab1_data[0]);
+ CHECK_EQ(0xFF, ab1_data[1]);
+ ab1_data[0] = 0xCC;
+ ab1_data[1] = 0x11;
+ result = CompileRun("u8_a[0] + u8_a[1]");
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
+}
+
+THREADED_TEST(SharedArrayBuffer_External) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ i::ScopedVector<uint8_t> my_data(100);
+ memset(my_data.begin(), 0, 100);
+ Local<v8::SharedArrayBuffer> ab3 =
+ v8::SharedArrayBuffer::New(isolate, my_data.begin(), 100);
+ CheckInternalFieldsAreZero(ab3);
+ CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
+ CHECK(ab3->IsExternal());
+
+ CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
+
+ v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
+ CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
+
+ result = CompileRun(
+ "var u8_b = new Uint8Array(ab3);"
+ "u8_b[0] = 0xBB;"
+ "u8_b[1] = 0xCC;"
+ "u8_b.length");
+ CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0xBB, my_data[0]);
+ CHECK_EQ(0xCC, my_data[1]);
+ my_data[0] = 0xCC;
+ my_data[1] = 0x11;
+ result = CompileRun("u8_b[0] + u8_b[1]");
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
+}
+
+THREADED_TEST(SharedArrayBuffer_AllocationInformation) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ const size_t ab_size = 1024;
+ Local<v8::SharedArrayBuffer> ab =
+ v8::SharedArrayBuffer::New(isolate, ab_size);
+ ScopedSharedArrayBufferContents contents(ab->Externalize());
+
+ // Array buffers should have normal allocation mode.
+ CHECK_EQ(contents.AllocationMode(),
+ v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
+ // The allocation must contain the buffer (normally they will be equal, but
+ // this is not required by the contract).
+ CHECK_NOT_NULL(contents.AllocationBase());
+ const uintptr_t alloc =
+ reinterpret_cast<uintptr_t>(contents.AllocationBase());
+ const uintptr_t data = reinterpret_cast<uintptr_t>(contents.Data());
+ CHECK_LE(alloc, data);
+ CHECK_LE(data + contents.ByteLength(), alloc + contents.AllocationLength());
+}
+
+THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ // Make sure the pointer looks like a heap object
+ uint8_t* store_ptr = reinterpret_cast<uint8_t*>(i::kHeapObjectTag);
+
+ // Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
+
+ // Should not crash
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+
+ // Should not move the pointer
+ CHECK_EQ(ab->GetContents().Data(), store_ptr);
+}
+
+THREADED_TEST(SkipArrayBufferDuringScavenge) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ // Make sure the pointer looks like a heap object
+ Local<v8::Object> tmp = v8::Object::New(isolate);
+ uint8_t* store_ptr =
+ reinterpret_cast<uint8_t*>(*reinterpret_cast<uintptr_t*>(*tmp));
+
+ // Make `store_ptr` point to from space
+ CcTest::CollectGarbage(i::NEW_SPACE);
+
+ // Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
+
+ // Should not crash,
+ // i.e. backing store pointer should not be treated as a heap object pointer
+ CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
+ CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
+
+ // Use `ab` to silence compiler warning
+ CHECK_EQ(ab->GetContents().Data(), store_ptr);
+}
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
index c1c8a28b05..bceba18c4a 100644
--- a/deps/v8/test/cctest/test-api-stack-traces.cc
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -190,7 +190,9 @@ static void checkStackFrame(const char* expected_script_name,
} else {
CHECK_NOT_NULL(strstr(*script_name, expected_script_name));
}
- CHECK_NOT_NULL(strstr(*func_name, expected_func_name));
+ if (!frame->GetFunctionName().IsEmpty()) {
+ CHECK_NOT_NULL(strstr(*func_name, expected_func_name));
+ }
CHECK_EQ(expected_line_number, frame->GetLineNumber());
CHECK_EQ(expected_column, frame->GetColumn());
CHECK_EQ(is_eval, frame->IsEval());
diff --git a/deps/v8/test/cctest/test-api-typed-array.cc b/deps/v8/test/cctest/test-api-typed-array.cc
new file mode 100644
index 0000000000..117089d566
--- /dev/null
+++ b/deps/v8/test/cctest/test-api-typed-array.cc
@@ -0,0 +1,661 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/test-api.h"
+
+#include "src/api/api-inl.h"
+
+using ::v8::Array;
+using ::v8::Context;
+using ::v8::Local;
+using ::v8::Value;
+
+namespace {
+
+void CheckElementValue(i::Isolate* isolate, int expected,
+ i::Handle<i::Object> obj, int offset) {
+ i::Object element =
+ *i::Object::GetElement(isolate, obj, offset).ToHandleChecked();
+ CHECK_EQ(expected, i::Smi::ToInt(element));
+}
+
+template <class ElementType>
+void ObjectWithExternalArrayTestHelper(Local<Context> context,
+ v8::Local<v8::TypedArray> obj,
+ int element_count,
+ i::ExternalArrayType array_type,
+ int64_t low, int64_t high) {
+ i::Handle<i::JSTypedArray> jsobj = v8::Utils::OpenHandle(*obj);
+ v8::Isolate* v8_isolate = context->GetIsolate();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ obj->Set(context, v8_str("field"), v8::Int32::New(v8_isolate, 1503))
+ .FromJust();
+ CHECK(context->Global()->Set(context, v8_str("ext_array"), obj).FromJust());
+ v8::Local<v8::Value> result = CompileRun("ext_array.field");
+ CHECK_EQ(1503, result->Int32Value(context).FromJust());
+ result = CompileRun("ext_array[1]");
+ CHECK_EQ(1, result->Int32Value(context).FromJust());
+
+ // Check assigned smis
+ result = CompileRun(
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[i] = i;"
+ "}"
+ "var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += ext_array[i];"
+ "}"
+ "sum;");
+
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
+ // Check pass through of assigned smis
+ result = CompileRun(
+ "var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += ext_array[i] = ext_array[i] = -i;"
+ "}"
+ "sum;");
+ CHECK_EQ(-28, result->Int32Value(context).FromJust());
+
+ // Check assigned smis in reverse order
+ result = CompileRun(
+ "for (var i = 8; --i >= 0; ) {"
+ " ext_array[i] = i;"
+ "}"
+ "var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " sum += ext_array[i];"
+ "}"
+ "sum;");
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
+
+ // Check pass through of assigned HeapNumbers
+ result = CompileRun(
+ "var sum = 0;"
+ "for (var i = 0; i < 16; i+=2) {"
+ " sum += ext_array[i] = ext_array[i] = (-i * 0.5);"
+ "}"
+ "sum;");
+ CHECK_EQ(-28, result->Int32Value(context).FromJust());
+
+ // Check assigned HeapNumbers
+ result = CompileRun(
+ "for (var i = 0; i < 16; i+=2) {"
+ " ext_array[i] = (i * 0.5);"
+ "}"
+ "var sum = 0;"
+ "for (var i = 0; i < 16; i+=2) {"
+ " sum += ext_array[i];"
+ "}"
+ "sum;");
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
+
+ // Check assigned HeapNumbers in reverse order
+ result = CompileRun(
+ "for (var i = 14; i >= 0; i-=2) {"
+ " ext_array[i] = (i * 0.5);"
+ "}"
+ "var sum = 0;"
+ "for (var i = 0; i < 16; i+=2) {"
+ " sum += ext_array[i];"
+ "}"
+ "sum;");
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
+
+ i::ScopedVector<char> test_buf(1024);
+
+ // Check legal boundary conditions.
+ // The repeated loads and stores ensure the ICs are exercised.
+ const char* boundary_program =
+ "var res = 0;"
+ "for (var i = 0; i < 16; i++) {"
+ " ext_array[i] = %lld;"
+ " if (i > 8) {"
+ " res = ext_array[i];"
+ " }"
+ "}"
+ "res;";
+ i::SNPrintF(test_buf, boundary_program, low);
+ result = CompileRun(test_buf.begin());
+ CHECK_EQ(low, result->IntegerValue(context).FromJust());
+
+ i::SNPrintF(test_buf, boundary_program, high);
+ result = CompileRun(test_buf.begin());
+ CHECK_EQ(high, result->IntegerValue(context).FromJust());
+
+ // Check misprediction of type in IC.
+ result = CompileRun(
+ "var tmp_array = ext_array;"
+ "var sum = 0;"
+ "for (var i = 0; i < 8; i++) {"
+ " tmp_array[i] = i;"
+ " sum += tmp_array[i];"
+ " if (i == 4) {"
+ " tmp_array = {};"
+ " }"
+ "}"
+ "sum;");
+ // Force GC to trigger verification.
+ CcTest::CollectAllGarbage();
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
+
+ // Make sure out-of-range loads do not throw.
+ i::SNPrintF(test_buf,
+ "var caught_exception = false;"
+ "try {"
+ " ext_array[%d];"
+ "} catch (e) {"
+ " caught_exception = true;"
+ "}"
+ "caught_exception;",
+ element_count);
+ result = CompileRun(test_buf.begin());
+ CHECK(!result->BooleanValue(v8_isolate));
+
+ // Make sure out-of-range stores do not throw.
+ i::SNPrintF(test_buf,
+ "var caught_exception = false;"
+ "try {"
+ " ext_array[%d] = 1;"
+ "} catch (e) {"
+ " caught_exception = true;"
+ "}"
+ "caught_exception;",
+ element_count);
+ result = CompileRun(test_buf.begin());
+ CHECK(!result->BooleanValue(v8_isolate));
+
+ // Check other boundary conditions, values and operations.
+ result = CompileRun(
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[7] = undefined;"
+ "}"
+ "ext_array[7];");
+ CHECK_EQ(0, result->Int32Value(context).FromJust());
+ if (array_type == i::kExternalFloat64Array ||
+ array_type == i::kExternalFloat32Array) {
+ CHECK(std::isnan(
+ i::Object::GetElement(isolate, jsobj, 7).ToHandleChecked()->Number()));
+ } else {
+ CheckElementValue(isolate, 0, jsobj, 7);
+ }
+
+ result = CompileRun(
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[6] = '2.3';"
+ "}"
+ "ext_array[6];");
+ CHECK_EQ(2, result->Int32Value(context).FromJust());
+ CHECK_EQ(2, static_cast<int>(i::Object::GetElement(isolate, jsobj, 6)
+ .ToHandleChecked()
+ ->Number()));
+
+ if (array_type != i::kExternalFloat32Array &&
+ array_type != i::kExternalFloat64Array) {
+ // Though the specification doesn't state it, be explicit about
+ // converting NaNs and +/-Infinity to zero.
+ result = CompileRun(
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[i] = 5;"
+ "}"
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[i] = NaN;"
+ "}"
+ "ext_array[5];");
+ CHECK_EQ(0, result->Int32Value(context).FromJust());
+ CheckElementValue(isolate, 0, jsobj, 5);
+
+ result = CompileRun(
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[i] = 5;"
+ "}"
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[i] = Infinity;"
+ "}"
+ "ext_array[5];");
+ int expected_value =
+ (array_type == i::kExternalUint8ClampedArray) ? 255 : 0;
+ CHECK_EQ(expected_value, result->Int32Value(context).FromJust());
+ CheckElementValue(isolate, expected_value, jsobj, 5);
+
+ result = CompileRun(
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[i] = 5;"
+ "}"
+ "for (var i = 0; i < 8; i++) {"
+ " ext_array[i] = -Infinity;"
+ "}"
+ "ext_array[5];");
+ CHECK_EQ(0, result->Int32Value(context).FromJust());
+ CheckElementValue(isolate, 0, jsobj, 5);
+
+ // Check truncation behavior of integral arrays.
+ const char* unsigned_data =
+ "var source_data = [0.6, 10.6];"
+ "var expected_results = [0, 10];";
+ const char* signed_data =
+ "var source_data = [0.6, 10.6, -0.6, -10.6];"
+ "var expected_results = [0, 10, 0, -10];";
+ const char* pixel_data =
+ "var source_data = [0.6, 10.6];"
+ "var expected_results = [1, 11];";
+ bool is_unsigned = (array_type == i::kExternalUint8Array ||
+ array_type == i::kExternalUint16Array ||
+ array_type == i::kExternalUint32Array);
+ bool is_pixel_data = array_type == i::kExternalUint8ClampedArray;
+
+ i::SNPrintF(test_buf,
+ "%s"
+ "var all_passed = true;"
+ "for (var i = 0; i < source_data.length; i++) {"
+ " for (var j = 0; j < 8; j++) {"
+ " ext_array[j] = source_data[i];"
+ " }"
+ " all_passed = all_passed &&"
+ " (ext_array[5] == expected_results[i]);"
+ "}"
+ "all_passed;",
+ (is_unsigned ? unsigned_data
+ : (is_pixel_data ? pixel_data : signed_data)));
+ result = CompileRun(test_buf.begin());
+ CHECK(result->BooleanValue(v8_isolate));
+ }
+
+ {
+ ElementType* data_ptr = static_cast<ElementType*>(jsobj->DataPtr());
+ for (int i = 0; i < element_count; i++) {
+ data_ptr[i] = static_cast<ElementType>(i);
+ }
+ }
+
+ bool old_natives_flag_sentry = i::FLAG_allow_natives_syntax;
+ i::FLAG_allow_natives_syntax = true;
+
+ // Test complex assignments
+ result = CompileRun(
+ "function ee_op_test_complex_func(sum) {"
+ " for (var i = 0; i < 40; ++i) {"
+ " sum += (ext_array[i] += 1);"
+ " sum += (ext_array[i] -= 1);"
+ " } "
+ " return sum;"
+ "};"
+ "%PrepareFunctionForOptimization(ee_op_test_complex_func);"
+ "sum=0;"
+ "sum=ee_op_test_complex_func(sum);"
+ "sum=ee_op_test_complex_func(sum);"
+ "%OptimizeFunctionOnNextCall(ee_op_test_complex_func);"
+ "sum=ee_op_test_complex_func(sum);"
+ "sum;");
+ CHECK_EQ(4800, result->Int32Value(context).FromJust());
+
+ // Test count operations
+ result = CompileRun(
+ "function ee_op_test_count_func(sum) {"
+ " for (var i = 0; i < 40; ++i) {"
+ " sum += (++ext_array[i]);"
+ " sum += (--ext_array[i]);"
+ " } "
+ " return sum;"
+ "};"
+ "%PrepareFunctionForOptimization(ee_op_test_count_func);"
+ "sum=0;"
+ "sum=ee_op_test_count_func(sum);"
+ "sum=ee_op_test_count_func(sum);"
+ "%OptimizeFunctionOnNextCall(ee_op_test_count_func);"
+ "sum=ee_op_test_count_func(sum);"
+ "sum;");
+ CHECK_EQ(4800, result->Int32Value(context).FromJust());
+
+ i::FLAG_allow_natives_syntax = old_natives_flag_sentry;
+
+ result = CompileRun(
+ "ext_array[3] = 33;"
+ "delete ext_array[3];"
+ "ext_array[3];");
+ CHECK_EQ(33, result->Int32Value(context).FromJust());
+
+ result = CompileRun(
+ "ext_array[0] = 10; ext_array[1] = 11;"
+ "ext_array[2] = 12; ext_array[3] = 13;"
+ "try { ext_array.__defineGetter__('2', function() { return 120; }); }"
+ "catch (e) { }"
+ "ext_array[2];");
+ CHECK_EQ(12, result->Int32Value(context).FromJust());
+
+ result = CompileRun(
+ "var js_array = new Array(40);"
+ "js_array[0] = 77;"
+ "js_array;");
+ CHECK_EQ(77, v8::Object::Cast(*result)
+ ->Get(context, v8_str("0"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
+
+ result = CompileRun(
+ "ext_array[1] = 23;"
+ "ext_array.__proto__ = [];"
+ "js_array.__proto__ = ext_array;"
+ "js_array.concat(ext_array);");
+ CHECK_EQ(77, v8::Object::Cast(*result)
+ ->Get(context, v8_str("0"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(23, v8::Object::Cast(*result)
+ ->Get(context, v8_str("1"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
+
+ result = CompileRun("ext_array[1] = 23;");
+ CHECK_EQ(23, result->Int32Value(context).FromJust());
+}
+
+template <typename ElementType, typename TypedArray, class ArrayBufferType>
+void TypedArrayTestHelper(i::ExternalArrayType array_type, int64_t low,
+ int64_t high) {
+ const int kElementCount = 50;
+
+ i::ScopedVector<ElementType> backing_store(kElementCount + 2);
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<ArrayBufferType> ab =
+ ArrayBufferType::New(isolate, backing_store.begin(),
+ (kElementCount + 2) * sizeof(ElementType));
+ Local<TypedArray> ta =
+ TypedArray::New(ab, 2 * sizeof(ElementType), kElementCount);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
+ CHECK_EQ(kElementCount, static_cast<int>(ta->Length()));
+ CHECK_EQ(2 * sizeof(ElementType), ta->ByteOffset());
+ CHECK_EQ(kElementCount * sizeof(ElementType), ta->ByteLength());
+ CHECK(ab->Equals(env.local(), ta->Buffer()).FromJust());
+
+ ElementType* data = backing_store.begin() + 2;
+ for (int i = 0; i < kElementCount; i++) {
+ data[i] = static_cast<ElementType>(i);
+ }
+
+ ObjectWithExternalArrayTestHelper<ElementType>(env.local(), ta, kElementCount,
+ array_type, low, high);
+}
+
+} // namespace
+
+THREADED_TEST(Uint8Array) {
+ TypedArrayTestHelper<uint8_t, v8::Uint8Array, v8::ArrayBuffer>(
+ i::kExternalUint8Array, 0, 0xFF);
+}
+
+THREADED_TEST(Int8Array) {
+ TypedArrayTestHelper<int8_t, v8::Int8Array, v8::ArrayBuffer>(
+ i::kExternalInt8Array, -0x80, 0x7F);
+}
+
+THREADED_TEST(Uint16Array) {
+ TypedArrayTestHelper<uint16_t, v8::Uint16Array, v8::ArrayBuffer>(
+ i::kExternalUint16Array, 0, 0xFFFF);
+}
+
+THREADED_TEST(Int16Array) {
+ TypedArrayTestHelper<int16_t, v8::Int16Array, v8::ArrayBuffer>(
+ i::kExternalInt16Array, -0x8000, 0x7FFF);
+}
+
+THREADED_TEST(Uint32Array) {
+ TypedArrayTestHelper<uint32_t, v8::Uint32Array, v8::ArrayBuffer>(
+ i::kExternalUint32Array, 0, UINT_MAX);
+}
+
+THREADED_TEST(Int32Array) {
+ TypedArrayTestHelper<int32_t, v8::Int32Array, v8::ArrayBuffer>(
+ i::kExternalInt32Array, INT_MIN, INT_MAX);
+}
+
+THREADED_TEST(Float32Array) {
+ TypedArrayTestHelper<float, v8::Float32Array, v8::ArrayBuffer>(
+ i::kExternalFloat32Array, -500, 500);
+}
+
+THREADED_TEST(Float64Array) {
+ TypedArrayTestHelper<double, v8::Float64Array, v8::ArrayBuffer>(
+ i::kExternalFloat64Array, -500, 500);
+}
+
+THREADED_TEST(Uint8ClampedArray) {
+ TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray, v8::ArrayBuffer>(
+ i::kExternalUint8ClampedArray, 0, 0xFF);
+}
+
+THREADED_TEST(DataView) {
+ const int kSize = 50;
+
+ i::ScopedVector<uint8_t> backing_store(kSize + 2);
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::ArrayBuffer> ab =
+ v8::ArrayBuffer::New(isolate, backing_store.begin(), 2 + kSize);
+ Local<v8::DataView> dv = v8::DataView::New(ab, 2, kSize);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
+ CHECK_EQ(2u, dv->ByteOffset());
+ CHECK_EQ(kSize, static_cast<int>(dv->ByteLength()));
+ CHECK(ab->Equals(env.local(), dv->Buffer()).FromJust());
+}
+
+THREADED_TEST(SharedUint8Array) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<uint8_t, v8::Uint8Array, v8::SharedArrayBuffer>(
+ i::kExternalUint8Array, 0, 0xFF);
+}
+
+THREADED_TEST(SharedInt8Array) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<int8_t, v8::Int8Array, v8::SharedArrayBuffer>(
+ i::kExternalInt8Array, -0x80, 0x7F);
+}
+
+THREADED_TEST(SharedUint16Array) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<uint16_t, v8::Uint16Array, v8::SharedArrayBuffer>(
+ i::kExternalUint16Array, 0, 0xFFFF);
+}
+
+THREADED_TEST(SharedInt16Array) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<int16_t, v8::Int16Array, v8::SharedArrayBuffer>(
+ i::kExternalInt16Array, -0x8000, 0x7FFF);
+}
+
+THREADED_TEST(SharedUint32Array) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<uint32_t, v8::Uint32Array, v8::SharedArrayBuffer>(
+ i::kExternalUint32Array, 0, UINT_MAX);
+}
+
+THREADED_TEST(SharedInt32Array) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<int32_t, v8::Int32Array, v8::SharedArrayBuffer>(
+ i::kExternalInt32Array, INT_MIN, INT_MAX);
+}
+
+THREADED_TEST(SharedFloat32Array) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<float, v8::Float32Array, v8::SharedArrayBuffer>(
+ i::kExternalFloat32Array, -500, 500);
+}
+
+THREADED_TEST(SharedFloat64Array) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<double, v8::Float64Array, v8::SharedArrayBuffer>(
+ i::kExternalFloat64Array, -500, 500);
+}
+
+THREADED_TEST(SharedUint8ClampedArray) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray, v8::SharedArrayBuffer>(
+ i::kExternalUint8ClampedArray, 0, 0xFF);
+}
+
+THREADED_TEST(SharedDataView) {
+ i::FLAG_harmony_sharedarraybuffer = true;
+ const int kSize = 50;
+
+ i::ScopedVector<uint8_t> backing_store(kSize + 2);
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::SharedArrayBuffer> ab =
+ v8::SharedArrayBuffer::New(isolate, backing_store.begin(), 2 + kSize);
+ Local<v8::DataView> dv = v8::DataView::New(ab, 2, kSize);
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
+ CHECK_EQ(2u, dv->ByteOffset());
+ CHECK_EQ(kSize, static_cast<int>(dv->ByteLength()));
+ CHECK(ab->Equals(env.local(), dv->Buffer()).FromJust());
+}
+
+#define IS_ARRAY_BUFFER_VIEW_TEST(View) \
+ THREADED_TEST(Is##View) { \
+ LocalContext env; \
+ v8::Isolate* isolate = env->GetIsolate(); \
+ v8::HandleScope handle_scope(isolate); \
+ \
+ Local<Value> result = CompileRun( \
+ "var ab = new ArrayBuffer(128);" \
+ "new " #View "(ab)"); \
+ CHECK(result->IsArrayBufferView()); \
+ CHECK(result->Is##View()); \
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(result.As<v8::View>()); \
+ }
+
+IS_ARRAY_BUFFER_VIEW_TEST(Uint8Array)
+IS_ARRAY_BUFFER_VIEW_TEST(Int8Array)
+IS_ARRAY_BUFFER_VIEW_TEST(Uint16Array)
+IS_ARRAY_BUFFER_VIEW_TEST(Int16Array)
+IS_ARRAY_BUFFER_VIEW_TEST(Uint32Array)
+IS_ARRAY_BUFFER_VIEW_TEST(Int32Array)
+IS_ARRAY_BUFFER_VIEW_TEST(Float32Array)
+IS_ARRAY_BUFFER_VIEW_TEST(Float64Array)
+IS_ARRAY_BUFFER_VIEW_TEST(Uint8ClampedArray)
+IS_ARRAY_BUFFER_VIEW_TEST(DataView)
+
+#undef IS_ARRAY_BUFFER_VIEW_TEST
+
+TEST(InternalFieldsOnTypedArray) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.local();
+ Context::Scope context_scope(context);
+ v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1);
+ v8::Local<v8::Uint8Array> array = v8::Uint8Array::New(buffer, 0, 1);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ CHECK_EQ(static_cast<void*>(nullptr),
+ array->GetAlignedPointerFromInternalField(i));
+ }
+}
+
+TEST(InternalFieldsOnDataView) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.local();
+ Context::Scope context_scope(context);
+ v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1);
+ v8::Local<v8::DataView> array = v8::DataView::New(buffer, 0, 1);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ CHECK_EQ(static_cast<void*>(nullptr),
+ array->GetAlignedPointerFromInternalField(i));
+ }
+}
+
+namespace {
+void TestOnHeapHasBuffer(const char* array_name, size_t elem_size) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ i::ScopedVector<char> source(128);
+ // Test on-heap sizes.
+ for (size_t size = 0; size <= i::JSTypedArray::kMaxSizeInHeap;
+ size += elem_size) {
+ size_t length = size / elem_size;
+ i::SNPrintF(source, "new %sArray(%zu)", array_name, length);
+ auto typed_array =
+ v8::Local<v8::TypedArray>::Cast(CompileRun(source.begin()));
+
+ CHECK_EQ(length, typed_array->Length());
+
+ // Should not (yet) have a buffer.
+ CHECK(!typed_array->HasBuffer());
+
+ // Get the buffer and check its length.
+ i::Handle<i::JSTypedArray> i_typed_array =
+ v8::Utils::OpenHandle(*typed_array);
+ auto i_array_buffer1 = i_typed_array->GetBuffer();
+ CHECK_EQ(size, i_array_buffer1->byte_length());
+ CHECK(typed_array->HasBuffer());
+
+ // Should have the same buffer each time.
+ auto i_array_buffer2 = i_typed_array->GetBuffer();
+ CHECK(i_array_buffer1.is_identical_to(i_array_buffer2));
+ }
+}
+
+void TestOffHeapHasBuffer(const char* array_name, size_t elem_size) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ i::ScopedVector<char> source(128);
+ // Test off-heap sizes.
+ size_t size = i::JSTypedArray::kMaxSizeInHeap;
+ for (int i = 0; i < 3; i++) {
+ size_t length = 1 + (size / elem_size);
+ i::SNPrintF(source, "new %sArray(%zu)", array_name, length);
+ auto typed_array =
+ v8::Local<v8::TypedArray>::Cast(CompileRun(source.begin()));
+ CHECK_EQ(length, typed_array->Length());
+
+ // Should already have a buffer.
+ CHECK(typed_array->HasBuffer());
+
+ // Get the buffer and check its length.
+ i::Handle<i::JSTypedArray> i_typed_array =
+ v8::Utils::OpenHandle(*typed_array);
+ auto i_array_buffer1 = i_typed_array->GetBuffer();
+ CHECK_EQ(length * elem_size, i_array_buffer1->byte_length());
+
+ size *= 2;
+ }
+}
+
+} // namespace
+
+#define TEST_HAS_BUFFER(array_name, elem_size) \
+ TEST(OnHeap_##array_name##Array_HasBuffer) { \
+ TestOnHeapHasBuffer(#array_name, elem_size); \
+ } \
+ TEST(OffHeap_##array_name##_HasBuffer) { \
+ TestOffHeapHasBuffer(#array_name, elem_size); \
+ }
+
+TEST_HAS_BUFFER(Uint8, 1)
+TEST_HAS_BUFFER(Int8, 1)
+TEST_HAS_BUFFER(Uint16, 2)
+TEST_HAS_BUFFER(Int16, 2)
+TEST_HAS_BUFFER(Uint32, 4)
+TEST_HAS_BUFFER(Int32, 4)
+TEST_HAS_BUFFER(Float32, 4)
+TEST_HAS_BUFFER(Float64, 8)
+
+#undef TEST_HAS_BUFFER
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 63c980cf61..73bea08d08 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -58,7 +58,9 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/lookup.h"
+#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/string-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/strings/unicode-inl.h"
#include "src/utils/utils.h"
@@ -3649,469 +3651,6 @@ THREADED_TEST(GlobalPrivates) {
CHECK(!obj->Has(env.local(), intern).FromJust());
}
-
-class ScopedArrayBufferContents {
- public:
- explicit ScopedArrayBufferContents(const v8::ArrayBuffer::Contents& contents)
- : contents_(contents) {}
- ~ScopedArrayBufferContents() { free(contents_.AllocationBase()); }
- void* Data() const { return contents_.Data(); }
- size_t ByteLength() const { return contents_.ByteLength(); }
-
- void* AllocationBase() const { return contents_.AllocationBase(); }
- size_t AllocationLength() const { return contents_.AllocationLength(); }
- v8::ArrayBuffer::Allocator::AllocationMode AllocationMode() const {
- return contents_.AllocationMode();
- }
-
- private:
- const v8::ArrayBuffer::Contents contents_;
-};
-
-template <typename T>
-static void CheckInternalFieldsAreZero(v8::Local<T> value) {
- CHECK_EQ(T::kInternalFieldCount, value->InternalFieldCount());
- for (int i = 0; i < value->InternalFieldCount(); i++) {
- CHECK_EQ(0, value->GetInternalField(i)
- ->Int32Value(CcTest::isolate()->GetCurrentContext())
- .FromJust());
- }
-}
-
-
-THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 1024);
- CheckInternalFieldsAreZero(ab);
- CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
- CHECK(!ab->IsExternal());
- CcTest::CollectAllGarbage();
-
- ScopedArrayBufferContents ab_contents(ab->Externalize());
- CHECK(ab->IsExternal());
-
- CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
- uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
- CHECK_NOT_NULL(data);
- CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("ab.byteLength");
- CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
-
- result = CompileRun(
- "var u8 = new Uint8Array(ab);"
- "u8[0] = 0xFF;"
- "u8[1] = 0xAA;"
- "u8.length");
- CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(0xFF, data[0]);
- CHECK_EQ(0xAA, data[1]);
- data[0] = 0xCC;
- data[1] = 0x11;
- result = CompileRun("u8[0] + u8[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
-
-THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
-
- v8::Local<v8::Value> result = CompileRun(
- "var ab1 = new ArrayBuffer(2);"
- "var u8_a = new Uint8Array(ab1);"
- "u8_a[0] = 0xAA;"
- "u8_a[1] = 0xFF; u8_a.buffer");
- Local<v8::ArrayBuffer> ab1 = Local<v8::ArrayBuffer>::Cast(result);
- CheckInternalFieldsAreZero(ab1);
- CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
- CHECK(!ab1->IsExternal());
- ScopedArrayBufferContents ab1_contents(ab1->Externalize());
- CHECK(ab1->IsExternal());
-
- result = CompileRun("ab1.byteLength");
- CHECK_EQ(2, result->Int32Value(env.local()).FromJust());
- result = CompileRun("u8_a[0]");
- CHECK_EQ(0xAA, result->Int32Value(env.local()).FromJust());
- result = CompileRun("u8_a[1]");
- CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
- result = CompileRun(
- "var u8_b = new Uint8Array(ab1);"
- "u8_b[0] = 0xBB;"
- "u8_a[0]");
- CHECK_EQ(0xBB, result->Int32Value(env.local()).FromJust());
- result = CompileRun("u8_b[1]");
- CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
-
- CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
- uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
- CHECK_EQ(0xBB, ab1_data[0]);
- CHECK_EQ(0xFF, ab1_data[1]);
- ab1_data[0] = 0xCC;
- ab1_data[1] = 0x11;
- result = CompileRun("u8_a[0] + u8_a[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
-
-THREADED_TEST(ArrayBuffer_External) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.begin(), 0, 100);
- Local<v8::ArrayBuffer> ab3 =
- v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
- CheckInternalFieldsAreZero(ab3);
- CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
- CHECK(ab3->IsExternal());
-
- CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
-
- result = CompileRun(
- "var u8_b = new Uint8Array(ab3);"
- "u8_b[0] = 0xBB;"
- "u8_b[1] = 0xCC;"
- "u8_b.length");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(0xBB, my_data[0]);
- CHECK_EQ(0xCC, my_data[1]);
- my_data[0] = 0xCC;
- my_data[1] = 0x11;
- result = CompileRun("u8_b[0] + u8_b[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
-THREADED_TEST(ArrayBuffer_DisableDetach) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.begin(), 0, 100);
- Local<v8::ArrayBuffer> ab =
- v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
- CHECK(ab->IsDetachable());
-
- i::Handle<i::JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
- buf->set_is_detachable(false);
-
- CHECK(!ab->IsDetachable());
-}
-
-static void CheckDataViewIsDetached(v8::Local<v8::DataView> dv) {
- CHECK_EQ(0, static_cast<int>(dv->ByteLength()));
- CHECK_EQ(0, static_cast<int>(dv->ByteOffset()));
-}
-
-static void CheckIsDetached(v8::Local<v8::TypedArray> ta) {
- CHECK_EQ(0, static_cast<int>(ta->ByteLength()));
- CHECK_EQ(0, static_cast<int>(ta->Length()));
- CHECK_EQ(0, static_cast<int>(ta->ByteOffset()));
-}
-
-static void CheckIsTypedArrayVarDetached(const char* name) {
- i::ScopedVector<char> source(1024);
- i::SNPrintF(source,
- "%s.byteLength == 0 && %s.byteOffset == 0 && %s.length == 0",
- name, name, name);
- CHECK(CompileRun(source.begin())->IsTrue());
- v8::Local<v8::TypedArray> ta =
- v8::Local<v8::TypedArray>::Cast(CompileRun(name));
- CheckIsDetached(ta);
-}
-
-template <typename TypedArray, int kElementSize>
-static Local<TypedArray> CreateAndCheck(Local<v8::ArrayBuffer> ab,
- int byteOffset, int length) {
- v8::Local<TypedArray> ta = TypedArray::New(ab, byteOffset, length);
- CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
- CHECK_EQ(byteOffset, static_cast<int>(ta->ByteOffset()));
- CHECK_EQ(length, static_cast<int>(ta->Length()));
- CHECK_EQ(length * kElementSize, static_cast<int>(ta->ByteLength()));
- return ta;
-}
-
-THREADED_TEST(ArrayBuffer_DetachingApi) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
-
- v8::Local<v8::Uint8Array> u8a =
- CreateAndCheck<v8::Uint8Array, 1>(buffer, 1, 1023);
- v8::Local<v8::Uint8ClampedArray> u8c =
- CreateAndCheck<v8::Uint8ClampedArray, 1>(buffer, 1, 1023);
- v8::Local<v8::Int8Array> i8a =
- CreateAndCheck<v8::Int8Array, 1>(buffer, 1, 1023);
-
- v8::Local<v8::Uint16Array> u16a =
- CreateAndCheck<v8::Uint16Array, 2>(buffer, 2, 511);
- v8::Local<v8::Int16Array> i16a =
- CreateAndCheck<v8::Int16Array, 2>(buffer, 2, 511);
-
- v8::Local<v8::Uint32Array> u32a =
- CreateAndCheck<v8::Uint32Array, 4>(buffer, 4, 255);
- v8::Local<v8::Int32Array> i32a =
- CreateAndCheck<v8::Int32Array, 4>(buffer, 4, 255);
-
- v8::Local<v8::Float32Array> f32a =
- CreateAndCheck<v8::Float32Array, 4>(buffer, 4, 255);
- v8::Local<v8::Float64Array> f64a =
- CreateAndCheck<v8::Float64Array, 8>(buffer, 8, 127);
-
- v8::Local<v8::DataView> dv = v8::DataView::New(buffer, 1, 1023);
- CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
- CHECK_EQ(1, static_cast<int>(dv->ByteOffset()));
- CHECK_EQ(1023, static_cast<int>(dv->ByteLength()));
-
- ScopedArrayBufferContents contents(buffer->Externalize());
- buffer->Detach();
- CHECK_EQ(0, static_cast<int>(buffer->ByteLength()));
- CheckIsDetached(u8a);
- CheckIsDetached(u8c);
- CheckIsDetached(i8a);
- CheckIsDetached(u16a);
- CheckIsDetached(i16a);
- CheckIsDetached(u32a);
- CheckIsDetached(i32a);
- CheckIsDetached(f32a);
- CheckIsDetached(f64a);
- CheckDataViewIsDetached(dv);
-}
-
-THREADED_TEST(ArrayBuffer_DetachingScript) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- CompileRun(
- "var ab = new ArrayBuffer(1024);"
- "var u8a = new Uint8Array(ab, 1, 1023);"
- "var u8c = new Uint8ClampedArray(ab, 1, 1023);"
- "var i8a = new Int8Array(ab, 1, 1023);"
- "var u16a = new Uint16Array(ab, 2, 511);"
- "var i16a = new Int16Array(ab, 2, 511);"
- "var u32a = new Uint32Array(ab, 4, 255);"
- "var i32a = new Int32Array(ab, 4, 255);"
- "var f32a = new Float32Array(ab, 4, 255);"
- "var f64a = new Float64Array(ab, 8, 127);"
- "var dv = new DataView(ab, 1, 1023);");
-
- v8::Local<v8::ArrayBuffer> ab =
- Local<v8::ArrayBuffer>::Cast(CompileRun("ab"));
-
- v8::Local<v8::DataView> dv = v8::Local<v8::DataView>::Cast(CompileRun("dv"));
-
- ScopedArrayBufferContents contents(ab->Externalize());
- ab->Detach();
- CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
- CHECK_EQ(0, v8_run_int32value(v8_compile("ab.byteLength")));
-
- CheckIsTypedArrayVarDetached("u8a");
- CheckIsTypedArrayVarDetached("u8c");
- CheckIsTypedArrayVarDetached("i8a");
- CheckIsTypedArrayVarDetached("u16a");
- CheckIsTypedArrayVarDetached("i16a");
- CheckIsTypedArrayVarDetached("u32a");
- CheckIsTypedArrayVarDetached("i32a");
- CheckIsTypedArrayVarDetached("f32a");
- CheckIsTypedArrayVarDetached("f64a");
-
- CHECK(CompileRun("dv.byteLength == 0 && dv.byteOffset == 0")->IsTrue());
- CheckDataViewIsDetached(dv);
-}
-
-THREADED_TEST(ArrayBuffer_AllocationInformation) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- const size_t ab_size = 1024;
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, ab_size);
- ScopedArrayBufferContents contents(ab->Externalize());
-
- // Array buffers should have normal allocation mode.
- CHECK_EQ(contents.AllocationMode(),
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
- // The allocation must contain the buffer (normally they will be equal, but
- // this is not required by the contract).
- CHECK_NOT_NULL(contents.AllocationBase());
- const uintptr_t alloc =
- reinterpret_cast<uintptr_t>(contents.AllocationBase());
- const uintptr_t data = reinterpret_cast<uintptr_t>(contents.Data());
- CHECK_LE(alloc, data);
- CHECK_LE(data + contents.ByteLength(), alloc + contents.AllocationLength());
-}
-
-THREADED_TEST(ArrayBuffer_ExternalizeEmpty) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 0);
- CheckInternalFieldsAreZero(ab);
- CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
- CHECK(!ab->IsExternal());
-
- // Externalize the buffer (taking ownership of the backing store memory).
- ScopedArrayBufferContents ab_contents(ab->Externalize());
-
- Local<v8::Uint8Array> u8a = v8::Uint8Array::New(ab, 0, 0);
- // Calling Buffer() will materialize the ArrayBuffer (transitioning it from
- // on-heap to off-heap if need be). This should not affect whether it is
- // marked as is_external or not.
- USE(u8a->Buffer());
-
- CHECK(ab->IsExternal());
-}
-
-class ScopedSharedArrayBufferContents {
- public:
- explicit ScopedSharedArrayBufferContents(
- const v8::SharedArrayBuffer::Contents& contents)
- : contents_(contents) {}
- ~ScopedSharedArrayBufferContents() { free(contents_.AllocationBase()); }
- void* Data() const { return contents_.Data(); }
- size_t ByteLength() const { return contents_.ByteLength(); }
-
- void* AllocationBase() const { return contents_.AllocationBase(); }
- size_t AllocationLength() const { return contents_.AllocationLength(); }
- v8::ArrayBuffer::Allocator::AllocationMode AllocationMode() const {
- return contents_.AllocationMode();
- }
-
- private:
- const v8::SharedArrayBuffer::Contents contents_;
-};
-
-
-THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
- i::FLAG_harmony_sharedarraybuffer = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- Local<v8::SharedArrayBuffer> ab = v8::SharedArrayBuffer::New(isolate, 1024);
- CheckInternalFieldsAreZero(ab);
- CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
- CHECK(!ab->IsExternal());
- CcTest::CollectAllGarbage();
-
- ScopedSharedArrayBufferContents ab_contents(ab->Externalize());
- CHECK(ab->IsExternal());
-
- CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
- uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
- CHECK_NOT_NULL(data);
- CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("ab.byteLength");
- CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
-
- result = CompileRun(
- "var u8 = new Uint8Array(ab);"
- "u8[0] = 0xFF;"
- "u8[1] = 0xAA;"
- "u8.length");
- CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(0xFF, data[0]);
- CHECK_EQ(0xAA, data[1]);
- data[0] = 0xCC;
- data[1] = 0x11;
- result = CompileRun("u8[0] + u8[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
-
-THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
- i::FLAG_harmony_sharedarraybuffer = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
-
- v8::Local<v8::Value> result = CompileRun(
- "var ab1 = new SharedArrayBuffer(2);"
- "var u8_a = new Uint8Array(ab1);"
- "u8_a[0] = 0xAA;"
- "u8_a[1] = 0xFF; u8_a.buffer");
- Local<v8::SharedArrayBuffer> ab1 = Local<v8::SharedArrayBuffer>::Cast(result);
- CheckInternalFieldsAreZero(ab1);
- CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
- CHECK(!ab1->IsExternal());
- ScopedSharedArrayBufferContents ab1_contents(ab1->Externalize());
- CHECK(ab1->IsExternal());
-
- result = CompileRun("ab1.byteLength");
- CHECK_EQ(2, result->Int32Value(env.local()).FromJust());
- result = CompileRun("u8_a[0]");
- CHECK_EQ(0xAA, result->Int32Value(env.local()).FromJust());
- result = CompileRun("u8_a[1]");
- CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
- result = CompileRun(
- "var u8_b = new Uint8Array(ab1);"
- "u8_b[0] = 0xBB;"
- "u8_a[0]");
- CHECK_EQ(0xBB, result->Int32Value(env.local()).FromJust());
- result = CompileRun("u8_b[1]");
- CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
-
- CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
- uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
- CHECK_EQ(0xBB, ab1_data[0]);
- CHECK_EQ(0xFF, ab1_data[1]);
- ab1_data[0] = 0xCC;
- ab1_data[1] = 0x11;
- result = CompileRun("u8_a[0] + u8_a[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
-
-THREADED_TEST(SharedArrayBuffer_External) {
- i::FLAG_harmony_sharedarraybuffer = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.begin(), 0, 100);
- Local<v8::SharedArrayBuffer> ab3 =
- v8::SharedArrayBuffer::New(isolate, my_data.begin(), 100);
- CheckInternalFieldsAreZero(ab3);
- CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
- CHECK(ab3->IsExternal());
-
- CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
-
- v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
-
- result = CompileRun(
- "var u8_b = new Uint8Array(ab3);"
- "u8_b[0] = 0xBB;"
- "u8_b[1] = 0xCC;"
- "u8_b.length");
- CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
- CHECK_EQ(0xBB, my_data[0]);
- CHECK_EQ(0xCC, my_data[1]);
- my_data[0] = 0xCC;
- my_data[1] = 0x11;
- result = CompileRun("u8_b[0] + u8_b[1]");
- CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
-}
-
-
THREADED_TEST(HiddenProperties) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -9308,17 +8847,16 @@ TEST(ApiUncaughtException) {
static const char* script_resource_name = "ExceptionInNativeScript.js";
static void ExceptionInNativeScriptTestListener(v8::Local<v8::Message> message,
v8::Local<Value>) {
+ v8::Isolate* isolate = message->GetIsolate();
v8::Local<v8::Value> name_val = message->GetScriptOrigin().ResourceName();
CHECK(!name_val.IsEmpty() && name_val->IsString());
- v8::String::Utf8Value name(v8::Isolate::GetCurrent(),
+ v8::String::Utf8Value name(isolate,
message->GetScriptOrigin().ResourceName());
CHECK_EQ(0, strcmp(script_resource_name, *name));
- v8::Local<v8::Context> context =
- v8::Isolate::GetCurrent()->GetCurrentContext();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
CHECK_EQ(3, message->GetLineNumber(context).FromJust());
v8::String::Utf8Value source_line(
- v8::Isolate::GetCurrent(),
- message->GetSourceLine(context).ToLocalChecked());
+ isolate, message->GetSourceLine(context).ToLocalChecked());
CHECK_EQ(0, strcmp(" new o.foo();", *source_line));
}
@@ -10715,6 +10253,63 @@ static void GlobalObjectInstancePropertiesGet(
ApiTestFuzzer::Fuzz();
}
+static int script_execution_count = 0;
+static void ScriptExecutionCallback(v8::Isolate* isolate,
+ Local<Context> context) {
+ script_execution_count++;
+}
+
+THREADED_TEST(ContextScriptExecutionCallback) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext context;
+
+ {
+ v8::TryCatch try_catch(isolate);
+ script_execution_count = 0;
+ ExpectTrue("1 + 1 == 2");
+ CHECK_EQ(0, script_execution_count);
+ CHECK(!try_catch.HasCaught());
+ }
+
+ context->SetAbortScriptExecution(ScriptExecutionCallback);
+
+ { // Function binding does not trigger callback.
+ v8::Local<v8::FunctionTemplate> function_template =
+ v8::FunctionTemplate::New(isolate, DummyCallHandler);
+ v8::Local<v8::Function> function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
+
+ v8::TryCatch try_catch(isolate);
+ script_execution_count = 0;
+
+ CHECK_EQ(13.4,
+ function->Call(context.local(), v8::Undefined(isolate), 0, nullptr)
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(0, script_execution_count);
+ CHECK(!try_catch.HasCaught());
+ }
+
+ { // Script execution triggers callback.
+ v8::TryCatch try_catch(isolate);
+ script_execution_count = 0;
+ CHECK(CompileRun(context.local(), "2 + 2 == 4").IsEmpty());
+ CHECK_EQ(1, script_execution_count);
+ CHECK(try_catch.HasCaught());
+ }
+
+ context->SetAbortScriptExecution(nullptr);
+
+ { // Script execution no longer triggers callback.
+ v8::TryCatch try_catch(isolate);
+ script_execution_count = 0;
+ ExpectTrue("2 + 2 == 4");
+ CHECK_EQ(0, script_execution_count);
+ CHECK(!try_catch.HasCaught());
+ }
+}
THREADED_TEST(GlobalObjectInstanceProperties) {
v8::Isolate* isolate = CcTest::isolate();
@@ -13115,9 +12710,9 @@ TEST(ObjectProtoToStringES6) {
Local<v8::Symbol> valueSymbol = v8_symbol("TestSymbol");
Local<v8::Function> valueFunction =
CompileRun("(function fn() {})").As<v8::Function>();
- Local<v8::Object> valueObject = v8::Object::New(v8::Isolate::GetCurrent());
- Local<v8::Primitive> valueNull = v8::Null(v8::Isolate::GetCurrent());
- Local<v8::Primitive> valueUndef = v8::Undefined(v8::Isolate::GetCurrent());
+ Local<v8::Object> valueObject = v8::Object::New(isolate);
+ Local<v8::Primitive> valueNull = v8::Null(isolate);
+ Local<v8::Primitive> valueUndef = v8::Undefined(isolate);
#define TEST_TOSTRINGTAG(type, tagValue, expected) \
do { \
@@ -13610,9 +13205,9 @@ THREADED_TEST(LockUnlockLock) {
static int GetGlobalObjectsCount() {
int count = 0;
- i::HeapIterator it(CcTest::heap());
- for (i::HeapObject object = it.next(); !object.is_null();
- object = it.next()) {
+ i::HeapObjectIterator it(CcTest::heap());
+ for (i::HeapObject object = it.Next(); !object.is_null();
+ object = it.Next()) {
if (object.IsJSGlobalObject()) {
i::JSGlobalObject g = i::JSGlobalObject::cast(object);
// Skip dummy global object.
@@ -15647,606 +15242,6 @@ THREADED_TEST(ReplaceConstantFunction) {
CHECK(!obj->Get(context.local(), foo_string).ToLocalChecked()->IsUndefined());
}
-
-static void CheckElementValue(i::Isolate* isolate,
- int expected,
- i::Handle<i::Object> obj,
- int offset) {
- i::Object element =
- *i::Object::GetElement(isolate, obj, offset).ToHandleChecked();
- CHECK_EQ(expected, i::Smi::ToInt(element));
-}
-
-template <class ElementType>
-static void ObjectWithExternalArrayTestHelper(Local<Context> context,
- v8::Local<v8::TypedArray> obj,
- int element_count,
- i::ExternalArrayType array_type,
- int64_t low, int64_t high) {
- i::Handle<i::JSTypedArray> jsobj = v8::Utils::OpenHandle(*obj);
- v8::Isolate* v8_isolate = context->GetIsolate();
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- obj->Set(context, v8_str("field"), v8::Int32::New(v8_isolate, 1503))
- .FromJust();
- CHECK(context->Global()->Set(context, v8_str("ext_array"), obj).FromJust());
- v8::Local<v8::Value> result = CompileRun("ext_array.field");
- CHECK_EQ(1503, result->Int32Value(context).FromJust());
- result = CompileRun("ext_array[1]");
- CHECK_EQ(1, result->Int32Value(context).FromJust());
-
- // Check assigned smis
- result = CompileRun("for (var i = 0; i < 8; i++) {"
- " ext_array[i] = i;"
- "}"
- "var sum = 0;"
- "for (var i = 0; i < 8; i++) {"
- " sum += ext_array[i];"
- "}"
- "sum;");
-
- CHECK_EQ(28, result->Int32Value(context).FromJust());
- // Check pass through of assigned smis
- result = CompileRun("var sum = 0;"
- "for (var i = 0; i < 8; i++) {"
- " sum += ext_array[i] = ext_array[i] = -i;"
- "}"
- "sum;");
- CHECK_EQ(-28, result->Int32Value(context).FromJust());
-
-
- // Check assigned smis in reverse order
- result = CompileRun("for (var i = 8; --i >= 0; ) {"
- " ext_array[i] = i;"
- "}"
- "var sum = 0;"
- "for (var i = 0; i < 8; i++) {"
- " sum += ext_array[i];"
- "}"
- "sum;");
- CHECK_EQ(28, result->Int32Value(context).FromJust());
-
- // Check pass through of assigned HeapNumbers
- result = CompileRun("var sum = 0;"
- "for (var i = 0; i < 16; i+=2) {"
- " sum += ext_array[i] = ext_array[i] = (-i * 0.5);"
- "}"
- "sum;");
- CHECK_EQ(-28, result->Int32Value(context).FromJust());
-
- // Check assigned HeapNumbers
- result = CompileRun("for (var i = 0; i < 16; i+=2) {"
- " ext_array[i] = (i * 0.5);"
- "}"
- "var sum = 0;"
- "for (var i = 0; i < 16; i+=2) {"
- " sum += ext_array[i];"
- "}"
- "sum;");
- CHECK_EQ(28, result->Int32Value(context).FromJust());
-
- // Check assigned HeapNumbers in reverse order
- result = CompileRun("for (var i = 14; i >= 0; i-=2) {"
- " ext_array[i] = (i * 0.5);"
- "}"
- "var sum = 0;"
- "for (var i = 0; i < 16; i+=2) {"
- " sum += ext_array[i];"
- "}"
- "sum;");
- CHECK_EQ(28, result->Int32Value(context).FromJust());
-
- i::ScopedVector<char> test_buf(1024);
-
- // Check legal boundary conditions.
- // The repeated loads and stores ensure the ICs are exercised.
- const char* boundary_program =
- "var res = 0;"
- "for (var i = 0; i < 16; i++) {"
- " ext_array[i] = %lld;"
- " if (i > 8) {"
- " res = ext_array[i];"
- " }"
- "}"
- "res;";
- i::SNPrintF(test_buf,
- boundary_program,
- low);
- result = CompileRun(test_buf.begin());
- CHECK_EQ(low, result->IntegerValue(context).FromJust());
-
- i::SNPrintF(test_buf,
- boundary_program,
- high);
- result = CompileRun(test_buf.begin());
- CHECK_EQ(high, result->IntegerValue(context).FromJust());
-
- // Check misprediction of type in IC.
- result = CompileRun("var tmp_array = ext_array;"
- "var sum = 0;"
- "for (var i = 0; i < 8; i++) {"
- " tmp_array[i] = i;"
- " sum += tmp_array[i];"
- " if (i == 4) {"
- " tmp_array = {};"
- " }"
- "}"
- "sum;");
- // Force GC to trigger verification.
- CcTest::CollectAllGarbage();
- CHECK_EQ(28, result->Int32Value(context).FromJust());
-
- // Make sure out-of-range loads do not throw.
- i::SNPrintF(test_buf,
- "var caught_exception = false;"
- "try {"
- " ext_array[%d];"
- "} catch (e) {"
- " caught_exception = true;"
- "}"
- "caught_exception;",
- element_count);
- result = CompileRun(test_buf.begin());
- CHECK(!result->BooleanValue(v8_isolate));
-
- // Make sure out-of-range stores do not throw.
- i::SNPrintF(test_buf,
- "var caught_exception = false;"
- "try {"
- " ext_array[%d] = 1;"
- "} catch (e) {"
- " caught_exception = true;"
- "}"
- "caught_exception;",
- element_count);
- result = CompileRun(test_buf.begin());
- CHECK(!result->BooleanValue(v8_isolate));
-
- // Check other boundary conditions, values and operations.
- result = CompileRun("for (var i = 0; i < 8; i++) {"
- " ext_array[7] = undefined;"
- "}"
- "ext_array[7];");
- CHECK_EQ(0, result->Int32Value(context).FromJust());
- if (array_type == i::kExternalFloat64Array ||
- array_type == i::kExternalFloat32Array) {
- CHECK(std::isnan(
- i::Object::GetElement(isolate, jsobj, 7).ToHandleChecked()->Number()));
- } else {
- CheckElementValue(isolate, 0, jsobj, 7);
- }
-
- result = CompileRun("for (var i = 0; i < 8; i++) {"
- " ext_array[6] = '2.3';"
- "}"
- "ext_array[6];");
- CHECK_EQ(2, result->Int32Value(context).FromJust());
- CHECK_EQ(2,
- static_cast<int>(
- i::Object::GetElement(
- isolate, jsobj, 6).ToHandleChecked()->Number()));
-
- if (array_type != i::kExternalFloat32Array &&
- array_type != i::kExternalFloat64Array) {
- // Though the specification doesn't state it, be explicit about
- // converting NaNs and +/-Infinity to zero.
- result = CompileRun("for (var i = 0; i < 8; i++) {"
- " ext_array[i] = 5;"
- "}"
- "for (var i = 0; i < 8; i++) {"
- " ext_array[i] = NaN;"
- "}"
- "ext_array[5];");
- CHECK_EQ(0, result->Int32Value(context).FromJust());
- CheckElementValue(isolate, 0, jsobj, 5);
-
- result = CompileRun("for (var i = 0; i < 8; i++) {"
- " ext_array[i] = 5;"
- "}"
- "for (var i = 0; i < 8; i++) {"
- " ext_array[i] = Infinity;"
- "}"
- "ext_array[5];");
- int expected_value =
- (array_type == i::kExternalUint8ClampedArray) ? 255 : 0;
- CHECK_EQ(expected_value, result->Int32Value(context).FromJust());
- CheckElementValue(isolate, expected_value, jsobj, 5);
-
- result = CompileRun("for (var i = 0; i < 8; i++) {"
- " ext_array[i] = 5;"
- "}"
- "for (var i = 0; i < 8; i++) {"
- " ext_array[i] = -Infinity;"
- "}"
- "ext_array[5];");
- CHECK_EQ(0, result->Int32Value(context).FromJust());
- CheckElementValue(isolate, 0, jsobj, 5);
-
- // Check truncation behavior of integral arrays.
- const char* unsigned_data =
- "var source_data = [0.6, 10.6];"
- "var expected_results = [0, 10];";
- const char* signed_data =
- "var source_data = [0.6, 10.6, -0.6, -10.6];"
- "var expected_results = [0, 10, 0, -10];";
- const char* pixel_data =
- "var source_data = [0.6, 10.6];"
- "var expected_results = [1, 11];";
- bool is_unsigned = (array_type == i::kExternalUint8Array ||
- array_type == i::kExternalUint16Array ||
- array_type == i::kExternalUint32Array);
- bool is_pixel_data = array_type == i::kExternalUint8ClampedArray;
-
- i::SNPrintF(test_buf,
- "%s"
- "var all_passed = true;"
- "for (var i = 0; i < source_data.length; i++) {"
- " for (var j = 0; j < 8; j++) {"
- " ext_array[j] = source_data[i];"
- " }"
- " all_passed = all_passed &&"
- " (ext_array[5] == expected_results[i]);"
- "}"
- "all_passed;",
- (is_unsigned ?
- unsigned_data :
- (is_pixel_data ? pixel_data : signed_data)));
- result = CompileRun(test_buf.begin());
- CHECK(result->BooleanValue(v8_isolate));
- }
-
- {
- ElementType* data_ptr = static_cast<ElementType*>(jsobj->DataPtr());
- for (int i = 0; i < element_count; i++) {
- data_ptr[i] = static_cast<ElementType>(i);
- }
- }
-
- bool old_natives_flag_sentry = i::FLAG_allow_natives_syntax;
- i::FLAG_allow_natives_syntax = true;
-
- // Test complex assignments
- result = CompileRun(
- "function ee_op_test_complex_func(sum) {"
- " for (var i = 0; i < 40; ++i) {"
- " sum += (ext_array[i] += 1);"
- " sum += (ext_array[i] -= 1);"
- " } "
- " return sum;"
- "};"
- "%PrepareFunctionForOptimization(ee_op_test_complex_func);"
- "sum=0;"
- "sum=ee_op_test_complex_func(sum);"
- "sum=ee_op_test_complex_func(sum);"
- "%OptimizeFunctionOnNextCall(ee_op_test_complex_func);"
- "sum=ee_op_test_complex_func(sum);"
- "sum;");
- CHECK_EQ(4800, result->Int32Value(context).FromJust());
-
- // Test count operations
- result = CompileRun(
- "function ee_op_test_count_func(sum) {"
- " for (var i = 0; i < 40; ++i) {"
- " sum += (++ext_array[i]);"
- " sum += (--ext_array[i]);"
- " } "
- " return sum;"
- "};"
- "%PrepareFunctionForOptimization(ee_op_test_count_func);"
- "sum=0;"
- "sum=ee_op_test_count_func(sum);"
- "sum=ee_op_test_count_func(sum);"
- "%OptimizeFunctionOnNextCall(ee_op_test_count_func);"
- "sum=ee_op_test_count_func(sum);"
- "sum;");
- CHECK_EQ(4800, result->Int32Value(context).FromJust());
-
- i::FLAG_allow_natives_syntax = old_natives_flag_sentry;
-
- result = CompileRun("ext_array[3] = 33;"
- "delete ext_array[3];"
- "ext_array[3];");
- CHECK_EQ(33, result->Int32Value(context).FromJust());
-
- result = CompileRun(
- "ext_array[0] = 10; ext_array[1] = 11;"
- "ext_array[2] = 12; ext_array[3] = 13;"
- "try { ext_array.__defineGetter__('2', function() { return 120; }); }"
- "catch (e) { }"
- "ext_array[2];");
- CHECK_EQ(12, result->Int32Value(context).FromJust());
-
- result = CompileRun("var js_array = new Array(40);"
- "js_array[0] = 77;"
- "js_array;");
- CHECK_EQ(77, v8::Object::Cast(*result)
- ->Get(context, v8_str("0"))
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
-
- result = CompileRun("ext_array[1] = 23;"
- "ext_array.__proto__ = [];"
- "js_array.__proto__ = ext_array;"
- "js_array.concat(ext_array);");
- CHECK_EQ(77, v8::Object::Cast(*result)
- ->Get(context, v8_str("0"))
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
- CHECK_EQ(23, v8::Object::Cast(*result)
- ->Get(context, v8_str("1"))
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust());
-
- result = CompileRun("ext_array[1] = 23;");
- CHECK_EQ(23, result->Int32Value(context).FromJust());
-}
-
-template <typename ElementType, typename TypedArray, class ArrayBufferType>
-void TypedArrayTestHelper(i::ExternalArrayType array_type, int64_t low,
- int64_t high) {
- const int kElementCount = 50;
-
- i::ScopedVector<ElementType> backing_store(kElementCount+2);
-
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- Local<ArrayBufferType> ab =
- ArrayBufferType::New(isolate, backing_store.begin(),
- (kElementCount + 2) * sizeof(ElementType));
- Local<TypedArray> ta =
- TypedArray::New(ab, 2*sizeof(ElementType), kElementCount);
- CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
- CHECK_EQ(kElementCount, static_cast<int>(ta->Length()));
- CHECK_EQ(2 * sizeof(ElementType), ta->ByteOffset());
- CHECK_EQ(kElementCount * sizeof(ElementType), ta->ByteLength());
- CHECK(ab->Equals(env.local(), ta->Buffer()).FromJust());
-
- ElementType* data = backing_store.begin() + 2;
- for (int i = 0; i < kElementCount; i++) {
- data[i] = static_cast<ElementType>(i);
- }
-
- ObjectWithExternalArrayTestHelper<ElementType>(env.local(), ta, kElementCount,
- array_type, low, high);
-}
-
-THREADED_TEST(Uint8Array) {
- TypedArrayTestHelper<uint8_t, v8::Uint8Array, v8::ArrayBuffer>(
- i::kExternalUint8Array, 0, 0xFF);
-}
-
-
-THREADED_TEST(Int8Array) {
- TypedArrayTestHelper<int8_t, v8::Int8Array, v8::ArrayBuffer>(
- i::kExternalInt8Array, -0x80, 0x7F);
-}
-
-
-THREADED_TEST(Uint16Array) {
- TypedArrayTestHelper<uint16_t, v8::Uint16Array, v8::ArrayBuffer>(
- i::kExternalUint16Array, 0, 0xFFFF);
-}
-
-
-THREADED_TEST(Int16Array) {
- TypedArrayTestHelper<int16_t, v8::Int16Array, v8::ArrayBuffer>(
- i::kExternalInt16Array, -0x8000, 0x7FFF);
-}
-
-
-THREADED_TEST(Uint32Array) {
- TypedArrayTestHelper<uint32_t, v8::Uint32Array, v8::ArrayBuffer>(
- i::kExternalUint32Array, 0, UINT_MAX);
-}
-
-
-THREADED_TEST(Int32Array) {
- TypedArrayTestHelper<int32_t, v8::Int32Array, v8::ArrayBuffer>(
- i::kExternalInt32Array, INT_MIN, INT_MAX);
-}
-
-
-THREADED_TEST(Float32Array) {
- TypedArrayTestHelper<float, v8::Float32Array, v8::ArrayBuffer>(
- i::kExternalFloat32Array, -500, 500);
-}
-
-
-THREADED_TEST(Float64Array) {
- TypedArrayTestHelper<double, v8::Float64Array, v8::ArrayBuffer>(
- i::kExternalFloat64Array, -500, 500);
-}
-
-
-THREADED_TEST(Uint8ClampedArray) {
- TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray, v8::ArrayBuffer>(
- i::kExternalUint8ClampedArray, 0, 0xFF);
-}
-
-
-THREADED_TEST(DataView) {
- const int kSize = 50;
-
- i::ScopedVector<uint8_t> backing_store(kSize+2);
-
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- Local<v8::ArrayBuffer> ab =
- v8::ArrayBuffer::New(isolate, backing_store.begin(), 2 + kSize);
- Local<v8::DataView> dv = v8::DataView::New(ab, 2, kSize);
- CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
- CHECK_EQ(2u, dv->ByteOffset());
- CHECK_EQ(kSize, static_cast<int>(dv->ByteLength()));
- CHECK(ab->Equals(env.local(), dv->Buffer()).FromJust());
-}
-
-
-THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- // Make sure the pointer looks like a heap object
- uint8_t* store_ptr = reinterpret_cast<uint8_t*>(i::kHeapObjectTag);
-
- // Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
-
- // Should not crash
- CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
- CcTest::CollectAllGarbage();
- CcTest::CollectAllGarbage();
-
- // Should not move the pointer
- CHECK_EQ(ab->GetContents().Data(), store_ptr);
-}
-
-
-THREADED_TEST(SkipArrayBufferDuringScavenge) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- // Make sure the pointer looks like a heap object
- Local<v8::Object> tmp = v8::Object::New(isolate);
- uint8_t* store_ptr =
- reinterpret_cast<uint8_t*>(*reinterpret_cast<uintptr_t*>(*tmp));
-
- // Make `store_ptr` point to from space
- CcTest::CollectGarbage(i::NEW_SPACE);
-
- // Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
-
- // Should not crash,
- // i.e. backing store pointer should not be treated as a heap object pointer
- CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
-
- // Use `ab` to silence compiler warning
- CHECK_EQ(ab->GetContents().Data(), store_ptr);
-}
-
-
-THREADED_TEST(SharedUint8Array) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint8_t, v8::Uint8Array, v8::SharedArrayBuffer>(
- i::kExternalUint8Array, 0, 0xFF);
-}
-
-
-THREADED_TEST(SharedInt8Array) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int8_t, v8::Int8Array, v8::SharedArrayBuffer>(
- i::kExternalInt8Array, -0x80, 0x7F);
-}
-
-
-THREADED_TEST(SharedUint16Array) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint16_t, v8::Uint16Array, v8::SharedArrayBuffer>(
- i::kExternalUint16Array, 0, 0xFFFF);
-}
-
-
-THREADED_TEST(SharedInt16Array) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int16_t, v8::Int16Array, v8::SharedArrayBuffer>(
- i::kExternalInt16Array, -0x8000, 0x7FFF);
-}
-
-
-THREADED_TEST(SharedUint32Array) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint32_t, v8::Uint32Array, v8::SharedArrayBuffer>(
- i::kExternalUint32Array, 0, UINT_MAX);
-}
-
-
-THREADED_TEST(SharedInt32Array) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int32_t, v8::Int32Array, v8::SharedArrayBuffer>(
- i::kExternalInt32Array, INT_MIN, INT_MAX);
-}
-
-
-THREADED_TEST(SharedFloat32Array) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<float, v8::Float32Array, v8::SharedArrayBuffer>(
- i::kExternalFloat32Array, -500, 500);
-}
-
-
-THREADED_TEST(SharedFloat64Array) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<double, v8::Float64Array, v8::SharedArrayBuffer>(
- i::kExternalFloat64Array, -500, 500);
-}
-
-
-THREADED_TEST(SharedUint8ClampedArray) {
- i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray, v8::SharedArrayBuffer>(
- i::kExternalUint8ClampedArray, 0, 0xFF);
-}
-
-
-THREADED_TEST(SharedDataView) {
- i::FLAG_harmony_sharedarraybuffer = true;
- const int kSize = 50;
-
- i::ScopedVector<uint8_t> backing_store(kSize + 2);
-
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- Local<v8::SharedArrayBuffer> ab =
- v8::SharedArrayBuffer::New(isolate, backing_store.begin(), 2 + kSize);
- Local<v8::DataView> dv =
- v8::DataView::New(ab, 2, kSize);
- CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
- CHECK_EQ(2u, dv->ByteOffset());
- CHECK_EQ(kSize, static_cast<int>(dv->ByteLength()));
- CHECK(ab->Equals(env.local(), dv->Buffer()).FromJust());
-}
-
-#define IS_ARRAY_BUFFER_VIEW_TEST(View) \
- THREADED_TEST(Is##View) { \
- LocalContext env; \
- v8::Isolate* isolate = env->GetIsolate(); \
- v8::HandleScope handle_scope(isolate); \
- \
- Local<Value> result = CompileRun( \
- "var ab = new ArrayBuffer(128);" \
- "new " #View "(ab)"); \
- CHECK(result->IsArrayBufferView()); \
- CHECK(result->Is##View()); \
- CheckInternalFieldsAreZero<v8::ArrayBufferView>(result.As<v8::View>()); \
- }
-
-IS_ARRAY_BUFFER_VIEW_TEST(Uint8Array)
-IS_ARRAY_BUFFER_VIEW_TEST(Int8Array)
-IS_ARRAY_BUFFER_VIEW_TEST(Uint16Array)
-IS_ARRAY_BUFFER_VIEW_TEST(Int16Array)
-IS_ARRAY_BUFFER_VIEW_TEST(Uint32Array)
-IS_ARRAY_BUFFER_VIEW_TEST(Int32Array)
-IS_ARRAY_BUFFER_VIEW_TEST(Float32Array)
-IS_ARRAY_BUFFER_VIEW_TEST(Float64Array)
-IS_ARRAY_BUFFER_VIEW_TEST(Uint8ClampedArray)
-IS_ARRAY_BUFFER_VIEW_TEST(DataView)
-
-#undef IS_ARRAY_BUFFER_VIEW_TEST
-
-
-
THREADED_TEST(ScriptContextDependence) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
@@ -19313,9 +18308,9 @@ TEST(RunTwoIsolatesOnSingleThread) {
}
{
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(isolate1);
v8::Local<v8::Context> context =
- v8::Local<v8::Context>::New(v8::Isolate::GetCurrent(), context1);
+ v8::Local<v8::Context>::New(isolate1, context1);
v8::Context::Scope context_scope(context);
ExpectString("function f() { return foo; }; f()", "isolate 1");
}
@@ -20208,6 +19203,21 @@ bool CodeGenerationDisallowed(Local<Context> context, Local<String> source) {
return false;
}
+v8::MaybeLocal<String> ModifyCodeGeneration(Local<Context> context,
+ Local<Value> source) {
+ // For testing purposes, deny all odd-length strings and replace '2' with '3'
+ String::Utf8Value utf8(context->GetIsolate(), source);
+ DCHECK(utf8.length());
+ if (utf8.length() == 0 || utf8.length() % 2 != 0)
+ return v8::MaybeLocal<String>();
+
+ for (char* i = *utf8; *i != '\0'; i++) {
+ if (*i == '2') *i = '3';
+ }
+ return String::NewFromUtf8(context->GetIsolate(), *utf8,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+}
THREADED_TEST(AllowCodeGenFromStrings) {
LocalContext context;
@@ -20240,6 +19250,36 @@ THREADED_TEST(AllowCodeGenFromStrings) {
CheckCodeGenerationDisallowed();
}
+TEST(ModifyCodeGenFromStrings) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ context->AllowCodeGenerationFromStrings(false);
+ context->GetIsolate()->SetModifyCodeGenerationFromStringsCallback(
+ &ModifyCodeGeneration);
+
+ // Test 'allowed' case in different modes (direct eval, indirect eval,
+ // Function constructor, Function contructor with arguments).
+ Local<Value> result = CompileRun("eval('42')");
+ CHECK_EQ(43, result->Int32Value(context.local()).FromJust());
+
+ result = CompileRun("(function(e) { return e('42'); })(eval)");
+ CHECK_EQ(43, result->Int32Value(context.local()).FromJust());
+
+ result = CompileRun("var f = new Function('return 42;'); f()");
+ CHECK_EQ(43, result->Int32Value(context.local()).FromJust());
+
+ // Test 'disallowed' cases.
+ TryCatch try_catch(CcTest::isolate());
+ result = CompileRun("eval('123')");
+ CHECK(result.IsEmpty());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+
+ result = CompileRun("new Function('a', 'return 42;')(123)");
+ CHECK(result.IsEmpty());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+}
TEST(SetErrorMessageForCodeGenFromStrings) {
LocalContext context;
@@ -24526,6 +23566,33 @@ v8::MaybeLocal<Module> UnexpectedModuleResolveCallback(Local<Context> context,
CHECK_WITH_MSG(false, "Unexpected call to resolve callback");
}
+v8::MaybeLocal<Value> UnexpectedSyntheticModuleEvaluationStepsCallback(
+ Local<Context> context, Local<Module> module) {
+ CHECK_WITH_MSG(false, "Unexpected call to synthetic module re callback");
+}
+
+static int synthetic_module_callback_count;
+
+v8::MaybeLocal<Value> SyntheticModuleEvaluationStepsCallback(
+ Local<Context> context, Local<Module> module) {
+ synthetic_module_callback_count++;
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(context->GetIsolate()));
+}
+
+v8::MaybeLocal<Value> SyntheticModuleEvaluationStepsCallbackFail(
+ Local<Context> context, Local<Module> module) {
+ synthetic_module_callback_count++;
+ context->GetIsolate()->ThrowException(
+ v8_str("SyntheticModuleEvaluationStepsCallbackFail exception"));
+ return v8::MaybeLocal<Value>();
+}
+
+v8::MaybeLocal<Value> SyntheticModuleEvaluationStepsCallbackSetExport(
+ Local<Context> context, Local<Module> module) {
+ module->SetSyntheticModuleExport(v8_str("test_export"), v8_num(42));
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(context->GetIsolate()));
+}
+
namespace {
Local<Module> CompileAndInstantiateModule(v8::Isolate* isolate,
@@ -24548,6 +23615,18 @@ Local<Module> CompileAndInstantiateModule(v8::Isolate* isolate,
return module;
}
+Local<Module> CreateAndInstantiateSyntheticModule(
+ v8::Isolate* isolate, Local<String> module_name, Local<Context> context,
+ std::vector<v8::Local<v8::String>> export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) {
+ Local<Module> module = v8::Module::CreateSyntheticModule(
+ isolate, module_name, export_names, evaluation_steps);
+ module->InstantiateModule(context, UnexpectedModuleResolveCallback)
+ .ToChecked();
+
+ return module;
+}
+
Local<Module> CompileAndInstantiateModuleFromCache(
v8::Isolate* isolate, Local<Context> context, const char* resource_name,
const char* source, v8::ScriptCompiler::CachedData* cache) {
@@ -24571,6 +23650,28 @@ Local<Module> CompileAndInstantiateModuleFromCache(
} // namespace
+v8::MaybeLocal<Module> SyntheticModuleResolveCallback(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer) {
+ std::vector<v8::Local<v8::String>> export_names{v8_str("test_export")};
+ Local<Module> module = CreateAndInstantiateSyntheticModule(
+ context->GetIsolate(),
+ v8_str("SyntheticModuleResolveCallback-TestSyntheticModule"), context,
+ export_names, SyntheticModuleEvaluationStepsCallbackSetExport);
+ return v8::MaybeLocal<Module>(module);
+}
+
+v8::MaybeLocal<Module> SyntheticModuleThatThrowsDuringEvaluateResolveCallback(
+ Local<Context> context, Local<String> specifier, Local<Module> referrer) {
+ std::vector<v8::Local<v8::String>> export_names{v8_str("test_export")};
+ Local<Module> module = CreateAndInstantiateSyntheticModule(
+ context->GetIsolate(),
+ v8_str("SyntheticModuleThatThrowsDuringEvaluateResolveCallback-"
+ "TestSyntheticModule"),
+ context, export_names, SyntheticModuleEvaluationStepsCallbackFail);
+ return v8::MaybeLocal<Module>(module);
+}
+
TEST(ModuleCodeCache) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -24636,6 +23737,214 @@ TEST(ModuleCodeCache) {
}
}
+TEST(CreateSyntheticModule) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope cscope(context);
+
+ std::vector<v8::Local<v8::String>> export_names{v8_str("default")};
+
+ Local<Module> module = CreateAndInstantiateSyntheticModule(
+ isolate, v8_str("CreateSyntheticModule-TestSyntheticModule"), context,
+ export_names, UnexpectedSyntheticModuleEvaluationStepsCallback);
+ i::Handle<i::SyntheticModule> i_module =
+ i::Handle<i::SyntheticModule>::cast(v8::Utils::OpenHandle(*module));
+ i::Handle<i::ObjectHashTable> exports(i_module->exports(), i_isolate);
+ i::Handle<i::String> default_name =
+ i_isolate->factory()->NewStringFromAsciiChecked("default");
+
+ CHECK(
+ i::Handle<i::Object>(exports->Lookup(default_name), i_isolate)->IsCell());
+ CHECK(i::Handle<i::Cell>::cast(
+ i::Handle<i::Object>(exports->Lookup(default_name), i_isolate))
+ ->value()
+ .IsUndefined());
+ CHECK_EQ(i_module->export_names().length(), 1);
+ CHECK(i::String::cast(i_module->export_names().get(0)).Equals(*default_name));
+ CHECK_EQ(i_module->status(), i::Module::kInstantiated);
+}
+
+TEST(SyntheticModuleSetExports) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope cscope(context);
+
+ Local<String> foo_string = v8_str("foo");
+ Local<String> bar_string = v8_str("bar");
+ std::vector<v8::Local<v8::String>> export_names{foo_string};
+
+ Local<Module> module = CreateAndInstantiateSyntheticModule(
+ isolate, v8_str("SyntheticModuleSetExports-TestSyntheticModule"), context,
+ export_names, UnexpectedSyntheticModuleEvaluationStepsCallback);
+
+ i::Handle<i::SyntheticModule> i_module =
+ i::Handle<i::SyntheticModule>::cast(v8::Utils::OpenHandle(*module));
+ i::Handle<i::ObjectHashTable> exports(i_module->exports(), i_isolate);
+
+ i::Handle<i::Cell> foo_cell = i::Handle<i::Cell>::cast(i::Handle<i::Object>(
+ exports->Lookup(v8::Utils::OpenHandle(*foo_string)), i_isolate));
+
+ // During Instantiation there should be a Cell for the export initialized to
+ // undefined.
+ CHECK(foo_cell->value().IsUndefined());
+
+ module->SetSyntheticModuleExport(foo_string, bar_string);
+
+ // After setting the export the Cell should still have the same idenitity.
+ CHECK_EQ(exports->Lookup(v8::Utils::OpenHandle(*foo_string)), *foo_cell);
+
+ // Test that the export value was actually set.
+ CHECK(i::Handle<i::String>::cast(
+ i::Handle<i::Object>(foo_cell->value(), i_isolate))
+ ->Equals(*v8::Utils::OpenHandle(*bar_string)));
+}
+
+TEST(SyntheticModuleEvaluationStepsNoThrow) {
+ synthetic_module_callback_count = 0;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope cscope(context);
+
+ std::vector<v8::Local<v8::String>> export_names{v8_str("default")};
+
+ Local<Module> module = CreateAndInstantiateSyntheticModule(
+ isolate,
+ v8_str("SyntheticModuleEvaluationStepsNoThrow-TestSyntheticModule"),
+ context, export_names, SyntheticModuleEvaluationStepsCallback);
+ CHECK_EQ(synthetic_module_callback_count, 0);
+ Local<Value> completion_value = module->Evaluate(context).ToLocalChecked();
+ CHECK(completion_value->IsUndefined());
+ CHECK_EQ(synthetic_module_callback_count, 1);
+ CHECK_EQ(module->GetStatus(), Module::kEvaluated);
+}
+
+TEST(SyntheticModuleEvaluationStepsThrow) {
+ synthetic_module_callback_count = 0;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ v8::Context::Scope cscope(context);
+
+ std::vector<v8::Local<v8::String>> export_names{v8_str("default")};
+
+ Local<Module> module = CreateAndInstantiateSyntheticModule(
+ isolate,
+ v8_str("SyntheticModuleEvaluationStepsThrow-TestSyntheticModule"),
+ context, export_names, SyntheticModuleEvaluationStepsCallbackFail);
+ TryCatch try_catch(isolate);
+ CHECK_EQ(synthetic_module_callback_count, 0);
+ v8::MaybeLocal<Value> completion_value = module->Evaluate(context);
+ CHECK(completion_value.IsEmpty());
+ CHECK_EQ(synthetic_module_callback_count, 1);
+ CHECK_EQ(module->GetStatus(), Module::kErrored);
+ CHECK(try_catch.HasCaught());
+}
+
+TEST(SyntheticModuleEvaluationStepsSetExport) {
+ synthetic_module_callback_count = 0;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ auto i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope cscope(context);
+
+ Local<String> test_export_string = v8_str("test_export");
+ std::vector<v8::Local<v8::String>> export_names{test_export_string};
+
+ Local<Module> module = CreateAndInstantiateSyntheticModule(
+ isolate,
+ v8_str("SyntheticModuleEvaluationStepsSetExport-TestSyntheticModule"),
+ context, export_names, SyntheticModuleEvaluationStepsCallbackSetExport);
+
+ i::Handle<i::SyntheticModule> i_module =
+ i::Handle<i::SyntheticModule>::cast(v8::Utils::OpenHandle(*module));
+ i::Handle<i::ObjectHashTable> exports(i_module->exports(), i_isolate);
+
+ i::Handle<i::Cell> test_export_cell =
+ i::Handle<i::Cell>::cast(i::Handle<i::Object>(
+ exports->Lookup(v8::Utils::OpenHandle(*test_export_string)),
+ i_isolate));
+ CHECK(test_export_cell->value().IsUndefined());
+
+ Local<Value> completion_value = module->Evaluate(context).ToLocalChecked();
+ CHECK(completion_value->IsUndefined());
+ CHECK_EQ(42, test_export_cell->value().Number());
+ CHECK_EQ(module->GetStatus(), Module::kEvaluated);
+}
+
+TEST(ImportFromSyntheticModule) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope cscope(context);
+
+ Local<String> url = v8_str("www.test.com");
+ Local<String> source_text = v8_str(
+ "import {test_export} from './synthetic.module';"
+ "(function() { return test_export; })();");
+ v8::ScriptOrigin origin(url, Local<v8::Integer>(), Local<v8::Integer>(),
+ Local<v8::Boolean>(), Local<v8::Integer>(),
+ Local<v8::Value>(), Local<v8::Boolean>(),
+ Local<v8::Boolean>(), True(isolate));
+ v8::ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ module->InstantiateModule(context, SyntheticModuleResolveCallback)
+ .ToChecked();
+
+ Local<Value> completion_value = module->Evaluate(context).ToLocalChecked();
+ CHECK_EQ(42, completion_value->Int32Value(context).FromJust());
+}
+
+TEST(ImportFromSyntheticModuleThrow) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::Isolate::Scope iscope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope cscope(context);
+
+ Local<String> url = v8_str("www.test.com");
+ Local<String> source_text = v8_str(
+ "import {test_export} from './synthetic.module';"
+ "(function() { return test_export; })();");
+ v8::ScriptOrigin origin(url, Local<v8::Integer>(), Local<v8::Integer>(),
+ Local<v8::Boolean>(), Local<v8::Integer>(),
+ Local<v8::Value>(), Local<v8::Boolean>(),
+ Local<v8::Boolean>(), True(isolate));
+ v8::ScriptCompiler::Source source(source_text, origin);
+ Local<Module> module =
+ v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
+ module
+ ->InstantiateModule(
+ context, SyntheticModuleThatThrowsDuringEvaluateResolveCallback)
+ .ToChecked();
+
+ CHECK_EQ(module->GetStatus(), Module::kInstantiated);
+ TryCatch try_catch(isolate);
+ v8::MaybeLocal<Value> completion_value = module->Evaluate(context);
+ CHECK(completion_value.IsEmpty());
+ CHECK_EQ(module->GetStatus(), Module::kErrored);
+ CHECK(try_catch.HasCaught());
+}
+
// Tests that the code cache does not confuse the same source code compiled as a
// script and as a module.
TEST(CodeCacheModuleScriptMismatch) {
@@ -25552,30 +24861,6 @@ TEST(FutexInterruption) {
timeout_thread.Join();
}
-THREADED_TEST(SharedArrayBuffer_AllocationInformation) {
- i::FLAG_harmony_sharedarraybuffer = true;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- const size_t ab_size = 1024;
- Local<v8::SharedArrayBuffer> ab =
- v8::SharedArrayBuffer::New(isolate, ab_size);
- ScopedSharedArrayBufferContents contents(ab->Externalize());
-
- // Array buffers should have normal allocation mode.
- CHECK_EQ(contents.AllocationMode(),
- v8::ArrayBuffer::Allocator::AllocationMode::kNormal);
- // The allocation must contain the buffer (normally they will be equal, but
- // this is not required by the contract).
- CHECK_NOT_NULL(contents.AllocationBase());
- const uintptr_t alloc =
- reinterpret_cast<uintptr_t>(contents.AllocationBase());
- const uintptr_t data = reinterpret_cast<uintptr_t>(contents.Data());
- CHECK_LE(alloc, data);
- CHECK_LE(data + contents.ByteLength(), alloc + contents.AllocationLength());
-}
-
static int nb_uncaught_exception_callback_calls = 0;
@@ -26227,34 +25512,6 @@ THREADED_TEST(MutableProtoGlobal) {
.FromJust());
}
-TEST(InternalFieldsOnTypedArray) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = env.local();
- Context::Scope context_scope(context);
- v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1);
- v8::Local<v8::Uint8Array> array = v8::Uint8Array::New(buffer, 0, 1);
- for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- CHECK_EQ(static_cast<void*>(nullptr),
- array->GetAlignedPointerFromInternalField(i));
- }
-}
-
-TEST(InternalFieldsOnDataView) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = env.local();
- Context::Scope context_scope(context);
- v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1);
- v8::Local<v8::DataView> array = v8::DataView::New(buffer, 0, 1);
- for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- CHECK_EQ(static_cast<void*>(nullptr),
- array->GetAlignedPointerFromInternalField(i));
- }
-}
-
TEST(SetPrototypeTemplate) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -26483,7 +25740,7 @@ TEST(ImportMeta) {
v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
i::Handle<i::Object> meta =
i_isolate->RunHostInitializeImportMetaObjectCallback(
- v8::Utils::OpenHandle(*module));
+ i::Handle<i::SourceTextModule>::cast(v8::Utils::OpenHandle(*module)));
CHECK(meta->IsJSObject());
Local<Object> meta_obj = Local<Object>::Cast(v8::Utils::ToLocal(meta));
CHECK(meta_obj->Get(context.local(), v8_str("foo"))
@@ -27861,3 +27118,5 @@ UNINITIALIZED_TEST(NestedIsolates) {
isolate_1->Dispose();
isolate_2->Dispose();
}
+
+#undef THREADED_PROFILED_TEST
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index c6d9ac3509..7a5a9b64ca 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -42,4 +42,14 @@ static void CheckReturnValue(const T& t, i::Address callback) {
}
}
+template <typename T>
+static void CheckInternalFieldsAreZero(v8::Local<T> value) {
+ CHECK_EQ(T::kInternalFieldCount, value->InternalFieldCount());
+ for (int i = 0; i < value->InternalFieldCount(); i++) {
+ CHECK_EQ(0, value->GetInternalField(i)
+ ->Int32Value(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
+ }
+}
+
#endif // V8_TEST_CCTEST_TEST_API_H_
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index de238f20c2..c96a0199bb 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -3390,7 +3390,8 @@ TEST(ARMv8_vminmax_f32) {
}
template <typename T, typename Inputs, typename Results>
-static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(MacroAssembler& assm) {
+static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(
+ MacroAssembler& assm) { // NOLINT(runtime/references)
T a = T::from_code(0); // d0/s0
T b = T::from_code(1); // d1/s1
T c = T::from_code(2); // d2/s2
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index d49f8c8974..1f6b732808 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -42,6 +42,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/diagnostics/arm64/disasm-arm64.h"
#include "src/execution/arm64/simulator-arm64.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"
@@ -117,22 +118,23 @@ static void InitializeVM() {
#ifdef USE_SIMULATOR
// Run tests with the simulator.
-#define SETUP_SIZE(buf_size) \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- CHECK_NOT_NULL(isolate); \
- std::unique_ptr<byte[]> owned_buf{new byte[buf_size]}; \
- byte* buf = owned_buf.get(); \
- MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, \
- ExternalAssemblerBuffer(buf, buf_size)); \
- Decoder<DispatchingDecoderVisitor>* decoder = \
- new Decoder<DispatchingDecoderVisitor>(); \
- Simulator simulator(decoder); \
- std::unique_ptr<PrintDisassembler> pdis; \
- RegisterDump core; \
- if (i::FLAG_trace_sim) { \
- pdis.reset(new PrintDisassembler(stdout)); \
- decoder->PrependVisitor(pdis.get()); \
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ CHECK_NOT_NULL(isolate); \
+ std::unique_ptr<byte[]> owned_buf{new byte[buf_size]}; \
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, \
+ ExternalAssemblerBuffer(owned_buf.get(), buf_size)); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ Simulator simulator(decoder); \
+ std::unique_ptr<PrintDisassembler> pdis; \
+ RegisterDump core; \
+ HandleScope handle_scope(isolate); \
+ Handle<Code> code; \
+ if (i::FLAG_trace_sim) { \
+ pdis.reset(new PrintDisassembler(stdout)); \
+ decoder->PrependVisitor(pdis.get()); \
}
// Reset the assembler and simulator, so that instructions can be generated,
@@ -154,17 +156,18 @@ static void InitializeVM() {
RESET(); \
START_AFTER_RESET();
-#define RUN() \
- simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
+#define RUN() simulator.RunFrom(reinterpret_cast<Instruction*>(code->entry()))
-#define END() \
- __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
- { \
- CodeDesc desc; \
- __ GetCode(masm.isolate(), &desc); \
+#define END() \
+ __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ { \
+ CodeDesc desc; \
+ __ GetCode(masm.isolate(), &desc); \
+ code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); \
+ if (FLAG_print_code) code->Print(); \
}
#else // ifdef USE_SIMULATOR.
@@ -176,8 +179,8 @@ static void InitializeVM() {
auto owned_buf = AllocateAssemblerBuffer(buf_size); \
MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, \
owned_buf->CreateView()); \
- uint8_t* buf = owned_buf->start(); \
- USE(buf); \
+ HandleScope handle_scope(isolate); \
+ Handle<Code> code; \
RegisterDump core;
#define RESET() \
@@ -194,20 +197,21 @@ static void InitializeVM() {
RESET(); \
START_AFTER_RESET();
-#define RUN() \
- owned_buf->MakeExecutable(); \
- { \
- auto* test_function = bit_cast<void (*)()>(buf); \
- test_function(); \
+#define RUN() \
+ { \
+ auto f = GeneratedCode<void>::FromCode(*code); \
+ f.Call(); \
}
-#define END() \
- core.Dump(&masm); \
- __ PopCalleeSavedRegisters(); \
- __ Ret(); \
- { \
- CodeDesc desc; \
- __ GetCode(masm.isolate(), &desc); \
+#define END() \
+ core.Dump(&masm); \
+ __ PopCalleeSavedRegisters(); \
+ __ Ret(); \
+ { \
+ CodeDesc desc; \
+ __ GetCode(masm.isolate(), &desc); \
+ code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build(); \
+ if (FLAG_print_code) code->Print(); \
}
#endif // ifdef USE_SIMULATOR.
@@ -227,6 +231,9 @@ static void InitializeVM() {
#define CHECK_EQUAL_64(expected, result) \
CHECK(Equal64(expected, &core, result))
+#define CHECK_FULL_HEAP_OBJECT_IN_REGISTER(expected, result) \
+ CHECK(Equal64(expected->ptr(), &core, result))
+
#define CHECK_EQUAL_FP64(expected, result) \
CHECK(EqualFP64(expected, &core, result))
@@ -370,7 +377,7 @@ TEST(mov) {
__ Mov(w13, Operand(w11, LSL, 1));
__ Mov(x14, Operand(x12, LSL, 2));
__ Mov(w15, Operand(w11, LSR, 3));
- __ Mov(x18, Operand(x12, LSR, 4));
+ __ Mov(x28, Operand(x12, LSR, 4));
__ Mov(w19, Operand(w11, ASR, 11));
__ Mov(x20, Operand(x12, ASR, 12));
__ Mov(w21, Operand(w11, ROR, 13));
@@ -399,7 +406,7 @@ TEST(mov) {
CHECK_EQUAL_64(0x00001FFE, x13);
CHECK_EQUAL_64(0x0000000000003FFCUL, x14);
CHECK_EQUAL_64(0x000001FF, x15);
- CHECK_EQUAL_64(0x00000000000000FFUL, x18);
+ CHECK_EQUAL_64(0x00000000000000FFUL, x28);
CHECK_EQUAL_64(0x00000001, x19);
CHECK_EQUAL_64(0x0, x20);
CHECK_EQUAL_64(0x7FF80000, x21);
@@ -517,7 +524,7 @@ TEST(mov_imm_x) {
__ Mov(x13, 0x0000000000001234L);
__ Mov(x14, 0x0000000012345678L);
__ Mov(x15, 0x0000123400005678L);
- __ Mov(x18, 0x1234000000005678L);
+ __ Mov(x30, 0x1234000000005678L);
__ Mov(x19, 0x1234000056780000L);
__ Mov(x20, 0x1234567800000000L);
__ Mov(x21, 0x1234000000000000L);
@@ -547,7 +554,7 @@ TEST(mov_imm_x) {
CHECK_EQUAL_64(0x0000000000001234L, x13);
CHECK_EQUAL_64(0x0000000012345678L, x14);
CHECK_EQUAL_64(0x0000123400005678L, x15);
- CHECK_EQUAL_64(0x1234000000005678L, x18);
+ CHECK_EQUAL_64(0x1234000000005678L, x30);
CHECK_EQUAL_64(0x1234000056780000L, x19);
CHECK_EQUAL_64(0x1234567800000000L, x20);
CHECK_EQUAL_64(0x1234000000000000L, x21);
@@ -1095,27 +1102,27 @@ TEST(mul) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x15, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mul(w0, w16, w16);
__ Mul(w1, w16, w17);
- __ Mul(w2, w17, w18);
- __ Mul(w3, w18, w19);
+ __ Mul(w2, w17, w15);
+ __ Mul(w3, w15, w19);
__ Mul(x4, x16, x16);
- __ Mul(x5, x17, x18);
- __ Mul(x6, x18, x19);
+ __ Mul(x5, x17, x15);
+ __ Mul(x6, x15, x19);
__ Mul(x7, x19, x19);
- __ Smull(x8, w17, w18);
- __ Smull(x9, w18, w18);
+ __ Smull(x8, w17, w15);
+ __ Smull(x9, w15, w15);
__ Smull(x10, w19, w19);
__ Mneg(w11, w16, w16);
__ Mneg(w12, w16, w17);
- __ Mneg(w13, w17, w18);
- __ Mneg(w14, w18, w19);
+ __ Mneg(w13, w17, w15);
+ __ Mneg(w14, w15, w19);
__ Mneg(x20, x16, x16);
- __ Mneg(x21, x17, x18);
- __ Mneg(x22, x18, x19);
+ __ Mneg(x21, x17, x15);
+ __ Mneg(x22, x15, x19);
__ Mneg(x23, x19, x19);
END();
@@ -1170,33 +1177,33 @@ TEST(madd) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x28, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Madd(w0, w16, w16, w16);
__ Madd(w1, w16, w16, w17);
- __ Madd(w2, w16, w16, w18);
+ __ Madd(w2, w16, w16, w28);
__ Madd(w3, w16, w16, w19);
__ Madd(w4, w16, w17, w17);
- __ Madd(w5, w17, w17, w18);
+ __ Madd(w5, w17, w17, w28);
__ Madd(w6, w17, w17, w19);
- __ Madd(w7, w17, w18, w16);
- __ Madd(w8, w17, w18, w18);
- __ Madd(w9, w18, w18, w17);
- __ Madd(w10, w18, w19, w18);
+ __ Madd(w7, w17, w28, w16);
+ __ Madd(w8, w17, w28, w28);
+ __ Madd(w9, w28, w28, w17);
+ __ Madd(w10, w28, w19, w28);
__ Madd(w11, w19, w19, w19);
__ Madd(x12, x16, x16, x16);
__ Madd(x13, x16, x16, x17);
- __ Madd(x14, x16, x16, x18);
+ __ Madd(x14, x16, x16, x28);
__ Madd(x15, x16, x16, x19);
__ Madd(x20, x16, x17, x17);
- __ Madd(x21, x17, x17, x18);
+ __ Madd(x21, x17, x17, x28);
__ Madd(x22, x17, x17, x19);
- __ Madd(x23, x17, x18, x16);
- __ Madd(x24, x17, x18, x18);
- __ Madd(x25, x18, x18, x17);
- __ Madd(x26, x18, x19, x18);
+ __ Madd(x23, x17, x28, x16);
+ __ Madd(x24, x17, x28, x28);
+ __ Madd(x25, x28, x28, x17);
+ __ Madd(x26, x28, x19, x28);
__ Madd(x27, x19, x19, x19);
END();
@@ -1237,33 +1244,33 @@ TEST(msub) {
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x28, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Msub(w0, w16, w16, w16);
__ Msub(w1, w16, w16, w17);
- __ Msub(w2, w16, w16, w18);
+ __ Msub(w2, w16, w16, w28);
__ Msub(w3, w16, w16, w19);
__ Msub(w4, w16, w17, w17);
- __ Msub(w5, w17, w17, w18);
+ __ Msub(w5, w17, w17, w28);
__ Msub(w6, w17, w17, w19);
- __ Msub(w7, w17, w18, w16);
- __ Msub(w8, w17, w18, w18);
- __ Msub(w9, w18, w18, w17);
- __ Msub(w10, w18, w19, w18);
+ __ Msub(w7, w17, w28, w16);
+ __ Msub(w8, w17, w28, w28);
+ __ Msub(w9, w28, w28, w17);
+ __ Msub(w10, w28, w19, w28);
__ Msub(w11, w19, w19, w19);
__ Msub(x12, x16, x16, x16);
__ Msub(x13, x16, x16, x17);
- __ Msub(x14, x16, x16, x18);
+ __ Msub(x14, x16, x16, x28);
__ Msub(x15, x16, x16, x19);
__ Msub(x20, x16, x17, x17);
- __ Msub(x21, x17, x17, x18);
+ __ Msub(x21, x17, x17, x28);
__ Msub(x22, x17, x17, x19);
- __ Msub(x23, x17, x18, x16);
- __ Msub(x24, x17, x18, x18);
- __ Msub(x25, x18, x18, x17);
- __ Msub(x26, x18, x19, x18);
+ __ Msub(x23, x17, x28, x16);
+ __ Msub(x24, x17, x28, x28);
+ __ Msub(x25, x28, x28, x17);
+ __ Msub(x26, x28, x19, x28);
__ Msub(x27, x19, x19, x19);
END();
@@ -1349,17 +1356,17 @@ TEST(smaddl_umaddl) {
START();
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x28, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x20, 4);
__ Mov(x21, 0x200000000UL);
- __ Smaddl(x9, w17, w18, x20);
- __ Smaddl(x10, w18, w18, x20);
+ __ Smaddl(x9, w17, w28, x20);
+ __ Smaddl(x10, w28, w28, x20);
__ Smaddl(x11, w19, w19, x20);
__ Smaddl(x12, w19, w19, x21);
- __ Umaddl(x13, w17, w18, x20);
- __ Umaddl(x14, w18, w18, x20);
+ __ Umaddl(x13, w17, w28, x20);
+ __ Umaddl(x14, w28, w28, x20);
__ Umaddl(x15, w19, w19, x20);
__ Umaddl(x22, w19, w19, x21);
END();
@@ -1382,17 +1389,17 @@ TEST(smsubl_umsubl) {
START();
__ Mov(x17, 1);
- __ Mov(x18, 0xFFFFFFFF);
+ __ Mov(x28, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x20, 4);
__ Mov(x21, 0x200000000UL);
- __ Smsubl(x9, w17, w18, x20);
- __ Smsubl(x10, w18, w18, x20);
+ __ Smsubl(x9, w17, w28, x20);
+ __ Smsubl(x10, w28, w28, x20);
__ Smsubl(x11, w19, w19, x20);
__ Smsubl(x12, w19, w19, x21);
- __ Umsubl(x13, w17, w18, x20);
- __ Umsubl(x14, w18, w18, x20);
+ __ Umsubl(x13, w17, w28, x20);
+ __ Umsubl(x14, w28, w28, x20);
__ Umsubl(x15, w19, w19, x20);
__ Umsubl(x22, w19, w19, x21);
END();
@@ -1416,7 +1423,7 @@ TEST(div) {
START();
__ Mov(x16, 1);
__ Mov(x17, 0xFFFFFFFF);
- __ Mov(x18, 0xFFFFFFFFFFFFFFFFUL);
+ __ Mov(x30, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x19, 0x80000000);
__ Mov(x20, 0x8000000000000000UL);
__ Mov(x21, 2);
@@ -1425,13 +1432,13 @@ TEST(div) {
__ Udiv(w1, w17, w16);
__ Sdiv(w2, w16, w16);
__ Sdiv(w3, w16, w17);
- __ Sdiv(w4, w17, w18);
+ __ Sdiv(w4, w17, w30);
__ Udiv(x5, x16, x16);
- __ Udiv(x6, x17, x18);
+ __ Udiv(x6, x17, x30);
__ Sdiv(x7, x16, x16);
__ Sdiv(x8, x16, x17);
- __ Sdiv(x9, x17, x18);
+ __ Sdiv(x9, x17, x30);
__ Udiv(w10, w19, w21);
__ Sdiv(w11, w19, w21);
@@ -1442,16 +1449,16 @@ TEST(div) {
__ Udiv(w22, w19, w17);
__ Sdiv(w23, w19, w17);
- __ Udiv(x24, x20, x18);
- __ Sdiv(x25, x20, x18);
+ __ Udiv(x24, x20, x30);
+ __ Sdiv(x25, x20, x30);
__ Udiv(x26, x16, x21);
__ Sdiv(x27, x16, x21);
- __ Udiv(x28, x18, x21);
- __ Sdiv(x29, x18, x21);
+ __ Udiv(x28, x30, x21);
+ __ Sdiv(x29, x30, x21);
__ Mov(x17, 0);
- __ Udiv(w18, w16, w17);
+ __ Udiv(w30, w16, w17);
__ Sdiv(w19, w16, w17);
__ Udiv(x20, x16, x17);
__ Sdiv(x21, x16, x17);
@@ -1483,7 +1490,7 @@ TEST(div) {
CHECK_EQUAL_64(0, x27);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x28);
CHECK_EQUAL_64(0, x29);
- CHECK_EQUAL_64(0, x18);
+ CHECK_EQUAL_64(0, x30);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0, x20);
CHECK_EQUAL_64(0, x21);
@@ -1899,17 +1906,17 @@ TEST(compare_branch) {
__ Mov(x3, 1);
__ Bind(&nzf_end);
- __ Mov(x18, 0xFFFFFFFF00000000UL);
+ __ Mov(x19, 0xFFFFFFFF00000000UL);
Label a, a_end;
- __ Cbz(w18, &a);
+ __ Cbz(w19, &a);
__ B(&a_end);
__ Bind(&a);
__ Mov(x4, 1);
__ Bind(&a_end);
Label b, b_end;
- __ Cbnz(w18, &b);
+ __ Cbnz(w19, &b);
__ B(&b_end);
__ Bind(&b);
__ Mov(x5, 1);
@@ -2357,17 +2364,17 @@ TEST(ldr_str_offset) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x19, dst_base);
__ Ldr(w0, MemOperand(x17));
- __ Str(w0, MemOperand(x18));
+ __ Str(w0, MemOperand(x19));
__ Ldr(w1, MemOperand(x17, 4));
- __ Str(w1, MemOperand(x18, 12));
+ __ Str(w1, MemOperand(x19, 12));
__ Ldr(x2, MemOperand(x17, 8));
- __ Str(x2, MemOperand(x18, 16));
+ __ Str(x2, MemOperand(x19, 16));
__ Ldrb(w3, MemOperand(x17, 1));
- __ Strb(w3, MemOperand(x18, 25));
+ __ Strb(w3, MemOperand(x19, 25));
__ Ldrh(w4, MemOperand(x17, 2));
- __ Strh(w4, MemOperand(x18, 33));
+ __ Strh(w4, MemOperand(x19, 33));
END();
RUN();
@@ -2383,7 +2390,7 @@ TEST(ldr_str_offset) {
CHECK_EQUAL_64(0x7654, x4);
CHECK_EQUAL_64(0x765400, dst[4]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base, x18);
+ CHECK_EQUAL_64(dst_base, x19);
}
TEST(ldr_str_wide) {
@@ -2443,7 +2450,7 @@ TEST(ldr_str_preindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base + 16);
@@ -2453,7 +2460,7 @@ TEST(ldr_str_preindex) {
__ Mov(x25, src_base);
__ Mov(x26, dst_base);
__ Ldr(w0, MemOperand(x17, 4, PreIndex));
- __ Str(w0, MemOperand(x18, 12, PreIndex));
+ __ Str(w0, MemOperand(x28, 12, PreIndex));
__ Ldr(x1, MemOperand(x19, 8, PreIndex));
__ Str(x1, MemOperand(x20, 16, PreIndex));
__ Ldr(w2, MemOperand(x21, -4, PreIndex));
@@ -2477,7 +2484,7 @@ TEST(ldr_str_preindex) {
CHECK_EQUAL_64(0x9876, x4);
CHECK_EQUAL_64(0x987600, dst[5]);
CHECK_EQUAL_64(src_base + 4, x17);
- CHECK_EQUAL_64(dst_base + 12, x18);
+ CHECK_EQUAL_64(dst_base + 12, x28);
CHECK_EQUAL_64(src_base + 8, x19);
CHECK_EQUAL_64(dst_base + 16, x20);
CHECK_EQUAL_64(src_base + 12, x21);
@@ -2499,7 +2506,7 @@ TEST(ldr_str_postindex) {
START();
__ Mov(x17, src_base + 4);
- __ Mov(x18, dst_base + 12);
+ __ Mov(x28, dst_base + 12);
__ Mov(x19, src_base + 8);
__ Mov(x20, dst_base + 16);
__ Mov(x21, src_base + 8);
@@ -2509,7 +2516,7 @@ TEST(ldr_str_postindex) {
__ Mov(x25, src_base + 3);
__ Mov(x26, dst_base + 41);
__ Ldr(w0, MemOperand(x17, 4, PostIndex));
- __ Str(w0, MemOperand(x18, 12, PostIndex));
+ __ Str(w0, MemOperand(x28, 12, PostIndex));
__ Ldr(x1, MemOperand(x19, 8, PostIndex));
__ Str(x1, MemOperand(x20, 16, PostIndex));
__ Ldr(x2, MemOperand(x21, -8, PostIndex));
@@ -2533,7 +2540,7 @@ TEST(ldr_str_postindex) {
CHECK_EQUAL_64(0x9876, x4);
CHECK_EQUAL_64(0x987600, dst[5]);
CHECK_EQUAL_64(src_base + 8, x17);
- CHECK_EQUAL_64(dst_base + 24, x18);
+ CHECK_EQUAL_64(dst_base + 24, x28);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(dst_base + 32, x20);
CHECK_EQUAL_64(src_base, x21);
@@ -2591,7 +2598,7 @@ TEST(load_store_regoffset) {
START();
__ Mov(x16, src_base);
__ Mov(x17, dst_base);
- __ Mov(x18, src_base + 3 * sizeof(src[0]));
+ __ Mov(x21, src_base + 3 * sizeof(src[0]));
__ Mov(x19, dst_base + 3 * sizeof(dst[0]));
__ Mov(x20, dst_base + 4 * sizeof(dst[0]));
__ Mov(x24, 0);
@@ -2603,9 +2610,9 @@ TEST(load_store_regoffset) {
__ Ldr(w0, MemOperand(x16, x24));
__ Ldr(x1, MemOperand(x16, x25));
- __ Ldr(w2, MemOperand(x18, x26));
- __ Ldr(w3, MemOperand(x18, x27, SXTW));
- __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
+ __ Ldr(w2, MemOperand(x21, x26));
+ __ Ldr(w3, MemOperand(x21, x27, SXTW));
+ __ Ldr(w4, MemOperand(x21, x28, SXTW, 2));
__ Str(w0, MemOperand(x17, x24));
__ Str(x1, MemOperand(x17, x25));
__ Str(w2, MemOperand(x20, x29, SXTW, 2));
@@ -2635,13 +2642,13 @@ TEST(load_store_float) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(s0, MemOperand(x17, sizeof(src[0])));
- __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Str(s0, MemOperand(x28, sizeof(dst[0]), PostIndex));
__ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
__ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
__ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2657,7 +2664,7 @@ TEST(load_store_float) {
CHECK_EQUAL_FP32(3.0, s2);
CHECK_EQUAL_FP32(3.0, dst[1]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2675,13 +2682,13 @@ TEST(load_store_double) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(d0, MemOperand(x17, sizeof(src[0])));
- __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Str(d0, MemOperand(x28, sizeof(dst[0]), PostIndex));
__ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
__ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
__ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2697,7 +2704,7 @@ TEST(load_store_double) {
CHECK_EQUAL_FP64(3.0, d2);
CHECK_EQUAL_FP64(3.0, dst[1]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2715,13 +2722,13 @@ TEST(load_store_b) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(b0, MemOperand(x17, sizeof(src[0])));
- __ Str(b0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Str(b0, MemOperand(x28, sizeof(dst[0]), PostIndex));
__ Ldr(b1, MemOperand(x19, sizeof(src[0]), PostIndex));
__ Str(b1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
__ Ldr(b2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2737,7 +2744,7 @@ TEST(load_store_b) {
CHECK_EQUAL_128(0, 0x34, q2);
CHECK_EQUAL_64(0x34, dst[1]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2755,13 +2762,13 @@ TEST(load_store_h) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(h0, MemOperand(x17, sizeof(src[0])));
- __ Str(h0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+ __ Str(h0, MemOperand(x28, sizeof(dst[0]), PostIndex));
__ Ldr(h1, MemOperand(x19, sizeof(src[0]), PostIndex));
__ Str(h1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
__ Ldr(h2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2777,7 +2784,7 @@ TEST(load_store_h) {
CHECK_EQUAL_128(0, 0x3456, q2);
CHECK_EQUAL_64(0x3456, dst[1]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+ CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2800,13 +2807,13 @@ TEST(load_store_q) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base);
__ Mov(x22, dst_base);
__ Ldr(q0, MemOperand(x17, 16));
- __ Str(q0, MemOperand(x18, 16, PostIndex));
+ __ Str(q0, MemOperand(x28, 16, PostIndex));
__ Ldr(q1, MemOperand(x19, 16, PostIndex));
__ Str(q1, MemOperand(x20, 32, PreIndex));
__ Ldr(q2, MemOperand(x21, 32, PreIndex));
@@ -2825,7 +2832,7 @@ TEST(load_store_q) {
CHECK_EQUAL_64(0x02E0CEAC8A684624, dst[2]);
CHECK_EQUAL_64(0x200EECCAA8866442, dst[3]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base + 16, x18);
+ CHECK_EQUAL_64(dst_base + 16, x28);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(dst_base + 32, x20);
CHECK_EQUAL_64(src_base + 32, x21);
@@ -2892,7 +2899,7 @@ TEST(neon_ld1_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
@@ -2900,7 +2907,7 @@ TEST(neon_ld1_d_postindex) {
__ Mov(x23, 1);
__ Ldr(q2, MemOperand(x17)); // Initialise top 64-bits of Q register.
__ Ld1(v2.V8B(), MemOperand(x17, x23, PostIndex));
- __ Ld1(v3.V8B(), v4.V8B(), MemOperand(x18, 16, PostIndex));
+ __ Ld1(v3.V8B(), v4.V8B(), MemOperand(x28, 16, PostIndex));
__ Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x19, 24, PostIndex));
__ Ld1(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
MemOperand(x20, 32, PostIndex));
@@ -2931,7 +2938,7 @@ TEST(neon_ld1_d_postindex) {
CHECK_EQUAL_128(0, 0x1C1B1A1918171615, q22);
CHECK_EQUAL_128(0, 0x24232221201F1E1D, q23);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 16, x18);
+ CHECK_EQUAL_64(src_base + 1 + 16, x28);
CHECK_EQUAL_64(src_base + 2 + 24, x19);
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -2991,13 +2998,13 @@ TEST(neon_ld1_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex));
- __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x18, 32, PostIndex));
+ __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x28, 32, PostIndex));
__ Ld1(v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x19, 48, PostIndex));
__ Ld1(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(),
MemOperand(x20, 64, PostIndex));
@@ -3022,7 +3029,7 @@ TEST(neon_ld1_q_postindex) {
CHECK_EQUAL_128(0x333231302F2E2D2C, 0x2B2A292827262524, q0);
CHECK_EQUAL_128(0x434241403F3E3D3C, 0x3B3A393837363534, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 32, x18);
+ CHECK_EQUAL_64(src_base + 1 + 32, x28);
CHECK_EQUAL_64(src_base + 2 + 48, x19);
CHECK_EQUAL_64(src_base + 3 + 64, x20);
CHECK_EQUAL_64(src_base + 4 + 64, x21);
@@ -3135,13 +3142,13 @@ TEST(neon_ld2_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld2(v2.V8B(), v3.V8B(), MemOperand(x17, x22, PostIndex));
- __ Ld2(v4.V8B(), v5.V8B(), MemOperand(x18, 16, PostIndex));
+ __ Ld2(v4.V8B(), v5.V8B(), MemOperand(x28, 16, PostIndex));
__ Ld2(v5.V4H(), v6.V4H(), MemOperand(x19, 16, PostIndex));
__ Ld2(v16.V2S(), v17.V2S(), MemOperand(x20, 16, PostIndex));
__ Ld2(v31.V2S(), v0.V2S(), MemOperand(x21, 16, PostIndex));
@@ -3160,7 +3167,7 @@ TEST(neon_ld2_d_postindex) {
CHECK_EQUAL_128(0, 0x131211100B0A0908, q0);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 16, x18);
+ CHECK_EQUAL_64(src_base + 1 + 16, x28);
CHECK_EQUAL_64(src_base + 2 + 16, x19);
CHECK_EQUAL_64(src_base + 3 + 16, x20);
CHECK_EQUAL_64(src_base + 4 + 16, x21);
@@ -3215,13 +3222,13 @@ TEST(neon_ld2_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17, x22, PostIndex));
- __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x18, 32, PostIndex));
+ __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x28, 32, PostIndex));
__ Ld2(v6.V8H(), v7.V8H(), MemOperand(x19, 32, PostIndex));
__ Ld2(v16.V4S(), v17.V4S(), MemOperand(x20, 32, PostIndex));
__ Ld2(v31.V2D(), v0.V2D(), MemOperand(x21, 32, PostIndex));
@@ -3241,7 +3248,7 @@ TEST(neon_ld2_q_postindex) {
CHECK_EQUAL_128(0x232221201F1E1D1C, 0x131211100F0E0D0C, q0);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 32, x18);
+ CHECK_EQUAL_64(src_base + 1 + 32, x28);
CHECK_EQUAL_64(src_base + 2 + 32, x19);
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -3337,7 +3344,7 @@ TEST(neon_ld2_lane_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Mov(x19, src_base);
__ Mov(x20, src_base);
__ Mov(x21, src_base);
@@ -3351,7 +3358,7 @@ TEST(neon_ld2_lane_postindex) {
}
for (int i = 7; i >= 0; i--) {
- __ Ld2(v2.H(), v3.H(), i, MemOperand(x18, 4, PostIndex));
+ __ Ld2(v2.H(), v3.H(), i, MemOperand(x28, 4, PostIndex));
}
for (int i = 3; i >= 0; i--) {
@@ -3409,7 +3416,7 @@ TEST(neon_ld2_lane_postindex) {
CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q15);
CHECK_EQUAL_64(src_base + 32, x17);
- CHECK_EQUAL_64(src_base + 32, x18);
+ CHECK_EQUAL_64(src_base + 32, x28);
CHECK_EQUAL_64(src_base + 32, x19);
CHECK_EQUAL_64(src_base + 32, x20);
CHECK_EQUAL_64(src_base + 1, x21);
@@ -3430,7 +3437,6 @@ TEST(neon_ld2_alllanes) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
__ Ld2r(v0.V8B(), v1.V8B(), MemOperand(x17));
__ Add(x17, x17, 2);
__ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17));
@@ -3476,12 +3482,12 @@ TEST(neon_ld2_alllanes_postindex) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
+ __ Mov(x19, 1);
__ Ld2r(v0.V8B(), v1.V8B(), MemOperand(x17, 2, PostIndex));
- __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x18, PostIndex));
- __ Ld2r(v4.V4H(), v5.V4H(), MemOperand(x17, x18, PostIndex));
+ __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x19, PostIndex));
+ __ Ld2r(v4.V4H(), v5.V4H(), MemOperand(x17, x19, PostIndex));
__ Ld2r(v6.V8H(), v7.V8H(), MemOperand(x17, 4, PostIndex));
- __ Ld2r(v8_.V2S(), v9.V2S(), MemOperand(x17, x18, PostIndex));
+ __ Ld2r(v8_.V2S(), v9.V2S(), MemOperand(x17, x19, PostIndex));
__ Ld2r(v10.V4S(), v11.V4S(), MemOperand(x17, 8, PostIndex));
__ Ld2r(v12.V2D(), v13.V2D(), MemOperand(x17, 16, PostIndex));
END();
@@ -3554,13 +3560,13 @@ TEST(neon_ld3_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld3(v2.V8B(), v3.V8B(), v4.V8B(), MemOperand(x17, x22, PostIndex));
- __ Ld3(v5.V8B(), v6.V8B(), v7.V8B(), MemOperand(x18, 24, PostIndex));
+ __ Ld3(v5.V8B(), v6.V8B(), v7.V8B(), MemOperand(x28, 24, PostIndex));
__ Ld3(v8_.V4H(), v9.V4H(), v10.V4H(), MemOperand(x19, 24, PostIndex));
__ Ld3(v11.V2S(), v12.V2S(), v13.V2S(), MemOperand(x20, 24, PostIndex));
__ Ld3(v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x21, 24, PostIndex));
@@ -3585,7 +3591,7 @@ TEST(neon_ld3_d_postindex) {
CHECK_EQUAL_128(0, 0x1B1A19180F0E0D0C, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 24, x18);
+ CHECK_EQUAL_64(src_base + 1 + 24, x28);
CHECK_EQUAL_64(src_base + 2 + 24, x19);
CHECK_EQUAL_64(src_base + 3 + 24, x20);
CHECK_EQUAL_64(src_base + 4 + 24, x21);
@@ -3645,14 +3651,14 @@ TEST(neon_ld3_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
__ Mov(x22, 1);
__ Ld3(v2.V16B(), v3.V16B(), v4.V16B(), MemOperand(x17, x22, PostIndex));
- __ Ld3(v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x18, 48, PostIndex));
+ __ Ld3(v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x28, 48, PostIndex));
__ Ld3(v8_.V8H(), v9.V8H(), v10.V8H(), MemOperand(x19, 48, PostIndex));
__ Ld3(v11.V4S(), v12.V4S(), v13.V4S(), MemOperand(x20, 48, PostIndex));
__ Ld3(v31.V2D(), v0.V2D(), v1.V2D(), MemOperand(x21, 48, PostIndex));
@@ -3677,7 +3683,7 @@ TEST(neon_ld3_q_postindex) {
CHECK_EQUAL_128(0x333231302F2E2D2C, 0x1B1A191817161514, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 48, x18);
+ CHECK_EQUAL_64(src_base + 1 + 48, x28);
CHECK_EQUAL_64(src_base + 2 + 48, x19);
CHECK_EQUAL_64(src_base + 3 + 48, x20);
CHECK_EQUAL_64(src_base + 4 + 48, x21);
@@ -3781,7 +3787,7 @@ TEST(neon_ld3_lane_postindex) {
// Test loading whole register by element.
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Mov(x19, src_base);
__ Mov(x20, src_base);
__ Mov(x21, src_base);
@@ -3793,7 +3799,7 @@ TEST(neon_ld3_lane_postindex) {
}
for (int i = 7; i >= 0; i--) {
- __ Ld3(v3.H(), v4.H(), v5.H(), i, MemOperand(x18, 6, PostIndex));
+ __ Ld3(v3.H(), v4.H(), v5.H(), i, MemOperand(x28, 6, PostIndex));
}
for (int i = 3; i >= 0; i--) {
@@ -3863,7 +3869,7 @@ TEST(neon_ld3_lane_postindex) {
CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q23);
CHECK_EQUAL_64(src_base + 48, x17);
- CHECK_EQUAL_64(src_base + 48, x18);
+ CHECK_EQUAL_64(src_base + 48, x28);
CHECK_EQUAL_64(src_base + 48, x19);
CHECK_EQUAL_64(src_base + 48, x20);
CHECK_EQUAL_64(src_base + 1, x21);
@@ -3884,7 +3890,6 @@ TEST(neon_ld3_alllanes) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
__ Ld3r(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x17));
__ Add(x17, x17, 3);
__ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17));
@@ -3934,17 +3939,15 @@ TEST(neon_ld3_alllanes_postindex) {
src[i] = i;
}
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
- __ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
+ __ Mov(x19, 1);
__ Ld3r(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x17, 3, PostIndex));
- __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17, x18, PostIndex));
- __ Ld3r(v6.V4H(), v7.V4H(), v8_.V4H(), MemOperand(x17, x18, PostIndex));
+ __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17, x19, PostIndex));
+ __ Ld3r(v6.V4H(), v7.V4H(), v8_.V4H(), MemOperand(x17, x19, PostIndex));
__ Ld3r(v9.V8H(), v10.V8H(), v11.V8H(), MemOperand(x17, 6, PostIndex));
- __ Ld3r(v12.V2S(), v13.V2S(), v14.V2S(), MemOperand(x17, x18, PostIndex));
+ __ Ld3r(v12.V2S(), v13.V2S(), v14.V2S(), MemOperand(x17, x19, PostIndex));
__ Ld3r(v15.V4S(), v16.V4S(), v17.V4S(), MemOperand(x17, 12, PostIndex));
__ Ld3r(v18.V2D(), v19.V2D(), v20.V2D(), MemOperand(x17, 24, PostIndex));
END();
@@ -4027,7 +4030,7 @@ TEST(neon_ld4_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
@@ -4035,7 +4038,7 @@ TEST(neon_ld4_d_postindex) {
__ Ld4(v2.V8B(), v3.V8B(), v4.V8B(), v5.V8B(),
MemOperand(x17, x22, PostIndex));
__ Ld4(v6.V8B(), v7.V8B(), v8_.V8B(), v9.V8B(),
- MemOperand(x18, 32, PostIndex));
+ MemOperand(x28, 32, PostIndex));
__ Ld4(v10.V4H(), v11.V4H(), v12.V4H(), v13.V4H(),
MemOperand(x19, 32, PostIndex));
__ Ld4(v14.V2S(), v15.V2S(), v16.V2S(), v17.V2S(),
@@ -4068,7 +4071,7 @@ TEST(neon_ld4_d_postindex) {
CHECK_EQUAL_128(0, 0x2322212013121110, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 32, x18);
+ CHECK_EQUAL_64(src_base + 1 + 32, x28);
CHECK_EQUAL_64(src_base + 2 + 32, x19);
CHECK_EQUAL_64(src_base + 3 + 32, x20);
CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -4133,7 +4136,7 @@ TEST(neon_ld4_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base + 1);
+ __ Mov(x28, src_base + 1);
__ Mov(x19, src_base + 2);
__ Mov(x20, src_base + 3);
__ Mov(x21, src_base + 4);
@@ -4142,7 +4145,7 @@ TEST(neon_ld4_q_postindex) {
__ Ld4(v2.V16B(), v3.V16B(), v4.V16B(), v5.V16B(),
MemOperand(x17, x22, PostIndex));
__ Ld4(v6.V16B(), v7.V16B(), v8_.V16B(), v9.V16B(),
- MemOperand(x18, 64, PostIndex));
+ MemOperand(x28, 64, PostIndex));
__ Ld4(v10.V8H(), v11.V8H(), v12.V8H(), v13.V8H(),
MemOperand(x19, 64, PostIndex));
__ Ld4(v14.V4S(), v15.V4S(), v16.V4S(), v17.V4S(),
@@ -4175,7 +4178,7 @@ TEST(neon_ld4_q_postindex) {
CHECK_EQUAL_128(0x434241403F3E3D3C, 0x232221201F1E1D1C, q1);
CHECK_EQUAL_64(src_base + 1, x17);
- CHECK_EQUAL_64(src_base + 1 + 64, x18);
+ CHECK_EQUAL_64(src_base + 1 + 64, x28);
CHECK_EQUAL_64(src_base + 2 + 64, x19);
CHECK_EQUAL_64(src_base + 3 + 64, x20);
CHECK_EQUAL_64(src_base + 4 + 64, x21);
@@ -4304,9 +4307,9 @@ TEST(neon_ld4_lane_postindex) {
__ Ld4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x17, 4, PostIndex));
}
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
for (int i = 7; i >= 0; i--) {
- __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x18, 8, PostIndex));
+ __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x28, 8, PostIndex));
}
__ Mov(x19, src_base);
@@ -4401,7 +4404,7 @@ TEST(neon_ld4_lane_postindex) {
CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x3736353433323130, q31);
CHECK_EQUAL_64(src_base + 64, x17);
- CHECK_EQUAL_64(src_base + 64, x18);
+ CHECK_EQUAL_64(src_base + 64, x28);
CHECK_EQUAL_64(src_base + 64, x19);
CHECK_EQUAL_64(src_base + 64, x20);
CHECK_EQUAL_64(src_base + 1, x21);
@@ -4422,7 +4425,6 @@ TEST(neon_ld4_alllanes) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
__ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x17));
__ Add(x17, x17, 4);
__ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x17));
@@ -4480,22 +4482,20 @@ TEST(neon_ld4_alllanes_postindex) {
src[i] = i;
}
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
- __ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
+ __ Mov(x19, 1);
__ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
MemOperand(x17, 4, PostIndex));
__ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(),
- MemOperand(x17, x18, PostIndex));
+ MemOperand(x17, x19, PostIndex));
__ Ld4r(v8_.V4H(), v9.V4H(), v10.V4H(), v11.V4H(),
- MemOperand(x17, x18, PostIndex));
+ MemOperand(x17, x19, PostIndex));
__ Ld4r(v12.V8H(), v13.V8H(), v14.V8H(), v15.V8H(),
MemOperand(x17, 8, PostIndex));
__ Ld4r(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
- MemOperand(x17, x18, PostIndex));
+ MemOperand(x17, x19, PostIndex));
__ Ld4r(v20.V4S(), v21.V4S(), v22.V4S(), v23.V4S(),
MemOperand(x17, 16, PostIndex));
__ Ld4r(v24.V2D(), v25.V2D(), v26.V2D(), v27.V2D(),
@@ -4547,32 +4547,32 @@ TEST(neon_st1_lane) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, -16);
+ __ Mov(x19, -16);
__ Ldr(q0, MemOperand(x17));
for (int i = 15; i >= 0; i--) {
__ St1(v0.B(), i, MemOperand(x17));
__ Add(x17, x17, 1);
}
- __ Ldr(q1, MemOperand(x17, x18));
+ __ Ldr(q1, MemOperand(x17, x19));
for (int i = 7; i >= 0; i--) {
__ St1(v0.H(), i, MemOperand(x17));
__ Add(x17, x17, 2);
}
- __ Ldr(q2, MemOperand(x17, x18));
+ __ Ldr(q2, MemOperand(x17, x19));
for (int i = 3; i >= 0; i--) {
__ St1(v0.S(), i, MemOperand(x17));
__ Add(x17, x17, 4);
}
- __ Ldr(q3, MemOperand(x17, x18));
+ __ Ldr(q3, MemOperand(x17, x19));
for (int i = 1; i >= 0; i--) {
__ St1(v0.D(), i, MemOperand(x17));
__ Add(x17, x17, 8);
}
- __ Ldr(q4, MemOperand(x17, x18));
+ __ Ldr(q4, MemOperand(x17, x19));
END();
@@ -4595,17 +4595,17 @@ TEST(neon_st2_lane) {
START();
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base);
+ __ Mov(x19, dst_base);
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
__ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
// Test B stores with and without post index.
for (int i = 15; i >= 0; i--) {
- __ St2(v0.B(), v1.B(), i, MemOperand(x18));
- __ Add(x18, x18, 2);
+ __ St2(v0.B(), v1.B(), i, MemOperand(x19));
+ __ Add(x19, x19, 2);
}
for (int i = 15; i >= 0; i--) {
- __ St2(v0.B(), v1.B(), i, MemOperand(x18, 2, PostIndex));
+ __ St2(v0.B(), v1.B(), i, MemOperand(x19, 2, PostIndex));
}
__ Ldr(q2, MemOperand(x17, 0 * 16));
__ Ldr(q3, MemOperand(x17, 1 * 16));
@@ -4615,11 +4615,11 @@ TEST(neon_st2_lane) {
// Test H stores with and without post index.
__ Mov(x0, 4);
for (int i = 7; i >= 0; i--) {
- __ St2(v0.H(), v1.H(), i, MemOperand(x18));
- __ Add(x18, x18, 4);
+ __ St2(v0.H(), v1.H(), i, MemOperand(x19));
+ __ Add(x19, x19, 4);
}
for (int i = 7; i >= 0; i--) {
- __ St2(v0.H(), v1.H(), i, MemOperand(x18, x0, PostIndex));
+ __ St2(v0.H(), v1.H(), i, MemOperand(x19, x0, PostIndex));
}
__ Ldr(q6, MemOperand(x17, 4 * 16));
__ Ldr(q7, MemOperand(x17, 5 * 16));
@@ -4628,11 +4628,11 @@ TEST(neon_st2_lane) {
// Test S stores with and without post index.
for (int i = 3; i >= 0; i--) {
- __ St2(v0.S(), v1.S(), i, MemOperand(x18));
- __ Add(x18, x18, 8);
+ __ St2(v0.S(), v1.S(), i, MemOperand(x19));
+ __ Add(x19, x19, 8);
}
for (int i = 3; i >= 0; i--) {
- __ St2(v0.S(), v1.S(), i, MemOperand(x18, 8, PostIndex));
+ __ St2(v0.S(), v1.S(), i, MemOperand(x19, 8, PostIndex));
}
__ Ldr(q18, MemOperand(x17, 8 * 16));
__ Ldr(q19, MemOperand(x17, 9 * 16));
@@ -4641,11 +4641,11 @@ TEST(neon_st2_lane) {
// Test D stores with and without post index.
__ Mov(x0, 16);
- __ St2(v0.D(), v1.D(), 1, MemOperand(x18));
- __ Add(x18, x18, 16);
- __ St2(v0.D(), v1.D(), 0, MemOperand(x18, 16, PostIndex));
- __ St2(v0.D(), v1.D(), 1, MemOperand(x18, x0, PostIndex));
- __ St2(v0.D(), v1.D(), 0, MemOperand(x18, x0, PostIndex));
+ __ St2(v0.D(), v1.D(), 1, MemOperand(x19));
+ __ Add(x19, x19, 16);
+ __ St2(v0.D(), v1.D(), 0, MemOperand(x19, 16, PostIndex));
+ __ St2(v0.D(), v1.D(), 1, MemOperand(x19, x0, PostIndex));
+ __ St2(v0.D(), v1.D(), 0, MemOperand(x19, x0, PostIndex));
__ Ldr(q22, MemOperand(x17, 12 * 16));
__ Ldr(q23, MemOperand(x17, 13 * 16));
__ Ldr(q24, MemOperand(x17, 14 * 16));
@@ -4686,18 +4686,18 @@ TEST(neon_st3_lane) {
START();
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base);
+ __ Mov(x19, dst_base);
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
__ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
__ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
// Test B stores with and without post index.
for (int i = 15; i >= 0; i--) {
- __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x18));
- __ Add(x18, x18, 3);
+ __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x19));
+ __ Add(x19, x19, 3);
}
for (int i = 15; i >= 0; i--) {
- __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x18, 3, PostIndex));
+ __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x19, 3, PostIndex));
}
__ Ldr(q3, MemOperand(x17, 0 * 16));
__ Ldr(q4, MemOperand(x17, 1 * 16));
@@ -4709,11 +4709,11 @@ TEST(neon_st3_lane) {
// Test H stores with and without post index.
__ Mov(x0, 6);
for (int i = 7; i >= 0; i--) {
- __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x18));
- __ Add(x18, x18, 6);
+ __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x19));
+ __ Add(x19, x19, 6);
}
for (int i = 7; i >= 0; i--) {
- __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x18, x0, PostIndex));
+ __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x19, x0, PostIndex));
}
__ Ldr(q17, MemOperand(x17, 6 * 16));
__ Ldr(q18, MemOperand(x17, 7 * 16));
@@ -4724,11 +4724,11 @@ TEST(neon_st3_lane) {
// Test S stores with and without post index.
for (int i = 3; i >= 0; i--) {
- __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x18));
- __ Add(x18, x18, 12);
+ __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x19));
+ __ Add(x19, x19, 12);
}
for (int i = 3; i >= 0; i--) {
- __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x18, 12, PostIndex));
+ __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x19, 12, PostIndex));
}
__ Ldr(q23, MemOperand(x17, 12 * 16));
__ Ldr(q24, MemOperand(x17, 13 * 16));
@@ -4739,10 +4739,10 @@ TEST(neon_st3_lane) {
// Test D stores with and without post index.
__ Mov(x0, 24);
- __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x18));
- __ Add(x18, x18, 24);
- __ St3(v0.D(), v1.D(), v2.D(), 0, MemOperand(x18, 24, PostIndex));
- __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x18, x0, PostIndex));
+ __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x19));
+ __ Add(x19, x19, 24);
+ __ St3(v0.D(), v1.D(), v2.D(), 0, MemOperand(x19, 24, PostIndex));
+ __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x19, x0, PostIndex));
__ Ldr(q29, MemOperand(x17, 18 * 16));
__ Ldr(q30, MemOperand(x17, 19 * 16));
__ Ldr(q31, MemOperand(x17, 20 * 16));
@@ -4783,7 +4783,7 @@ TEST(neon_st4_lane) {
START();
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base);
+ __ Mov(x19, dst_base);
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
__ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
__ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
@@ -4791,8 +4791,8 @@ TEST(neon_st4_lane) {
// Test B stores without post index.
for (int i = 15; i >= 0; i--) {
- __ St4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x18));
- __ Add(x18, x18, 4);
+ __ St4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x19));
+ __ Add(x19, x19, 4);
}
__ Ldr(q4, MemOperand(x17, 0 * 16));
__ Ldr(q5, MemOperand(x17, 1 * 16));
@@ -4802,7 +4802,7 @@ TEST(neon_st4_lane) {
// Test H stores with post index.
__ Mov(x0, 8);
for (int i = 7; i >= 0; i--) {
- __ St4(v0.H(), v1.H(), v2.H(), v3.H(), i, MemOperand(x18, x0, PostIndex));
+ __ St4(v0.H(), v1.H(), v2.H(), v3.H(), i, MemOperand(x19, x0, PostIndex));
}
__ Ldr(q16, MemOperand(x17, 4 * 16));
__ Ldr(q17, MemOperand(x17, 5 * 16));
@@ -4811,8 +4811,8 @@ TEST(neon_st4_lane) {
// Test S stores without post index.
for (int i = 3; i >= 0; i--) {
- __ St4(v0.S(), v1.S(), v2.S(), v3.S(), i, MemOperand(x18));
- __ Add(x18, x18, 16);
+ __ St4(v0.S(), v1.S(), v2.S(), v3.S(), i, MemOperand(x19));
+ __ Add(x19, x19, 16);
}
__ Ldr(q20, MemOperand(x17, 8 * 16));
__ Ldr(q21, MemOperand(x17, 9 * 16));
@@ -4821,8 +4821,8 @@ TEST(neon_st4_lane) {
// Test D stores with post index.
__ Mov(x0, 32);
- __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 0, MemOperand(x18, 32, PostIndex));
- __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 1, MemOperand(x18, x0, PostIndex));
+ __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 0, MemOperand(x19, 32, PostIndex));
+ __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 1, MemOperand(x19, x0, PostIndex));
__ Ldr(q24, MemOperand(x17, 12 * 16));
__ Ldr(q25, MemOperand(x17, 13 * 16));
@@ -4865,7 +4865,7 @@ TEST(neon_ld1_lane_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Mov(x19, src_base);
__ Mov(x20, src_base);
__ Mov(x21, src_base);
@@ -4879,7 +4879,7 @@ TEST(neon_ld1_lane_postindex) {
}
for (int i = 7; i >= 0; i--) {
- __ Ld1(v1.H(), i, MemOperand(x18, 2, PostIndex));
+ __ Ld1(v1.H(), i, MemOperand(x28, 2, PostIndex));
}
for (int i = 3; i >= 0; i--) {
@@ -4920,7 +4920,7 @@ TEST(neon_ld1_lane_postindex) {
CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q6);
CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q7);
CHECK_EQUAL_64(src_base + 16, x17);
- CHECK_EQUAL_64(src_base + 16, x18);
+ CHECK_EQUAL_64(src_base + 16, x28);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(src_base + 16, x20);
CHECK_EQUAL_64(src_base + 1, x21);
@@ -4941,28 +4941,28 @@ TEST(neon_st1_lane_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, -16);
+ __ Mov(x19, -16);
__ Ldr(q0, MemOperand(x17));
for (int i = 15; i >= 0; i--) {
__ St1(v0.B(), i, MemOperand(x17, 1, PostIndex));
}
- __ Ldr(q1, MemOperand(x17, x18));
+ __ Ldr(q1, MemOperand(x17, x19));
for (int i = 7; i >= 0; i--) {
__ St1(v0.H(), i, MemOperand(x17, 2, PostIndex));
}
- __ Ldr(q2, MemOperand(x17, x18));
+ __ Ldr(q2, MemOperand(x17, x19));
for (int i = 3; i >= 0; i--) {
__ St1(v0.S(), i, MemOperand(x17, 4, PostIndex));
}
- __ Ldr(q3, MemOperand(x17, x18));
+ __ Ldr(q3, MemOperand(x17, x19));
for (int i = 1; i >= 0; i--) {
__ St1(v0.D(), i, MemOperand(x17, 8, PostIndex));
}
- __ Ldr(q4, MemOperand(x17, x18));
+ __ Ldr(q4, MemOperand(x17, x19));
END();
@@ -5027,12 +5027,12 @@ TEST(neon_ld1_alllanes_postindex) {
START();
__ Mov(x17, src_base + 1);
- __ Mov(x18, 1);
+ __ Mov(x19, 1);
__ Ld1r(v0.V8B(), MemOperand(x17, 1, PostIndex));
- __ Ld1r(v1.V16B(), MemOperand(x17, x18, PostIndex));
- __ Ld1r(v2.V4H(), MemOperand(x17, x18, PostIndex));
+ __ Ld1r(v1.V16B(), MemOperand(x17, x19, PostIndex));
+ __ Ld1r(v2.V4H(), MemOperand(x17, x19, PostIndex));
__ Ld1r(v3.V8H(), MemOperand(x17, 2, PostIndex));
- __ Ld1r(v4.V2S(), MemOperand(x17, x18, PostIndex));
+ __ Ld1r(v4.V2S(), MemOperand(x17, x19, PostIndex));
__ Ld1r(v5.V4S(), MemOperand(x17, 4, PostIndex));
__ Ld1r(v6.V2D(), MemOperand(x17, 8, PostIndex));
END();
@@ -5116,7 +5116,7 @@ TEST(neon_st1_d_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, -8);
+ __ Mov(x28, -8);
__ Mov(x19, -16);
__ Mov(x20, -24);
__ Mov(x21, -32);
@@ -5127,7 +5127,7 @@ TEST(neon_st1_d_postindex) {
__ Mov(x17, src_base);
__ St1(v0.V8B(), MemOperand(x17, 8, PostIndex));
- __ Ldr(d16, MemOperand(x17, x18));
+ __ Ldr(d16, MemOperand(x17, x28));
__ St1(v0.V8B(), v1.V8B(), MemOperand(x17, 16, PostIndex));
__ Ldr(q17, MemOperand(x17, x19));
@@ -5135,7 +5135,7 @@ TEST(neon_st1_d_postindex) {
__ St1(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x17, 24, PostIndex));
__ Ldr(d18, MemOperand(x17, x20));
__ Ldr(d19, MemOperand(x17, x19));
- __ Ldr(d20, MemOperand(x17, x18));
+ __ Ldr(d20, MemOperand(x17, x28));
__ St1(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(),
MemOperand(x17, 32, PostIndex));
@@ -5223,7 +5223,7 @@ TEST(neon_st1_q_postindex) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, -16);
+ __ Mov(x28, -16);
__ Mov(x19, -32);
__ Mov(x20, -48);
__ Mov(x21, -64);
@@ -5233,23 +5233,23 @@ TEST(neon_st1_q_postindex) {
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
__ St1(v0.V16B(), MemOperand(x17, 16, PostIndex));
- __ Ldr(q16, MemOperand(x17, x18));
+ __ Ldr(q16, MemOperand(x17, x28));
__ St1(v0.V8H(), v1.V8H(), MemOperand(x17, 32, PostIndex));
__ Ldr(q17, MemOperand(x17, x19));
- __ Ldr(q18, MemOperand(x17, x18));
+ __ Ldr(q18, MemOperand(x17, x28));
__ St1(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x17, 48, PostIndex));
__ Ldr(q19, MemOperand(x17, x20));
__ Ldr(q20, MemOperand(x17, x19));
- __ Ldr(q21, MemOperand(x17, x18));
+ __ Ldr(q21, MemOperand(x17, x28));
__ St1(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(),
MemOperand(x17, 64, PostIndex));
__ Ldr(q22, MemOperand(x17, x21));
__ Ldr(q23, MemOperand(x17, x20));
__ Ldr(q24, MemOperand(x17, x19));
- __ Ldr(q25, MemOperand(x17, x18));
+ __ Ldr(q25, MemOperand(x17, x28));
END();
@@ -5279,15 +5279,15 @@ TEST(neon_st2_d) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
- __ St2(v0.V8B(), v1.V8B(), MemOperand(x18));
- __ Add(x18, x18, 22);
- __ St2(v0.V4H(), v1.V4H(), MemOperand(x18));
- __ Add(x18, x18, 11);
- __ St2(v0.V2S(), v1.V2S(), MemOperand(x18));
+ __ St2(v0.V8B(), v1.V8B(), MemOperand(x19));
+ __ Add(x19, x19, 22);
+ __ St2(v0.V4H(), v1.V4H(), MemOperand(x19));
+ __ Add(x19, x19, 11);
+ __ St2(v0.V2S(), v1.V2S(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5318,13 +5318,13 @@ TEST(neon_st2_d_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
- __ St2(v0.V8B(), v1.V8B(), MemOperand(x18, x22, PostIndex));
- __ St2(v0.V4H(), v1.V4H(), MemOperand(x18, 16, PostIndex));
- __ St2(v0.V2S(), v1.V2S(), MemOperand(x18));
+ __ St2(v0.V8B(), v1.V8B(), MemOperand(x19, x22, PostIndex));
+ __ St2(v0.V4H(), v1.V4H(), MemOperand(x19, 16, PostIndex));
+ __ St2(v0.V2S(), v1.V2S(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5352,17 +5352,17 @@ TEST(neon_st2_q) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
- __ St2(v0.V16B(), v1.V16B(), MemOperand(x18));
- __ Add(x18, x18, 8);
- __ St2(v0.V8H(), v1.V8H(), MemOperand(x18));
- __ Add(x18, x18, 22);
- __ St2(v0.V4S(), v1.V4S(), MemOperand(x18));
- __ Add(x18, x18, 2);
- __ St2(v0.V2D(), v1.V2D(), MemOperand(x18));
+ __ St2(v0.V16B(), v1.V16B(), MemOperand(x19));
+ __ Add(x19, x19, 8);
+ __ St2(v0.V8H(), v1.V8H(), MemOperand(x19));
+ __ Add(x19, x19, 22);
+ __ St2(v0.V4S(), v1.V4S(), MemOperand(x19));
+ __ Add(x19, x19, 2);
+ __ St2(v0.V2D(), v1.V2D(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5393,14 +5393,14 @@ TEST(neon_st2_q_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
- __ St2(v0.V16B(), v1.V16B(), MemOperand(x18, x22, PostIndex));
- __ St2(v0.V8H(), v1.V8H(), MemOperand(x18, 32, PostIndex));
- __ St2(v0.V4S(), v1.V4S(), MemOperand(x18, x22, PostIndex));
- __ St2(v0.V2D(), v1.V2D(), MemOperand(x18));
+ __ St2(v0.V16B(), v1.V16B(), MemOperand(x19, x22, PostIndex));
+ __ St2(v0.V8H(), v1.V8H(), MemOperand(x19, 32, PostIndex));
+ __ St2(v0.V4S(), v1.V4S(), MemOperand(x19, x22, PostIndex));
+ __ St2(v0.V2D(), v1.V2D(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5432,16 +5432,16 @@ TEST(neon_st3_d) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
- __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x18));
- __ Add(x18, x18, 3);
- __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x18));
- __ Add(x18, x18, 2);
- __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x18));
+ __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x19));
+ __ Add(x19, x19, 3);
+ __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x19));
+ __ Add(x19, x19, 2);
+ __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5468,14 +5468,14 @@ TEST(neon_st3_d_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
- __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x18, x22, PostIndex));
- __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x18, 24, PostIndex));
- __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x18));
+ __ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x19, x22, PostIndex));
+ __ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x19, 24, PostIndex));
+ __ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5505,18 +5505,18 @@ TEST(neon_st3_q) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
- __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x18));
- __ Add(x18, x18, 5);
- __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x18));
- __ Add(x18, x18, 12);
- __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x18));
- __ Add(x18, x18, 22);
- __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x18));
+ __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x19));
+ __ Add(x19, x19, 5);
+ __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x19));
+ __ Add(x19, x19, 12);
+ __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x19));
+ __ Add(x19, x19, 22);
+ __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x19));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5551,15 +5551,15 @@ TEST(neon_st3_q_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
- __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x18, x22, PostIndex));
- __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x18, 48, PostIndex));
- __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x18, x22, PostIndex));
- __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x18));
+ __ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x28, x22, PostIndex));
+ __ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x28, 48, PostIndex));
+ __ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x28, x22, PostIndex));
+ __ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x28));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5595,17 +5595,17 @@ TEST(neon_st4_d) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
- __ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x18));
- __ Add(x18, x18, 12);
- __ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(), MemOperand(x18));
- __ Add(x18, x18, 15);
- __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
+ __ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x28));
+ __ Add(x28, x28, 12);
+ __ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(), MemOperand(x28));
+ __ Add(x28, x28, 15);
+ __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x28));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5636,17 +5636,17 @@ TEST(neon_st4_d_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
__ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
- MemOperand(x18, x22, PostIndex));
+ MemOperand(x28, x22, PostIndex));
__ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(),
- MemOperand(x18, 32, PostIndex));
- __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
+ MemOperand(x28, 32, PostIndex));
+ __ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x28));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5678,20 +5678,20 @@ TEST(neon_st4_q) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
- __ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), MemOperand(x18));
- __ Add(x18, x18, 5);
- __ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(), MemOperand(x18));
- __ Add(x18, x18, 12);
- __ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(), MemOperand(x18));
- __ Add(x18, x18, 22);
- __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
- __ Add(x18, x18, 10);
+ __ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), MemOperand(x28));
+ __ Add(x28, x28, 5);
+ __ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(), MemOperand(x28));
+ __ Add(x28, x28, 12);
+ __ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(), MemOperand(x28));
+ __ Add(x28, x28, 22);
+ __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x28));
+ __ Add(x28, x28, 10);
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5728,19 +5728,19 @@ TEST(neon_st4_q_postindex) {
START();
__ Mov(x22, 5);
__ Mov(x17, src_base);
- __ Mov(x18, src_base);
+ __ Mov(x28, src_base);
__ Ldr(q0, MemOperand(x17, 16, PostIndex));
__ Ldr(q1, MemOperand(x17, 16, PostIndex));
__ Ldr(q2, MemOperand(x17, 16, PostIndex));
__ Ldr(q3, MemOperand(x17, 16, PostIndex));
__ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(),
- MemOperand(x18, x22, PostIndex));
+ MemOperand(x28, x22, PostIndex));
__ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(),
- MemOperand(x18, 64, PostIndex));
+ MemOperand(x28, 64, PostIndex));
__ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(),
- MemOperand(x18, x22, PostIndex));
- __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
+ MemOperand(x28, x22, PostIndex));
+ __ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x28));
__ Mov(x19, src_base);
__ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -6065,13 +6065,13 @@ TEST(ldp_stp_offset) {
START();
__ Mov(x16, src_base);
__ Mov(x17, dst_base);
- __ Mov(x18, src_base + 24);
+ __ Mov(x28, src_base + 24);
__ Mov(x19, dst_base + 56);
__ Ldp(w0, w1, MemOperand(x16));
__ Ldp(w2, w3, MemOperand(x16, 4));
__ Ldp(x4, x5, MemOperand(x16, 8));
- __ Ldp(w6, w7, MemOperand(x18, -12));
- __ Ldp(x8, x9, MemOperand(x18, -16));
+ __ Ldp(w6, w7, MemOperand(x28, -12));
+ __ Ldp(x8, x9, MemOperand(x28, -16));
__ Stp(w0, w1, MemOperand(x17));
__ Stp(w2, w3, MemOperand(x17, 8));
__ Stp(x4, x5, MemOperand(x17, 16));
@@ -6100,7 +6100,7 @@ TEST(ldp_stp_offset) {
CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[6]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
- CHECK_EQUAL_64(src_base + 24, x18);
+ CHECK_EQUAL_64(src_base + 24, x28);
CHECK_EQUAL_64(dst_base + 56, x19);
}
@@ -6120,13 +6120,13 @@ TEST(ldp_stp_offset_wide) {
START();
__ Mov(x20, src_base - base_offset);
__ Mov(x21, dst_base - base_offset);
- __ Mov(x18, src_base + base_offset + 24);
+ __ Mov(x28, src_base + base_offset + 24);
__ Mov(x19, dst_base + base_offset + 56);
__ Ldp(w0, w1, MemOperand(x20, base_offset));
__ Ldp(w2, w3, MemOperand(x20, base_offset + 4));
__ Ldp(x4, x5, MemOperand(x20, base_offset + 8));
- __ Ldp(w6, w7, MemOperand(x18, -12 - base_offset));
- __ Ldp(x8, x9, MemOperand(x18, -16 - base_offset));
+ __ Ldp(w6, w7, MemOperand(x28, -12 - base_offset));
+ __ Ldp(x8, x9, MemOperand(x28, -16 - base_offset));
__ Stp(w0, w1, MemOperand(x21, base_offset));
__ Stp(w2, w3, MemOperand(x21, base_offset + 8));
__ Stp(x4, x5, MemOperand(x21, base_offset + 16));
@@ -6155,7 +6155,7 @@ TEST(ldp_stp_offset_wide) {
CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[6]);
CHECK_EQUAL_64(src_base - base_offset, x20);
CHECK_EQUAL_64(dst_base - base_offset, x21);
- CHECK_EQUAL_64(src_base + base_offset + 24, x18);
+ CHECK_EQUAL_64(src_base + base_offset + 24, x28);
CHECK_EQUAL_64(dst_base + base_offset + 56, x19);
}
@@ -6172,7 +6172,7 @@ TEST(ldp_stp_preindex) {
START();
__ Mov(x16, src_base);
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base + 16);
+ __ Mov(x28, dst_base + 16);
__ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
__ Mov(x19, x16);
__ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
@@ -6182,9 +6182,9 @@ TEST(ldp_stp_preindex) {
__ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
__ Mov(x21, x16);
__ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
- __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
- __ Mov(x22, x18);
- __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
+ __ Stp(x7, x6, MemOperand(x28, 8, PreIndex));
+ __ Mov(x22, x28);
+ __ Stp(x5, x4, MemOperand(x28, -8, PreIndex));
END();
RUN();
@@ -6204,7 +6204,7 @@ TEST(ldp_stp_preindex) {
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
- CHECK_EQUAL_64(dst_base + 16, x18);
+ CHECK_EQUAL_64(dst_base + 16, x28);
CHECK_EQUAL_64(src_base + 4, x19);
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
@@ -6227,7 +6227,7 @@ TEST(ldp_stp_preindex_wide) {
START();
__ Mov(x24, src_base - base_offset);
__ Mov(x25, dst_base + base_offset);
- __ Mov(x18, dst_base + base_offset + 16);
+ __ Mov(x28, dst_base + base_offset + 16);
__ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PreIndex));
__ Mov(x19, x24);
__ Mov(x24, src_base - base_offset + 4);
@@ -6241,10 +6241,10 @@ TEST(ldp_stp_preindex_wide) {
__ Mov(x21, x24);
__ Mov(x24, src_base - base_offset + 8);
__ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PreIndex));
- __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PreIndex));
- __ Mov(x22, x18);
- __ Mov(x18, dst_base + base_offset + 16 + 8);
- __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PreIndex));
+ __ Stp(x7, x6, MemOperand(x28, 8 - base_offset, PreIndex));
+ __ Mov(x22, x28);
+ __ Mov(x28, dst_base + base_offset + 16 + 8);
+ __ Stp(x5, x4, MemOperand(x28, -8 - base_offset, PreIndex));
END();
RUN();
@@ -6264,7 +6264,7 @@ TEST(ldp_stp_preindex_wide) {
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x24);
CHECK_EQUAL_64(dst_base, x25);
- CHECK_EQUAL_64(dst_base + 16, x18);
+ CHECK_EQUAL_64(dst_base + 16, x28);
CHECK_EQUAL_64(src_base + 4, x19);
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
@@ -6284,7 +6284,7 @@ TEST(ldp_stp_postindex) {
START();
__ Mov(x16, src_base);
__ Mov(x17, dst_base);
- __ Mov(x18, dst_base + 16);
+ __ Mov(x28, dst_base + 16);
__ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
__ Mov(x19, x16);
__ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
@@ -6294,9 +6294,9 @@ TEST(ldp_stp_postindex) {
__ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
__ Mov(x21, x16);
__ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
- __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
- __ Mov(x22, x18);
- __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
+ __ Stp(x7, x6, MemOperand(x28, 8, PostIndex));
+ __ Mov(x22, x28);
+ __ Stp(x5, x4, MemOperand(x28, -8, PostIndex));
END();
RUN();
@@ -6316,7 +6316,7 @@ TEST(ldp_stp_postindex) {
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base, x16);
CHECK_EQUAL_64(dst_base, x17);
- CHECK_EQUAL_64(dst_base + 16, x18);
+ CHECK_EQUAL_64(dst_base + 16, x28);
CHECK_EQUAL_64(src_base + 4, x19);
CHECK_EQUAL_64(dst_base + 4, x20);
CHECK_EQUAL_64(src_base + 8, x21);
@@ -6339,7 +6339,7 @@ TEST(ldp_stp_postindex_wide) {
START();
__ Mov(x24, src_base);
__ Mov(x25, dst_base);
- __ Mov(x18, dst_base + 16);
+ __ Mov(x28, dst_base + 16);
__ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PostIndex));
__ Mov(x19, x24);
__ Sub(x24, x24, base_offset);
@@ -6353,10 +6353,10 @@ TEST(ldp_stp_postindex_wide) {
__ Mov(x21, x24);
__ Sub(x24, x24, base_offset);
__ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PostIndex));
- __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PostIndex));
- __ Mov(x22, x18);
- __ Add(x18, x18, base_offset);
- __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PostIndex));
+ __ Stp(x7, x6, MemOperand(x28, 8 - base_offset, PostIndex));
+ __ Mov(x22, x28);
+ __ Add(x28, x28, base_offset);
+ __ Stp(x5, x4, MemOperand(x28, -8 - base_offset, PostIndex));
END();
RUN();
@@ -6376,7 +6376,7 @@ TEST(ldp_stp_postindex_wide) {
CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
CHECK_EQUAL_64(src_base + base_offset, x24);
CHECK_EQUAL_64(dst_base - base_offset, x25);
- CHECK_EQUAL_64(dst_base - base_offset + 16, x18);
+ CHECK_EQUAL_64(dst_base - base_offset + 16, x28);
CHECK_EQUAL_64(src_base + base_offset + 4, x19);
CHECK_EQUAL_64(dst_base - base_offset + 4, x20);
CHECK_EQUAL_64(src_base + base_offset + 8, x21);
@@ -6412,14 +6412,14 @@ TEST(ldur_stur) {
START();
__ Mov(x17, src_base);
- __ Mov(x18, dst_base);
+ __ Mov(x28, dst_base);
__ Mov(x19, src_base + 16);
__ Mov(x20, dst_base + 32);
__ Mov(x21, dst_base + 40);
__ Ldr(w0, MemOperand(x17, 1));
- __ Str(w0, MemOperand(x18, 2));
+ __ Str(w0, MemOperand(x28, 2));
__ Ldr(x1, MemOperand(x17, 3));
- __ Str(x1, MemOperand(x18, 9));
+ __ Str(x1, MemOperand(x28, 9));
__ Ldr(w2, MemOperand(x19, -9));
__ Str(w2, MemOperand(x20, -5));
__ Ldrb(w3, MemOperand(x19, -1));
@@ -6438,28 +6438,18 @@ TEST(ldur_stur) {
CHECK_EQUAL_64(0x00000001, x3);
CHECK_EQUAL_64(0x0100000000000000L, dst[4]);
CHECK_EQUAL_64(src_base, x17);
- CHECK_EQUAL_64(dst_base, x18);
+ CHECK_EQUAL_64(dst_base, x28);
CHECK_EQUAL_64(src_base + 16, x19);
CHECK_EQUAL_64(dst_base + 32, x20);
}
-namespace {
-
-void LoadLiteral(MacroAssembler* masm, Register reg, uint64_t imm) {
- // Since we do not allow non-relocatable entries in the literal pool, we need
- // to fake a relocation mode that is not NONE here.
- masm->Ldr(reg, Immediate(imm, RelocInfo::FULL_EMBEDDED_OBJECT));
-}
-
-} // namespace
-
TEST(ldr_pcrel_large_offset) {
INIT_V8();
SETUP_SIZE(1 * MB);
START();
- LoadLiteral(&masm, x1, 0x1234567890ABCDEFUL);
+ __ Ldr(x1, isolate->factory()->undefined_value());
{
v8::internal::PatchingAssembler::BlockPoolsScope scope(&masm);
@@ -6469,14 +6459,14 @@ TEST(ldr_pcrel_large_offset) {
}
}
- LoadLiteral(&masm, x2, 0x1234567890ABCDEFUL);
+ __ Ldr(x2, isolate->factory()->undefined_value());
END();
RUN();
- CHECK_EQUAL_64(0x1234567890ABCDEFUL, x1);
- CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->undefined_value(), x1);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->undefined_value(), x2);
}
TEST(ldr_literal) {
@@ -6484,132 +6474,159 @@ TEST(ldr_literal) {
SETUP();
START();
- LoadLiteral(&masm, x2, 0x1234567890ABCDEFUL);
+ __ Ldr(x2, isolate->factory()->undefined_value());
END();
RUN();
- CHECK_EQUAL_64(0x1234567890ABCDEFUL, x2);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->undefined_value(), x2);
}
#ifdef DEBUG
// These tests rely on functions available in debug mode.
enum LiteralPoolEmitOutcome { EmitExpected, NoEmitExpected };
+enum LiteralPoolEmissionAlignment { EmitAtUnaligned, EmitAtAligned };
-static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
- size_t prepadding = 0) {
+static void LdrLiteralRangeHelper(
+ size_t range, LiteralPoolEmitOutcome outcome,
+ LiteralPoolEmissionAlignment unaligned_emission) {
SETUP_SIZE(static_cast<int>(range + 1024));
- size_t code_size = 0;
- const size_t pool_entries = 2;
- const size_t kEntrySize = 8;
+ const size_t first_pool_entries = 2;
+ const size_t first_pool_size_bytes = first_pool_entries * kInt64Size;
START();
// Force a pool dump so the pool starts off empty.
- __ CheckConstPool(true, true);
+ __ ForceConstantPoolEmissionWithJump();
CHECK_CONSTANT_POOL_SIZE(0);
- // Emit prepadding to influence alignment of the pool; we don't count this
- // into code size.
- for (size_t i = 0; i < prepadding; ++i) __ Nop();
+ // Emit prepadding to influence alignment of the pool.
+ bool currently_aligned = IsAligned(__ pc_offset(), kInt64Size);
+ if ((unaligned_emission == EmitAtUnaligned && currently_aligned) ||
+ (unaligned_emission == EmitAtAligned && !currently_aligned)) {
+ __ Nop();
+ }
- LoadLiteral(&masm, x0, 0x1234567890ABCDEFUL);
- LoadLiteral(&masm, x1, 0xABCDEF1234567890UL);
- code_size += 2 * kInstrSize;
- CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
+ int initial_pc_offset = __ pc_offset();
+ __ Ldr(x0, isolate->factory()->undefined_value());
+ __ Ldr(x1, isolate->factory()->the_hole_value());
+ CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
- // Check that the requested range (allowing space for a branch over the pool)
- // can be handled by this test.
- CHECK_LE(code_size, range);
+ size_t expected_pool_size = 0;
#if defined(_M_ARM64) && !defined(__clang__)
auto PoolSizeAt = [pool_entries, kEntrySize](int pc_offset) {
#else
- auto PoolSizeAt = [](int pc_offset) {
+ auto PoolSizeAt = [unaligned_emission](int pc_offset) {
#endif
// To determine padding, consider the size of the prologue of the pool,
// and the jump around the pool, which we always need.
size_t prologue_size = 2 * kInstrSize + kInstrSize;
size_t pc = pc_offset + prologue_size;
- const size_t padding = IsAligned(pc, 8) ? 0 : 4;
- return prologue_size + pool_entries * kEntrySize + padding;
+ const size_t padding = IsAligned(pc, kInt64Size) ? 0 : kInt32Size;
+ CHECK_EQ(padding == 0, unaligned_emission == EmitAtAligned);
+ return prologue_size + first_pool_size_bytes + padding;
};
int pc_offset_before_emission = -1;
- // Emit NOPs up to 'range'.
- while (code_size < range) {
+ bool pool_was_emitted = false;
+ while (__ pc_offset() - initial_pc_offset < static_cast<intptr_t>(range)) {
pc_offset_before_emission = __ pc_offset() + kInstrSize;
__ Nop();
- code_size += kInstrSize;
+ if (__ GetConstantPoolEntriesSizeForTesting() == 0) {
+ pool_was_emitted = true;
+ break;
+ }
}
- CHECK_EQ(code_size, range);
if (outcome == EmitExpected) {
- CHECK_CONSTANT_POOL_SIZE(0);
+ if (!pool_was_emitted) {
+ FATAL(
+ "Pool was not emitted up to pc_offset %d which corresponds to a "
+ "distance to the first constant of %d bytes",
+ __ pc_offset(), __ pc_offset() - initial_pc_offset);
+ }
// Check that the size of the emitted constant pool is as expected.
- size_t pool_size = PoolSizeAt(pc_offset_before_emission);
- CHECK_EQ(pc_offset_before_emission + pool_size, __ pc_offset());
- byte* pool_start = buf + pc_offset_before_emission;
- Instruction* branch = reinterpret_cast<Instruction*>(pool_start);
- CHECK(branch->IsImmBranch());
- CHECK_EQ(pool_size, branch->ImmPCOffset());
- Instruction* marker =
- reinterpret_cast<Instruction*>(pool_start + kInstrSize);
- CHECK(marker->IsLdrLiteralX());
- const size_t padding =
- IsAligned(pc_offset_before_emission + kInstrSize, kEntrySize) ? 0 : 1;
- CHECK_EQ(pool_entries * 2 + 1 + padding, marker->ImmLLiteral());
-
+ expected_pool_size = PoolSizeAt(pc_offset_before_emission);
+ CHECK_EQ(pc_offset_before_emission + expected_pool_size, __ pc_offset());
} else {
CHECK_EQ(outcome, NoEmitExpected);
- CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
+ if (pool_was_emitted) {
+ FATAL("Pool was unexpectedly emitted at pc_offset %d ",
+ pc_offset_before_emission);
+ }
+ CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
CHECK_EQ(pc_offset_before_emission, __ pc_offset());
}
// Force a pool flush to check that a second pool functions correctly.
- __ CheckConstPool(true, true);
+ __ ForceConstantPoolEmissionWithJump();
CHECK_CONSTANT_POOL_SIZE(0);
// These loads should be after the pool (and will require a new one).
- LoadLiteral(&masm, x4, 0x34567890ABCDEF12UL);
- LoadLiteral(&masm, x5, 0xABCDEF0123456789UL);
- CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
+ const int second_pool_entries = 2;
+ __ Ldr(x4, isolate->factory()->true_value());
+ __ Ldr(x5, isolate->factory()->false_value());
+ CHECK_CONSTANT_POOL_SIZE(second_pool_entries * kInt64Size);
+
END();
+ if (outcome == EmitExpected) {
+ Address pool_start = code->InstructionStart() + pc_offset_before_emission;
+ Instruction* branch = reinterpret_cast<Instruction*>(pool_start);
+ CHECK(branch->IsImmBranch());
+ CHECK_EQ(expected_pool_size, branch->ImmPCOffset());
+ Instruction* marker =
+ reinterpret_cast<Instruction*>(pool_start + kInstrSize);
+ CHECK(marker->IsLdrLiteralX());
+ size_t pool_data_start_offset = pc_offset_before_emission + kInstrSize;
+ size_t padding =
+ IsAligned(pool_data_start_offset, kInt64Size) ? 0 : kInt32Size;
+ size_t marker_size = kInstrSize;
+ CHECK_EQ((first_pool_size_bytes + marker_size + padding) / kInt32Size,
+ marker->ImmLLiteral());
+ }
+
RUN();
// Check that the literals loaded correctly.
- CHECK_EQUAL_64(0x1234567890ABCDEFUL, x0);
- CHECK_EQUAL_64(0xABCDEF1234567890UL, x1);
- CHECK_EQUAL_64(0x34567890ABCDEF12UL, x4);
- CHECK_EQUAL_64(0xABCDEF0123456789UL, x5);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->undefined_value(), x0);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->the_hole_value(), x1);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->true_value(), x4);
+ CHECK_FULL_HEAP_OBJECT_IN_REGISTER(isolate->factory()->false_value(), x5);
}
TEST(ldr_literal_range_max_dist_emission_1) {
INIT_V8();
- LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
- EmitExpected);
+ LdrLiteralRangeHelper(
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
+ MacroAssembler::GetCheckConstPoolIntervalForTesting(),
+ EmitExpected, EmitAtAligned);
}
TEST(ldr_literal_range_max_dist_emission_2) {
INIT_V8();
- LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
- EmitExpected, 1);
+ LdrLiteralRangeHelper(
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
+ MacroAssembler::GetCheckConstPoolIntervalForTesting(),
+ EmitExpected, EmitAtUnaligned);
}
TEST(ldr_literal_range_max_dist_no_emission_1) {
INIT_V8();
LdrLiteralRangeHelper(
- MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
- NoEmitExpected);
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
+ MacroAssembler::GetCheckConstPoolIntervalForTesting(),
+ NoEmitExpected, EmitAtUnaligned);
}
TEST(ldr_literal_range_max_dist_no_emission_2) {
INIT_V8();
LdrLiteralRangeHelper(
- MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
- NoEmitExpected, 1);
+ MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
+ MacroAssembler::GetCheckConstPoolIntervalForTesting(),
+ NoEmitExpected, EmitAtAligned);
}
#endif
@@ -6682,7 +6699,7 @@ TEST(add_sub_wide_imm) {
__ Add(w12, w0, Operand(0x12345678));
__ Add(w13, w1, Operand(0xFFFFFFFF));
- __ Add(w18, w0, Operand(kWMinInt));
+ __ Add(w28, w0, Operand(kWMinInt));
__ Sub(w19, w0, Operand(kWMinInt));
__ Sub(x20, x0, Operand(0x1234567890ABCDEFUL));
@@ -6697,7 +6714,7 @@ TEST(add_sub_wide_imm) {
CHECK_EQUAL_32(0x12345678, w12);
CHECK_EQUAL_64(0x0, x13);
- CHECK_EQUAL_32(kWMinInt, w18);
+ CHECK_EQUAL_32(kWMinInt, w28);
CHECK_EQUAL_32(kWMinInt, w19);
CHECK_EQUAL_64(-0x1234567890ABCDEFLL, x20);
@@ -6720,7 +6737,7 @@ TEST(add_sub_shifted) {
__ Add(x13, x0, Operand(x1, ASR, 8));
__ Add(x14, x0, Operand(x2, ASR, 8));
__ Add(w15, w0, Operand(w1, ASR, 8));
- __ Add(w18, w3, Operand(w1, ROR, 8));
+ __ Add(w28, w3, Operand(w1, ROR, 8));
__ Add(x19, x3, Operand(x1, ROR, 8));
__ Sub(x20, x3, Operand(x2));
@@ -6741,7 +6758,7 @@ TEST(add_sub_shifted) {
CHECK_EQUAL_64(0x000123456789ABCDL, x13);
CHECK_EQUAL_64(0xFFFEDCBA98765432L, x14);
CHECK_EQUAL_64(0xFF89ABCD, x15);
- CHECK_EQUAL_64(0xEF89ABCC, x18);
+ CHECK_EQUAL_64(0xEF89ABCC, x28);
CHECK_EQUAL_64(0xEF0123456789ABCCL, x19);
CHECK_EQUAL_64(0x0123456789ABCDEFL, x20);
@@ -6773,7 +6790,7 @@ TEST(add_sub_extended) {
__ Add(x15, x0, Operand(x1, SXTB, 1));
__ Add(x16, x0, Operand(x1, SXTH, 2));
__ Add(x17, x0, Operand(x1, SXTW, 3));
- __ Add(x18, x0, Operand(x2, SXTB, 0));
+ __ Add(x4, x0, Operand(x2, SXTB, 0));
__ Add(x19, x0, Operand(x2, SXTB, 1));
__ Add(x20, x0, Operand(x2, SXTH, 2));
__ Add(x21, x0, Operand(x2, SXTW, 3));
@@ -6803,7 +6820,7 @@ TEST(add_sub_extended) {
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFDEL, x15);
CHECK_EQUAL_64(0xFFFFFFFFFFFF37BCL, x16);
CHECK_EQUAL_64(0xFFFFFFFC4D5E6F78L, x17);
- CHECK_EQUAL_64(0x10L, x18);
+ CHECK_EQUAL_64(0x10L, x4);
CHECK_EQUAL_64(0x20L, x19);
CHECK_EQUAL_64(0xC840L, x20);
CHECK_EQUAL_64(0x3B2A19080L, x21);
@@ -7430,7 +7447,7 @@ TEST(adc_sbc_shift) {
// Set the C flag.
__ Cmp(w0, Operand(w0));
- __ Adc(x18, x2, Operand(x3));
+ __ Adc(x28, x2, Operand(x3));
__ Adc(x19, x0, Operand(x1, LSL, 60));
__ Sbc(x20, x4, Operand(x3, LSR, 4));
__ Adc(x21, x2, Operand(x3, ASR, 4));
@@ -7457,7 +7474,7 @@ TEST(adc_sbc_shift) {
CHECK_EQUAL_32(0x91111110, w13);
CHECK_EQUAL_32(0x9A222221, w14);
- CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFLL + 1, x18);
+ CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFLL + 1, x28);
CHECK_EQUAL_64((1LL << 60) + 1, x19);
CHECK_EQUAL_64(0xF0123456789ABCDDL + 1, x20);
CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
@@ -7581,7 +7598,7 @@ TEST(adc_sbc_wide_imm) {
// Set the C flag.
__ Cmp(w0, Operand(w0));
- __ Adc(x18, x0, Operand(0x1234567890ABCDEFUL));
+ __ Adc(x28, x0, Operand(0x1234567890ABCDEFUL));
__ Adc(w19, w0, Operand(0xFFFFFFFF));
__ Sbc(x20, x0, Operand(0x1234567890ABCDEFUL));
__ Sbc(w21, w0, Operand(0xFFFFFFFF));
@@ -7598,7 +7615,7 @@ TEST(adc_sbc_wide_imm) {
CHECK_EQUAL_64(0xFFFFFFFF, x11);
CHECK_EQUAL_64(0xFFFF, x12);
- CHECK_EQUAL_64(0x1234567890ABCDEFUL + 1, x18);
+ CHECK_EQUAL_64(0x1234567890ABCDEFUL + 1, x28);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0xEDCBA9876F543211UL, x20);
CHECK_EQUAL_64(1, x21);
@@ -7758,7 +7775,7 @@ TEST(cmp_shift) {
SETUP();
START();
- __ Mov(x18, 0xF0000000);
+ __ Mov(x28, 0xF0000000);
__ Mov(x19, 0xF000000010000000UL);
__ Mov(x20, 0xF0000000F0000000UL);
__ Mov(x21, 0x7800000078000000UL);
@@ -7778,7 +7795,7 @@ TEST(cmp_shift) {
__ Cmp(w19, Operand(w23, LSR, 3));
__ Mrs(x2, NZCV);
- __ Cmp(x18, Operand(x24, LSR, 4));
+ __ Cmp(x28, Operand(x24, LSR, 4));
__ Mrs(x3, NZCV);
__ Cmp(w20, Operand(w25, ASR, 2));
@@ -7993,7 +8010,7 @@ TEST(csel) {
__ Cneg(x12, x24, ne);
__ csel(w15, w24, w25, al);
- __ csel(x18, x24, x25, nv);
+ __ csel(x28, x24, x25, nv);
__ CzeroX(x24, ne);
__ CzeroX(x25, eq);
@@ -8020,7 +8037,7 @@ TEST(csel) {
CHECK_EQUAL_64(0x0000000F, x13);
CHECK_EQUAL_64(0x0000000F0000000FUL, x14);
CHECK_EQUAL_64(0x0000000F, x15);
- CHECK_EQUAL_64(0x0000000F0000000FUL, x18);
+ CHECK_EQUAL_64(0x0000000F0000000FUL, x28);
CHECK_EQUAL_64(0, x24);
CHECK_EQUAL_64(0x0000001F0000001FUL, x25);
CHECK_EQUAL_64(0x0000001F0000001FUL, x26);
@@ -8032,11 +8049,11 @@ TEST(csel_imm) {
SETUP();
START();
- __ Mov(x18, 0);
+ __ Mov(x28, 0);
__ Mov(x19, 0x80000000);
__ Mov(x20, 0x8000000000000000UL);
- __ Cmp(x18, Operand(0));
+ __ Cmp(x28, Operand(0));
__ Csel(w0, w19, -2, ne);
__ Csel(w1, w19, -1, ne);
__ Csel(w2, w19, 0, ne);
@@ -8098,7 +8115,7 @@ TEST(lslv) {
__ Lsl(x16, x0, x1);
__ Lsl(x17, x0, x2);
- __ Lsl(x18, x0, x3);
+ __ Lsl(x28, x0, x3);
__ Lsl(x19, x0, x4);
__ Lsl(x20, x0, x5);
__ Lsl(x21, x0, x6);
@@ -8116,7 +8133,7 @@ TEST(lslv) {
CHECK_EQUAL_64(value, x0);
CHECK_EQUAL_64(value << (shift[0] & 63), x16);
CHECK_EQUAL_64(value << (shift[1] & 63), x17);
- CHECK_EQUAL_64(value << (shift[2] & 63), x18);
+ CHECK_EQUAL_64(value << (shift[2] & 63), x28);
CHECK_EQUAL_64(value << (shift[3] & 63), x19);
CHECK_EQUAL_64(value << (shift[4] & 63), x20);
CHECK_EQUAL_64(value << (shift[5] & 63), x21);
@@ -8148,7 +8165,7 @@ TEST(lsrv) {
__ Lsr(x16, x0, x1);
__ Lsr(x17, x0, x2);
- __ Lsr(x18, x0, x3);
+ __ Lsr(x28, x0, x3);
__ Lsr(x19, x0, x4);
__ Lsr(x20, x0, x5);
__ Lsr(x21, x0, x6);
@@ -8166,7 +8183,7 @@ TEST(lsrv) {
CHECK_EQUAL_64(value, x0);
CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
- CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
+ CHECK_EQUAL_64(value >> (shift[2] & 63), x28);
CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
@@ -8200,7 +8217,7 @@ TEST(asrv) {
__ Asr(x16, x0, x1);
__ Asr(x17, x0, x2);
- __ Asr(x18, x0, x3);
+ __ Asr(x28, x0, x3);
__ Asr(x19, x0, x4);
__ Asr(x20, x0, x5);
__ Asr(x21, x0, x6);
@@ -8218,7 +8235,7 @@ TEST(asrv) {
CHECK_EQUAL_64(value, x0);
CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
- CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
+ CHECK_EQUAL_64(value >> (shift[2] & 63), x28);
CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
@@ -8252,7 +8269,7 @@ TEST(rorv) {
__ Ror(x16, x0, x1);
__ Ror(x17, x0, x2);
- __ Ror(x18, x0, x3);
+ __ Ror(x28, x0, x3);
__ Ror(x19, x0, x4);
__ Ror(x20, x0, x5);
__ Ror(x21, x0, x6);
@@ -8270,7 +8287,7 @@ TEST(rorv) {
CHECK_EQUAL_64(value, x0);
CHECK_EQUAL_64(0xF0123456789ABCDEUL, x16);
CHECK_EQUAL_64(0xEF0123456789ABCDUL, x17);
- CHECK_EQUAL_64(0xDEF0123456789ABCUL, x18);
+ CHECK_EQUAL_64(0xDEF0123456789ABCUL, x28);
CHECK_EQUAL_64(0xCDEF0123456789ABUL, x19);
CHECK_EQUAL_64(0xABCDEF0123456789UL, x20);
CHECK_EQUAL_64(0x789ABCDEF0123456UL, x21);
@@ -8338,7 +8355,7 @@ TEST(sbfm) {
__ sbfm(w17, w2, 24, 15);
// Aliases.
- __ Asr(x18, x1, 32);
+ __ Asr(x3, x1, 32);
__ Asr(x19, x2, 32);
__ Sbfiz(x20, x1, 8, 16);
__ Sbfiz(x21, x2, 8, 16);
@@ -8364,7 +8381,7 @@ TEST(sbfm) {
CHECK_EQUAL_32(0x54, w16);
CHECK_EQUAL_32(0x00321000, w17);
- CHECK_EQUAL_64(0x01234567L, x18);
+ CHECK_EQUAL_64(0x01234567L, x3);
CHECK_EQUAL_64(0xFFFFFFFFFEDCBA98L, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFCDEF00L, x20);
CHECK_EQUAL_64(0x321000L, x21);
@@ -8403,7 +8420,7 @@ TEST(ubfm) {
__ Lsl(x15, x1, 63);
__ Lsl(x16, x1, 0);
__ Lsr(x17, x1, 32);
- __ Ubfiz(x18, x1, 8, 16);
+ __ Ubfiz(x3, x1, 8, 16);
__ Ubfx(x19, x1, 8, 16);
__ Uxtb(x20, x1);
__ Uxth(x21, x1);
@@ -8425,7 +8442,7 @@ TEST(ubfm) {
CHECK_EQUAL_64(0x8000000000000000L, x15);
CHECK_EQUAL_64(0x0123456789ABCDEFL, x16);
CHECK_EQUAL_64(0x01234567L, x17);
- CHECK_EQUAL_64(0xCDEF00L, x18);
+ CHECK_EQUAL_64(0xCDEF00L, x3);
CHECK_EQUAL_64(0xABCDL, x19);
CHECK_EQUAL_64(0xEFL, x20);
CHECK_EQUAL_64(0xCDEFL, x21);
@@ -9359,8 +9376,8 @@ TEST(fcmp) {
__ Fmov(s8, 0.0);
__ Fmov(s9, 0.5);
- __ Mov(w18, 0x7F800001); // Single precision NaN.
- __ Fmov(s18, w18);
+ __ Mov(w19, 0x7F800001); // Single precision NaN.
+ __ Fmov(s18, w19);
__ Fcmp(s8, s8);
__ Mrs(x0, NZCV);
@@ -10190,6 +10207,9 @@ TEST(fcvtas) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10207,8 +10227,8 @@ TEST(fcvtas) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 2.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 2.5);
__ Fmov(s19, -2.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10239,7 +10259,6 @@ TEST(fcvtas) {
__ Fcvtas(w14, d14);
__ Fcvtas(w15, d15);
__ Fcvtas(x17, s17);
- __ Fcvtas(x18, s18);
__ Fcvtas(x19, s19);
__ Fcvtas(x20, s20);
__ Fcvtas(x21, s21);
@@ -10250,6 +10269,12 @@ TEST(fcvtas) {
__ Fcvtas(x26, d26);
__ Fcvtas(x27, d27);
__ Fcvtas(x28, d28);
+
+ // Save results to the scratch memory, for those that don't fit in registers.
+ __ Mov(x30, scratch_base);
+ __ Fcvtas(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtas(x29, d29);
__ Fcvtas(x30, d30);
END();
@@ -10272,8 +10297,8 @@ TEST(fcvtas) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
+ CHECK_EQUAL_64(3, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(3, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFDUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10339,7 +10364,7 @@ TEST(fcvtau) {
__ Fcvtau(w15, d15);
__ Fcvtau(x16, s16);
__ Fcvtau(x17, s17);
- __ Fcvtau(x18, s18);
+ __ Fcvtau(x7, s18);
__ Fcvtau(x19, s19);
__ Fcvtau(x20, s20);
__ Fcvtau(x21, s21);
@@ -10371,7 +10396,7 @@ TEST(fcvtau) {
CHECK_EQUAL_64(0xFFFFFFFE, x14);
CHECK_EQUAL_64(1, x16);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(3, x18);
+ CHECK_EQUAL_64(3, x7);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0, x21);
@@ -10389,6 +10414,9 @@ TEST(fcvtms) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10406,8 +10434,8 @@ TEST(fcvtms) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10438,7 +10466,6 @@ TEST(fcvtms) {
__ Fcvtms(w14, d14);
__ Fcvtms(w15, d15);
__ Fcvtms(x17, s17);
- __ Fcvtms(x18, s18);
__ Fcvtms(x19, s19);
__ Fcvtms(x20, s20);
__ Fcvtms(x21, s21);
@@ -10449,6 +10476,12 @@ TEST(fcvtms) {
__ Fcvtms(x26, d26);
__ Fcvtms(x27, d27);
__ Fcvtms(x28, d28);
+
+ // Save results to the scratch memory, for those that don't fit in registers.
+ __ Mov(x30, scratch_base);
+ __ Fcvtms(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtms(x29, d29);
__ Fcvtms(x30, d30);
END();
@@ -10471,8 +10504,8 @@ TEST(fcvtms) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
+ CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10491,6 +10524,9 @@ TEST(fcvtmu) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10508,8 +10544,8 @@ TEST(fcvtmu) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10538,8 +10574,8 @@ TEST(fcvtmu) {
__ Fcvtmu(w12, d12);
__ Fcvtmu(w13, d13);
__ Fcvtmu(w14, d14);
+ __ Fcvtmu(w15, d15);
__ Fcvtmu(x17, s17);
- __ Fcvtmu(x18, s18);
__ Fcvtmu(x19, s19);
__ Fcvtmu(x20, s20);
__ Fcvtmu(x21, s21);
@@ -10550,6 +10586,12 @@ TEST(fcvtmu) {
__ Fcvtmu(x26, d26);
__ Fcvtmu(x27, d27);
__ Fcvtmu(x28, d28);
+
+ // Save results to the scratch memory, for those that don't fit in registers.
+ __ Mov(x30, scratch_base);
+ __ Fcvtmu(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtmu(x29, d29);
__ Fcvtmu(x30, d30);
END();
@@ -10571,8 +10613,9 @@ TEST(fcvtmu) {
CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
+ CHECK_EQUAL_64(0x0, x15);
+ CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0x0UL, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x0UL, x21);
@@ -10591,6 +10634,9 @@ TEST(fcvtns) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10608,8 +10654,8 @@ TEST(fcvtns) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10640,7 +10686,6 @@ TEST(fcvtns) {
__ Fcvtns(w14, d14);
__ Fcvtns(w15, d15);
__ Fcvtns(x17, s17);
- __ Fcvtns(x18, s18);
__ Fcvtns(x19, s19);
__ Fcvtns(x20, s20);
__ Fcvtns(x21, s21);
@@ -10651,6 +10696,12 @@ TEST(fcvtns) {
__ Fcvtns(x26, d26);
__ Fcvtns(x27, d27);
// __ Fcvtns(x28, d28);
+
+ // Save results to the scratch memory, for those that don't fit in registers.
+ __ Mov(x30, scratch_base);
+ __ Fcvtns(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtns(x29, d29);
__ Fcvtns(x30, d30);
END();
@@ -10673,8 +10724,8 @@ TEST(fcvtns) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
+ CHECK_EQUAL_64(2, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(2, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10701,6 +10752,7 @@ TEST(fcvtnu) {
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
__ Fmov(s6, 0xFFFFFF00); // Largest float < UINT32_MAX.
+ __ Fmov(s7, 1.5);
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
@@ -10710,7 +10762,6 @@ TEST(fcvtnu) {
__ Fmov(d14, 0xFFFFFFFE);
__ Fmov(s16, 1.0);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10730,6 +10781,7 @@ TEST(fcvtnu) {
__ Fcvtnu(w4, s4);
__ Fcvtnu(w5, s5);
__ Fcvtnu(w6, s6);
+ __ Fcvtnu(x7, s7);
__ Fcvtnu(w8, d8);
__ Fcvtnu(w9, d9);
__ Fcvtnu(w10, d10);
@@ -10740,7 +10792,6 @@ TEST(fcvtnu) {
__ Fcvtnu(w15, d15);
__ Fcvtnu(x16, s16);
__ Fcvtnu(x17, s17);
- __ Fcvtnu(x18, s18);
__ Fcvtnu(x19, s19);
__ Fcvtnu(x20, s20);
__ Fcvtnu(x21, s21);
@@ -10763,6 +10814,7 @@ TEST(fcvtnu) {
CHECK_EQUAL_64(0xFFFFFFFF, x4);
CHECK_EQUAL_64(0, x5);
CHECK_EQUAL_64(0xFFFFFF00, x6);
+ CHECK_EQUAL_64(2, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(2, x10);
@@ -10772,7 +10824,6 @@ TEST(fcvtnu) {
CHECK_EQUAL_64(0xFFFFFFFE, x14);
CHECK_EQUAL_64(1, x16);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(2, x18);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0, x21);
@@ -10790,6 +10841,9 @@ TEST(fcvtzs) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10807,8 +10861,8 @@ TEST(fcvtzs) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10839,7 +10893,6 @@ TEST(fcvtzs) {
__ Fcvtzs(w14, d14);
__ Fcvtzs(w15, d15);
__ Fcvtzs(x17, s17);
- __ Fcvtzs(x18, s18);
__ Fcvtzs(x19, s19);
__ Fcvtzs(x20, s20);
__ Fcvtzs(x21, s21);
@@ -10850,6 +10903,12 @@ TEST(fcvtzs) {
__ Fcvtzs(x26, d26);
__ Fcvtzs(x27, d27);
__ Fcvtzs(x28, d28);
+
+ // Save results to the scratch memory, for those that don't fit in registers.
+ __ Mov(x30, scratch_base);
+ __ Fcvtmu(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtzs(x29, d29);
__ Fcvtzs(x30, d30);
END();
@@ -10872,8 +10931,8 @@ TEST(fcvtzs) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
+ CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10892,6 +10951,9 @@ TEST(fcvtzu) {
INIT_V8();
SETUP();
+ int64_t scratch = 0;
+ uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
@@ -10909,8 +10971,8 @@ TEST(fcvtzu) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
+ __ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
- __ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
@@ -10939,8 +11001,8 @@ TEST(fcvtzu) {
__ Fcvtzu(w12, d12);
__ Fcvtzu(w13, d13);
__ Fcvtzu(w14, d14);
+ __ Fcvtzu(w15, d15);
__ Fcvtzu(x17, s17);
- __ Fcvtzu(x18, s18);
__ Fcvtzu(x19, s19);
__ Fcvtzu(x20, s20);
__ Fcvtzu(x21, s21);
@@ -10951,6 +11013,12 @@ TEST(fcvtzu) {
__ Fcvtzu(x26, d26);
__ Fcvtzu(x27, d27);
__ Fcvtzu(x28, d28);
+
+ // Save results to the scratch memory, for those that don't fit in registers.
+ __ Mov(x30, scratch_base);
+ __ Fcvtzu(x29, s16);
+ __ Str(x29, MemOperand(x30));
+
__ Fcvtzu(x29, d29);
__ Fcvtzu(x30, d30);
END();
@@ -10972,8 +11040,9 @@ TEST(fcvtzu) {
CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
+ CHECK_EQUAL_64(0x0, x15);
+ CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
- CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0x0UL, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x0UL, x21);
@@ -11439,6 +11508,8 @@ TEST(zero_dest) {
__ Mov(x0, 0);
__ Mov(x1, literal_base);
for (int i = 2; i < x30.code(); i++) {
+ // Skip x18, the platform register.
+ if (i == 18) continue;
__ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
}
before.Dump(&masm);
@@ -11503,6 +11574,8 @@ TEST(zero_dest_setflags) {
__ Mov(x0, 0);
__ Mov(x1, literal_base);
for (int i = 2; i < 30; i++) {
+ // Skip x18, the platform register.
+ if (i == 18) continue;
__ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
}
before.Dump(&masm);
@@ -11858,10 +11931,14 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
// Registers in the TmpList can be used by the macro assembler for debug code
// (for example in 'Pop'), so we can't use them here.
- static RegList const allowed = ~(masm.TmpList()->list());
+ // x18 is reserved for the platform register.
+ // Disallow x31 / xzr, to ensure this list has an even number of elements, to
+ // ensure alignment.
+ RegList allowed = ~(masm.TmpList()->list() | x18.bit() | x31.bit());
if (reg_count == kPushPopMaxRegCount) {
reg_count = CountSetBits(allowed, kNumberOfRegisters);
}
+ DCHECK_EQ(reg_count % 2, 0);
// Work out which registers to use, based on reg_size.
auto r = CreateRegisterArray<Register, kNumberOfRegisters>();
auto x = CreateRegisterArray<Register, kNumberOfRegisters>();
@@ -12245,7 +12322,7 @@ TEST(push_pop) {
__ Claim(2);
__ Push(w2, w2, w1, w1);
__ Push(x3, x3);
- __ Pop(w18, w19, w20, w21);
+ __ Pop(w30, w19, w20, w21);
__ Pop(x22, x23);
__ Claim(2);
@@ -12259,8 +12336,10 @@ TEST(push_pop) {
__ Claim(2);
__ PushXRegList(0);
__ PopXRegList(0);
- __ PushXRegList(0xFFFFFFFF);
- __ PopXRegList(0xFFFFFFFF);
+ // Don't push/pop x18 (platform register) or xzr (for alignment)
+ RegList all_regs = 0xFFFFFFFF & ~(x18.bit() | x31.bit());
+ __ PushXRegList(all_regs);
+ __ PopXRegList(all_regs);
__ Drop(12);
END();
@@ -12287,7 +12366,7 @@ TEST(push_pop) {
CHECK_EQUAL_32(0x33333333U, w15);
CHECK_EQUAL_32(0x22222222U, w14);
- CHECK_EQUAL_32(0x11111111U, w18);
+ CHECK_EQUAL_32(0x11111111U, w30);
CHECK_EQUAL_32(0x11111111U, w19);
CHECK_EQUAL_32(0x11111111U, w20);
CHECK_EQUAL_32(0x11111111U, w21);
@@ -14242,12 +14321,11 @@ TEST(default_nan_double) {
DefaultNaNHelper(qn, qm, qa);
}
-TEST(call_no_relocation) {
+TEST(near_call_no_relocation) {
INIT_V8();
SETUP();
START();
- Address buf_addr = reinterpret_cast<Address>(buf);
Label function;
Label test;
@@ -14263,7 +14341,8 @@ TEST(call_no_relocation) {
__ Push(lr, xzr);
{
Assembler::BlockConstPoolScope scope(&masm);
- __ Call(buf_addr + function.pos(), RelocInfo::NONE);
+ int offset = (function.pos() - __ pc_offset()) / kInstrSize;
+ __ near_call(offset, RelocInfo::NONE);
}
__ Pop(xzr, lr);
END();
@@ -14273,7 +14352,6 @@ TEST(call_no_relocation) {
CHECK_EQUAL_64(1, x0);
}
-
static void AbsHelperX(int64_t value) {
int64_t expected;
@@ -14421,12 +14499,11 @@ TEST(pool_size) {
__ bind(&exit);
- HandleScope handle_scope(isolate);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB)
- .set_self_reference(masm.CodeObject())
- .Build();
+ code = Factory::CodeBuilder(isolate, desc, Code::STUB)
+ .set_self_reference(masm.CodeObject())
+ .Build();
unsigned pool_count = 0;
int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
@@ -14625,6 +14702,7 @@ TEST(internal_reference_linked) {
#undef CHECK_EQUAL_32
#undef CHECK_EQUAL_FP32
#undef CHECK_EQUAL_64
+#undef CHECK_FULL_HEAP_OBJECT_IN_REGISTER
#undef CHECK_EQUAL_FP64
#undef CHECK_EQUAL_128
#undef CHECK_CONSTANT_POOL_SIZE
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 947120816b..1cc1aa3213 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -4825,8 +4825,9 @@ TEST(r6_beqzc) {
}
}
-void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
- MSARegister w, Register t0, Register t1) {
+void load_elements_of_vector(
+ MacroAssembler& assm, // NOLINT(runtime/references)
+ const uint64_t elements[], MSARegister w, Register t0, Register t1) {
__ li(t0, static_cast<uint32_t>(elements[0] & 0xFFFFFFFF));
__ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 0, t0);
@@ -4837,8 +4838,9 @@ void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
__ insert_w(w, 3, t1);
}
-inline void store_elements_of_vector(MacroAssembler& assm, MSARegister w,
- Register a) {
+inline void store_elements_of_vector(
+ MacroAssembler& assm, // NOLINT(runtime/references)
+ MSARegister w, Register a) {
__ st_d(w, MemOperand(a, 0));
}
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 3e1ac5902b..f337fdfcac 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -5430,8 +5430,9 @@ TEST(r6_beqzc) {
}
}
-void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
- MSARegister w, Register t0, Register t1) {
+void load_elements_of_vector(
+ MacroAssembler& assm, // NOLINT(runtime/references)
+ const uint64_t elements[], MSARegister w, Register t0, Register t1) {
__ li(t0, static_cast<uint32_t>(elements[0] & 0xFFFFFFFF));
__ li(t1, static_cast<uint32_t>((elements[0] >> 32) & 0xFFFFFFFF));
__ insert_w(w, 0, t0);
@@ -5442,8 +5443,9 @@ void load_elements_of_vector(MacroAssembler& assm, const uint64_t elements[],
__ insert_w(w, 3, t1);
}
-inline void store_elements_of_vector(MacroAssembler& assm, MSARegister w,
- Register a) {
+inline void store_elements_of_vector(
+ MacroAssembler& assm, // NOLINT(runtime/references)
+ MSARegister w, Register a) {
__ st_d(w, MemOperand(a, 0));
}
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index 18dc72a16e..3456f72c57 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -600,6 +600,11 @@ TEST(12) {
// vector basics
TEST(13) {
+ // check if the VECTOR_FACILITY is supported
+ if (!CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ return;
+ }
+
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -659,6 +664,11 @@ TEST(13) {
// vector sum, packs, unpacks
TEST(14) {
+ // check if the VECTOR_FACILITY is supported
+ if (!CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ return;
+ }
+
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -743,6 +753,11 @@ TEST(14) {
// vector comparisons
TEST(15) {
+ // check if the VECTOR_FACILITY is supported
+ if (!CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ return;
+ }
+
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -827,6 +842,11 @@ TEST(15) {
// vector select and test mask
TEST(16) {
+ // check if the VECTOR_FACILITY is supported
+ if (!CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ return;
+ }
+
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -868,6 +888,11 @@ TEST(16) {
// vector fp instructions
TEST(17) {
+ // check if the VECTOR_FACILITY is supported
+ if (!CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ return;
+ }
+
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -934,6 +959,98 @@ TEST(17) {
CHECK_EQ(0, static_cast<int>(res));
}
+//TMHH, TMHL
+TEST(18) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(AssemblerOptions{});
+
+ Label done, error;
+ Label continue1, continue2, continue3, continue4;
+ Label continue5, continue6, continue7, continue8, continue9;
+
+ // selected bits all 0
+ __ lgfi(r1, Operand(0));
+ __ tmhh(r1, Operand(1));
+ __ beq(&continue1); //8
+ __ b(&error);
+
+ __ bind(&continue1);
+ __ tmhl(r1, Operand(1));
+ __ beq(&continue2); //8
+ __ b(&error);
+
+ // mask = 0
+ __ bind(&continue2);
+ __ lgfi(r1, Operand(-1));
+ __ tmhh(r1, Operand(0));
+ __ beq(&continue3); //8
+ __ b(&error);
+
+ __ bind(&continue3);
+ __ tmhh(r1, Operand(0));
+ __ beq(&continue4); //8
+ __ b(&error);
+
+ // selected bits all 1
+ __ bind(&continue4);
+ __ tmhh(r1, Operand(1));
+ __ b(Condition(1), &continue5); //1
+ __ b(&error);
+
+ __ bind(&continue5);
+ __ tmhl(r1, Operand(1));
+ __ b(Condition(1), &continue6); //1
+ __ b(&error);
+
+ // leftmost = 1
+ __ bind(&continue6);
+ __ lgfi(r1, Operand(0xF000F000));
+ __ slag(r2, r1, Operand(32));
+ __ tmhh(r2, Operand(0xFFFF));
+ __ b(Condition(2), &done); //2
+ __ b(&error);
+
+ __ bind(&continue7);
+ __ tmhl(r1, Operand(0xFFFF));
+ __ b(Condition(2), &continue8); //2
+ __ b(&error);
+
+ // leftmost = 0
+ __ bind(&continue8);
+ __ lgfi(r1, Operand(0x0FF00FF0));
+ __ slag(r2, r1, Operand(32));
+ __ tmhh(r2, Operand(0xFFFF));
+ __ b(Condition(4), &done); //4
+ __ b(&error);
+
+ __ bind(&continue9);
+ __ tmhl(r1, Operand(0xFFFF));
+ __ b(Condition(4), &done); //4
+ __ b(&error);
+
+ __ bind(&error);
+ __ lgfi(r2, Operand(1));
+ __ b(r14);
+
+ __ bind(&done);
+ __ lgfi(r2, Operand::Zero());
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+#ifdef DEBUG
+ code->Print();
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ // f.Call(reg2, reg3, reg4, reg5, reg6) -> set the register value
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(0, static_cast<int>(res));
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 63c0602638..e7fc946675 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -1109,7 +1109,8 @@ TEST(TryHasOwnProperty) {
factory->NewFunctionForTest(factory->empty_string());
Handle<JSObject> object = factory->NewJSObject(function);
AddProperties(object, names, arraysize(names));
- JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0, "test");
+ JSObject::NormalizeProperties(isolate, object, CLEAR_INOBJECT_PROPERTIES, 0,
+ "test");
JSObject::AddProperty(isolate, object, deleted_property_name, object, NONE);
CHECK(JSObject::DeleteProperty(object, deleted_property_name,
@@ -1316,7 +1317,8 @@ TEST(TryGetOwnProperty) {
Handle<JSObject> object = factory->NewJSObject(function);
AddProperties(object, names, arraysize(names), values, arraysize(values),
rand_gen.NextInt());
- JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0, "test");
+ JSObject::NormalizeProperties(isolate, object, CLEAR_INOBJECT_PROPERTIES, 0,
+ "test");
JSObject::AddProperty(isolate, object, deleted_property_name, object, NONE);
CHECK(JSObject::DeleteProperty(object, deleted_property_name,
@@ -1571,7 +1573,7 @@ TEST(TryLookupElement) {
Handle<JSFunction> constructor = isolate->string_function();
Handle<JSObject> object = factory->NewJSObject(constructor);
Handle<String> str = factory->InternalizeUtf8String("ab");
- Handle<JSValue>::cast(object)->set_value(*str);
+ Handle<JSPrimitiveWrapper>::cast(object)->set_value(*str);
AddElement(object, 13, smi0);
CHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, object->map().elements_kind());
@@ -1586,7 +1588,7 @@ TEST(TryLookupElement) {
Handle<JSFunction> constructor = isolate->string_function();
Handle<JSObject> object = factory->NewJSObject(constructor);
Handle<String> str = factory->InternalizeUtf8String("ab");
- Handle<JSValue>::cast(object)->set_value(*str);
+ Handle<JSPrimitiveWrapper>::cast(object)->set_value(*str);
AddElement(object, 13, smi0);
JSObject::NormalizeElements(object);
CHECK_EQ(SLOW_STRING_WRAPPER_ELEMENTS, object->map().elements_kind());
@@ -1652,6 +1654,15 @@ TEST(AllocateJSObjectFromMap) {
Node* result = m.AllocateJSObjectFromMap(map, properties, elements);
+ CodeStubAssembler::Label done(&m);
+ m.GotoIfNot(m.IsJSArrayMap(map), &done);
+
+ // JS array verification requires the length field to be set.
+ m.StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
+ m.SmiConstant(0));
+ m.Goto(&done);
+
+ m.Bind(&done);
m.Return(result);
}
@@ -1685,7 +1696,7 @@ TEST(AllocateJSObjectFromMap) {
Handle<JSObject> object = Handle<JSObject>::cast(
v8::Utils::OpenHandle(*CompileRun("var object = {a:1,b:2, 1:1, 2:2}; "
"object")));
- JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
+ JSObject::NormalizeProperties(isolate, object, KEEP_INOBJECT_PROPERTIES, 0,
"Normalize");
Handle<JSObject> result = Handle<JSObject>::cast(
ft.Call(handle(object->map(), isolate),
@@ -3545,34 +3556,6 @@ TEST(TestGotoIfDebugExecutionModeChecksSideEffects) {
CHECK_EQ(true, result->BooleanValue(isolate));
}
-#ifdef V8_COMPRESS_POINTERS
-
-TEST(CompressedSlotInterleavedGC) {
- Isolate* isolate(CcTest::InitIsolateOnce());
-
- const int kNumParams = 1;
-
- CodeAssemblerTester asm_tester(isolate, kNumParams);
- CodeStubAssembler m(asm_tester.state());
-
- Node* compressed = m.ChangeTaggedToCompressed(m.Parameter(0));
-
- m.Print(m.ChangeCompressedToTagged(compressed));
-
- Node* const context = m.Parameter(kNumParams + 2);
- m.CallRuntime(Runtime::kCollectGarbage, context, m.SmiConstant(0));
-
- m.Return(m.ChangeCompressedToTagged(compressed));
-
- FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
- Handle<Object> result =
- ft.Call(isolate->factory()->NewNumber(0.5)).ToHandleChecked();
- CHECK(result->IsHeapNumber());
- CHECK_EQ(0.5, Handle<HeapNumber>::cast(result)->Number());
-}
-
-#endif // V8_COMPRESS_POINTERS
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 660ee98a9a..b961da9437 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -987,8 +987,8 @@ TEST(DecideToPretenureDuringCompilation) {
// This setting ensures Heap::MaximumSizeScavenge will return `true`.
// We need to initialize the heap with at least 1 page, while keeping the
// limit low, to ensure the new space fills even on 32-bit architectures.
- create_params.constraints.set_max_semi_space_size_in_kb(Page::kPageSize /
- 1024);
+ create_params.constraints.set_max_young_generation_size_in_bytes(
+ 3 * Page::kPageSize);
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index e978aff2ba..4b9ee4629f 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -2433,6 +2433,7 @@ TEST(DeoptAtFirstLevelInlinedSource) {
"\n"
"startProfiling();\n"
"\n"
+ "%EnsureFeedbackVectorForFunction(opt_function);\n"
"%PrepareFunctionForOptimization(test);\n"
"\n"
"test(10, 10);\n"
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 82ebc8ca46..b3da0329f6 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -165,9 +165,9 @@ void CheckDebuggerUnloaded() {
CcTest::CollectAllGarbage();
// Iterate the heap and check that there are no debugger related objects left.
- HeapIterator iterator(CcTest::heap());
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(CcTest::heap());
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
CHECK(!obj.IsDebugInfo());
}
}
@@ -4151,7 +4151,7 @@ size_t NearHeapLimitCallback(void* data, size_t current_heap_limit,
UNINITIALIZED_TEST(DebugSetOutOfMemoryListener) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- create_params.constraints.set_max_old_space_size(10);
+ create_params.constraints.set_max_old_generation_size_in_bytes(10 * i::MB);
v8::Isolate* isolate = v8::Isolate::New(create_params);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
@@ -4368,9 +4368,9 @@ TEST(DebugEvaluateNoSideEffect) {
i::Isolate* isolate = CcTest::i_isolate();
std::vector<i::Handle<i::JSFunction>> all_functions;
{
- i::HeapIterator iterator(isolate->heap());
- for (i::HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ i::HeapObjectIterator iterator(isolate->heap());
+ for (i::HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsJSFunction()) continue;
i::JSFunction fun = i::JSFunction::cast(obj);
all_functions.emplace_back(fun, isolate);
@@ -4665,8 +4665,8 @@ TEST(GetPrivateFields) {
.ToLocalChecked()
->ToObject(context)
.ToLocalChecked());
- Handle<v8::internal::JSValue> private_value =
- Handle<v8::internal::JSValue>::cast(private_name);
+ Handle<v8::internal::JSPrimitiveWrapper> private_value =
+ Handle<v8::internal::JSPrimitiveWrapper>::cast(private_name);
Handle<v8::internal::Symbol> priv_symbol(
v8::internal::Symbol::cast(private_value->value()), isolate);
CHECK(priv_symbol->is_private_name());
@@ -4694,8 +4694,8 @@ TEST(GetPrivateFields) {
.ToLocalChecked()
->ToObject(context)
.ToLocalChecked());
- Handle<v8::internal::JSValue> private_value =
- Handle<v8::internal::JSValue>::cast(private_name);
+ Handle<v8::internal::JSPrimitiveWrapper> private_value =
+ Handle<v8::internal::JSPrimitiveWrapper>::cast(private_name);
Handle<v8::internal::Symbol> priv_symbol(
v8::internal::Symbol::cast(private_value->value()), isolate);
CHECK(priv_symbol->is_private_name());
@@ -4725,8 +4725,8 @@ TEST(GetPrivateFields) {
.ToLocalChecked()
->ToObject(context)
.ToLocalChecked());
- Handle<v8::internal::JSValue> private_value =
- Handle<v8::internal::JSValue>::cast(private_name);
+ Handle<v8::internal::JSPrimitiveWrapper> private_value =
+ Handle<v8::internal::JSPrimitiveWrapper>::cast(private_name);
Handle<v8::internal::Symbol> priv_symbol(
v8::internal::Symbol::cast(private_value->value()), isolate);
CHECK(priv_symbol->is_private_name());
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index f2c7e71a89..4078bd429c 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -897,6 +897,8 @@ TEST(DisasmIa320) {
__ Nop(i);
}
+ __ mfence();
+ __ lfence();
__ pause();
__ ret(0);
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index c84f502f23..ff21e9b265 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -542,8 +542,11 @@ TEST(DisasmX64) {
__ pinsrw(xmm2, rcx, 1);
__ pextrd(rbx, xmm15, 0);
__ pextrd(r12, xmm0, 1);
+ __ pextrq(r12, xmm0, 1);
__ pinsrd(xmm9, r9, 0);
__ pinsrd(xmm5, Operand(rax, 4), 1);
+ __ pinsrq(xmm9, r9, 0);
+ __ pinsrq(xmm5, Operand(rax, 4), 1);
__ pblendw(xmm5, xmm1, 1);
__ pblendw(xmm9, Operand(rax, 4), 1);
@@ -603,6 +606,14 @@ TEST(DisasmX64) {
SSE4_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
}
}
+
+ {
+ if (CpuFeatures::IsSupported(SSE4_2)) {
+ CpuFeatureScope scope(&assm, SSE4_2);
+
+ SSE4_2_INSTRUCTION_LIST(EMIT_SSE34_INSTR)
+ }
+ }
#undef EMIT_SSE34_INSTR
// AVX instruction
@@ -964,6 +975,8 @@ TEST(DisasmX64) {
__ Nop(i);
}
+ __ mfence();
+ __ lfence();
__ pause();
__ ret(0);
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index c487a1e2ec..436925146b 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -26,7 +26,7 @@ namespace {
static Handle<JSFunction> GetFunction(const char* name) {
v8::MaybeLocal<v8::Value> v8_f = CcTest::global()->Get(
- v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str(name));
+ CcTest::isolate()->GetCurrentContext(), v8_str(name));
Handle<JSFunction> f =
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*v8_f.ToLocalChecked()));
return f;
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 2a5e1dbb09..d23078b68a 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -306,6 +306,14 @@ class Expectations {
bool Check(Map map) const { return Check(map, number_of_properties_); }
+ bool CheckNormalized(Map map) const {
+ CHECK(map.is_dictionary_map());
+ CHECK_EQ(elements_kind_, map.elements_kind());
+ // TODO(leszeks): Iterate over the key/value pairs of the map and compare
+ // them against the expected fields.
+ return true;
+ }
+
//
// Helper methods for initializing expectations and adding properties to
// given |map|.
@@ -521,7 +529,7 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
// that the data property is uninitialized.
Factory* factory = isolate->factory();
Handle<JSObject> obj = factory->NewJSObjectFromMap(map);
- JSObject::MigrateToMap(obj, prepared_map);
+ JSObject::MigrateToMap(isolate, obj, prepared_map);
FieldIndex index = FieldIndex::ForDescriptor(*prepared_map, 0);
CHECK(obj->RawFastPropertyAt(index).IsUninitialized(isolate));
#ifdef VERIFY_HEAP
@@ -1053,8 +1061,8 @@ void TestReconfigureDataFieldAttribute_GeneralizeField(
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
- Handle<Map> new_map =
- Map::ReconfigureExistingProperty(isolate, map2, kSplitProp, kData, NONE);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ isolate, map2, kSplitProp, kData, NONE, PropertyConstness::kConst);
// |map2| should be left unchanged but marked unstable.
CHECK(!map2->is_stable());
@@ -1141,8 +1149,8 @@ void TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
- Handle<Map> new_map =
- Map::ReconfigureExistingProperty(isolate, map2, kSplitProp, kData, NONE);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ isolate, map2, kSplitProp, kData, NONE, PropertyConstness::kConst);
// |map2| should be left unchanged but marked unstable.
CHECK(!map2->is_stable());
@@ -1399,22 +1407,17 @@ struct CheckUnrelated {
}
};
-
-// Checks that given |map| is NOT deprecated, and |new_map| is a result of
-// copy-generalize-all-representations.
-struct CheckCopyGeneralizeAllFields {
+// Checks that given |map| is NOT deprecated, and |new_map| is a result of going
+// dictionary mode.
+struct CheckNormalize {
void Check(Isolate* isolate, Handle<Map> map, Handle<Map> new_map,
- Expectations& expectations) {
+ const Expectations& expectations) {
CHECK(!map->is_deprecated());
CHECK_NE(*map, *new_map);
CHECK(new_map->GetBackPointer().IsUndefined(isolate));
- for (int i = 0; i < kPropCount; i++) {
- expectations.GeneralizeField(i);
- }
-
CHECK(!new_map->is_deprecated());
- CHECK(expectations.Check(*new_map));
+ CHECK(expectations.CheckNormalized(*new_map));
}
};
@@ -1433,7 +1436,8 @@ struct CheckCopyGeneralizeAllFields {
//
template <typename TestConfig, typename Checker>
static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
- TestConfig& config, Checker& checker) {
+ TestConfig& config, // NOLINT(runtime/references)
+ Checker& checker) { // NOLINT(runtime/references)
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -1497,8 +1501,8 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
- Handle<Map> new_map =
- Map::ReconfigureExistingProperty(isolate, map2, kSplitProp, kData, NONE);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ isolate, map2, kSplitProp, kData, NONE, PropertyConstness::kConst);
// |map2| should be left unchanged but marked unstable.
CHECK(!map2->is_stable());
@@ -1510,7 +1514,6 @@ static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
checker.Check(isolate, map1, new_map, expectations1);
}
-
TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -1523,14 +1526,18 @@ TEST(ReconfigureDataFieldAttribute_SameDataConstantAfterTargetMap) {
js_func_ = factory->NewFunctionForTest(factory->empty_string());
}
- Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(
+ int branch_id,
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
// Add the same data constant property at both transition tree branches.
return expectations.AddDataConstant(map, NONE, js_func_);
}
- void UpdateExpectations(int property_index, Expectations& expectations) {
+ void UpdateExpectations(
+ int property_index,
+ Expectations& expectations) { // NOLINT(runtime/references)
// Expectations stay the same.
}
};
@@ -1568,14 +1575,18 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
factory->NewFunction(sloppy_map, info, isolate->native_context());
}
- Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(
+ int branch_id,
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
Handle<JSFunction> js_func = branch_id == 1 ? js_func1_ : js_func2_;
return expectations.AddDataConstant(map, NONE, js_func);
}
- void UpdateExpectations(int property_index, Expectations& expectations) {
+ void UpdateExpectations(
+ int property_index,
+ Expectations& expectations) { // NOLINT(runtime/references)
expectations.SetDataField(property_index, PropertyConstness::kConst,
Representation::HeapObject(), function_type_);
}
@@ -1601,8 +1612,10 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToAccConstantAfterTargetMap) {
pair_ = CreateAccessorPair(true, true);
}
- Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(
+ int branch_id,
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
if (branch_id == 1) {
return expectations.AddDataConstant(map, NONE, js_func_);
@@ -1611,7 +1624,10 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToAccConstantAfterTargetMap) {
}
}
- void UpdateExpectations(int property_index, Expectations& expectations) {}
+ void UpdateExpectations(
+ int property_index,
+ Expectations& expectations // NOLINT(runtime/references)
+ ) {}
};
TestConfig config;
@@ -1629,15 +1645,19 @@ TEST(ReconfigureDataFieldAttribute_SameAccessorConstantAfterTargetMap) {
Handle<AccessorPair> pair_;
TestConfig() { pair_ = CreateAccessorPair(true, true); }
- Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(
+ int branch_id,
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
// Add the same accessor constant property at both transition tree
// branches.
return expectations.AddAccessorConstant(map, NONE, pair_);
}
- void UpdateExpectations(int property_index, Expectations& expectations) {
+ void UpdateExpectations(
+ int property_index,
+ Expectations& expectations) { // NOLINT(runtime/references)
// Two branches are "compatible" so the |map1| should NOT be deprecated.
}
};
@@ -1660,19 +1680,23 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToAccFieldAfterTargetMap) {
pair2_ = CreateAccessorPair(true, true);
}
- Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(
+ int branch_id,
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
Handle<AccessorPair> pair = branch_id == 1 ? pair1_ : pair2_;
return expectations.AddAccessorConstant(map, NONE, pair);
}
- void UpdateExpectations(int property_index, Expectations& expectations) {
+ void UpdateExpectations(
+ int property_index,
+ Expectations& expectations) { // NOLINT(runtime/references)
if (IS_ACCESSOR_FIELD_SUPPORTED) {
expectations.SetAccessorField(property_index);
} else {
- // Currently we have a copy-generalize-all-representations case and
- // ACCESSOR property becomes ACCESSOR_CONSTANT.
+ // Currently we have a normalize case and ACCESSOR property becomes
+ // ACCESSOR_CONSTANT.
expectations.SetAccessorConstant(property_index, pair2_);
}
}
@@ -1680,11 +1704,11 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToAccFieldAfterTargetMap) {
TestConfig config;
if (IS_ACCESSOR_FIELD_SUPPORTED) {
- CheckCopyGeneralizeAllFields checker;
+ CheckSameMap checker;
TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
} else {
- // Currently we have a copy-generalize-all-representations case.
- CheckCopyGeneralizeAllFields checker;
+ // Currently we have a normalize case.
+ CheckNormalize checker;
TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
}
}
@@ -1698,8 +1722,10 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
Handle<AccessorPair> pair_;
TestConfig() { pair_ = CreateAccessorPair(true, true); }
- Handle<Map> AddPropertyAtBranch(int branch_id, Expectations& expectations,
- Handle<Map> map) {
+ Handle<Map> AddPropertyAtBranch(
+ int branch_id,
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map) {
CHECK(branch_id == 1 || branch_id == 2);
if (branch_id == 1) {
return expectations.AddAccessorConstant(map, NONE, pair_);
@@ -1711,7 +1737,10 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
}
}
- void UpdateExpectations(int property_index, Expectations& expectations) {}
+ void UpdateExpectations(
+ int property_index,
+ Expectations& expectations // NOLINT(runtime/references)
+ ) {}
};
TestConfig config;
@@ -2083,18 +2112,13 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
CHECK(!TransitionsAccessor(isolate, map2).CanHaveMoreTransitions());
// Try to update |map|, since there is no place for propX transition at |map2|
- // |map| should become "copy-generalized".
+ // |map| should become normalized.
Handle<Map> updated_map = Map::Update(isolate, map);
- CHECK(updated_map->GetBackPointer().IsUndefined(isolate));
- for (int i = 0; i < kPropCount; i++) {
- expectations.SetDataField(i, PropertyConstness::kMutable,
- Representation::Tagged(), any_type);
- }
- CHECK(expectations.Check(*updated_map));
+ CheckNormalize checker;
+ checker.Check(isolate, map2, updated_map, expectations);
}
-
////////////////////////////////////////////////////////////////////////////////
// A set of tests involving special transitions (such as elements kind
// transition, observed transition or prototype transition).
@@ -2117,8 +2141,9 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
// fixed.
template <typename TestConfig>
static void TestGeneralizeFieldWithSpecialTransition(
- TestConfig& config, const CRFTData& from, const CRFTData& to,
- const CRFTData& expected, bool expected_deprecation) {
+ TestConfig& config, // NOLINT(runtime/references)
+ const CRFTData& from, const CRFTData& to, const CRFTData& expected,
+ bool expected_deprecation) {
Isolate* isolate = CcTest::i_isolate();
Expectations expectations(isolate);
@@ -2359,7 +2384,9 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
}
- Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
+ Handle<Map> Transition(
+ Handle<Map> map,
+ Expectations& expectations) { // NOLINT(runtime/references)
return Map::TransitionToPrototype(CcTest::i_isolate(), map, prototype_);
}
// TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
@@ -2399,7 +2426,9 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
prototype_ = factory->NewJSObjectFromMap(Map::Create(isolate, 0));
}
- Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
+ Handle<Map> Transition(
+ Handle<Map> map,
+ Expectations& expectations) { // NOLINT(runtime/references)
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -2455,7 +2484,9 @@ struct TransitionToDataFieldOperator {
heap_type_(heap_type),
value_(value) {}
- Handle<Map> DoTransition(Expectations& expectations, Handle<Map> map) {
+ Handle<Map> DoTransition(
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map) {
return expectations.TransitionToDataField(
map, attributes_, constness_, representation_, heap_type_, value_);
}
@@ -2505,12 +2536,15 @@ struct ReconfigureAsDataPropertyOperator {
attributes_(attributes),
heap_type_(heap_type) {}
- Handle<Map> DoTransition(Isolate* isolate, Expectations& expectations,
- Handle<Map> map) {
+ Handle<Map> DoTransition(
+ Isolate* isolate,
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map) {
expectations.SetDataField(descriptor_, PropertyConstness::kMutable,
representation_, heap_type_);
return Map::ReconfigureExistingProperty(isolate, map, descriptor_, kData,
- attributes_);
+ attributes_,
+ PropertyConstness::kConst);
}
};
@@ -2527,7 +2561,8 @@ struct ReconfigureAsAccessorPropertyOperator {
Handle<Map> map) {
expectations.SetAccessorField(descriptor_);
return Map::ReconfigureExistingProperty(isolate, map, descriptor_,
- kAccessor, attributes_);
+ kAccessor, attributes_,
+ PropertyConstness::kConst);
}
};
@@ -2549,8 +2584,9 @@ struct FieldGeneralizationChecker {
attributes_(attributes),
heap_type_(heap_type) {}
- void Check(Isolate* isolate, Expectations& expectations2, Handle<Map> map1,
- Handle<Map> map2) {
+ void Check(Isolate* isolate,
+ Expectations& expectations2, // NOLINT(runtime/references)
+ Handle<Map> map1, Handle<Map> map2) {
CHECK(!map2->is_deprecated());
CHECK(map1->is_deprecated());
@@ -2568,8 +2604,9 @@ struct FieldGeneralizationChecker {
// Checks that existing transition was taken as is.
struct SameMapChecker {
- void Check(Isolate* isolate, Expectations& expectations, Handle<Map> map1,
- Handle<Map> map2) {
+ void Check(Isolate* isolate,
+ Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map1, Handle<Map> map2) {
CHECK(!map2->is_deprecated());
CHECK_EQ(*map1, *map2);
CHECK(expectations.Check(*map2));
@@ -2580,7 +2617,8 @@ struct SameMapChecker {
// Checks that both |map1| and |map2| should stays non-deprecated, this is
// the case when property kind is change.
struct PropertyKindReconfigurationChecker {
- void Check(Expectations& expectations, Handle<Map> map1, Handle<Map> map2) {
+ void Check(Expectations& expectations, // NOLINT(runtime/references)
+ Handle<Map> map1, Handle<Map> map2) {
CHECK(!map1->is_deprecated());
CHECK(!map2->is_deprecated());
CHECK_NE(*map1, *map2);
@@ -2605,8 +2643,10 @@ struct PropertyKindReconfigurationChecker {
// where "p4A" and "p4B" differ only in the attributes.
//
template <typename TransitionOp1, typename TransitionOp2, typename Checker>
-static void TestTransitionTo(TransitionOp1& transition_op1,
- TransitionOp2& transition_op2, Checker& checker) {
+static void TestTransitionTo(
+ TransitionOp1& transition_op1, // NOLINT(runtime/references)
+ TransitionOp2& transition_op2, // NOLINT(runtime/references)
+ Checker& checker) { // NOLINT(runtime/references)
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
@@ -2632,7 +2672,6 @@ static void TestTransitionTo(TransitionOp1& transition_op1,
checker.Check(isolate, expectations2, map1, map2);
}
-
TEST(TransitionDataFieldToDataField) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -2943,6 +2982,31 @@ TEST(StoreToConstantField_StoreIC) {
TestStoreToConstantField_NaN(store_func_source, 2);
}
+TEST(NormalizeToMigrationTarget) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ CHECK(
+ isolate->native_context()->normalized_map_cache().IsNormalizedMapCache());
+
+ Handle<Map> base_map = Map::Create(isolate, 4);
+
+ Handle<Map> existing_normalized_map = Map::Normalize(
+ isolate, base_map, PropertyNormalizationMode::CLEAR_INOBJECT_PROPERTIES,
+ "Test_NormalizeToMigrationTarget_ExistingMap");
+ existing_normalized_map->set_is_migration_target(true);
+
+ // Normalizing a second map should hit the normalized map cache, including it
+ // being OK for the new map to be a migration target.
+ CHECK(!base_map->is_migration_target());
+ Handle<Map> new_normalized_map = Map::Normalize(
+ isolate, base_map, PropertyNormalizationMode::CLEAR_INOBJECT_PROPERTIES,
+ "Test_NormalizeToMigrationTarget_NewMap");
+ CHECK_EQ(*existing_normalized_map, *new_normalized_map);
+ CHECK(new_normalized_map->is_migration_target());
+}
+
} // namespace test_field_type_tracking
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/test-hashcode.cc b/deps/v8/test/cctest/test-hashcode.cc
index 3a46d2b0da..e311646f73 100644
--- a/deps/v8/test/cctest/test-hashcode.cc
+++ b/deps/v8/test/cctest/test-hashcode.cc
@@ -96,7 +96,7 @@ TEST(AddHashCodeToSlowObject) {
Handle<JSObject> obj =
isolate->factory()->NewJSObject(isolate->object_function());
CHECK(obj->HasFastProperties());
- JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
+ JSObject::NormalizeProperties(isolate, obj, CLEAR_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
CHECK(obj->raw_properties_or_hash().IsNameDictionary());
@@ -165,7 +165,7 @@ TEST(TransitionFastWithPropertyArrayToSlow) {
CHECK(obj->raw_properties_or_hash().IsPropertyArray());
CHECK_EQ(hash, obj->property_array().Hash());
- JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 0,
+ JSObject::NormalizeProperties(isolate, obj, KEEP_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
CheckDictionaryObject(obj, hash);
}
@@ -179,7 +179,7 @@ TEST(TransitionSlowToSlow) {
CompileRun(source);
Handle<JSObject> obj = GetGlobal<JSObject>("x");
- JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
+ JSObject::NormalizeProperties(isolate, obj, CLEAR_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
CHECK(obj->raw_properties_or_hash().IsNameDictionary());
@@ -199,7 +199,7 @@ TEST(TransitionSlowToFastWithoutProperties) {
Handle<JSObject> obj =
isolate->factory()->NewJSObject(isolate->object_function());
- JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
+ JSObject::NormalizeProperties(isolate, obj, CLEAR_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
CHECK(obj->raw_properties_or_hash().IsNameDictionary());
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 815b7f51bb..e534670bb6 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -2671,7 +2671,7 @@ TEST(ArrayGrowLeftTrim) {
}
TEST(TrackHeapAllocationsWithInlining) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -2705,7 +2705,7 @@ TEST(TrackHeapAllocationsWithoutInlining) {
// Disable inlining
i::FLAG_max_inlined_bytecode_size = 0;
i::FLAG_max_inlined_bytecode_size_small = 0;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -2752,7 +2752,7 @@ static const char* inline_heap_allocation_source =
TEST(TrackBumpPointerAllocations) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -2807,7 +2807,7 @@ TEST(TrackBumpPointerAllocations) {
TEST(TrackV8ApiAllocation) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -3428,7 +3428,8 @@ TEST(AddressToTraceMap) {
}
static const v8::AllocationProfile::Node* FindAllocationProfileNode(
- v8::Isolate* isolate, v8::AllocationProfile& profile,
+ v8::Isolate* isolate,
+ v8::AllocationProfile& profile, // NOLINT(runtime/references)
const Vector<const char*>& names) {
v8::AllocationProfile::Node* node = profile.GetRootNode();
for (int i = 0; node != nullptr && i < names.length(); ++i) {
@@ -3474,7 +3475,7 @@ static const char* simple_sampling_heap_profiler_script =
"foo();";
TEST(SamplingHeapProfiler) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -3557,7 +3558,7 @@ TEST(SamplingHeapProfiler) {
}
TEST(SamplingHeapProfilerRateAgnosticEstimates) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -3639,7 +3640,7 @@ TEST(SamplingHeapProfilerRateAgnosticEstimates) {
}
TEST(SamplingHeapProfilerApiAllocation) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -3662,7 +3663,7 @@ TEST(SamplingHeapProfilerApiAllocation) {
}
TEST(SamplingHeapProfilerApiSamples) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -3707,7 +3708,7 @@ TEST(SamplingHeapProfilerApiSamples) {
}
TEST(SamplingHeapProfilerLeftTrimming) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -3742,7 +3743,7 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
return;
}
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -3807,7 +3808,7 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
}
TEST(SamplingHeapProfilerLargeInterval) {
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -3845,7 +3846,7 @@ TEST(HeapSnapshotPrototypeNotJSReceiver) {
TEST(SamplingHeapProfilerSampleDuringDeopt) {
i::FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(v8::Isolate::GetCurrent());
+ v8::HandleScope scope(CcTest::isolate());
LocalContext env;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 58a3964619..f0df0c6149 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -28,8 +28,7 @@ static Handle<T> OpenHandle(v8::Local<v8::Value> value) {
static inline v8::Local<v8::Value> Run(v8::Local<v8::Script> script) {
v8::Local<v8::Value> result;
- if (script->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocal(&result)) {
+ if (script->Run(CcTest::isolate()->GetCurrentContext()).ToLocal(&result)) {
return result;
}
return v8::Local<v8::Value>();
@@ -1020,8 +1019,8 @@ TEST(SubclassBooleanBuiltin) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- TestSubclassBuiltin("A1", JS_VALUE_TYPE, "Boolean", "true");
- TestSubclassBuiltin("A2", JS_VALUE_TYPE, "Boolean", "false");
+ TestSubclassBuiltin("A1", JS_PRIMITIVE_WRAPPER_TYPE, "Boolean", "true");
+ TestSubclassBuiltin("A2", JS_PRIMITIVE_WRAPPER_TYPE, "Boolean", "false");
}
@@ -1061,8 +1060,8 @@ TEST(SubclassNumberBuiltin) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- TestSubclassBuiltin("A1", JS_VALUE_TYPE, "Number", "42");
- TestSubclassBuiltin("A2", JS_VALUE_TYPE, "Number", "4.2");
+ TestSubclassBuiltin("A1", JS_PRIMITIVE_WRAPPER_TYPE, "Number", "42");
+ TestSubclassBuiltin("A2", JS_PRIMITIVE_WRAPPER_TYPE, "Number", "4.2");
}
@@ -1094,8 +1093,9 @@ TEST(SubclassStringBuiltin) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- TestSubclassBuiltin("A1", JS_VALUE_TYPE, "String", "'some string'");
- TestSubclassBuiltin("A2", JS_VALUE_TYPE, "String", "");
+ TestSubclassBuiltin("A1", JS_PRIMITIVE_WRAPPER_TYPE, "String",
+ "'some string'");
+ TestSubclassBuiltin("A2", JS_PRIMITIVE_WRAPPER_TYPE, "String", "");
}
diff --git a/deps/v8/test/cctest/test-inspector.cc b/deps/v8/test/cctest/test-inspector.cc
index 6dd2aefb9e..2f7e8b1ea1 100644
--- a/deps/v8/test/cctest/test-inspector.cc
+++ b/deps/v8/test/cctest/test-inspector.cc
@@ -9,7 +9,9 @@
#include "include/v8-inspector.h"
#include "include/v8.h"
#include "src/inspector/protocol/Runtime.h"
+#include "src/inspector/string-16.h"
+using v8_inspector::String16;
using v8_inspector::StringBuffer;
using v8_inspector::StringView;
using v8_inspector::V8ContextInfo;
@@ -63,3 +65,16 @@ TEST(WrapInsideWrapOnInterrupt) {
isolate->RequestInterrupt(&WrapOnInterrupt, session.get());
session->wrapObject(env.local(), v8::Null(isolate), object_group_view, false);
}
+
+TEST(String16EndianTest) {
+ const v8_inspector::UChar* expected =
+ reinterpret_cast<const v8_inspector::UChar*>(u"Hello, \U0001F30E.");
+ const uint16_t* utf16le = reinterpret_cast<const uint16_t*>(
+ "H\0e\0l\0l\0o\0,\0 \0\x3c\xd8\x0e\xdf.\0"); // Same text in UTF16LE
+ // encoding
+
+ String16 utf16_str = String16::fromUTF16LE(utf16le, 10);
+ String16 expected_str = expected;
+
+ CHECK_EQ(utf16_str, expected_str);
+}
diff --git a/deps/v8/test/cctest/test-intl.cc b/deps/v8/test/cctest/test-intl.cc
index 47add77b0a..f3162411d3 100644
--- a/deps/v8/test/cctest/test-intl.cc
+++ b/deps/v8/test/cctest/test-intl.cc
@@ -238,7 +238,7 @@ TEST(GetAvailableLocales) {
CHECK(locales.count("en-US"));
locales = JSPluralRules::GetAvailableLocales();
- CHECK(locales.count("en-US"));
+ CHECK(locales.count("en"));
locales = JSRelativeTimeFormat::GetAvailableLocales();
CHECK(locales.count("en-US"));
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index 858c9f577a..51add24a60 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -719,7 +719,7 @@ TEST(TestJSWeakRefKeepDuringJob) {
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
- heap->AddKeepDuringJobTarget(js_object);
+ heap->KeepDuringJob(js_object);
weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
}
@@ -756,7 +756,7 @@ TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
Handle<JSObject> js_object =
isolate->factory()->NewJSObject(isolate->object_function());
Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
- heap->AddKeepDuringJobTarget(js_object);
+ heap->KeepDuringJob(js_object);
weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
}
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index 4319d5bebe..1ef2c12966 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -505,7 +505,7 @@ TEST(LiveEditCompileError) {
CHECK_EQ(result.column_number, 51);
v8::String::Utf8Value result_message(env->GetIsolate(), result.message);
CHECK_NOT_NULL(
- strstr(*result_message, "Uncaught SyntaxError: Unexpected token )"));
+ strstr(*result_message, "Uncaught SyntaxError: Unexpected token ')'"));
{
v8::Local<v8::String> result =
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index ed01d6ed21..571b0000eb 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -58,7 +58,6 @@ class DeoptimizeCodeThread : public v8::base::Thread {
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
v8::Context::Scope context_scope(context);
- CHECK_EQ(isolate_, v8::Isolate::GetCurrent());
// This code triggers deoptimization of some function that will be
// used in a different thread.
CompileRun(source_);
@@ -73,7 +72,7 @@ class DeoptimizeCodeThread : public v8::base::Thread {
};
void UnlockForDeoptimization(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = args.GetIsolate();
// Gets the pointer to the thread that will trigger the deoptimization of the
// code.
DeoptimizeCodeThread* deoptimizer =
@@ -94,7 +93,7 @@ void UnlockForDeoptimization(const v8::FunctionCallbackInfo<v8::Value>& args) {
void UnlockForDeoptimizationIfReady(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = args.GetIsolate();
bool* ready_to_deoptimize = reinterpret_cast<bool*>(isolate->GetData(1));
if (*ready_to_deoptimize) {
// The test should enter here only once, so put the flag back to false.
@@ -297,7 +296,6 @@ class KangarooThread : public v8::base::Thread {
{
v8::Locker locker(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
- CHECK_EQ(isolate_, v8::Isolate::GetCurrent());
v8::HandleScope scope(isolate_);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
@@ -338,7 +336,6 @@ TEST(KangarooIsolates) {
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- CHECK_EQ(isolate, v8::Isolate::GetCurrent());
CompileRun("function getValue() { return 30; }");
thread1.reset(new KangarooThread(isolate, context));
}
@@ -416,7 +413,6 @@ class IsolateLockingThreadWithLocalContext : public JoinableThread {
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
LocalContext local_context(isolate_);
- CHECK_EQ(isolate_, v8::Isolate::GetCurrent());
CalcFibAndCheck(local_context.local());
}
private:
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 1ff517643e..83060f0cac 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -32,6 +32,7 @@
#include "include/v8-profiler.h"
#include "src/api/api-inl.h"
#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
#include "src/init/v8.h"
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 3a552dfb18..26d574b435 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -433,75 +433,6 @@ UNINITIALIZED_TEST(LogAccessorCallbacks) {
isolate->Dispose();
}
-// Test that logging of code create / move events is equivalent to traversal of
-// a resulting heap.
-UNINITIALIZED_TEST(EquivalenceOfLoggingAndTraversal) {
- // This test needs to be run on a "clean" V8 to ensure that snapshot log
- // is loaded. This is always true when running using tools/test.py because
- // it launches a new cctest instance for every test. To be sure that launching
- // cctest manually also works, please be sure that no tests below
- // are using V8.
-
- // Start with profiling to capture all code events from the beginning.
- SETUP_FLAGS();
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- {
- ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
-
- // Compile and run a function that creates other functions.
- CompileRun(
- "(function f(obj) {\n"
- " obj.test =\n"
- " (function a(j) { return function b() { return j; } })(100);\n"
- "})(this);");
- logger.logger()->StopProfilerThread();
- CcTest::PreciseCollectAllGarbage();
- logger.StringEvent("test-logging-done", "");
-
- // Iterate heap to find compiled functions, will write to log.
- logger.LogCompiledFunctions();
- logger.StringEvent("test-traversal-done", "");
-
- logger.StopLogging();
-
- v8::Local<v8::String> log_str = logger.GetLogString();
- logger.env()
- ->Global()
- ->Set(logger.env(), v8_str("_log"), log_str)
- .FromJust();
-
- // Load the Test snapshot's sources, see log-eq-of-logging-and-traversal.js
- i::Vector<const char> source =
- i::NativesCollection<i::TEST>::GetScriptsSource();
- v8::Local<v8::String> source_str =
- v8::String::NewFromUtf8(isolate, source.begin(),
- v8::NewStringType::kNormal, source.length())
- .ToLocalChecked();
- v8::TryCatch try_catch(isolate);
- v8::Local<v8::Script> script = CompileWithOrigin(source_str, "", false);
- if (script.IsEmpty()) {
- v8::String::Utf8Value exception(isolate, try_catch.Exception());
- FATAL("compile: %s\n", *exception);
- }
- v8::Local<v8::Value> result;
- if (!script->Run(logger.env()).ToLocal(&result)) {
- v8::String::Utf8Value exception(isolate, try_catch.Exception());
- FATAL("run: %s\n", *exception);
- }
- // The result either be the "true" literal or problem description.
- if (!result->IsTrue()) {
- v8::Local<v8::String> s = result->ToString(logger.env()).ToLocalChecked();
- i::ScopedVector<char> data(s->Utf8Length(isolate) + 1);
- CHECK(data.begin());
- s->WriteUtf8(isolate, data.begin());
- FATAL("%s\n", data.begin());
- }
- }
- isolate->Dispose();
-}
-
UNINITIALIZED_TEST(LogVersion) {
SETUP_FLAGS();
v8::Isolate::CreateParams create_params;
@@ -929,11 +860,11 @@ void ValidateMapDetailsLogging(v8::Isolate* isolate,
// Iterate over all maps on the heap.
i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- i::HeapIterator iterator(heap);
+ i::HeapObjectIterator iterator(heap);
i::DisallowHeapAllocation no_gc;
size_t i = 0;
- for (i::HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (i::HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (!obj.IsMap()) continue;
i++;
uintptr_t address = obj.ptr();
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm64.cc b/deps/v8/test/cctest/test-macro-assembler-arm64.cc
new file mode 100644
index 0000000000..e1387cb096
--- /dev/null
+++ b/deps/v8/test/cctest/test-macro-assembler-arm64.cc
@@ -0,0 +1,97 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "src/init/v8.h"
+
+#include "src/base/platform/platform.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
+#include "src/heap/factory.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/utils/ostreams.h"
+#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace test_macro_assembler_arm64 {
+
+using F0 = int();
+
+#define __ masm.
+
+TEST(EmbeddedObj) {
+#ifdef V8_COMPRESS_POINTERS
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
+
+ Handle<HeapObject> old_array = isolate->factory()->NewFixedArray(2000);
+ Handle<HeapObject> my_array = isolate->factory()->NewFixedArray(1000);
+ __ Mov(w4, Immediate(my_array, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
+ __ Mov(x5, old_array);
+ __ ret(x5);
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+#ifdef DEBUG
+ StdoutStream os;
+ code->Print(os);
+#endif
+
+ // Collect garbage to ensure reloc info can be walked by the heap.
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+
+ // Test the user-facing reloc interface.
+ const int mode_mask = RelocInfo::EmbeddedObjectModeMask();
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsCompressedEmbeddedObject(mode)) {
+ CHECK_EQ(*my_array, it.rinfo()->target_object());
+ } else {
+ CHECK(RelocInfo::IsFullEmbeddedObject(mode));
+ CHECK_EQ(*old_array, it.rinfo()->target_object());
+ }
+ }
+#endif // V8_COMPRESS_POINTERS
+}
+
+#undef __
+
+} // namespace test_macro_assembler_arm64
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 0b0ab7b032..005aeef652 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -195,11 +195,11 @@ TEST(LoadAddress) {
CHECK_EQ(4, check_size);
__ jr(a4);
__ nop();
- __ stop("invalid");
- __ stop("invalid");
- __ stop("invalid");
- __ stop("invalid");
- __ stop("invalid");
+ __ stop();
+ __ stop();
+ __ stop();
+ __ stop();
+ __ stop();
CodeDesc desc;
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index aaef09b91e..ba69b6d2f6 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -4701,10 +4701,10 @@ TEST(ImportExpressionSuccess) {
// context.
// For example, a top level "import(" is parsed as an
// import declaration. The parser parses the import token correctly
- // and then shows an "Unexpected token (" error message. The
+ // and then shows an "Unexpected token '('" error message. The
// preparser does not understand the import keyword (this test is
// run without kAllowHarmonyDynamicImport flag), so this results in
- // an "Unexpected token import" error.
+ // an "Unexpected token 'import'" error.
RunParserSyncTest(context_data, data, kError);
RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
nullptr, 0, true, true);
@@ -4772,7 +4772,7 @@ TEST(ImportExpressionErrors) {
// as an import declaration. The parser parses the import token
// correctly and then shows an "Unexpected end of input" error
// message because of the '{'. The preparser shows an "Unexpected
- // token {" because it's not a valid token in a CallExpression.
+ // token '{'" because it's not a valid token in a CallExpression.
RunModuleParserSyncTest(context_data, data, kError, nullptr, 0, flags,
arraysize(flags), nullptr, 0, true, true);
}
@@ -7424,7 +7424,7 @@ TEST(EnumReserved) {
RunModuleParserSyncTest(context_data, kErrorSources, kError);
}
-static void CheckEntry(const i::ModuleDescriptor::Entry* entry,
+static void CheckEntry(const i::SourceTextModuleDescriptor::Entry* entry,
const char* export_name, const char* local_name,
const char* import_name, int module_request) {
CHECK_NOT_NULL(entry);
@@ -7487,7 +7487,7 @@ TEST(ModuleParsingInternals) {
CHECK(outer_scope->is_script_scope());
CHECK_NULL(outer_scope->outer_scope());
CHECK(module_scope->is_module_scope());
- const i::ModuleDescriptor::Entry* entry;
+ const i::SourceTextModuleDescriptor::Entry* entry;
i::Declaration::List* declarations = module_scope->declarations();
CHECK_EQ(13, declarations->LengthForTest());
@@ -7572,7 +7572,7 @@ TEST(ModuleParsingInternals) {
CHECK(declarations->AtForTest(12)->var()->location() ==
i::VariableLocation::MODULE);
- i::ModuleDescriptor* descriptor = module_scope->module();
+ i::SourceTextModuleDescriptor* descriptor = module_scope->module();
CHECK_NOT_NULL(descriptor);
CHECK_EQ(5u, descriptor->module_requests().size());
@@ -11319,15 +11319,9 @@ TEST(HashbangSyntax) {
const char* data[] = {"function\nFN\n(\n)\n {\n}\nFN();", nullptr};
- i::FLAG_harmony_hashbang = true;
RunParserSyncTest(context_data, data, kSuccess);
RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, nullptr, 0,
nullptr, 0, true);
-
- i::FLAG_harmony_hashbang = false;
- RunParserSyncTest(context_data, data, kError);
- RunParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0, nullptr,
- 0, true);
}
TEST(HashbangSyntaxErrors) {
@@ -11370,12 +11364,6 @@ TEST(HashbangSyntaxErrors) {
const char* hashbang_data[] = {"#!\n", "#!---IGNORED---\n", nullptr};
auto SyntaxErrorTest = [](const char* context_data[][2], const char* data[]) {
- i::FLAG_harmony_hashbang = true;
- RunParserSyncTest(context_data, data, kError);
- RunParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
- nullptr, 0, true);
-
- i::FLAG_harmony_hashbang = false;
RunParserSyncTest(context_data, data, kError);
RunParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0,
nullptr, 0, true);
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm.cc b/deps/v8/test/cctest/test-poison-disasm-arm.cc
index bde584c3fa..37bb4e1b39 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm.cc
@@ -5,53 +5,21 @@
// The C++ style guide recommends using <re2> instead of <regex>. However, the
// former isn't available in V8.
#include <regex> // NOLINT(build/c++11)
+#include <vector>
-#include "src/api/api-inl.h"
-#include "src/diagnostics/disassembler.h"
-#include "src/objects/objects-inl.h"
+#include "src/codegen/arm/register-arm.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/disasm-regex-helper.h"
namespace v8 {
namespace internal {
-std::string DisassembleFunction(const char* function) {
- v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
- Handle<JSFunction> f = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- CcTest::global()->Get(context, v8_str(function)).ToLocalChecked())));
-
- Address begin = f->code().raw_instruction_start();
- Address end = f->code().raw_instruction_end();
- Isolate* isolate = CcTest::i_isolate();
- std::ostringstream os;
- Disassembler::Decode(isolate, &os, reinterpret_cast<byte*>(begin),
- reinterpret_cast<byte*>(end),
- CodeReference(handle(f->code(), isolate)));
- return os.str();
-}
-
-struct Matchers {
- std::string start = "0x[0-9a-f]+ +[0-9a-f]+ +[0-9a-f]+ +";
- std::regex map_load_re =
- std::regex(start + "ldr r([0-9]+), \\[r([0-9]+), #-1\\]");
- std::regex load_const_re = std::regex(start + "ldr r([0-9]+), \\[pc, .*");
- std::regex cmp_re = std::regex(start + "cmp r([0-9]+), r([0-9]+)");
- std::regex bne_re = std::regex(start + "bne (.*)");
- std::regex beq_re = std::regex(start + "beq (.*)");
- std::regex b_re = std::regex(start + "b (.*)");
- std::regex eorne_re =
- std::regex(start + "eorne r([0-9]+), r([0-9]+), r([0-9]+)");
- std::regex eoreq_re =
- std::regex(start + "eoreq r([0-9]+), r([0-9]+), r([0-9]+)");
- std::regex csdb_re = std::regex(start + "csdb");
- std::regex load_field_re =
- std::regex(start + "ldr r([0-9]+), \\[r([0-9]+), #\\+[0-9]+\\]");
- std::regex mask_re =
- std::regex(start + "and r([0-9]+), r([0-9]+), r([0-9]+)");
- std::regex untag_re = std::regex(start + "mov r([0-9]+), r([0-9]+), asr #1");
-
- std::string poison_reg = "9";
-};
+namespace {
+// Poison register.
+const int kPRegCode = kSpeculationPoisonRegister.code();
+const std::string kPReg = // NOLINT(runtime/string)
+ "r" + std::to_string(kPRegCode);
+} // namespace
TEST(DisasmPoisonMonomorphicLoad) {
#ifdef ENABLE_DISASSEMBLER
@@ -71,64 +39,19 @@ TEST(DisasmPoisonMonomorphicLoad) {
"%OptimizeFunctionOnNextCall(mono);"
"mono({ x : 1 });");
- Matchers m;
-
- std::smatch match;
- std::string line;
- std::istringstream reader(DisassembleFunction("mono"));
- bool poisoning_sequence_found = false;
- while (std::getline(reader, line)) {
- if (std::regex_match(line, match, m.map_load_re)) {
- std::string map_reg = match[1];
- std::string object_reg = match[2];
- // Matches that the property access sequence is instrumented with
- // poisoning. We match the following sequence:
- //
- // ldr r1, [r0, #-1] ; load map
- // ldr r2, [pc, #+104] ; load expected map constant
- // cmp r1, r2 ; compare maps
- // bne ... ; deopt if different
- // eorne r9, r9, r9 ; update the poison
- // csdb ; speculation barrier
- // ldr r0, [r0, #+11] ; load the field
- // and r0, r0, r9 ; apply the poison
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.load_const_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.cmp_re));
- CHECK_EQ(match[1], map_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.bne_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.eorne_re));
- CHECK_EQ(match[1], m.poison_reg);
- CHECK_EQ(match[2], m.poison_reg);
- CHECK_EQ(match[3], m.poison_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.csdb_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.load_field_re));
- CHECK_EQ(match[2], object_reg);
- std::string field_reg = match[1];
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.mask_re));
- CHECK_EQ(match[1], field_reg);
- CHECK_EQ(match[2], field_reg);
- CHECK_EQ(match[3], m.poison_reg);
-
- poisoning_sequence_found = true;
- break;
- }
- }
-
- CHECK(poisoning_sequence_found);
+ // Matches that the property access sequence is instrumented with
+ // poisoning.
+ std::vector<std::string> patterns_array = {
+ "ldr <<Map:r[0-9]+>>, \\[<<Obj:r[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap:r[0-9]+>>, \\[pc, #", // load expected map
+ "cmp <<Map>>, <<ExpMap>>", // compare maps
+ "bne", // deopt if different
+ "eorne " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
+ "csdb", // spec. barrier
+ "ldr <<Field:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load the field
+ "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
+ };
+ CHECK(CheckDisassemblyRegexPatterns("mono", patterns_array));
#endif // ENABLE_DISASSEMBLER
}
@@ -155,141 +78,35 @@ TEST(DisasmPoisonPolymorphicLoad) {
"%OptimizeFunctionOnNextCall(poly);"
"poly(o1);");
- Matchers m;
-
- std::smatch match;
- std::string line;
- std::istringstream reader(DisassembleFunction("poly"));
- bool poisoning_sequence_found = false;
- while (std::getline(reader, line)) {
- if (std::regex_match(line, match, m.map_load_re)) {
- std::string map_reg = match[1];
- std::string object_reg = match[2];
- // Matches that the property access sequence is instrumented with
- // poisoning. We match the following sequence:
- //
- // ldr r1, [r0, #-1] ; load map
- // ldr r2, [pc, #+104] ; load map constant #1
- // cmp r1, r2 ; compare maps
- // beq +Lcase1 ; if match, got to the load
- // eoreq r9, r9, r9 ; update the poison
- // csdb ; speculation barrier
- // ldr r1, [r0, #-1] ; load map
- // ldr r2, [pc, #+304] ; load map constant #2
- // cmp r1, r2 ; compare maps
- // bne +Ldeopt ; deopt if different
- // eorne r9, r9, r9 ; update the poison
- // csdb ; speculation barrier
- // ldr r0, [r0, #+11] ; load the field
- // and r0, r0, r9 ; apply the poison
- // mov r0, r0, asr #1 ; untag
- // b +Ldone ; goto merge point
+ // Matches that the property access sequence is instrumented with
+ // poisoning.
+ std::vector<std::string> patterns_array = {
+ "ldr <<Map0:r[0-9]+>>, \\[<<Obj:r[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap0:r[0-9]+>>, \\[pc", // load map const #1
+ "cmp <<Map0>>, <<ExpMap0>>", // compare maps
+ "beq", // ? go to the load
+ "eoreq " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
+ "csdb", // spec. barrier
+ "ldr <<Map1:r[0-9]+>>, \\[<<Obj>>, #-1\\]", // load map
+ "ldr <<ExpMap1:r[0-9]+>>, \\[pc", // load map const #2
+ "cmp <<Map1>>, <<ExpMap1>>", // compare maps
+ "bne", // deopt if different
+ "eorne " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
+ "csdb", // spec. barrier
+ "ldr <<Field:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load the field
+ "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
+ "mov r[0-9]+, <<Field>>, asr #1", // untag
+ "b", // goto merge point
// Lcase1:
- // eorne r9, r9, r9 ; update the poison
- // csdb ; speculation barrier
- // ldr r0, [r0, #+3] ; load property backing store
- // and r0, r0, r9 ; apply the poison
- // ldr r0, [r0, #+3] ; load the property
- // and r0, r0, r9 ; apply the poison
- // Ldone:
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.load_const_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.cmp_re));
- CHECK_EQ(match[1], map_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.beq_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.eoreq_re));
- CHECK_EQ(match[1], m.poison_reg);
- CHECK_EQ(match[2], m.poison_reg);
- CHECK_EQ(match[3], m.poison_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.csdb_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.map_load_re));
- map_reg = match[1];
- CHECK_EQ(match[2], object_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.load_const_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.cmp_re));
- CHECK_EQ(match[1], map_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.bne_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.eorne_re));
- CHECK_EQ(match[1], m.poison_reg);
- CHECK_EQ(match[2], m.poison_reg);
- CHECK_EQ(match[3], m.poison_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.csdb_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.load_field_re));
- CHECK_EQ(match[2], object_reg);
- std::string field_reg = match[1];
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.mask_re));
- CHECK_EQ(match[1], field_reg);
- CHECK_EQ(match[2], field_reg);
- CHECK_EQ(match[3], m.poison_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.untag_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.b_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.eorne_re));
- CHECK_EQ(match[1], m.poison_reg);
- CHECK_EQ(match[2], m.poison_reg);
- CHECK_EQ(match[3], m.poison_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.csdb_re));
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.load_field_re));
- CHECK_EQ(match[2], object_reg);
- std::string storage_reg = match[1];
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.mask_re));
- CHECK_EQ(match[1], storage_reg);
- CHECK_EQ(match[2], storage_reg);
- CHECK_EQ(match[3], m.poison_reg);
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.load_field_re));
- CHECK_EQ(match[2], storage_reg);
- field_reg = match[1];
-
- CHECK(std::getline(reader, line));
- CHECK(std::regex_match(line, match, m.mask_re));
- CHECK_EQ(match[1], field_reg);
- CHECK_EQ(match[2], field_reg);
- CHECK_EQ(match[3], m.poison_reg);
-
- poisoning_sequence_found = true;
- break;
- }
- }
-
- CHECK(poisoning_sequence_found);
+ "eorne " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
+ "csdb", // spec. barrier
+ "ldr <<BSt:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load backing store
+ "and <<BSt>>, <<BSt>>, " + kPReg, // apply the poison
+ "ldr <<Prop:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load the property
+ "and <<Prop>>, <<Prop>>, " + kPReg, // apply the poison
+ // Ldone:
+ };
+ CHECK(CheckDisassemblyRegexPatterns("poly", patterns_array));
#endif // ENABLE_DISASSEMBLER
}
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm64.cc b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
new file mode 100644
index 0000000000..d767fea9fb
--- /dev/null
+++ b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
@@ -0,0 +1,158 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The C++ style guide recommends using <re2> instead of <regex>. However, the
+// former isn't available in V8.
+#include <regex> // NOLINT(build/c++11)
+#include <vector>
+
+#include "src/codegen/arm64/register-arm64.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/disasm-regex-helper.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+// Poison register.
+const int kPRegCode = kSpeculationPoisonRegister.code();
+const std::string kPReg = // NOLINT(runtime/string)
+ "x" + std::to_string(kPRegCode);
+} // namespace
+
+TEST(DisasmPoisonMonomorphicLoad) {
+#ifdef ENABLE_DISASSEMBLER
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_untrusted_code_mitigations = true;
+
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function mono(o) { return o.x; };"
+ "%PrepareFunctionForOptimization(mono);"
+ "mono({ x : 1 });"
+ "mono({ x : 1 });"
+ "%OptimizeFunctionOnNextCall(mono);"
+ "mono({ x : 1 });");
+
+ // Matches that the property access sequence is instrumented with
+ // poisoning.
+#if defined(V8_COMPRESS_POINTERS)
+ std::vector<std::string> patterns_array = {
+ "ldur <<Map:w[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap:w[0-9]+>>, pc", // load expected map
+ "cmp <<Map>>, <<ExpMap>>", // compare maps
+ "b.ne", // deopt if different
+ "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
+ "csdb", // spec. barrier
+ "ldursw x<<Field:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
+ "and x<<Field>>, x<<Field>>, " + kPReg, // apply the poison
+ };
+#else
+ std::vector<std::string> patterns_array = {
+ "ldur <<Map:x[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap:x[0-9]+>>, pc", // load expected map
+ "cmp <<Map>>, <<ExpMap>>", // compare maps
+ "b.ne", // deopt if different
+ "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
+ "csdb", // spec. barrier
+ "ldur <<Field:x[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
+ "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
+ };
+#endif
+ CHECK(CheckDisassemblyRegexPatterns("mono", patterns_array));
+#endif // ENABLE_DISASSEMBLER
+}
+
+TEST(DisasmPoisonPolymorphicLoad) {
+#ifdef ENABLE_DISASSEMBLER
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
+
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_untrusted_code_mitigations = true;
+
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function poly(o) { return o.x + 1; };"
+ "let o1 = { x : 1 };"
+ "let o2 = { y : 1 };"
+ "o2.x = 2;"
+ "%PrepareFunctionForOptimization(poly);"
+ "poly(o1);"
+ "poly(o2);"
+ "poly(o1);"
+ "poly(o2);"
+ "%OptimizeFunctionOnNextCall(poly);"
+ "poly(o1);");
+
+ // Matches that the property access sequence is instrumented with
+ // poisoning.
+#if defined(V8_COMPRESS_POINTERS)
+ std::vector<std::string> patterns_array = {
+ "ldur <<Map0:w[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap:w[0-9]+>>, pc", // load map const #1
+ "cmp <<Map0>>, <<ExpMap>>", // compare maps
+ "b.eq", // ? go to the load
+ "csel " + kPReg + ", xzr, " + kPReg + ", eq", // update the poison
+ "csdb", // spec. barrier
+ "ldur <<Map1:w[0-9]+>>, \\[<<Obj>>, #-1\\]", // load map
+ "ldr <<ExpMap1:w[0-9]+>>, pc", // load map const #2
+ "cmp <<Map1>>, <<ExpMap1>>", // compare maps
+ "b.ne", // deopt if different
+ "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
+ "csdb", // spec. barrier
+ "ldur w<<Field:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
+ "and x<<Field>>, x<<Field>>, " + kPReg, // apply the poison
+ "asr w[0-9]+, w<<Field>>, #1", // untag
+ "b", // goto merge point
+ // Lcase1:
+ "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
+ "csdb", // spec. barrier
+ "ldursw x<<BSt:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load backing store
+ "tbz w<<BSt>>, #0, #\\+0x8", // branchful decompress
+ "add x<<BSt>>, x26, x<<BSt>>", // Add root to ref
+ "and x<<BSt>>, x<<BSt>>, " + kPReg, // apply the poison
+ "ldur w<<Prop:[0-9]+>>, \\[x<<BSt>>, #[0-9]+\\]", // load the property
+ "and x<<Prop>>, x<<Prop>>, " + kPReg, // apply the poison
+ // Ldone:
+ };
+#else
+ std::vector<std::string> patterns_array = {
+ "ldur <<Map0:x[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
+ "ldr <<ExpMap0:x[0-9]+>>, pc", // load map const #1
+ "cmp <<Map0>>, <<ExpMap0>>", // compare maps
+ "b.eq", // ? go to the load
+ "csel " + kPReg + ", xzr, " + kPReg + ", eq", // update the poison
+ "csdb", // spec. barrier
+ "ldur <<Map1:x[0-9]+>>, \\[<<Obj>>, #-1\\]", // load map
+ "ldr <<ExpMap1:x[0-9]+>>, pc", // load map const #2
+ "cmp <<Map1>>, <<ExpMap1>>", // compare maps
+ "b.ne", // deopt if different
+ "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
+ "csdb", // spec. barrier
+ "ldur <<Field:x[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
+ "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
+ "asr x[0-9]+, <<Field>>, #32", // untag
+ "b", // goto merge point
+ // Lcase1:
+ "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
+ "csdb", // spec. barrier
+ "ldur <<BSt:x[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load backing store
+ "and <<BSt>>, <<BSt>>, " + kPReg, // apply the poison
+ "ldur <<Prop:x[0-9]+>>, \\[<<BSt>>, #[0-9]+\\]", // load the property
+ "and <<Prop>>, <<Prop>>, " + kPReg, // apply the poison
+ // Ldone:
+ };
+#endif
+ CHECK(CheckDisassemblyRegexPatterns("poly", patterns_array));
+#endif // ENABLE_DISASSEMBLER
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 392782afea..051affc898 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -624,14 +624,12 @@ TEST(ProfileNodeScriptId) {
v8::Local<v8::Script> script_a =
v8_compile(v8_str("function a() { startProfiling(); }\n"));
- script_a->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocalChecked();
+ script_a->Run(env).ToLocalChecked();
v8::Local<v8::Script> script_b =
v8_compile(v8_str("function b() { a(); }\n"
"b();\n"
"stopProfiling();\n"));
- script_b->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocalChecked();
+ script_b->Run(env).ToLocalChecked();
CHECK_EQ(1, iprofiler->GetProfilesCount());
const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
const v8::CpuProfileNode* current = profile->GetTopDownRoot();
@@ -671,7 +669,8 @@ static const char* line_number_test_source_profile_time_functions =
"bar_at_the_second_line();\n"
"function lazy_func_at_6th_line() {}";
-int GetFunctionLineNumber(CpuProfiler& profiler, LocalContext& env,
+int GetFunctionLineNumber(CpuProfiler& profiler, // NOLINT(runtime/references)
+ LocalContext& env, // NOLINT(runtime/references)
const char* name) {
CodeMap* code_map = profiler.generator()->code_map();
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index d71223e73d..1dad8febda 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -36,38 +36,17 @@
#include "src/codegen/macro-assembler.h"
#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/interpreter-irregexp.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler-irregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-bytecode-generator.h"
+#include "src/regexp/regexp-compiler.h"
+#include "src/regexp/regexp-interpreter.h"
+#include "src/regexp/regexp-macro-assembler-arch.h"
#include "src/regexp/regexp-parser.h"
+#include "src/regexp/regexp.h"
#include "src/strings/char-predicates-inl.h"
#include "src/strings/string-stream.h"
#include "src/strings/unicode-inl.h"
#include "src/utils/ostreams.h"
-#include "src/utils/splay-tree-inl.h"
#include "src/zone/zone-list-inl.h"
-
-#if V8_TARGET_ARCH_ARM
-#include "src/regexp/arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/regexp/s390/regexp-macro-assembler-s390.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/regexp/mips/regexp-macro-assembler-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/regexp/x64/regexp-macro-assembler-x64.h"
-#elif V8_TARGET_ARCH_IA32
-#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
-#else
-#error Unknown architecture.
-#endif
-
#include "test/cctest/cctest.h"
namespace v8 {
@@ -281,8 +260,9 @@ TEST(RegExpParser) {
CheckParseEq("\\u0034", "'\x34'");
CheckParseEq("\\u003z", "'u003z'");
CheckParseEq("foo[z]*", "(: 'foo' (# 0 - g [z]))");
- CheckParseEq("^^^$$$\\b\\b\\b\\b", "(: @^i @$i @b)");
- CheckParseEq("\\b\\b\\b\\b\\B\\B\\B\\B\\b\\b\\b\\b", "(: @b @B @b)");
+ CheckParseEq("^^^$$$\\b\\b\\b\\b", "(: @^i @^i @^i @$i @$i @$i @b @b @b @b)");
+ CheckParseEq("\\b\\b\\b\\b\\B\\B\\B\\B\\b\\b\\b\\b",
+ "(: @b @b @b @b @B @B @B @B @b @b @b @b)");
CheckParseEq("\\b\\B\\b", "(: @b @B @b)");
// Unicode regexps
@@ -566,8 +546,8 @@ static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
.ToHandleChecked();
Handle<String> sample_subject =
isolate->factory()->NewStringFromUtf8(CStrVector("")).ToHandleChecked();
- RegExpEngine::Compile(isolate, zone, &compile_data, flags, pattern,
- sample_subject, is_one_byte);
+ RegExp::CompileForTesting(isolate, zone, &compile_data, flags, pattern,
+ sample_subject, is_one_byte);
return compile_data.node;
}
@@ -579,128 +559,10 @@ static void Execute(const char* input, bool multiline, bool unicode,
RegExpNode* node = Compile(input, multiline, unicode, is_one_byte, &zone);
USE(node);
#ifdef DEBUG
- if (dot_output) {
- RegExpEngine::DotPrint(input, node, false);
- }
+ if (dot_output) RegExp::DotPrintForTesting(input, node);
#endif // DEBUG
}
-
-class TestConfig {
- public:
- using Key = int;
- using Value = int;
- static const int kNoKey;
- static int NoValue() { return 0; }
- static inline int Compare(int a, int b) {
- if (a < b)
- return -1;
- else if (a > b)
- return 1;
- else
- return 0;
- }
-};
-
-
-const int TestConfig::kNoKey = 0;
-
-
-static unsigned PseudoRandom(int i, int j) {
- return ~(~((i * 781) ^ (j * 329)));
-}
-
-
-TEST(SplayTreeSimple) {
- static const unsigned kLimit = 1000;
- Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- ZoneSplayTree<TestConfig> tree(&zone);
- bool seen[kLimit];
- for (unsigned i = 0; i < kLimit; i++) seen[i] = false;
-#define CHECK_MAPS_EQUAL() do { \
- for (unsigned k = 0; k < kLimit; k++) \
- CHECK_EQ(seen[k], tree.Find(k, &loc)); \
- } while (false)
- for (int i = 0; i < 50; i++) {
- for (int j = 0; j < 50; j++) {
- int next = PseudoRandom(i, j) % kLimit;
- if (seen[next]) {
- // We've already seen this one. Check the value and remove
- // it.
- ZoneSplayTree<TestConfig>::Locator loc;
- CHECK(tree.Find(next, &loc));
- CHECK_EQ(next, loc.key());
- CHECK_EQ(3 * next, loc.value());
- tree.Remove(next);
- seen[next] = false;
- CHECK_MAPS_EQUAL();
- } else {
- // Check that it wasn't there already and then add it.
- ZoneSplayTree<TestConfig>::Locator loc;
- CHECK(!tree.Find(next, &loc));
- CHECK(tree.Insert(next, &loc));
- CHECK_EQ(next, loc.key());
- loc.set_value(3 * next);
- seen[next] = true;
- CHECK_MAPS_EQUAL();
- }
- int val = PseudoRandom(j, i) % kLimit;
- if (seen[val]) {
- ZoneSplayTree<TestConfig>::Locator loc;
- CHECK(tree.FindGreatestLessThan(val, &loc));
- CHECK_EQ(loc.key(), val);
- break;
- }
- val = PseudoRandom(i + j, i - j) % kLimit;
- if (seen[val]) {
- ZoneSplayTree<TestConfig>::Locator loc;
- CHECK(tree.FindLeastGreaterThan(val, &loc));
- CHECK_EQ(loc.key(), val);
- break;
- }
- }
- }
-}
-
-
-TEST(DispatchTableConstruction) {
- // Initialize test data.
- static const int kLimit = 1000;
- static const int kRangeCount = 8;
- static const int kRangeSize = 16;
- uc16 ranges[kRangeCount][2 * kRangeSize];
- for (int i = 0; i < kRangeCount; i++) {
- Vector<uc16> range(ranges[i], 2 * kRangeSize);
- for (int j = 0; j < 2 * kRangeSize; j++) {
- range[j] = PseudoRandom(i + 25, j + 87) % kLimit;
- }
- std::sort(range.begin(), range.end());
- for (int j = 1; j < 2 * kRangeSize; j++) {
- CHECK(range[j-1] <= range[j]);
- }
- }
- // Enter test data into dispatch table.
- Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- DispatchTable table(&zone);
- for (int i = 0; i < kRangeCount; i++) {
- uc16* range = ranges[i];
- for (int j = 0; j < 2 * kRangeSize; j += 2)
- table.AddRange(CharacterRange::Range(range[j], range[j + 1]), i, &zone);
- }
- // Check that the table looks as we would expect
- for (int p = 0; p < kLimit; p++) {
- OutSet* outs = table.Get(p);
- for (int j = 0; j < kRangeCount; j++) {
- uc16* range = ranges[j];
- bool is_on = false;
- for (int k = 0; !is_on && (k < 2 * kRangeSize); k += 2)
- is_on = (range[k] <= p && p <= range[k + 1]);
- CHECK_EQ(is_on, outs->Get(j));
- }
- }
-}
-
-
// Test of debug-only syntax.
#ifdef DEBUG
@@ -1392,7 +1254,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
TEST(MacroAssembler) {
Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- RegExpMacroAssemblerIrregexp m(CcTest::i_isolate(), &zone);
+ RegExpBytecodeGenerator m(CcTest::i_isolate(), &zone);
// ^f(o)o.
Label start, fail, backtrack;
@@ -1451,43 +1313,6 @@ TEST(MacroAssembler) {
CHECK_EQ(42, captures[0]);
}
-TEST(AddInverseToTable) {
- static const int kLimit = 1000;
- static const int kRangeCount = 16;
- for (int t = 0; t < 10; t++) {
- Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- ZoneList<CharacterRange>* ranges =
- new(&zone) ZoneList<CharacterRange>(kRangeCount, &zone);
- for (int i = 0; i < kRangeCount; i++) {
- int from = PseudoRandom(t + 87, i + 25) % kLimit;
- int to = from + (PseudoRandom(i + 87, t + 25) % (kLimit / 20));
- if (to > kLimit) to = kLimit;
- ranges->Add(CharacterRange::Range(from, to), &zone);
- }
- DispatchTable table(&zone);
- DispatchTableConstructor cons(&table, false, &zone);
- cons.set_choice_index(0);
- cons.AddInverse(ranges);
- for (int i = 0; i < kLimit; i++) {
- bool is_on = false;
- for (int j = 0; !is_on && j < kRangeCount; j++)
- is_on = ranges->at(j).Contains(i);
- OutSet* set = table.Get(i);
- CHECK_EQ(is_on, set->Get(0) == false);
- }
- }
- Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
- ZoneList<CharacterRange>* ranges =
- new(&zone) ZoneList<CharacterRange>(1, &zone);
- ranges->Add(CharacterRange::Range(0xFFF0, 0xFFFE), &zone);
- DispatchTable table(&zone);
- DispatchTableConstructor cons(&table, false, &zone);
- cons.set_choice_index(0);
- cons.AddInverse(ranges);
- CHECK(!table.Get(0xFFFE)->Get(0));
- CHECK(table.Get(0xFFFF)->Get(0));
-}
-
#ifndef V8_INTL_SUPPORT
static uc32 canonicalize(uc32 c) {
unibrow::uchar canon[unibrow::Ecma262Canonicalize::kMaxWidth];
@@ -1649,10 +1474,10 @@ TEST(CharacterRangeCaseIndependence) {
#endif // !V8_INTL_SUPPORT
}
-
-static bool InClass(uc32 c, ZoneList<CharacterRange>* ranges) {
+static bool InClass(uc32 c,
+ const UnicodeRangeSplitter::CharacterRangeVector* ranges) {
if (ranges == nullptr) return false;
- for (int i = 0; i < ranges->length(); i++) {
+ for (size_t i = 0; i < ranges->size(); i++) {
CharacterRange range = ranges->at(i);
if (range.from() <= c && c <= range.to())
return true;
@@ -1660,13 +1485,12 @@ static bool InClass(uc32 c, ZoneList<CharacterRange>* ranges) {
return false;
}
-
TEST(UnicodeRangeSplitter) {
Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange>* base =
new(&zone) ZoneList<CharacterRange>(1, &zone);
base->Add(CharacterRange::Everything(), &zone);
- UnicodeRangeSplitter splitter(&zone, base);
+ UnicodeRangeSplitter splitter(base);
// BMP
for (uc32 c = 0; c < 0xD800; c++) {
CHECK(InClass(c, splitter.bmp()));
@@ -1959,6 +1783,10 @@ TEST(UncachedExternalString) {
ExpectString("external.substring(1).match(re)[1]", "z");
}
+#undef CHECK_PARSE_ERROR
+#undef CHECK_SIMPLE
+#undef CHECK_MIN_MAX
+
} // namespace test_regexp
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index 137053788d..d041903639 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -16,8 +16,7 @@ namespace {
AllocationSpace GetSpaceFromObject(Object object) {
DCHECK(object.IsHeapObject());
return MemoryChunk::FromHeapObject(HeapObject::cast(object))
- ->owner()
- ->identity();
+ ->owner_identity();
}
} // namespace
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 3c8b38898d..1ed1547b34 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -98,7 +98,8 @@ class TestSerializer {
return v8_isolate;
}
- static v8::Isolate* NewIsolateFromBlob(StartupBlobs& blobs) {
+ static v8::Isolate* NewIsolateFromBlob(
+ StartupBlobs& blobs) { // NOLINT(runtime/references)
SnapshotData startup_snapshot(blobs.startup);
SnapshotData read_only_snapshot(blobs.read_only);
ReadOnlyDeserializer read_only_deserializer(&read_only_snapshot);
@@ -148,8 +149,10 @@ namespace {
// Convenience wrapper around the convenience wrapper.
v8::StartupData CreateSnapshotDataBlob(const char* embedded_source) {
- return CreateSnapshotDataBlobInternal(
+ v8::StartupData data = CreateSnapshotDataBlobInternal(
v8::SnapshotCreator::FunctionCodeHandling::kClear, embedded_source);
+ ReadOnlyHeap::ClearSharedHeapForTest();
+ return data;
}
} // namespace
@@ -201,14 +204,13 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
source_length);
}
-
-static v8::Isolate* Deserialize(StartupBlobs& blobs) {
+static v8::Isolate* Deserialize(
+ StartupBlobs& blobs) { // NOLINT(runtime/references)
v8::Isolate* isolate = TestSerializer::NewIsolateFromBlob(blobs);
CHECK(isolate);
return isolate;
}
-
static void SanityCheck(v8::Isolate* v8_isolate) {
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
v8::HandleScope scope(v8_isolate);
@@ -275,53 +277,6 @@ UNINITIALIZED_TEST(StartupSerializerOnce32K) {
TestStartupSerializerOnceImpl();
}
-UNINITIALIZED_TEST(StartupSerializerRootMapDependencies) {
- DisableAlwaysOpt();
- v8::SnapshotCreator snapshot_creator;
- v8::Isolate* isolate = snapshot_creator.GetIsolate();
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- Isolate* internal_isolate = reinterpret_cast<Isolate*>(isolate);
- // Here is interesting retaining path:
- // - FreeSpaceMap
- // - Map for Map types itself
- // - NullValue
- // - Internalized one byte string
- // - Map for Internalized one byte string
- // - TheHoleValue
- // - HeapNumber
- // HeapNumber objects require kDoubleUnaligned on 32-bit
- // platforms. So, without special measures we're risking to serialize
- // object, requiring alignment before FreeSpaceMap is fully serialized.
- v8::internal::Handle<Map> map(
- ReadOnlyRoots(internal_isolate).one_byte_internalized_string_map(),
- internal_isolate);
- // Need to avoid DCHECKs inside SnapshotCreator.
- snapshot_creator.SetDefaultContext(v8::Context::New(isolate));
- }
-
- v8::StartupData startup_data = snapshot_creator.CreateBlob(
- v8::SnapshotCreator::FunctionCodeHandling::kKeep);
-
- v8::Isolate::CreateParams params;
- params.snapshot_blob = &startup_data;
- params.array_buffer_allocator = CcTest::array_buffer_allocator();
- isolate = v8::Isolate::New(params);
-
- {
- v8::HandleScope handle_scope(isolate);
- v8::Isolate::Scope isolate_scope(isolate);
-
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
-
- SanityCheck(isolate);
- }
- isolate->Dispose();
- delete[] startup_data.data;
-}
-
UNINITIALIZED_TEST(StartupSerializerTwice) {
DisableAlwaysOpt();
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
@@ -811,6 +766,7 @@ void TestCustomSnapshotDataBlobWithIrregexpCode(
DisableEmbeddedBlobRefcounting();
v8::StartupData data1 =
CreateSnapshotDataBlobInternal(function_code_handling, source);
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params1;
params1.snapshot_blob = &data1;
@@ -958,6 +914,7 @@ void TypedArrayTestHelper(
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1085,6 +1042,7 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobDetachedArrayBuffer) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1155,6 +1113,7 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobOnOrOffHeapTypedArray) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1208,6 +1167,7 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobTypedArrayNoEmbedderFieldCallback) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -1300,8 +1260,8 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope c_scope(context);
v8::Local<v8::Value> result = CompileRun(source2);
- v8::Maybe<bool> compare = v8_str("42")->Equals(
- v8::Isolate::GetCurrent()->GetCurrentContext(), result);
+ v8::Maybe<bool> compare =
+ v8_str("42")->Equals(isolate->GetCurrentContext(), result);
CHECK(compare.FromJust());
}
isolate->Dispose();
@@ -1518,11 +1478,11 @@ TEST(TestThatAlwaysFails) {
int CountBuiltins() {
// Check that we have not deserialized any additional builtin.
- HeapIterator iterator(CcTest::heap());
+ HeapObjectIterator iterator(CcTest::heap());
DisallowHeapAllocation no_allocation;
int counter = 0;
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
if (obj.IsCode() && Code::cast(obj).kind() == Code::BUILTIN) counter++;
}
return counter;
@@ -1969,10 +1929,10 @@ TEST(CodeSerializerThreeBigStrings) {
v8::Maybe<int32_t> result =
CompileRun("(a + b).length")
- ->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
+ ->Int32Value(CcTest::isolate()->GetCurrentContext());
CHECK_EQ(length_of_a + length_of_b, result.FromJust());
result = CompileRun("(b + c).length")
- ->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext());
+ ->Int32Value(CcTest::isolate()->GetCurrentContext());
CHECK_EQ(length_of_b + length_of_c, result.FromJust());
Heap* heap = isolate->heap();
v8::Local<v8::String> result_str =
@@ -2591,6 +2551,7 @@ UNINITIALIZED_TEST(SnapshotCreatorMultipleContexts) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2729,6 +2690,7 @@ UNINITIALIZED_TEST(SnapshotCreatorExternalReferences) {
// Deserialize with the original external reference.
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2754,6 +2716,7 @@ UNINITIALIZED_TEST(SnapshotCreatorExternalReferences) {
// Deserialize with some other external reference.
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2802,6 +2765,7 @@ UNINITIALIZED_TEST(SnapshotCreatorShortExternalReferences) {
// Deserialize with an incomplete list of external references.
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2862,6 +2826,7 @@ UNINITIALIZED_TEST(SnapshotCreatorNoExternalReferencesDefault) {
// Deserialize with an incomplete list of external references.
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2911,6 +2876,7 @@ UNINITIALIZED_TEST(SnapshotCreatorPreparseDataAndNoOuterScope) {
// Deserialize with an incomplete list of external references.
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -2950,6 +2916,7 @@ UNINITIALIZED_TEST(SnapshotCreatorArrayJoinWithKeep) {
DisableAlwaysOpt();
DisableEmbeddedBlobRefcounting();
v8::StartupData blob = CreateCustomSnapshotArrayJoinWithKeep();
+ ReadOnlyHeap::ClearSharedHeapForTest();
// Deserialize with an incomplete list of external references.
{
@@ -2971,12 +2938,60 @@ UNINITIALIZED_TEST(SnapshotCreatorArrayJoinWithKeep) {
FreeCurrentEmbeddedBlob();
}
+v8::StartupData CreateCustomSnapshotWithDuplicateFunctions() {
+ v8::SnapshotCreator creator;
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ v8::HandleScope handle_scope(isolate);
+ {
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ CompileRun(
+ "function f() { return (() => 'a'); }\n"
+ "let g1 = f();\n"
+ "let g2 = f();\n");
+ ExpectString("g1()", "a");
+ ExpectString("g2()", "a");
+ creator.SetDefaultContext(context);
+ }
+ }
+ return creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kKeep);
+}
+
+UNINITIALIZED_TEST(SnapshotCreatorDuplicateFunctions) {
+ DisableAlwaysOpt();
+ DisableEmbeddedBlobRefcounting();
+ v8::StartupData blob = CreateCustomSnapshotWithDuplicateFunctions();
+ ReadOnlyHeap::ClearSharedHeapForTest();
+
+ // Deserialize with an incomplete list of external references.
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ // Test-appropriate equivalent of v8::Isolate::New.
+ v8::Isolate* isolate = TestSerializer::NewIsolate(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ ExpectString("g1()", "a");
+ ExpectString("g2()", "a");
+ }
+ isolate->Dispose();
+ }
+ delete[] blob.data;
+ FreeCurrentEmbeddedBlob();
+}
+
TEST(SnapshotCreatorNoExternalReferencesCustomFail1) {
DisableAlwaysOpt();
v8::StartupData blob = CreateSnapshotWithDefaultAndCustom();
// Deserialize with an incomplete list of external references.
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3002,6 +3017,7 @@ TEST(SnapshotCreatorNoExternalReferencesCustomFail2) {
// Deserialize with an incomplete list of external references.
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3120,6 +3136,7 @@ UNINITIALIZED_TEST(SnapshotCreatorTemplates) {
}
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3533,6 +3550,7 @@ UNINITIALIZED_TEST(SnapshotCreatorIncludeGlobalProxy) {
}
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3667,6 +3685,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedNotRehashable) {
CHECK(!blob.CanBeRehashed());
}
+ ReadOnlyHeap::ClearSharedHeapForTest();
i::FLAG_hash_seed = 1337;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3802,6 +3821,7 @@ UNINITIALIZED_TEST(WeakArraySerializationInSnapshot) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams create_params;
create_params.snapshot_blob = &blob;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
diff --git a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
index 4e6b196e70..914444c634 100644
--- a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
+++ b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
@@ -14,7 +14,7 @@ namespace internal {
namespace {
-void AddSigned(std::set<Smi>& smis, int64_t x) {
+void AddSigned(std::set<Smi>& smis, int64_t x) { // NOLINT(runtime/references)
if (!Smi::IsValid(x)) return;
smis.insert(Smi::FromInt(static_cast<int>(x)));
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 7166b6b41f..796f38a73b 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -625,8 +625,8 @@ TEST(ConsStringWithEmptyFirstFlatten) {
i::Handle<i::String> new_fst = isolate->factory()->empty_string();
i::Handle<i::String> new_snd =
isolate->factory()->NewStringFromAsciiChecked("snd012345012345678");
- cons->set_first(isolate, *new_fst);
- cons->set_second(isolate, *new_snd);
+ cons->set_first(*new_fst);
+ cons->set_second(*new_snd);
CHECK(!cons->IsFlat());
CHECK_EQ(initial_length, new_fst->length() + new_snd->length());
CHECK_EQ(initial_length, cons->length());
@@ -1522,11 +1522,11 @@ TEST(SliceFromSlice) {
UNINITIALIZED_TEST(OneByteArrayJoin) {
v8::Isolate::CreateParams create_params;
// Set heap limits.
- create_params.constraints.set_max_semi_space_size_in_kb(1024);
+ create_params.constraints.set_max_young_generation_size_in_bytes(3 * MB);
#ifdef DEBUG
- create_params.constraints.set_max_old_space_size(20);
+ create_params.constraints.set_max_old_generation_size_in_bytes(20 * MB);
#else
- create_params.constraints.set_max_old_space_size(7);
+ create_params.constraints.set_max_old_generation_size_in_bytes(7 * MB);
#endif
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index e21c23e82c..ca9ac5efaa 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -345,7 +345,7 @@ TEST(TerminateAndReenterFromThreadItself) {
isolate, TerminateCurrentThread, ReenterAfterTermination);
v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
+ CHECK(!isolate->IsExecutionTerminating());
// Create script strings upfront as it won't work when terminating.
reenter_script_1.Reset(isolate, v8_str(
"function f() {"
@@ -377,7 +377,7 @@ TEST(TerminateAndReenterFromThreadItselfWithOuterTryCatch) {
isolate, TerminateCurrentThread, ReenterAfterTermination);
v8::Local<v8::Context> context = v8::Context::New(isolate, nullptr, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
+ CHECK(!isolate->IsExecutionTerminating());
// Create script strings upfront as it won't work when terminating.
reenter_script_1.Reset(isolate, v8_str("function f() {"
" var term = true;"
@@ -411,25 +411,25 @@ TEST(TerminateAndReenterFromThreadItselfWithOuterTryCatch) {
}
void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::TryCatch try_catch(args.GetIsolate());
- CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
- v8::MaybeLocal<v8::Value> result =
- CompileRun(args.GetIsolate()->GetCurrentContext(),
- "var term = true;"
- "while(true) {"
- " if (term) terminate();"
- " term = false;"
- "}"
- "fail();");
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::TryCatch try_catch(isolate);
+ CHECK(!isolate->IsExecutionTerminating());
+ v8::MaybeLocal<v8::Value> result = CompileRun(isolate->GetCurrentContext(),
+ "var term = true;"
+ "while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ "}"
+ "fail();");
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::Isolate::GetCurrent()->IsExecutionTerminating());
+ CHECK(isolate->IsExecutionTerminating());
CHECK(try_catch.HasTerminated());
- CcTest::isolate()->CancelTerminateExecution();
- CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
+ isolate->CancelTerminateExecution();
+ CHECK(!isolate->IsExecutionTerminating());
}
@@ -467,7 +467,7 @@ void MicrotaskLoopForever(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Function::New(isolate->GetCurrentContext(), MicrotaskShouldNotRun)
.ToLocalChecked());
CompileRun("terminate(); while (true) { }");
- CHECK(v8::Isolate::GetCurrent()->IsExecutionTerminating());
+ CHECK(isolate->IsExecutionTerminating());
}
@@ -782,10 +782,10 @@ TEST(TerminationInInnerTryCall) {
CompileRun("inner_try_call_terminate()");
CHECK(try_catch.HasTerminated());
}
- v8::Maybe<int32_t> result = CompileRun("2 + 2")->Int32Value(
- v8::Isolate::GetCurrent()->GetCurrentContext());
+ v8::Maybe<int32_t> result =
+ CompileRun("2 + 2")->Int32Value(isolate->GetCurrentContext());
CHECK_EQ(4, result.FromJust());
- CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
+ CHECK(!isolate->IsExecutionTerminating());
}
@@ -816,8 +816,8 @@ TEST(TerminateAndTryCall) {
CHECK(isolate->IsExecutionTerminating());
}
// V8 then recovers.
- v8::Maybe<int32_t> result = CompileRun("2 + 2")->Int32Value(
- v8::Isolate::GetCurrent()->GetCurrentContext());
+ v8::Maybe<int32_t> result =
+ CompileRun("2 + 2")->Int32Value(isolate->GetCurrentContext());
CHECK_EQ(4, result.FromJust());
CHECK(!isolate->IsExecutionTerminating());
}
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index 5f47cf8419..9e179b4662 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -289,7 +289,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
if (key == *name) {
// Attributes transition.
PropertyAttributes attributes =
- target.GetLastDescriptorDetails().attributes();
+ target.GetLastDescriptorDetails(isolate).attributes();
CHECK_EQ(*attr_maps[static_cast<int>(attributes)], target);
} else {
for (int j = 0; j < PROPS_COUNT; j++) {
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index b14debdba7..fb4740cb92 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -14,10 +14,12 @@
namespace v8 {
namespace internal {
-void TestArrayBufferViewContents(LocalContext& env, bool should_use_buffer) {
+void TestArrayBufferViewContents(
+ LocalContext& env, // NOLINT(runtime/references)
+ bool should_use_buffer) {
v8::Local<v8::Object> obj_a = v8::Local<v8::Object>::Cast(
env->Global()
- ->Get(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("a"))
+ ->Get(env->GetIsolate()->GetCurrentContext(), v8_str("a"))
.ToLocalChecked());
CHECK(obj_a->IsArrayBufferView());
v8::Local<v8::ArrayBufferView> array_buffer_view =
@@ -32,7 +34,6 @@ void TestArrayBufferViewContents(LocalContext& env, bool should_use_buffer) {
}
}
-
TEST(CopyContentsTypedArray) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 7f90a85625..78ec000d65 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -972,7 +972,7 @@ TEST(Regress436816) {
Handle<Map> normalized_map =
Map::Normalize(isolate, map, KEEP_INOBJECT_PROPERTIES, "testing");
- JSObject::MigrateToMap(object, normalized_map);
+ JSObject::MigrateToMap(isolate, object, normalized_map);
CHECK(!object->HasFastProperties());
CHECK(object->map().HasFastPointerLayout());
@@ -1470,7 +1470,7 @@ static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// Migrate |obj| to |new_map| which should shift fields and put the
// |boom_value| to the slot that was earlier recorded by write barrier.
- JSObject::MigrateToMap(obj, new_map);
+ JSObject::MigrateToMap(isolate, obj, new_map);
Address fake_object = obj_value->ptr() + kTaggedSize;
uint64_t boom_value = bit_cast<uint64_t>(fake_object);
@@ -1553,7 +1553,7 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// Migrate |obj| to |new_map| which should shift fields and put the
// |boom_value| to the slot that was earlier recorded by incremental write
// barrier.
- JSObject::MigrateToMap(obj, new_map);
+ JSObject::MigrateToMap(isolate, obj, new_map);
uint64_t boom_value = UINT64_C(0xBAAD0176A37C28E1);
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 75d80329f5..8b67233963 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -19,7 +19,6 @@
#include "src/strings/char-predicates.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
-#include "torque-generated/builtins-test-gen-tq.h"
namespace v8 {
namespace internal {
@@ -499,6 +498,93 @@ TEST(TestStaticAssert) {
ft.Call();
}
+TEST(TestLoadEliminationFixed) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestLoadEliminationFixed(
+ m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ asm_tester.GenerateCode();
+}
+
+TEST(TestLoadEliminationVariable) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestLoadEliminationVariable(
+ m.UncheckedCast<Context>(m.HeapConstant(context)));
+ m.Return(m.UndefinedConstant());
+ }
+ asm_tester.GenerateCode();
+}
+
+TEST(TestRedundantArrayElementCheck) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.Return(m.TestRedundantArrayElementCheck(
+ m.UncheckedCast<Context>(m.HeapConstant(context))));
+ }
+ asm_tester.GenerateCode();
+}
+
+TEST(TestRedundantSmiCheck) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ Handle<Context> context =
+ Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.Return(m.TestRedundantSmiCheck(
+ m.UncheckedCast<Context>(m.HeapConstant(context))));
+ }
+ asm_tester.GenerateCode();
+}
+
+TEST(TestGenericStruct1) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestGenericStruct1();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
+TEST(TestGenericStruct2) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ { m.Return(m.TestGenericStruct2().fst); }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index c4b8adddf4..ab8545bf5d 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -7,6 +7,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/wasm/wasm-arguments.h"
#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -41,46 +42,33 @@ class CWasmEntryArgTester {
Handle<WasmInstanceObject> instance(runner_.builder().instance_object());
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
- c_wasm_entry_fn_ = WasmDebugInfo::GetCWasmEntry(debug_info, sig_);
+ c_wasm_entry_ = WasmDebugInfo::GetCWasmEntry(debug_info, sig_);
}
template <typename... Rest>
- void WriteToBuffer(Address buf, Rest... rest) {
+ void WriteToBuffer(CWasmArgumentsPacker* packer, Rest... rest) {
static_assert(sizeof...(rest) == 0, "this is the base case");
}
template <typename First, typename... Rest>
- void WriteToBuffer(Address buf, First first, Rest... rest) {
- WriteUnalignedValue(buf, first);
- WriteToBuffer(buf + sizeof(first), rest...);
+ void WriteToBuffer(CWasmArgumentsPacker* packer, First first, Rest... rest) {
+ packer->Push(first);
+ WriteToBuffer(packer, rest...);
}
void CheckCall(Args... args) {
- std::vector<uint8_t> arg_buffer(sizeof...(args) * 8);
- WriteToBuffer(reinterpret_cast<Address>(arg_buffer.data()), args...);
-
- Handle<Object> receiver = isolate_->factory()->undefined_value();
- Handle<Object> buffer_obj(
- Object(reinterpret_cast<Address>(arg_buffer.data())), isolate_);
- CHECK(!buffer_obj->IsHeapObject());
- Handle<Object> code_entry_obj(Object(wasm_code_->instruction_start()),
- isolate_);
- CHECK(!code_entry_obj->IsHeapObject());
- Handle<Object> call_args[]{code_entry_obj,
- runner_.builder().instance_object(), buffer_obj};
- static_assert(
- arraysize(call_args) == compiler::CWasmEntryParameters::kNumParameters,
- "adapt this test");
+ CWasmArgumentsPacker packer(CWasmArgumentsPacker::TotalSize(sig_));
+ WriteToBuffer(&packer, args...);
+ Address wasm_call_target = wasm_code_->instruction_start();
+ Handle<Object> object_ref = runner_.builder().instance_object();
wasm_code_->native_module()->SetExecutable(true);
- MaybeHandle<Object> return_obj = Execution::Call(
- isolate_, c_wasm_entry_fn_, receiver, arraysize(call_args), call_args);
- CHECK(!return_obj.is_null());
- CHECK(return_obj.ToHandleChecked()->IsSmi());
- CHECK_EQ(0, Smi::ToInt(*return_obj.ToHandleChecked()));
+ Execution::CallWasm(isolate_, c_wasm_entry_, wasm_call_target, object_ref,
+ packer.argv());
+ CHECK(!isolate_->has_pending_exception());
+ packer.Reset();
// Check the result.
- ReturnType result = ReadUnalignedValue<ReturnType>(
- reinterpret_cast<Address>(arg_buffer.data()));
+ ReturnType result = packer.Pop<ReturnType>();
ReturnType expected = expected_fn_(args...);
if (std::is_floating_point<ReturnType>::value) {
CHECK_DOUBLE_EQ(expected, result);
@@ -94,7 +82,7 @@ class CWasmEntryArgTester {
Isolate* isolate_;
std::function<ReturnType(Args...)> expected_fn_;
FunctionSig* sig_;
- Handle<JSFunction> c_wasm_entry_fn_;
+ Handle<Code> c_wasm_entry_;
WasmCode* wasm_code_;
};
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index 90b9f6e642..dc02cfd14a 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -224,7 +224,7 @@ TEST(JumpTablePatchingStress) {
Address slot_start = reinterpret_cast<Address>(buffer->start());
for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
TRACE("Hammering on jump table slot #%d ...\n", slot);
- uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot);
+ uint32_t slot_offset = JumpTableAssembler::JumpSlotIndexToOffset(slot);
std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
Address thunk1 =
GenerateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index 278a6ec7bc..921d3b8e05 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -19,7 +19,17 @@ namespace v8 {
namespace internal {
namespace wasm {
-WASM_EXEC_TEST(Int32AsmjsDivS) {
+// Liftoff does not support asm.js, and is never invoked with asm.js code in
+// production. Hence test asm.js with TurboFan and Interpreter only.
+#define ASMJS_EXEC_TEST(name) \
+ void RunWasm_##name(ExecutionTier execution_tier); \
+ TEST(RunWasmTurbofan_##name) { RunWasm_##name(ExecutionTier::kTurbofan); } \
+ TEST(RunWasmInterpreter_##name) { \
+ RunWasm_##name(ExecutionTier::kInterpreter); \
+ } \
+ void RunWasm_##name(ExecutionTier execution_tier)
+
+ASMJS_EXEC_TEST(Int32AsmjsDivS) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_BINOP(kExprI32AsmjsDivS, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -31,7 +41,7 @@ WASM_EXEC_TEST(Int32AsmjsDivS) {
CHECK_EQ(0, r.Call(kMin, 0));
}
-WASM_EXEC_TEST(Int32AsmjsRemS) {
+ASMJS_EXEC_TEST(Int32AsmjsRemS) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_BINOP(kExprI32AsmjsRemS, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -43,7 +53,7 @@ WASM_EXEC_TEST(Int32AsmjsRemS) {
CHECK_EQ(0, r.Call(kMin, 0));
}
-WASM_EXEC_TEST(Int32AsmjsDivU) {
+ASMJS_EXEC_TEST(Int32AsmjsDivU) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_BINOP(kExprI32AsmjsDivU, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -55,7 +65,7 @@ WASM_EXEC_TEST(Int32AsmjsDivU) {
CHECK_EQ(0, r.Call(kMin, 0));
}
-WASM_EXEC_TEST(Int32AsmjsRemU) {
+ASMJS_EXEC_TEST(Int32AsmjsRemU) {
WasmRunner<int32_t, int32_t, int32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_BINOP(kExprI32AsmjsRemU, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -67,7 +77,7 @@ WASM_EXEC_TEST(Int32AsmjsRemU) {
CHECK_EQ(kMin, r.Call(kMin, -1));
}
-WASM_EXEC_TEST(I32AsmjsSConvertF32) {
+ASMJS_EXEC_TEST(I32AsmjsSConvertF32) {
WasmRunner<int32_t, float> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF32, WASM_GET_LOCAL(0)));
@@ -78,7 +88,7 @@ WASM_EXEC_TEST(I32AsmjsSConvertF32) {
}
}
-WASM_EXEC_TEST(I32AsmjsSConvertF64) {
+ASMJS_EXEC_TEST(I32AsmjsSConvertF64) {
WasmRunner<int32_t, double> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF64, WASM_GET_LOCAL(0)));
@@ -89,7 +99,7 @@ WASM_EXEC_TEST(I32AsmjsSConvertF64) {
}
}
-WASM_EXEC_TEST(I32AsmjsUConvertF32) {
+ASMJS_EXEC_TEST(I32AsmjsUConvertF32) {
WasmRunner<uint32_t, float> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF32, WASM_GET_LOCAL(0)));
@@ -100,7 +110,7 @@ WASM_EXEC_TEST(I32AsmjsUConvertF32) {
}
}
-WASM_EXEC_TEST(I32AsmjsUConvertF64) {
+ASMJS_EXEC_TEST(I32AsmjsUConvertF64) {
WasmRunner<uint32_t, double> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF64, WASM_GET_LOCAL(0)));
@@ -111,7 +121,7 @@ WASM_EXEC_TEST(I32AsmjsUConvertF64) {
}
}
-WASM_EXEC_TEST(LoadMemI32_oob_asm) {
+ASMJS_EXEC_TEST(LoadMemI32_oob_asm) {
WasmRunner<int32_t, uint32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
@@ -131,7 +141,7 @@ WASM_EXEC_TEST(LoadMemI32_oob_asm) {
}
}
-WASM_EXEC_TEST(LoadMemF32_oob_asm) {
+ASMJS_EXEC_TEST(LoadMemF32_oob_asm) {
WasmRunner<float, uint32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
float* memory = r.builder().AddMemoryElems<float>(8);
@@ -151,7 +161,7 @@ WASM_EXEC_TEST(LoadMemF32_oob_asm) {
}
}
-WASM_EXEC_TEST(LoadMemF64_oob_asm) {
+ASMJS_EXEC_TEST(LoadMemF64_oob_asm) {
WasmRunner<double, uint32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
double* memory = r.builder().AddMemoryElems<double>(8);
@@ -173,7 +183,7 @@ WASM_EXEC_TEST(LoadMemF64_oob_asm) {
}
}
-WASM_EXEC_TEST(StoreMemI32_oob_asm) {
+ASMJS_EXEC_TEST(StoreMemI32_oob_asm) {
WasmRunner<int32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().ChangeOriginToAsmjs();
int32_t* memory = r.builder().AddMemoryElems<int32_t>(8);
@@ -195,6 +205,42 @@ WASM_EXEC_TEST(StoreMemI32_oob_asm) {
}
}
+ASMJS_EXEC_TEST(Int32AsmjsDivS_byzero_const) {
+ for (int8_t denom = -2; denom < 8; ++denom) {
+ WasmRunner<int32_t, int32_t> r(execution_tier);
+ r.builder().ChangeOriginToAsmjs();
+ BUILD(r, WASM_I32_ASMJS_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
+ FOR_INT32_INPUTS(i) {
+ if (denom == 0) {
+ CHECK_EQ(0, r.Call(i));
+ } else if (denom == -1 && i == std::numeric_limits<int32_t>::min()) {
+ CHECK_EQ(std::numeric_limits<int32_t>::min(), r.Call(i));
+ } else {
+ CHECK_EQ(i / denom, r.Call(i));
+ }
+ }
+ }
+}
+
+ASMJS_EXEC_TEST(Int32AsmjsRemS_byzero_const) {
+ for (int8_t denom = -2; denom < 8; ++denom) {
+ WasmRunner<int32_t, int32_t> r(execution_tier);
+ r.builder().ChangeOriginToAsmjs();
+ BUILD(r, WASM_I32_ASMJS_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
+ FOR_INT32_INPUTS(i) {
+ if (denom == 0) {
+ CHECK_EQ(0, r.Call(i));
+ } else if (denom == -1 && i == std::numeric_limits<int32_t>::min()) {
+ CHECK_EQ(0, r.Call(i));
+ } else {
+ CHECK_EQ(i % denom, r.Call(i));
+ }
+ }
+ }
+}
+
+#undef ASMJS_EXEC_TEST
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
index 7895b192f8..60cda4adde 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-atomics.cc
@@ -304,6 +304,19 @@ WASM_EXEC_TEST(I32AtomicStoreParameter) {
CHECK_EQ(10, r.Call(10));
CHECK_EQ(20, r.builder().ReadMemory(&memory[0]));
}
+
+WASM_EXEC_TEST(AtomicFence) {
+ EXPERIMENTAL_FLAG_SCOPE(threads);
+ WasmRunner<uint32_t> r(execution_tier);
+ // Note that this test specifically doesn't use a shared memory, as the fence
+ // instruction does not target a particular linear memory. It may occur in
+ // modules which declare no memory, or a non-shared memory, without causing a
+ // validation error.
+
+ BUILD(r, WASM_ATOMICS_FENCE, WASM_ZERO);
+ CHECK_EQ(0, r.Call());
+}
+
} // namespace test_run_wasm_atomics
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
index 097287c41d..e794c00ece 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
@@ -13,8 +13,9 @@ namespace wasm {
namespace test_run_wasm_bulk_memory {
namespace {
-void CheckMemoryEquals(TestingModuleBuilder& builder, size_t index,
- const std::vector<byte>& expected) {
+void CheckMemoryEquals(
+ TestingModuleBuilder& builder, // NOLINT(runtime/references)
+ size_t index, const std::vector<byte>& expected) {
const byte* mem_start = builder.raw_mem_start<byte>();
const byte* mem_end = builder.raw_mem_end<byte>();
size_t mem_size = mem_end - mem_start;
@@ -25,8 +26,9 @@ void CheckMemoryEquals(TestingModuleBuilder& builder, size_t index,
}
}
-void CheckMemoryEqualsZero(TestingModuleBuilder& builder, size_t index,
- size_t length) {
+void CheckMemoryEqualsZero(
+ TestingModuleBuilder& builder, // NOLINT(runtime/references)
+ size_t index, size_t length) {
const byte* mem_start = builder.raw_mem_start<byte>();
const byte* mem_end = builder.raw_mem_end<byte>();
size_t mem_size = mem_end - mem_start;
@@ -37,8 +39,9 @@ void CheckMemoryEqualsZero(TestingModuleBuilder& builder, size_t index,
}
}
-void CheckMemoryEqualsFollowedByZeroes(TestingModuleBuilder& builder,
- const std::vector<byte>& expected) {
+void CheckMemoryEqualsFollowedByZeroes(
+ TestingModuleBuilder& builder, // NOLINT(runtime/references)
+ const std::vector<byte>& expected) {
CheckMemoryEquals(builder, 0, expected);
CheckMemoryEqualsZero(builder, expected.size(),
builder.mem_size() - expected.size());
@@ -129,9 +132,9 @@ WASM_EXEC_TEST(MemoryInitOutOfBounds) {
CHECK_EQ(0xDEADBEEF, r.Call(1000, 0, kWasmPageSize));
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, 0, 1));
- // Copy 0 out-of-bounds fails.
- CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, 0, 0));
- CHECK_EQ(0xDEADBEEF, r.Call(0, kWasmPageSize + 1, 0));
+ // Copy 0 out-of-bounds succeeds.
+ CHECK_EQ(0, r.Call(kWasmPageSize + 1, 0, 0));
+ CHECK_EQ(0, r.Call(0, kWasmPageSize + 1, 0));
// Make sure bounds aren't checked with 32-bit wrapping.
CHECK_EQ(0xDEADBEEF, r.Call(1, 1, 0xFFFFFFFF));
@@ -203,9 +206,10 @@ WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
const uint32_t last_5_bytes = kWasmPageSize - 5;
- // Write all values up to the out-of-bounds access.
+ // Copy with source < destination. Copy would happen backwards,
+ // but the first byte to copy is out-of-bounds, so no data should be written.
CHECK_EQ(0xDEADBEEF, r.Call(last_5_bytes, 0, 6));
- CheckMemoryEquals(r.builder(), last_5_bytes, {11, 22, 33, 44, 55});
+ CheckMemoryEquals(r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
// Copy overlapping with destination < source. Copy will happen forwards, up
// to the out-of-bounds access.
@@ -244,9 +248,9 @@ WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
CHECK_EQ(0xDEADBEEF, r.Call(1000, 0, kWasmPageSize));
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, 0, 1));
- // Copy 0 out-of-bounds fails.
- CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, 0, 0));
- CHECK_EQ(0xDEADBEEF, r.Call(0, kWasmPageSize + 1, 0));
+ // Copy 0 out-of-bounds always succeeds.
+ CHECK_EQ(0, r.Call(kWasmPageSize + 1, 0, 0));
+ CHECK_EQ(0, r.Call(0, kWasmPageSize + 1, 0));
// Make sure bounds aren't checked with 32-bit wrapping.
CHECK_EQ(0xDEADBEEF, r.Call(1, 1, 0xFFFFFFFF));
@@ -319,8 +323,8 @@ WASM_EXEC_TEST(MemoryFillOutOfBounds) {
CHECK_EQ(0xDEADBEEF, r.Call(1000, v, kWasmPageSize));
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, v, 1));
- // Fill 0 out-of-bounds fails.
- CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, v, 0));
+ // Fill 0 out-of-bounds succeeds.
+ CHECK_EQ(0, r.Call(kWasmPageSize + 1, v, 0));
// Make sure bounds aren't checked with 32-bit wrapping.
CHECK_EQ(0xDEADBEEF, r.Call(1, v, 0xFFFFFFFF));
@@ -351,15 +355,19 @@ WASM_EXEC_TEST(DataDropThenMemoryInit) {
CHECK_EQ(0xDEADBEEF, r.Call());
}
-WASM_EXEC_TEST(TableCopyInbounds) {
+void TestTableCopyInbounds(ExecutionTier execution_tier, int table_dst,
+ int table_src) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
const uint32_t kTableSize = 5;
- r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
- BUILD(
- r,
- WASM_TABLE_COPY(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
- kExprI32Const, 0);
+ // Add 10 function tables, even though we only test one table.
+ for (int i = 0; i < 10; ++i) {
+ r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
+ }
+ BUILD(r,
+ WASM_TABLE_COPY(table_dst, table_src, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ kExprI32Const, 0);
for (uint32_t i = 0; i <= kTableSize; ++i) {
r.CheckCallViaJS(0, 0, 0, i); // nop
@@ -368,6 +376,25 @@ WASM_EXEC_TEST(TableCopyInbounds) {
}
}
+WASM_EXEC_TEST(TableCopyInboundsFrom0To0) {
+ TestTableCopyInbounds(execution_tier, 0, 0);
+}
+
+WASM_EXEC_TEST(TableCopyInboundsFrom3To0) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyInbounds(execution_tier, 3, 0);
+}
+
+WASM_EXEC_TEST(TableCopyInboundsFrom5To9) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyInbounds(execution_tier, 5, 9);
+}
+
+WASM_EXEC_TEST(TableCopyInboundsFrom6To6) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyInbounds(execution_tier, 6, 6);
+}
+
namespace {
template <typename... Args>
void CheckTable(Isolate* isolate, Handle<WasmTableObject> table, Args... args) {
@@ -381,7 +408,8 @@ void CheckTable(Isolate* isolate, Handle<WasmTableObject> table, Args... args) {
template <typename WasmRunner, typename... Args>
void CheckTableCall(Isolate* isolate, Handle<WasmTableObject> table,
- WasmRunner& r, uint32_t function_index, Args... args) {
+ WasmRunner& r, // NOLINT(runtime/references)
+ uint32_t function_index, Args... args) {
uint32_t args_length = static_cast<uint32_t>(sizeof...(args));
CHECK_EQ(table->current_length(), args_length);
double expected[] = {args...};
@@ -392,7 +420,7 @@ void CheckTableCall(Isolate* isolate, Handle<WasmTableObject> table,
}
} // namespace
-WASM_EXEC_TEST(TableInitElems) {
+void TestTableInitElems(ExecutionTier execution_tier, int table_index) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
@@ -412,21 +440,26 @@ WASM_EXEC_TEST(TableInitElems) {
// Passive element segment has [f0, f1, f2, f3, f4, null].
function_indexes.push_back(WasmElemSegment::kNullIndex);
- r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
+ // Add 10 function tables, even though we only test one table.
+ for (int i = 0; i < 10; ++i) {
+ r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
+ }
r.builder().AddPassiveElementSegment(function_indexes);
WasmFunctionCompiler& call = r.NewFunction(sigs.i_i(), "call");
- BUILD(call, WASM_CALL_INDIRECT0(sig_index, WASM_GET_LOCAL(0)));
+ BUILD(call,
+ WASM_CALL_INDIRECT_TABLE0(table_index, sig_index, WASM_GET_LOCAL(0)));
const uint32_t call_index = call.function_index();
BUILD(r,
- WASM_TABLE_INIT(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_TABLE_INIT(table_index, 0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
WASM_GET_LOCAL(2)),
kExprI32Const, 0);
- auto table = handle(
- WasmTableObject::cast(r.builder().instance_object()->tables().get(0)),
- isolate);
+ auto table =
+ handle(WasmTableObject::cast(
+ r.builder().instance_object()->tables().get(table_index)),
+ isolate);
const double null = 0xDEADBEEF;
CheckTableCall(isolate, table, r, call_index, null, null, null, null, null);
@@ -453,7 +486,17 @@ WASM_EXEC_TEST(TableInitElems) {
CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 3, 4);
}
-WASM_EXEC_TEST(TableInitOob) {
+WASM_EXEC_TEST(TableInitElems0) { TestTableInitElems(execution_tier, 0); }
+WASM_EXEC_TEST(TableInitElems7) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableInitElems(execution_tier, 7);
+}
+WASM_EXEC_TEST(TableInitElems9) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableInitElems(execution_tier, 9);
+}
+
+void TestTableInitOob(ExecutionTier execution_tier, int table_index) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
@@ -470,21 +513,25 @@ WASM_EXEC_TEST(TableInitOob) {
function_indexes.push_back(fn.function_index());
}
- r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
+ for (int i = 0; i < 10; ++i) {
+ r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
+ }
r.builder().AddPassiveElementSegment(function_indexes);
WasmFunctionCompiler& call = r.NewFunction(sigs.i_i(), "call");
- BUILD(call, WASM_CALL_INDIRECT0(sig_index, WASM_GET_LOCAL(0)));
+ BUILD(call,
+ WASM_CALL_INDIRECT_TABLE0(table_index, sig_index, WASM_GET_LOCAL(0)));
const uint32_t call_index = call.function_index();
BUILD(r,
- WASM_TABLE_INIT(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_TABLE_INIT(table_index, 0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
WASM_GET_LOCAL(2)),
kExprI32Const, 0);
- auto table = handle(
- WasmTableObject::cast(r.builder().instance_object()->tables().get(0)),
- isolate);
+ auto table =
+ handle(WasmTableObject::cast(
+ r.builder().instance_object()->tables().get(table_index)),
+ isolate);
const double null = 0xDEADBEEF;
CheckTableCall(isolate, table, r, call_index, null, null, null, null, null);
@@ -497,9 +544,9 @@ WASM_EXEC_TEST(TableInitOob) {
r.CheckCallViaJS(0xDEADBEEF, 0, 3, 3);
CheckTableCall(isolate, table, r, call_index, 3, 4, null, 0, 1);
- // 0-count is oob.
- r.CheckCallViaJS(0xDEADBEEF, kTableSize + 1, 0, 0);
- r.CheckCallViaJS(0xDEADBEEF, 0, kTableSize + 1, 0);
+ // 0-count is never oob.
+ r.CheckCallViaJS(0, kTableSize + 1, 0, 0);
+ r.CheckCallViaJS(0, 0, kTableSize + 1, 0);
r.CheckCallViaJS(0xDEADBEEF, 0, 0, 6);
r.CheckCallViaJS(0xDEADBEEF, 0, 1, 5);
@@ -519,7 +566,18 @@ WASM_EXEC_TEST(TableInitOob) {
r.CheckCallViaJS(0xDEADBEEF, 0, 10, 1);
}
-WASM_EXEC_TEST(TableCopyElems) {
+WASM_EXEC_TEST(TableInitOob0) { TestTableInitOob(execution_tier, 0); }
+WASM_EXEC_TEST(TableInitOob7) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableInitOob(execution_tier, 7);
+}
+WASM_EXEC_TEST(TableInitOob9) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableInitOob(execution_tier, 9);
+}
+
+void TestTableCopyElems(ExecutionTier execution_tier, int table_dst,
+ int table_src) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
@@ -536,36 +594,72 @@ WASM_EXEC_TEST(TableCopyElems) {
function_indexes[i] = fn.function_index();
}
- r.builder().AddIndirectFunctionTable(function_indexes, kTableSize);
+ for (int i = 0; i < 10; ++i) {
+ r.builder().AddIndirectFunctionTable(function_indexes, kTableSize);
+ }
- BUILD(
- r,
- WASM_TABLE_COPY(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
- kExprI32Const, 0);
+ BUILD(r,
+ WASM_TABLE_COPY(table_dst, table_src, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ kExprI32Const, 0);
r.builder().FreezeSignatureMapAndInitializeWrapperCache();
- auto table = handle(
- WasmTableObject::cast(r.builder().instance_object()->tables().get(0)),
- isolate);
+ auto table =
+ handle(WasmTableObject::cast(
+ r.builder().instance_object()->tables().get(table_dst)),
+ isolate);
+ r.CheckCallViaJS(0, 0, 0, kTableSize);
auto f0 = WasmTableObject::Get(isolate, table, 0);
auto f1 = WasmTableObject::Get(isolate, table, 1);
auto f2 = WasmTableObject::Get(isolate, table, 2);
auto f3 = WasmTableObject::Get(isolate, table, 3);
auto f4 = WasmTableObject::Get(isolate, table, 4);
- CheckTable(isolate, table, f0, f1, f2, f3, f4);
- r.CheckCallViaJS(0, 0, 1, 1);
- CheckTable(isolate, table, f1, f1, f2, f3, f4);
- r.CheckCallViaJS(0, 0, 1, 2);
- CheckTable(isolate, table, f1, f2, f2, f3, f4);
- r.CheckCallViaJS(0, 3, 0, 2);
- CheckTable(isolate, table, f1, f2, f2, f1, f2);
- r.CheckCallViaJS(0, 1, 0, 2);
- CheckTable(isolate, table, f1, f1, f2, f1, f2);
+ if (table_dst == table_src) {
+ CheckTable(isolate, table, f0, f1, f2, f3, f4);
+ r.CheckCallViaJS(0, 0, 1, 1);
+ CheckTable(isolate, table, f1, f1, f2, f3, f4);
+ r.CheckCallViaJS(0, 0, 1, 2);
+ CheckTable(isolate, table, f1, f2, f2, f3, f4);
+ r.CheckCallViaJS(0, 3, 0, 2);
+ CheckTable(isolate, table, f1, f2, f2, f1, f2);
+ r.CheckCallViaJS(0, 1, 0, 2);
+ CheckTable(isolate, table, f1, f1, f2, f1, f2);
+ } else {
+ CheckTable(isolate, table, f0, f1, f2, f3, f4);
+ r.CheckCallViaJS(0, 0, 1, 1);
+ CheckTable(isolate, table, f1, f1, f2, f3, f4);
+ r.CheckCallViaJS(0, 0, 1, 2);
+ CheckTable(isolate, table, f1, f2, f2, f3, f4);
+ r.CheckCallViaJS(0, 3, 0, 2);
+ CheckTable(isolate, table, f1, f2, f2, f0, f1);
+ r.CheckCallViaJS(0, 1, 0, 2);
+ CheckTable(isolate, table, f1, f0, f1, f0, f1);
+ }
}
-WASM_EXEC_TEST(TableCopyCalls) {
+WASM_EXEC_TEST(TableCopyElemsFrom0To0) {
+ TestTableCopyElems(execution_tier, 0, 0);
+}
+
+WASM_EXEC_TEST(TableCopyElemsFrom3To0) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyElems(execution_tier, 3, 0);
+}
+
+WASM_EXEC_TEST(TableCopyElemsFrom5To9) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyElems(execution_tier, 5, 9);
+}
+
+WASM_EXEC_TEST(TableCopyElemsFrom6To6) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyElems(execution_tier, 6, 6);
+}
+
+void TestTableCopyCalls(ExecutionTier execution_tier, int table_dst,
+ int table_src) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
@@ -582,31 +676,65 @@ WASM_EXEC_TEST(TableCopyCalls) {
function_indexes[i] = fn.function_index();
}
- r.builder().AddIndirectFunctionTable(function_indexes, kTableSize);
+ for (int i = 0; i < 10; ++i) {
+ r.builder().AddIndirectFunctionTable(function_indexes, kTableSize);
+ }
WasmFunctionCompiler& call = r.NewFunction(sigs.i_i(), "call");
- BUILD(call, WASM_CALL_INDIRECT0(sig_index, WASM_GET_LOCAL(0)));
+ BUILD(call,
+ WASM_CALL_INDIRECT_TABLE0(table_dst, sig_index, WASM_GET_LOCAL(0)));
const uint32_t call_index = call.function_index();
- BUILD(
- r,
- WASM_TABLE_COPY(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
- kExprI32Const, 0);
+ BUILD(r,
+ WASM_TABLE_COPY(table_dst, table_src, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ kExprI32Const, 0);
- auto table = handle(
- WasmTableObject::cast(r.builder().instance_object()->tables().get(0)),
- isolate);
+ auto table =
+ handle(WasmTableObject::cast(
+ r.builder().instance_object()->tables().get(table_dst)),
+ isolate);
+
+ if (table_dst == table_src) {
+ CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 3, 4);
+ r.CheckCallViaJS(0, 0, 1, 1);
+ CheckTableCall(isolate, table, r, call_index, 1, 1, 2, 3, 4);
+ r.CheckCallViaJS(0, 0, 1, 2);
+ CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 3, 4);
+ r.CheckCallViaJS(0, 3, 0, 2);
+ CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 1, 2);
+ } else {
+ CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 3, 4);
+ r.CheckCallViaJS(0, 0, 1, 1);
+ CheckTableCall(isolate, table, r, call_index, 1, 1, 2, 3, 4);
+ r.CheckCallViaJS(0, 0, 1, 2);
+ CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 3, 4);
+ r.CheckCallViaJS(0, 3, 0, 2);
+ CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 0, 1);
+ }
+}
- CheckTableCall(isolate, table, r, call_index, 0, 1, 2, 3, 4);
- r.CheckCallViaJS(0, 0, 1, 1);
- CheckTableCall(isolate, table, r, call_index, 1, 1, 2, 3, 4);
- r.CheckCallViaJS(0, 0, 1, 2);
- CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 3, 4);
- r.CheckCallViaJS(0, 3, 0, 2);
- CheckTableCall(isolate, table, r, call_index, 1, 2, 2, 1, 2);
+WASM_EXEC_TEST(TableCopyCallsFrom0To0) {
+ TestTableCopyCalls(execution_tier, 0, 0);
+}
+
+WASM_EXEC_TEST(TableCopyCallsFrom3To0) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyCalls(execution_tier, 3, 0);
+}
+
+WASM_EXEC_TEST(TableCopyCallsFrom5To9) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyCalls(execution_tier, 5, 9);
+}
+
+WASM_EXEC_TEST(TableCopyCallsFrom6To6) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyCalls(execution_tier, 6, 6);
}
-WASM_EXEC_TEST(TableCopyOobWrites) {
+void TestTableCopyOobWrites(ExecutionTier execution_tier, int table_dst,
+ int table_src) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
@@ -623,18 +751,23 @@ WASM_EXEC_TEST(TableCopyOobWrites) {
function_indexes[i] = fn.function_index();
}
- r.builder().AddIndirectFunctionTable(function_indexes, kTableSize);
+ for (int i = 0; i < 10; ++i) {
+ r.builder().AddIndirectFunctionTable(function_indexes, kTableSize);
+ }
- BUILD(
- r,
- WASM_TABLE_COPY(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
- kExprI32Const, 0);
+ BUILD(r,
+ WASM_TABLE_COPY(table_dst, table_src, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ kExprI32Const, 0);
r.builder().FreezeSignatureMapAndInitializeWrapperCache();
- auto table = handle(
- WasmTableObject::cast(r.builder().instance_object()->tables().get(0)),
- isolate);
+ auto table =
+ handle(WasmTableObject::cast(
+ r.builder().instance_object()->tables().get(table_dst)),
+ isolate);
+ // Fill the dst table with values from the src table, to make checks easier.
+ r.CheckCallViaJS(0, 0, 0, kTableSize);
auto f0 = WasmTableObject::Get(isolate, table, 0);
auto f1 = WasmTableObject::Get(isolate, table, 1);
auto f2 = WasmTableObject::Get(isolate, table, 2);
@@ -643,35 +776,58 @@ WASM_EXEC_TEST(TableCopyOobWrites) {
CheckTable(isolate, table, f0, f1, f2, f3, f4);
- // Non-overlapping, src < dst.
+ // Non-overlapping, src < dst. Because of src < dst, we copy backwards.
+ // Therefore the first access already traps, and the table is not changed.
r.CheckCallViaJS(0xDEADBEEF, 3, 0, 3);
- CheckTable(isolate, table, f0, f1, f2, f0, f1);
+ CheckTable(isolate, table, f0, f1, f2, f3, f4);
// Non-overlapping, dst < src.
r.CheckCallViaJS(0xDEADBEEF, 0, 4, 2);
- CheckTable(isolate, table, f1, f1, f2, f0, f1);
+ CheckTable(isolate, table, f4, f1, f2, f3, f4);
// Overlapping, src < dst. This is required to copy backward, but the first
// access will be out-of-bounds, so nothing changes.
r.CheckCallViaJS(0xDEADBEEF, 3, 0, 99);
- CheckTable(isolate, table, f1, f1, f2, f0, f1);
+ CheckTable(isolate, table, f4, f1, f2, f3, f4);
// Overlapping, dst < src.
r.CheckCallViaJS(0xDEADBEEF, 0, 1, 99);
- CheckTable(isolate, table, f1, f2, f0, f1, f1);
+ CheckTable(isolate, table, f1, f2, f3, f4, f4);
}
-WASM_EXEC_TEST(TableCopyOob1) {
+WASM_EXEC_TEST(TableCopyOobWritesFrom0To0) {
+ TestTableCopyOobWrites(execution_tier, 0, 0);
+}
+
+WASM_EXEC_TEST(TableCopyOobWritesFrom3To0) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyOobWrites(execution_tier, 3, 0);
+}
+
+WASM_EXEC_TEST(TableCopyOobWritesFrom5To9) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyOobWrites(execution_tier, 5, 9);
+}
+
+WASM_EXEC_TEST(TableCopyOobWritesFrom6To6) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyOobWrites(execution_tier, 6, 6);
+}
+
+void TestTableCopyOob1(ExecutionTier execution_tier, int table_dst,
+ int table_src) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
const uint32_t kTableSize = 5;
- r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
+ for (int i = 0; i < 10; ++i) {
+ r.builder().AddIndirectFunctionTable(nullptr, kTableSize);
+ }
- BUILD(
- r,
- WASM_TABLE_COPY(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
- kExprI32Const, 0);
+ BUILD(r,
+ WASM_TABLE_COPY(table_dst, table_src, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ kExprI32Const, 0);
r.CheckCallViaJS(0, 0, 0, 1); // nop
r.CheckCallViaJS(0, 0, 0, kTableSize); // nop
@@ -681,8 +837,8 @@ WASM_EXEC_TEST(TableCopyOob1) {
{
const uint32_t big = 1000000;
- r.CheckCallViaJS(0xDEADBEEF, big, 0, 0);
- r.CheckCallViaJS(0xDEADBEEF, 0, big, 0);
+ r.CheckCallViaJS(0, big, 0, 0);
+ r.CheckCallViaJS(0, 0, big, 0);
}
for (uint32_t big = 4294967295; big > 1000; big >>= 1) {
@@ -698,6 +854,25 @@ WASM_EXEC_TEST(TableCopyOob1) {
}
}
+WASM_EXEC_TEST(TableCopyOob1From0To0) {
+ TestTableCopyOob1(execution_tier, 0, 0);
+}
+
+WASM_EXEC_TEST(TableCopyOob1From3To0) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyOob1(execution_tier, 3, 0);
+}
+
+WASM_EXEC_TEST(TableCopyOob1From5To9) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyOob1(execution_tier, 5, 9);
+}
+
+WASM_EXEC_TEST(TableCopyOob1From6To6) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ TestTableCopyOob1(execution_tier, 6, 6);
+}
+
WASM_EXEC_TEST(ElemDropTwice) {
EXPERIMENTAL_FLAG_SCOPE(bulk_memory);
WasmRunner<uint32_t> r(execution_tier);
@@ -715,7 +890,7 @@ WASM_EXEC_TEST(ElemDropThenTableInit) {
r.builder().AddIndirectFunctionTable(nullptr, 1);
r.builder().AddPassiveElementSegment({});
BUILD(r, WASM_ELEM_DROP(0),
- WASM_TABLE_INIT(0, WASM_I32V_1(0), WASM_I32V_1(0), WASM_I32V_1(0)),
+ WASM_TABLE_INIT(0, 0, WASM_I32V_1(0), WASM_I32V_1(0), WASM_I32V_1(0)),
kExprI32Const, 0);
r.CheckCallViaJS(0xDEADBEEF);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 8ef9f1fe94..526c5846a2 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -28,6 +28,8 @@ namespace internal {
namespace wasm {
namespace test_run_wasm_module {
+using base::ReadLittleEndianValue;
+using base::WriteLittleEndianValue;
using testing::CompileAndInstantiateForTesting;
namespace {
@@ -41,7 +43,7 @@ void Cleanup(Isolate* isolate = CcTest::InitIsolateOnce()) {
void TestModule(Zone* zone, WasmModuleBuilder* builder,
int32_t expected_result) {
ZoneBuffer buffer(zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
@@ -53,7 +55,7 @@ void TestModule(Zone* zone, WasmModuleBuilder* builder,
void TestModuleException(Zone* zone, WasmModuleBuilder* builder) {
ZoneBuffer buffer(zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
@@ -115,7 +117,7 @@ TEST(Run_WasmModule_CompilationHintsLazy) {
// Compile module. No function is actually compiled as the function is lazy.
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
testing::SetupIsolateForWasmModule(isolate);
@@ -176,7 +178,7 @@ TEST(Run_WasmModule_CompilationHintsNoTiering) {
// Compile module.
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
testing::SetupIsolateForWasmModule(isolate);
@@ -222,7 +224,7 @@ TEST(Run_WasmModule_CompilationHintsTierUp) {
// Compile module.
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
testing::SetupIsolateForWasmModule(isolate);
@@ -288,7 +290,7 @@ TEST(Run_WasmModule_CompilationHintsLazyBaselineEagerTopTier) {
// Compile module.
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
testing::SetupIsolateForWasmModule(isolate);
@@ -427,8 +429,8 @@ TEST(Run_WasmModule_Global) {
TestSignatures sigs;
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint32_t global1 = builder->AddGlobal(kWasmI32, false);
- uint32_t global2 = builder->AddGlobal(kWasmI32, false);
+ uint32_t global1 = builder->AddGlobal(kWasmI32);
+ uint32_t global2 = builder->AddGlobal(kWasmI32);
WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_v());
byte code1[] = {
WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
@@ -566,7 +568,7 @@ TEST(TestInterruptLoop) {
WASM_I32V(121)};
EMIT_CODE_WITH_END(f, code);
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
HandleScope scope(isolate);
testing::SetupIsolateForWasmModule(isolate);
@@ -650,7 +652,7 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) {
HandleScope scope(isolate);
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
@@ -697,7 +699,7 @@ TEST(Run_WasmModule_GrowMemOobVariableIndex) {
HandleScope scope(isolate);
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
@@ -743,9 +745,9 @@ TEST(Run_WasmModule_Global_init) {
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint32_t global1 =
- builder->AddGlobal(kWasmI32, false, false, WasmInitExpr(777777));
+ builder->AddGlobal(kWasmI32, false, WasmInitExpr(777777));
uint32_t global2 =
- builder->AddGlobal(kWasmI32, false, false, WasmInitExpr(222222));
+ builder->AddGlobal(kWasmI32, false, WasmInitExpr(222222));
WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_v());
byte code[] = {
WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
@@ -771,12 +773,11 @@ static void RunWasmModuleGlobalInitTest(ValueType type, CType expected) {
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
for (int i = 0; i < padding; i++) { // pad global before
- builder->AddGlobal(kWasmI32, false, false, WasmInitExpr(i + 20000));
+ builder->AddGlobal(kWasmI32, false, WasmInitExpr(i + 20000));
}
- uint32_t global =
- builder->AddGlobal(type, false, false, WasmInitExpr(expected));
+ uint32_t global = builder->AddGlobal(type, false, WasmInitExpr(expected));
for (int i = 0; i < padding; i++) { // pad global after
- builder->AddGlobal(kWasmI32, false, false, WasmInitExpr(i + 30000));
+ builder->AddGlobal(kWasmI32, false, WasmInitExpr(i + 30000));
}
WasmFunctionBuilder* f1 = builder->AddFunction(&sig);
@@ -984,7 +985,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
EMIT_CODE_WITH_END(f, code);
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
const Handle<WasmInstanceObject> instance =
@@ -1110,7 +1111,7 @@ TEST(AtomicOpDisassembly) {
HandleScope scope(isolate);
ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
+ builder->WriteTo(&buffer);
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 77488325b4..b1d95a12bb 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -19,9 +19,14 @@ namespace test_run_wasm_simd {
namespace {
+using DoubleUnOp = double (*)(double);
+using DoubleCompareOp = int64_t (*)(double, double);
using FloatUnOp = float (*)(float);
using FloatBinOp = float (*)(float, float);
using FloatCompareOp = int (*)(float, float);
+using Int64UnOp = int64_t (*)(int64_t);
+using Int64BinOp = int64_t (*)(int64_t, int64_t);
+using Int64ShiftOp = int64_t (*)(int64_t, int);
using Int32UnOp = int32_t (*)(int32_t);
using Int32BinOp = int32_t (*)(int32_t, int32_t);
using Int32CompareOp = int (*)(int32_t, int32_t);
@@ -266,6 +271,21 @@ T Sqrt(T a) {
return std::sqrt(a);
}
+#if V8_TARGET_ARCH_X64
+// only used for F64x2 tests below
+int64_t Equal(double a, double b) { return a == b ? -1 : 0; }
+
+int64_t NotEqual(double a, double b) { return a != b ? -1 : 0; }
+
+int64_t Greater(double a, double b) { return a > b ? -1 : 0; }
+
+int64_t GreaterEqual(double a, double b) { return a >= b ? -1 : 0; }
+
+int64_t Less(double a, double b) { return a < b ? -1 : 0; }
+
+int64_t LessEqual(double a, double b) { return a <= b ? -1 : 0; }
+#endif // V8_TARGET_ARCH_X64
+
} // namespace
#define WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lane_value, lane_index) \
@@ -276,32 +296,45 @@ T Sqrt(T a) {
#define TO_BYTE(val) static_cast<byte>(val)
#define WASM_SIMD_OP(op) kSimdPrefix, TO_BYTE(op)
-#define WASM_SIMD_SPLAT(Type, x) x, WASM_SIMD_OP(kExpr##Type##Splat)
+#define WASM_SIMD_SPLAT(Type, ...) __VA_ARGS__, WASM_SIMD_OP(kExpr##Type##Splat)
#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
#define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
#define WASM_SIMD_SHIFT_OP(op, shift, x) x, WASM_SIMD_OP(op), TO_BYTE(shift)
#define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
#define WASM_SIMD_SELECT(format, x, y, z) x, y, z, WASM_SIMD_OP(kExprS128Select)
-#define WASM_SIMD_F32x4_SPLAT(x) x, WASM_SIMD_OP(kExprF32x4Splat)
+
+#define WASM_SIMD_F64x2_SPLAT(x) WASM_SIMD_SPLAT(F64x2, x)
+#define WASM_SIMD_F64x2_EXTRACT_LANE(lane, x) \
+ x, WASM_SIMD_OP(kExprF64x2ExtractLane), TO_BYTE(lane)
+#define WASM_SIMD_F64x2_REPLACE_LANE(lane, x, y) \
+ x, y, WASM_SIMD_OP(kExprF64x2ReplaceLane), TO_BYTE(lane)
+
+#define WASM_SIMD_F32x4_SPLAT(x) WASM_SIMD_SPLAT(F32x4, x)
#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
x, WASM_SIMD_OP(kExprF32x4ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
x, y, WASM_SIMD_OP(kExprF32x4ReplaceLane), TO_BYTE(lane)
-#define WASM_SIMD_I32x4_SPLAT(x) x, WASM_SIMD_OP(kExprI32x4Splat)
+#define WASM_SIMD_I64x2_SPLAT(x) WASM_SIMD_SPLAT(I64x2, x)
+#define WASM_SIMD_I64x2_EXTRACT_LANE(lane, x) \
+ x, WASM_SIMD_OP(kExprI64x2ExtractLane), TO_BYTE(lane)
+#define WASM_SIMD_I64x2_REPLACE_LANE(lane, x, y) \
+ x, y, WASM_SIMD_OP(kExprI64x2ReplaceLane), TO_BYTE(lane)
+
+#define WASM_SIMD_I32x4_SPLAT(x) WASM_SIMD_SPLAT(I32x4, x)
#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
x, WASM_SIMD_OP(kExprI32x4ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
x, y, WASM_SIMD_OP(kExprI32x4ReplaceLane), TO_BYTE(lane)
-#define WASM_SIMD_I16x8_SPLAT(x) x, WASM_SIMD_OP(kExprI16x8Splat)
+#define WASM_SIMD_I16x8_SPLAT(x) WASM_SIMD_SPLAT(I16x8, x)
#define WASM_SIMD_I16x8_EXTRACT_LANE(lane, x) \
x, WASM_SIMD_OP(kExprI16x8ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I16x8_REPLACE_LANE(lane, x, y) \
x, y, WASM_SIMD_OP(kExprI16x8ReplaceLane), TO_BYTE(lane)
-#define WASM_SIMD_I8x16_SPLAT(x) x, WASM_SIMD_OP(kExprI8x16Splat)
+#define WASM_SIMD_I8x16_SPLAT(x) WASM_SIMD_SPLAT(I8x16, x)
#define WASM_SIMD_I8x16_EXTRACT_LANE(lane, x) \
x, WASM_SIMD_OP(kExprI8x16ExtractLane), TO_BYTE(lane)
#define WASM_SIMD_I8x16_REPLACE_LANE(lane, x, y) \
@@ -351,7 +384,8 @@ T Sqrt(T a) {
void RunWasm_##name##_Impl(LowerSimd lower_simd, ExecutionTier execution_tier)
// Returns true if the platform can represent the result.
-bool PlatformCanRepresent(float x) {
+template <typename T>
+bool PlatformCanRepresent(T x) {
#if V8_TARGET_ARCH_ARM
return std::fpclassify(x) != FP_SUBNORMAL;
#else
@@ -529,7 +563,8 @@ void RunF32x4UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
}
}
- FOR_FLOAT32_NAN_INPUTS(x) {
+ FOR_FLOAT32_NAN_INPUTS(i) {
+ float x = bit_cast<float>(nan_test_array[i]);
if (!PlatformCanRepresent(x)) continue;
// Extreme values have larger errors so skip them for approximation tests.
if (!exact && IsExtreme(x)) continue;
@@ -680,6 +715,444 @@ WASM_SIMD_TEST(F32x4Le) {
RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
+#if V8_TARGET_ARCH_X64
+WASM_SIMD_TEST_NO_LOWERING(F64x2Splat) {
+ WasmRunner<int32_t, double> r(execution_tier, lower_simd);
+ // Set up a global to hold output vector.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ byte param1 = 0;
+ BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(param1))),
+ WASM_ONE);
+
+ FOR_FLOAT64_INPUTS(x) {
+ r.Call(x);
+ double expected = x;
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(actual));
+ } else {
+ CHECK_EQ(actual, expected);
+ }
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLaneWithI64x2) {
+ WasmRunner<int64_t> r(execution_tier, lower_simd);
+ BUILD(r, WASM_IF_ELSE_L(
+ WASM_F64_EQ(WASM_SIMD_F64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(1e15))),
+ WASM_F64_REINTERPRET_I64(WASM_I64V(1e15))),
+ WASM_I64V(1), WASM_I64V(0)));
+ CHECK_EQ(1, r.Call());
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2ExtractLane) {
+ WasmRunner<double, double> r(execution_tier, lower_simd);
+ byte param1 = 0;
+ byte temp1 = r.AllocateLocal(kWasmF64);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r,
+ WASM_SET_LOCAL(temp1,
+ WASM_SIMD_F64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(param1)))),
+ WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(temp1))),
+ WASM_SIMD_F64x2_EXTRACT_LANE(1, WASM_GET_LOCAL(temp2)));
+ FOR_FLOAT64_INPUTS(x) {
+ double actual = r.Call(x);
+ double expected = x;
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(actual));
+ } else {
+ CHECK_EQ(actual, expected);
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2ReplaceLane) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ // Set up a global to hold input/output vector.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ // Build function to replace each lane with its (FP) index.
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e100))),
+ WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_REPLACE_LANE(
+ 0, WASM_GET_LOCAL(temp1), WASM_F64(0.0f))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_REPLACE_LANE(
+ 1, WASM_GET_LOCAL(temp1), WASM_F64(1.0f))),
+ WASM_ONE);
+
+ r.Call();
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(static_cast<double>(i), ReadLittleEndianValue<double>(&g[i]));
+ }
+}
+
+void RunF64x2CompareOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleCompareOp expected_op) {
+ WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
+ // Set up global to hold mask output.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test values, perform compare op, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
+ WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
+ WASM_GET_LOCAL(temp2))),
+ WASM_ONE);
+
+ FOR_FLOAT64_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ FOR_FLOAT64_INPUTS(y) {
+ if (!PlatformCanRepresent(y)) continue;
+ double diff = x - y; // Model comparison as subtraction.
+ if (!PlatformCanRepresent(diff)) continue;
+ r.Call(x, y);
+ int64_t expected = expected_op(x, y);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Eq) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Ne) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Gt) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Ge) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Lt) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Le) {
+ RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
+}
+
+bool IsSameNan(double expected, double actual) {
+ // Sign is non-deterministic.
+ uint64_t expected_bits = bit_cast<uint64_t>(expected) & ~0x8000000000000000;
+ uint64_t actual_bits = bit_cast<uint64_t>(actual) & ~0x8000000000000000;
+ // Some implementations convert signaling NaNs to quiet NaNs.
+ return (expected_bits == actual_bits) ||
+ ((expected_bits | 0x0008000000000000) == actual_bits);
+}
+
+bool IsCanonical(double actual) {
+ uint64_t actual_bits = bit_cast<uint64_t>(actual);
+ // Canonical NaN has quiet bit and no payload.
+ return (actual_bits & 0xFF80000000000000) == actual_bits;
+}
+
+void CheckDoubleResult(double x, double y, double expected, double actual,
+ bool exact = true) {
+ if (std::isnan(expected)) {
+ CHECK(std::isnan(actual));
+ if (std::isnan(x) && IsSameNan(x, actual)) return;
+ if (std::isnan(y) && IsSameNan(y, actual)) return;
+ if (IsSameNan(expected, actual)) return;
+ if (IsCanonical(actual)) return;
+ // This is expected to assert; it's useful for debugging.
+ CHECK_EQ(bit_cast<uint64_t>(expected), bit_cast<uint64_t>(actual));
+ } else {
+ if (exact) {
+ CHECK_EQ(expected, actual);
+ // The sign of 0's must match.
+ CHECK_EQ(std::signbit(expected), std::signbit(actual));
+ return;
+ }
+ // Otherwise, perform an approximate equality test. First check for
+ // equality to handle +/-Infinity where approximate equality doesn't work.
+ if (expected == actual) return;
+
+ // 1% error allows all platforms to pass easily.
+ constexpr double kApproximationError = 0.01f;
+ double abs_error = std::abs(expected) * kApproximationError,
+ min = expected - abs_error, max = expected + abs_error;
+ CHECK_LE(min, actual);
+ CHECK_GE(max, actual);
+ }
+}
+
+// Test some values not included in the double inputs from value_helper. These
+// tests are useful for opcodes that are synthesized during code gen, like Min
+// and Max on ia32 and x64.
+static constexpr uint64_t double_nan_test_array[] = {
+ // quiet NaNs, + and -
+ 0x7FF8000000000001, 0xFFF8000000000001,
+ // with payload
+ 0x7FF8000000000011, 0xFFF8000000000011,
+ // signaling NaNs, + and -
+ 0x7FF0000000000001, 0xFFF0000000000001,
+ // with payload
+ 0x7FF0000000000011, 0xFFF0000000000011,
+ // Both Infinities.
+ 0x7FF0000000000000, 0xFFF0000000000000,
+ // Some "normal" numbers, 1 and -1.
+ 0x3FF0000000000000, 0xBFF0000000000000};
+
+#define FOR_FLOAT64_NAN_INPUTS(i) \
+ for (size_t i = 0; i < arraysize(double_nan_test_array); ++i)
+
+void RunF64x2UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, DoubleUnOp expected_op,
+ bool exact = true) {
+ WasmRunner<int32_t, double> r(execution_tier, lower_simd);
+ // Global to hold output.
+ double* g = r.builder().AddGlobal<double>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
+ WASM_ONE);
+
+ FOR_FLOAT64_INPUTS(x) {
+ if (!PlatformCanRepresent(x)) continue;
+ // Extreme values have larger errors so skip them for approximation tests.
+ if (!exact && IsExtreme(x)) continue;
+ double expected = expected_op(x);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, x, expected, actual, exact);
+ }
+ }
+
+ FOR_FLOAT64_NAN_INPUTS(i) {
+ double x = bit_cast<double>(double_nan_test_array[i]);
+ if (!PlatformCanRepresent(x)) continue;
+ // Extreme values have larger errors so skip them for approximation tests.
+ if (!exact && IsExtreme(x)) continue;
+ double expected = expected_op(x);
+ if (!PlatformCanRepresent(expected)) continue;
+ r.Call(x);
+ for (int i = 0; i < 2; i++) {
+ double actual = ReadLittleEndianValue<double>(&g[i]);
+ CheckDoubleResult(x, x, expected, actual, exact);
+ }
+ }
+}
+#undef FOR_FLOAT64_NAN_INPUTS
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Abs) {
+ RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Abs, std::abs);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(F64x2Neg) {
+ RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Neg, Negate);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Splat) {
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ // Set up a global to hold output vector.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ byte param1 = 0;
+ BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ r.Call(x);
+ int64_t expected = x;
+ for (int i = 0; i < 2; i++) {
+ int64_t actual = ReadLittleEndianValue<int64_t>(&g[i]);
+ CHECK_EQ(actual, expected);
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ExtractWithF64x2) {
+ WasmRunner<int64_t> r(execution_tier, lower_simd);
+ BUILD(r, WASM_IF_ELSE_L(
+ WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
+ 0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
+ WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
+ WASM_I64V(1), WASM_I64V(0)));
+ CHECK_EQ(1, r.Call());
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ReplaceLane) {
+ WasmRunner<int32_t> r(execution_tier, lower_simd);
+ // Set up a global to hold input/output vector.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build function to replace each lane with its index.
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
+ WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
+ 0, WASM_GET_LOCAL(temp1), WASM_I64V(0))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_REPLACE_LANE(
+ 1, WASM_GET_LOCAL(temp1), WASM_I64V(1))),
+ WASM_ONE);
+
+ r.Call();
+ for (int64_t i = 0; i < 2; i++) {
+ CHECK_EQ(i, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+}
+
+void RunI64x2UnOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64UnOp expected_op) {
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test value, perform unop, and write the result.
+ byte value = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ r.Call(x);
+ int64_t expected = expected_op(x);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Neg) {
+ RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
+ base::NegateWithWraparound);
+}
+
+void RunI64x2BinOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64BinOp expected_op) {
+ WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
+ // Global to hold output.
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ // Build fn to splat test values, perform binop, and write the result.
+ byte value1 = 0, value2 = 1;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value1))),
+ WASM_SET_LOCAL(temp2, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value2))),
+ WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
+ WASM_GET_LOCAL(temp2))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ FOR_INT64_INPUTS(y) {
+ r.Call(x, y);
+ int64_t expected = expected_op(x, y);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Add) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
+ base::AddWithWraparound);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Sub) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Sub,
+ base::SubWithWraparound);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Mul) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Mul,
+ base::MulWithWraparound);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Ne) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Ne, NotEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LtS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtS, Less);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LeS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeS, LessEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GtS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtS, Greater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GeS) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeS, GreaterEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LtU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LtU, UnsignedLess);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2LeU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2LeU,
+ UnsignedLessEqual);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GtU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GtU, UnsignedGreater);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2GeU) {
+ RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2GeU,
+ UnsignedGreaterEqual);
+}
+
+void RunI64x2ShiftOpTest(ExecutionTier execution_tier, LowerSimd lower_simd,
+ WasmOpcode opcode, Int64ShiftOp expected_op) {
+ for (int shift = 1; shift < 64; shift++) {
+ WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
+ int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
+ byte value = 0;
+ byte simd1 = r.AllocateLocal(kWasmS128);
+ BUILD(r,
+ WASM_SET_LOCAL(simd1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
+ WASM_SET_GLOBAL(
+ 0, WASM_SIMD_SHIFT_OP(opcode, shift, WASM_GET_LOCAL(simd1))),
+ WASM_ONE);
+
+ FOR_INT64_INPUTS(x) {
+ r.Call(x);
+ int64_t expected = expected_op(x, shift);
+ for (int i = 0; i < 2; i++) {
+ CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
+ }
+ }
+ }
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2Shl) {
+ RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
+ LogicalShiftLeft);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ShrS) {
+ RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrS,
+ ArithmeticShiftRight);
+}
+
+WASM_SIMD_TEST_NO_LOWERING(I64x2ShrU) {
+ RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrU,
+ LogicalShiftRight);
+}
+#endif // V8_TARGET_ARCH_X64
+
WASM_SIMD_TEST(I32x4Splat) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
// Set up a global to hold output vector.
@@ -1887,7 +2360,8 @@ void AppendShuffle(const Shuffle& shuffle, std::vector<byte>* buffer) {
for (size_t i = 0; i < kSimd128Size; ++i) buffer->push_back((shuffle[i]));
}
-void BuildShuffle(std::vector<Shuffle>& shuffles, std::vector<byte>* buffer) {
+void BuildShuffle(std::vector<Shuffle>& shuffles, // NOLINT(runtime/references)
+ std::vector<byte>* buffer) {
// Perform the leaf shuffles on globals 0 and 1.
size_t row_index = (shuffles.size() - 1) / 2;
for (size_t i = row_index; i < shuffles.size(); ++i) {
@@ -2052,6 +2526,18 @@ WASM_SIMD_TEST(SimdF32x4ExtractWithI32x4) {
CHECK_EQ(1, r.Call());
}
+WASM_SIMD_TEST(SimdF32x4ExtractLane) {
+ WasmRunner<float> r(execution_tier, lower_simd);
+ r.AllocateLocal(kWasmF32);
+ r.AllocateLocal(kWasmS128);
+ BUILD(r,
+ WASM_SET_LOCAL(0, WASM_SIMD_F32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5)))),
+ WASM_SET_LOCAL(1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(0))),
+ WASM_SIMD_F32x4_EXTRACT_LANE(1, WASM_GET_LOCAL(1)));
+ CHECK_EQ(30.5, r.Call());
+}
+
WASM_SIMD_TEST(SimdF32x4AddWithI32x4) {
// Choose two floating point values whose sum is normal and exactly
// representable as a float.
@@ -2288,13 +2774,13 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
}
}
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM
// V8:8665 - Tracking bug to enable reduction tests in the interpreter,
// and for SIMD lowering.
-// TODO(gdeepti): Enable these tests for ARM/ARM64
-#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max) \
+#define WASM_SIMD_ANYTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST_NO_LOWERING(S##format##AnyTrue) { \
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); \
+ WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
byte simd = r.AllocateLocal(kWasmS128); \
BUILD( \
r, \
@@ -2304,25 +2790,33 @@ WASM_SIMD_COMPILED_TEST(SimdLoadStoreLoad) {
DCHECK_EQ(1, r.Call(5)); \
DCHECK_EQ(0, r.Call(0)); \
}
-WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff)
-WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff)
-WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff)
-
-#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max) \
+#if V8_TARGET_ARCH_X64
+WASM_SIMD_ANYTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
+#endif // V8_TARGET_ARCH_X64
+WASM_SIMD_ANYTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
+WASM_SIMD_ANYTRUE_TEST(16x8, 8, 0xffff, int32_t)
+WASM_SIMD_ANYTRUE_TEST(8x16, 16, 0xff, int32_t)
+
+#define WASM_SIMD_ALLTRUE_TEST(format, lanes, max, param_type) \
WASM_SIMD_TEST_NO_LOWERING(S##format##AllTrue) { \
- WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd); \
+ WasmRunner<int32_t, param_type> r(execution_tier, lower_simd); \
byte simd = r.AllocateLocal(kWasmS128); \
BUILD( \
r, \
WASM_SET_LOCAL(simd, WASM_SIMD_I##format##_SPLAT(WASM_GET_LOCAL(0))), \
WASM_SIMD_UNOP(kExprS1x##lanes##AllTrue, WASM_GET_LOCAL(simd))); \
DCHECK_EQ(1, r.Call(max)); \
+ DCHECK_EQ(1, r.Call(0x1)); \
DCHECK_EQ(0, r.Call(0)); \
}
-WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff)
-WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff)
-WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff)
-#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_X64
+WASM_SIMD_ALLTRUE_TEST(64x2, 2, 0xffffffffffffffff, int64_t)
+#endif // V8_TARGET_ARCH_X64
+WASM_SIMD_ALLTRUE_TEST(32x4, 4, 0xffffffff, int32_t)
+WASM_SIMD_ALLTRUE_TEST(16x8, 8, 0xffff, int32_t)
+WASM_SIMD_ALLTRUE_TEST(8x16, 16, 0xff, int32_t)
+#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||
+ // V8_TARGET_ARCH_ARM
WASM_SIMD_TEST(BitSelect) {
WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
@@ -2425,9 +2919,15 @@ WASM_SIMD_TEST_NO_LOWERING(I16x8GtUMixed) {
#undef WASM_SIMD_SHIFT_OP
#undef WASM_SIMD_CONCAT_OP
#undef WASM_SIMD_SELECT
+#undef WASM_SIMD_F64x2_SPLAT
+#undef WASM_SIMD_F64x2_EXTRACT_LANE
+#undef WASM_SIMD_F64x2_REPLACE_LANE
#undef WASM_SIMD_F32x4_SPLAT
#undef WASM_SIMD_F32x4_EXTRACT_LANE
#undef WASM_SIMD_F32x4_REPLACE_LANE
+#undef WASM_SIMD_I64x2_SPLAT
+#undef WASM_SIMD_I64x2_EXTRACT_LANE
+#undef WASM_SIMD_I64x2_REPLACE_LANE
#undef WASM_SIMD_I32x4_SPLAT
#undef WASM_SIMD_I32x4_EXTRACT_LANE
#undef WASM_SIMD_I32x4_REPLACE_LANE
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 6437a4a0d9..26df61ceb8 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -385,40 +385,6 @@ WASM_EXEC_TEST(Int32DivS_byzero_const) {
}
}
-WASM_EXEC_TEST(Int32AsmjsDivS_byzero_const) {
- for (int8_t denom = -2; denom < 8; ++denom) {
- WasmRunner<int32_t, int32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_I32_ASMJS_DIVS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
- FOR_INT32_INPUTS(i) {
- if (denom == 0) {
- CHECK_EQ(0, r.Call(i));
- } else if (denom == -1 && i == std::numeric_limits<int32_t>::min()) {
- CHECK_EQ(std::numeric_limits<int32_t>::min(), r.Call(i));
- } else {
- CHECK_EQ(i / denom, r.Call(i));
- }
- }
- }
-}
-
-WASM_EXEC_TEST(Int32AsmjsRemS_byzero_const) {
- for (int8_t denom = -2; denom < 8; ++denom) {
- WasmRunner<int32_t, int32_t> r(execution_tier);
- r.builder().ChangeOriginToAsmjs();
- BUILD(r, WASM_I32_ASMJS_REMS(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
- FOR_INT32_INPUTS(i) {
- if (denom == 0) {
- CHECK_EQ(0, r.Call(i));
- } else if (denom == -1 && i == std::numeric_limits<int32_t>::min()) {
- CHECK_EQ(0, r.Call(i));
- } else {
- CHECK_EQ(i % denom, r.Call(i));
- }
- }
- }
-}
-
WASM_EXEC_TEST(Int32DivU_byzero_const) {
for (uint32_t denom = 0xFFFFFFFE; denom < 8; ++denom) {
WasmRunner<uint32_t, uint32_t> r(execution_tier);
@@ -1070,6 +1036,18 @@ WASM_EXEC_TEST(BrTable_loop_target) {
CHECK_EQ(1, r.Call(0));
}
+WASM_EXEC_TEST(BrTableMeetBottom) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WasmRunner<int32_t> r(execution_tier);
+ BUILD(r,
+ WASM_BLOCK_I(WASM_STMTS(
+ WASM_BLOCK_F(WASM_STMTS(
+ WASM_UNREACHABLE, WASM_BR_TABLE(WASM_I32V_1(1), 2, BR_TARGET(0),
+ BR_TARGET(1), BR_TARGET(1)))),
+ WASM_DROP, WASM_I32V_1(14))));
+ CHECK_TRAP(r.Call());
+}
+
WASM_EXEC_TEST(F32ReinterpretI32) {
WasmRunner<int32_t> r(execution_tier);
int32_t* memory =
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 5e06db3ba0..93ae92d697 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -207,7 +207,7 @@ ZoneBuffer GetValidModuleBytes(Zone* zone) {
uint8_t code[] = {kExprGetLocal, 2, kExprEnd};
f->EmitCode(code, arraysize(code));
}
- builder.WriteTo(buffer);
+ builder.WriteTo(&buffer);
return buffer;
}
@@ -313,7 +313,7 @@ ZoneBuffer GetModuleWithInvalidSection(Zone* zone) {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
// Add an invalid global to the module. The decoder will fail there.
- builder.AddGlobal(kWasmStmt, false, true,
+ builder.AddGlobal(kWasmStmt, true,
WasmInitExpr(WasmInitExpr::kGlobalIndex, 12));
{
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
@@ -330,7 +330,7 @@ ZoneBuffer GetModuleWithInvalidSection(Zone* zone) {
uint8_t code[] = {kExprGetLocal, 2, kExprEnd};
f->EmitCode(code, arraysize(code));
}
- builder.WriteTo(buffer);
+ builder.WriteTo(&buffer);
return buffer;
}
@@ -1019,7 +1019,7 @@ STREAM_TEST(TestModuleWithImportedFunction) {
uint8_t code[] = {kExprGetLocal, 0, kExprEnd};
f->EmitCode(code, arraysize(code));
}
- builder.WriteTo(buffer);
+ builder.WriteTo(&buffer);
tester.OnBytesReceived(buffer.begin(), buffer.end() - buffer.begin());
tester.FinishStream();
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 597201da92..e287b1139e 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -112,8 +112,8 @@ class BreakHandler : public debug::DebugDelegate {
}
};
-
-void SetBreakpoint(WasmRunnerBase& runner, int function_index, int byte_offset,
+void SetBreakpoint(WasmRunnerBase& runner, // NOLINT(runtime/references)
+ int function_index, int byte_offset,
int expected_set_byte_offset = -1) {
int func_offset =
runner.builder().GetFunctionAt(function_index)->code.offset();
diff --git a/deps/v8/test/cctest/wasm/test-wasm-codegen.cc b/deps/v8/test/cctest/wasm/test-wasm-codegen.cc
index 9bb9145cc3..d362cc5cf2 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-codegen.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-codegen.cc
@@ -54,7 +54,7 @@ CallbackFn Callback[kNumTestValues] = {nullptr, FalseCallback, TrueCallback};
void BuildTrivialModule(Zone* zone, ZoneBuffer* buffer) {
WasmModuleBuilder* builder = new (zone) WasmModuleBuilder(zone);
- builder->WriteTo(*buffer);
+ builder->WriteTo(buffer);
}
bool TestModule(Isolate* isolate, v8::MemorySpan<const uint8_t> wire_bytes) {
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index a6901072de..736475ff55 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -32,10 +32,12 @@ namespace {
template <typename T>
class ArgPassingHelper {
public:
- ArgPassingHelper(WasmRunnerBase& runner, WasmFunctionCompiler& inner_compiler,
- std::initializer_list<uint8_t> bytes_inner_function,
- std::initializer_list<uint8_t> bytes_outer_function,
- const T& expected_lambda)
+ ArgPassingHelper(
+ WasmRunnerBase& runner, // NOLINT(runtime/references)
+ WasmFunctionCompiler& inner_compiler, // NOLINT(runtime/references)
+ std::initializer_list<uint8_t> bytes_inner_function,
+ std::initializer_list<uint8_t> bytes_outer_function,
+ const T& expected_lambda)
: isolate_(runner.main_isolate()),
expected_lambda_(expected_lambda),
debug_info_(WasmInstanceObject::GetOrCreateDebugInfo(
@@ -80,7 +82,8 @@ class ArgPassingHelper {
template <typename T>
static ArgPassingHelper<T> GetHelper(
- WasmRunnerBase& runner, WasmFunctionCompiler& inner_compiler,
+ WasmRunnerBase& runner, // NOLINT(runtime/references)
+ WasmFunctionCompiler& inner_compiler, // NOLINT(runtime/references)
std::initializer_list<uint8_t> bytes_inner_function,
std::initializer_list<uint8_t> bytes_outer_function,
const T& expected_lambda) {
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index 901127055e..1ff2a899ad 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -61,7 +61,7 @@ class WasmSerializationTest {
EMIT_CODE_WITH_END(f, code);
builder->AddExport(CStrVector(kFunctionName), f);
- builder->WriteTo(*buffer);
+ builder->WriteTo(buffer);
}
void ClearSerializedData() { serialized_bytes_ = {nullptr, 0}; }
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index 4fe4425e2f..855e44aba2 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -138,7 +138,7 @@ ZoneBuffer* BuildReturnConstantModule(Zone* zone, int constant) {
byte code[] = {WASM_I32V_2(constant)};
f->EmitCode(code, sizeof(code));
f->Emit(kExprEnd);
- builder->WriteTo(*buffer);
+ builder->WriteTo(buffer);
return buffer;
}
@@ -159,8 +159,9 @@ class MockInstantiationResolver : public InstantiationResultResolver {
class MockCompilationResolver : public CompilationResultResolver {
public:
- MockCompilationResolver(SharedEngineIsolate& isolate,
- Handle<Object>* out_instance)
+ MockCompilationResolver(
+ SharedEngineIsolate& isolate, // NOLINT(runtime/references)
+ Handle<Object>* out_instance)
: isolate_(isolate), out_instance_(out_instance) {}
void OnCompilationSucceeded(Handle<WasmModuleObject> result) override {
isolate_.isolate()->wasm_engine()->AsyncInstantiate(
@@ -177,7 +178,8 @@ class MockCompilationResolver : public CompilationResultResolver {
Handle<Object>* out_instance_;
};
-void PumpMessageLoop(SharedEngineIsolate& isolate) {
+void PumpMessageLoop(
+ SharedEngineIsolate& isolate) { // NOLINT(runtime/references)
v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(),
isolate.v8_isolate(),
platform::MessageLoopBehavior::kWaitForWork);
@@ -186,7 +188,8 @@ void PumpMessageLoop(SharedEngineIsolate& isolate) {
}
Handle<WasmInstanceObject> CompileAndInstantiateAsync(
- SharedEngineIsolate& isolate, ZoneBuffer* buffer) {
+ SharedEngineIsolate& isolate, // NOLINT(runtime/references)
+ ZoneBuffer* buffer) {
Handle<Object> maybe_instance = handle(Smi::kZero, isolate.isolate());
auto enabled_features = WasmFeaturesFromIsolate(isolate.isolate());
constexpr const char* kAPIMethodName = "Test.CompileAndInstantiateAsync";
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 3fc9614023..2557978ae4 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -92,7 +92,10 @@ void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
// In the message, the line is 1-based, but the column is 0-based.
CHECK_EQ(topLocation.line_nr, message->GetLineNumber());
CHECK_LE(1, topLocation.column);
- CHECK_EQ(topLocation.column - 1, message->GetColumnNumber());
+ // TODO(szuend): Remove or re-enable the following check once it is decided
+ // whether Script::PositionInfo.column should be the offset
+ // relative to the module or relative to the function.
+ // CHECK_EQ(topLocation.column - 1, message->GetColumnNumber());
}
#undef CHECK_CSTREQ
@@ -139,11 +142,11 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
// Line and column are 1-based, so add 1 for the expected wasm output.
ExceptionInfo expected_exceptions[] = {
- {"a", 3, 8}, // -
- {"js", 4, 2}, // -
- {"main", static_cast<int>(wasm_index_1) + 1, 3}, // -
- {"call_main", static_cast<int>(wasm_index_2) + 1, 2}, // -
- {"callFn", 1, 24} // -
+ {"a", 3, 8}, // -
+ {"js", 4, 2}, // -
+ {"main", static_cast<int>(wasm_index_1) + 1, 8}, // -
+ {"call_main", static_cast<int>(wasm_index_2) + 1, 21}, // -
+ {"callFn", 1, 24} // -
};
CheckExceptionInfos(isolate, maybe_exc.ToHandleChecked(),
expected_exceptions);
@@ -188,12 +191,20 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
Handle<Object> exception = maybe_exc.ToHandleChecked();
static constexpr int kMainLocalsLength = 1;
+ const int main_offset =
+ r.builder().GetFunctionAt(wasm_index_1)->code.offset();
+ const int call_main_offset =
+ r.builder().GetFunctionAt(wasm_index_2)->code.offset();
+
// Line and column are 1-based, so add 1 for the expected wasm output.
- const int expected_main_pos = unreachable_pos + kMainLocalsLength + 1;
+ const int expected_main_pos =
+ unreachable_pos + main_offset + kMainLocalsLength + 1;
+ const int expected_call_main_pos = call_main_offset + kMainLocalsLength + 1;
ExceptionInfo expected_exceptions[] = {
{"main", static_cast<int>(wasm_index_1) + 1, expected_main_pos}, // -
- {"call_main", static_cast<int>(wasm_index_2) + 1, 2}, // -
- {"callFn", 1, 24} //-
+ {"call_main", static_cast<int>(wasm_index_2) + 1,
+ expected_call_main_pos}, // -
+ {"callFn", 1, 24} //-
};
CheckExceptionInfos(isolate, exception, expected_exceptions);
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 31e661adb5..69aa517cce 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -93,7 +93,7 @@ WASM_EXEC_TEST(Unreachable) {
// Line and column are 1-based, so add 1 for the expected wasm output.
ExceptionInfo expected_exceptions[] = {
- {"main", static_cast<int>(wasm_index) + 1, 2}, // --
+ {"main", static_cast<int>(wasm_index) + 1, 7}, // --
{"callFn", 1, 24} // --
};
CheckExceptionInfos(isolate, maybe_exc.ToHandleChecked(),
@@ -136,9 +136,9 @@ WASM_EXEC_TEST(IllegalLoad) {
// Line and column are 1-based, so add 1 for the expected wasm output.
ExceptionInfo expected_exceptions[] = {
- {"main", static_cast<int>(wasm_index_1) + 1, 8}, // --
- {"call_main", static_cast<int>(wasm_index_2) + 1, 3}, // --
- {"callFn", 1, 24} // --
+ {"main", static_cast<int>(wasm_index_1) + 1, 13}, // --
+ {"call_main", static_cast<int>(wasm_index_2) + 1, 30}, // --
+ {"callFn", 1, 24} // --
};
CheckExceptionInfos(isolate, maybe_exc.ToHandleChecked(),
expected_exceptions);
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index c006966160..6a17b81c56 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -47,8 +47,10 @@ TestingModuleBuilder::TestingModuleBuilder(
if (maybe_import) {
// Manually compile an import wrapper and insert it into the instance.
CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
- auto kind = compiler::GetWasmImportCallKind(maybe_import->js_function,
- maybe_import->sig, false);
+ auto resolved = compiler::ResolveWasmImportCall(maybe_import->js_function,
+ maybe_import->sig, false);
+ compiler::WasmImportCallKind kind = resolved.first;
+ Handle<JSReceiver> callable = resolved.second;
WasmImportWrapperCache::ModificationScope cache_scope(
native_module_->import_wrapper_cache());
WasmImportWrapperCache::CacheKey key(kind, maybe_import->sig);
@@ -60,7 +62,7 @@ TestingModuleBuilder::TestingModuleBuilder(
}
ImportedFunctionEntry(instance_object_, maybe_import_index)
- .SetWasmToJs(isolate_, maybe_import->js_function, import_wrapper);
+ .SetWasmToJs(isolate_, callable, import_wrapper);
}
if (tier == ExecutionTier::kInterpreter) {
@@ -170,9 +172,24 @@ void TestingModuleBuilder::AddIndirectFunctionTable(
table.initial_size = table_size;
table.maximum_size = table_size;
table.has_maximum_size = true;
- table.type = kWasmAnyFunc;
+ table.type = kWasmFuncRef;
+
+ {
+ // Allocate the indirect function table.
+ Handle<FixedArray> old_tables =
+ table_index == 0
+ ? isolate_->factory()->empty_fixed_array()
+ : handle(instance_object_->indirect_function_tables(), isolate_);
+ Handle<FixedArray> new_tables =
+ isolate_->factory()->CopyFixedArrayAndGrow(old_tables, 1);
+ Handle<WasmIndirectFunctionTable> table_obj =
+ WasmIndirectFunctionTable::New(isolate_, table.initial_size);
+ new_tables->set(table_index, *table_obj);
+ instance_object_->set_indirect_function_tables(*new_tables);
+ }
+
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance_object(), table_size);
+ instance_object(), table_index, table_size);
Handle<WasmTableObject> table_obj =
WasmTableObject::New(isolate_, table.type, table.initial_size,
table.has_maximum_size, table.maximum_size, nullptr);
@@ -184,7 +201,7 @@ void TestingModuleBuilder::AddIndirectFunctionTable(
for (uint32_t i = 0; i < table_size; ++i) {
WasmFunction& function = test_module_->functions[function_indexes[i]];
int sig_id = test_module_->signature_map.Find(*function.sig);
- IndirectFunctionTableEntry(instance, i)
+ IndirectFunctionTableEntry(instance, table_index, i)
.Set(sig_id, instance, function.func_index);
WasmTableObject::SetFunctionTablePlaceholder(
isolate_, table_obj, i, instance_object_, function_indexes[i]);
@@ -192,8 +209,8 @@ void TestingModuleBuilder::AddIndirectFunctionTable(
}
Handle<FixedArray> old_tables(instance_object_->tables(), isolate_);
- Handle<FixedArray> new_tables = isolate_->factory()->CopyFixedArrayAndGrow(
- old_tables, old_tables->length() + 1);
+ Handle<FixedArray> new_tables =
+ isolate_->factory()->CopyFixedArrayAndGrow(old_tables, 1);
new_tables->set(old_tables->length(), *table_obj);
instance_object_->set_tables(*new_tables);
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 98ec5e1048..bf0dd711a6 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -47,6 +47,9 @@ namespace v8 {
namespace internal {
namespace wasm {
+using base::ReadLittleEndianValue;
+using base::WriteLittleEndianValue;
+
constexpr uint32_t kMaxFunctions = 10;
constexpr uint32_t kMaxGlobalsSize = 128;
@@ -87,7 +90,7 @@ class TestingModuleBuilder {
TestingModuleBuilder(Zone*, ManuallyImportedJSFunction*, ExecutionTier,
RuntimeExceptionSupport, LowerSimd);
- void ChangeOriginToAsmjs() { test_module_->origin = kAsmJsOrigin; }
+ void ChangeOriginToAsmjs() { test_module_->origin = kAsmJsSloppyOrigin; }
byte* AddMemory(uint32_t size, SharedFlag shared = SharedFlag::kNotShared);
diff --git a/deps/v8/test/common/types-fuzz.h b/deps/v8/test/common/types-fuzz.h
index 06ab9067d8..f3acabb5ad 100644
--- a/deps/v8/test/common/types-fuzz.h
+++ b/deps/v8/test/common/types-fuzz.h
@@ -29,6 +29,7 @@
#define V8_TEST_CCTEST_TYPES_H_
#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/init/v8.h"
@@ -40,7 +41,9 @@ namespace compiler {
class Types {
public:
Types(Zone* zone, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
- : zone_(zone), js_heap_broker_(isolate, zone), rng_(rng) {
+ : zone_(zone),
+ js_heap_broker_(isolate, zone, FLAG_trace_heap_broker),
+ rng_(rng) {
#define DECLARE_TYPE(name, value) \
name = Type::name(); \
types.push_back(name);
diff --git a/deps/v8/test/common/wasm/OWNERS b/deps/v8/test/common/wasm/OWNERS
new file mode 100644
index 0000000000..4b6b34d24a
--- /dev/null
+++ b/deps/v8/test/common/wasm/OWNERS
@@ -0,0 +1 @@
+file://src/wasm/OWNERS
diff --git a/deps/v8/test/common/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index 120e81cf1e..dc04cdf56b 100644
--- a/deps/v8/test/common/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -27,7 +27,7 @@ class TestSignatures {
sig_i_dd(1, 2, kIntDoubleTypes4),
sig_i_r(1, 1, kIntAnyRefTypes4),
sig_i_rr(1, 2, kIntAnyRefTypes4),
- sig_i_a(1, 1, kIntAnyFuncTypes4),
+ sig_i_a(1, 1, kIntFuncRefTypes4),
sig_l_v(1, 0, kLongTypes4),
sig_l_l(1, 1, kLongTypes4),
sig_l_ll(1, 2, kLongTypes4),
@@ -55,18 +55,18 @@ class TestSignatures {
for (int i = 0; i < 4; i++) kFloatTypes4[i] = kWasmF32;
for (int i = 0; i < 4; i++) kDoubleTypes4[i] = kWasmF64;
for (int i = 0; i < 4; i++) kRefTypes4[i] = kWasmAnyRef;
- for (int i = 0; i < 4; i++) kFuncTypes4[i] = kWasmAnyFunc;
+ for (int i = 0; i < 4; i++) kFuncTypes4[i] = kWasmFuncRef;
for (int i = 1; i < 4; i++) kIntLongTypes4[i] = kWasmI64;
for (int i = 1; i < 4; i++) kIntFloatTypes4[i] = kWasmF32;
for (int i = 1; i < 4; i++) kIntDoubleTypes4[i] = kWasmF64;
for (int i = 1; i < 4; i++) kIntAnyRefTypes4[i] = kWasmAnyRef;
- for (int i = 1; i < 4; i++) kIntAnyFuncTypes4[i] = kWasmAnyFunc;
+ for (int i = 1; i < 4; i++) kIntFuncRefTypes4[i] = kWasmFuncRef;
for (int i = 0; i < 4; i++) kSimd128IntTypes4[i] = kWasmS128;
kIntLongTypes4[0] = kWasmI32;
kIntFloatTypes4[0] = kWasmI32;
kIntDoubleTypes4[0] = kWasmI32;
kIntAnyRefTypes4[0] = kWasmI32;
- kIntAnyFuncTypes4[0] = kWasmI32;
+ kIntFuncRefTypes4[0] = kWasmI32;
kSimd128IntTypes4[1] = kWasmI32;
}
@@ -129,7 +129,7 @@ class TestSignatures {
ValueType kIntFloatTypes4[4];
ValueType kIntDoubleTypes4[4];
ValueType kIntAnyRefTypes4[4];
- ValueType kIntAnyFuncTypes4[4];
+ ValueType kIntFuncRefTypes4[4];
ValueType kSimd128IntTypes4[4];
FunctionSig sig_i_v;
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index ecdd0a8b30..ed20641c65 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -151,7 +151,7 @@
#define WASM_SELECT_R(tval, fval, cond) \
tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalAnyRef
#define WASM_SELECT_A(tval, fval, cond) \
- tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalAnyFunc
+ tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalFuncRef
#define WASM_RETURN0 kExprReturn
#define WASM_RETURN1(val) val, kExprReturn
@@ -368,10 +368,10 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_GET_GLOBAL(index) kExprGetGlobal, static_cast<byte>(index)
#define WASM_SET_GLOBAL(index, val) \
val, kExprSetGlobal, static_cast<byte>(index)
-#define WASM_GET_TABLE(table_index, index) \
- index, kExprGetTable, static_cast<byte>(table_index)
-#define WASM_SET_TABLE(table_index, index, val) \
- index, val, kExprSetTable, static_cast<byte>(table_index)
+#define WASM_TABLE_GET(table_index, index) \
+ index, kExprTableGet, static_cast<byte>(table_index)
+#define WASM_TABLE_SET(table_index, index, val) \
+ index, val, kExprTableSet, static_cast<byte>(table_index)
#define WASM_LOAD_MEM(type, index) \
index, \
static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, false)), \
@@ -423,6 +423,8 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
a, b, c, d, e, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
#define WASM_CALL_INDIRECTN(arity, index, func, ...) \
__VA_ARGS__, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
+#define WASM_CALL_INDIRECT_TABLE0(table, index, func) \
+ func, kExprCallIndirect, static_cast<byte>(index), static_cast<byte>(table)
#define WASM_RETURN_CALL_INDIRECT0(index, func) \
func, kExprReturnCallIndirect, static_cast<byte>(index), TABLE_ZERO
@@ -626,11 +628,13 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
dst, src, size, WASM_NUMERIC_OP(kExprMemoryCopy), MEMORY_ZERO, MEMORY_ZERO
#define WASM_MEMORY_FILL(dst, val, size) \
dst, val, size, WASM_NUMERIC_OP(kExprMemoryFill), MEMORY_ZERO
-#define WASM_TABLE_INIT(seg, dst, src, size) \
- dst, src, size, WASM_NUMERIC_OP(kExprTableInit), U32V_1(seg), TABLE_ZERO
+#define WASM_TABLE_INIT(table, seg, dst, src, size) \
+ dst, src, size, WASM_NUMERIC_OP(kExprTableInit), U32V_1(seg), \
+ static_cast<byte>(table)
#define WASM_ELEM_DROP(seg) WASM_NUMERIC_OP(kExprElemDrop), U32V_1(seg)
-#define WASM_TABLE_COPY(dst, src, size) \
- dst, src, size, WASM_NUMERIC_OP(kExprTableCopy), TABLE_ZERO, TABLE_ZERO
+#define WASM_TABLE_COPY(table_dst, table_src, dst, src, size) \
+ dst, src, size, WASM_NUMERIC_OP(kExprTableCopy), \
+ static_cast<byte>(table_dst), static_cast<byte>(table_src)
#define WASM_TABLE_GROW(table, initial_value, delta) \
initial_value, delta, WASM_NUMERIC_OP(kExprTableGrow), \
static_cast<byte>(table)
@@ -693,6 +697,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
static_cast<byte>(ElementSizeLog2Of(representation)), ZERO_OFFSET
#define WASM_ATOMICS_WAIT(op, index, value, timeout, offset) \
index, value, timeout, WASM_ATOMICS_OP(op), ZERO_ALIGNMENT, offset
+#define WASM_ATOMICS_FENCE WASM_ATOMICS_OP(kExprAtomicFence), ZERO_OFFSET
//------------------------------------------------------------------------------
// Sign Externsion Operations.
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index d193cbc5ac..8de7655914 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -125,7 +125,11 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
arguments.get());
WasmInterpreter::State interpreter_result = thread->Run(kMaxNumSteps);
- isolate->clear_pending_exception();
+ if (isolate->has_pending_exception()) {
+ // Stack overflow during interpretation.
+ isolate->clear_pending_exception();
+ return false;
+ }
return interpreter_result != WasmInterpreter::PAUSED;
}
@@ -158,7 +162,7 @@ int32_t CompileAndRunAsmWasmModule(Isolate* isolate, const byte* module_start,
MaybeHandle<AsmWasmData> data =
isolate->wasm_engine()->SyncCompileTranslatedAsmJs(
isolate, &thrower, ModuleWireBytes(module_start, module_end),
- Vector<const byte>(), Handle<HeapNumber>());
+ Vector<const byte>(), Handle<HeapNumber>(), LanguageMode::kSloppy);
DCHECK_EQ(thrower.error(), data.is_null());
if (data.is_null()) return -1;
diff --git a/deps/v8/test/debugger/OWNERS b/deps/v8/test/debugger/OWNERS
new file mode 100644
index 0000000000..39aa08cd8c
--- /dev/null
+++ b/deps/v8/test/debugger/OWNERS
@@ -0,0 +1 @@
+file://src/debug/OWNERS
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-function-var.js b/deps/v8/test/debugger/debug/debug-evaluate-function-var.js
new file mode 100644
index 0000000000..42a6716c2e
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-evaluate-function-var.js
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = debug.Debug
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ var frame = exec_state.frame(0);
+ try {
+ assertTrue(frame.evaluate("f").value().startsWith("function f()"));
+ } catch {
+ assertTrue(frame.sourceLineText().endsWith("throws"));
+ }
+ }
+ } catch(e) {
+ exception = e;
+ print(e, e.stack);
+ }
+};
+
+Debug.setListener(listener);
+
+(function f() {
+ f;
+ debugger; // works
+})();
+
+(function f() {
+ () => f;
+ debugger; // works
+})();
+
+(function f() {
+ debugger; // throws
+})();
+
+assertNull(exception);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/debugger/debug/debug-modules-set-variable-value.js b/deps/v8/test/debugger/debug/debug-modules-set-variable-value.mjs
index 32127eee25..368ab49854 100644
--- a/deps/v8/test/debugger/debug/debug-modules-set-variable-value.js
+++ b/deps/v8/test/debugger/debug/debug-modules-set-variable-value.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --no-always-opt --no-opt
// The first part of this file is copied over from debug-set-variable-value.js
@@ -328,7 +326,7 @@ export let ham = 1;
// Imported variable. Setting is currently not supported.
-import { salami as wurst } from "./debug-modules-set-variable-value.js";
+import { salami as wurst } from "./debug-modules-set-variable-value.mjs";
{
let exception;
function listener(event, exec_state) {
@@ -350,7 +348,7 @@ import { salami as wurst } from "./debug-modules-set-variable-value.js";
// Imported variable, nested access. Setting is currently not supported.
-import { salami as wurstl } from "./debug-modules-set-variable-value.js";
+import { salami as wurstl } from "./debug-modules-set-variable-value.mjs";
{
let exception;
function listener(event, exec_state) {
diff --git a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.mjs
index 4c0f188f87..17897981bc 100644
--- a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js
+++ b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --allow-natives-syntax --noanalyze-environment-liveness
// These tests are copied from mjsunit/debug-scopes.js and adapted for modules.
diff --git a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.js b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.mjs
index be729bcf06..a23ee9f93d 100644
--- a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.js
+++ b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes2.mjs
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
-
var Debug = debug.Debug;
var test_name;
@@ -152,8 +149,8 @@ let local_let = 1;
var local_var = 2;
export let exported_let = 3;
export var exported_var = 4;
-import {exported_let as imported_let} from "modules-debug-scopes2.js";
-import {exported_var as imported_var} from "modules-debug-scopes2.js";
+import {exported_let as imported_let} from "modules-debug-scopes2.mjs";
+import {exported_var as imported_var} from "modules-debug-scopes2.mjs";
BeginTest();
listener_delegate = function(exec_state) {
diff --git a/deps/v8/test/debugger/debug/regress/regress-5279.js b/deps/v8/test/debugger/debug/regress/regress-5279.js
index f2031dc518..0f6f96f7c7 100644
--- a/deps/v8/test/debugger/debug/regress/regress-5279.js
+++ b/deps/v8/test/debugger/debug/regress/regress-5279.js
@@ -13,6 +13,7 @@ function f() {
for (let i = 0; i < 10; i++) {
%OptimizeOsr();
%ScheduleBreak();
+ %PrepareFunctionForOptimization(f);
}
}
%PrepareFunctionForOptimization(f);
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js b/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js
index 985f62ea8c..6eee8d7e4d 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js
@@ -7,11 +7,15 @@ Debug = debug.Debug;
Debug.setListener(function() {});
function f() {
- for (var i = 0; i < 100; i++) %OptimizeOsr();
+ for (var i = 0; i < 100; i++) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(f);
+ }
}
%PrepareFunctionForOptimization(f);
Debug.setBreakPoint(f, 0, 0);
f();
+%PrepareFunctionForOptimization(f);
f();
Debug.setListener(null);
diff --git a/deps/v8/test/debugger/regress/regress-7421.js b/deps/v8/test/debugger/regress/regress-7421.js
index cb1e612d8b..dfac06d864 100644
--- a/deps/v8/test/debugger/regress/regress-7421.js
+++ b/deps/v8/test/debugger/regress/regress-7421.js
@@ -20,6 +20,7 @@ function wrapper1() {
// Get this function optimized before calling to increment.
// Check that that call performs the necessary side-effect checks.
%OptimizeOsr();
+ %PrepareFunctionForOptimization(wrapper1);
}
f1();
}
diff --git a/deps/v8/test/debugger/regress/regress-crbug-760225.js b/deps/v8/test/debugger/regress/regress-crbug-760225.js
new file mode 100644
index 0000000000..86697143d7
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-crbug-760225.js
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = debug.Debug
+
+let thisValue;
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ thisValue = exec_state.frame(0).evaluate('this').value();
+ }
+};
+
+Debug.setListener(listener);
+
+class Foo {}
+class Bar extends Foo {
+ constructor() {
+ super();
+ var b = () => this;
+ this.c = 'b'; // <-- Inspect 'this' (it will be undefined)
+ debugger;
+ }
+}
+
+new Bar();
+
+Debug.setListener(null);
+
+assertNotEquals(undefined, thisValue);
diff --git a/deps/v8/test/debugger/test-api.js b/deps/v8/test/debugger/test-api.js
index 10aa3c1b83..511fb33816 100644
--- a/deps/v8/test/debugger/test-api.js
+++ b/deps/v8/test/debugger/test-api.js
@@ -541,6 +541,9 @@ class DebugWrapper {
case "boolean": {
break;
}
+ case "function": {
+ value = obj.description;
+ }
default: {
break;
}
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
index eebb578fb7..74d885bb44 100644
--- a/deps/v8/test/debugger/testcfg.py
+++ b/deps/v8/test/debugger/testcfg.py
@@ -9,7 +9,6 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
-MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class TestLoader(testsuite.JSTestLoader):
@@ -51,9 +50,7 @@ class TestCase(testcase.D8TestCase):
files.append(os.path.join(self.suite.root, "test-api.js"))
files.extend([os.path.normpath(os.path.join(self.suite.root, '..', '..', f))
for f in files_list])
- if MODULE_PATTERN.search(source):
- files.append("--module")
- files.append(os.path.join(self.suite.root, self.path + self._get_suffix()))
+ files.append(self._get_source_path())
return files
def _get_files_params(self):
@@ -69,7 +66,13 @@ class TestCase(testcase.D8TestCase):
return ['--enable-inspector', '--allow-natives-syntax']
def _get_source_path(self):
- return os.path.join(self.suite.root, self.path + self._get_suffix())
+ base_path = os.path.join(self.suite.root, self.path)
+ # Try .js first, and fall back to .mjs.
+ # TODO(v8:9406): clean this up by never separating the path from
+ # the extension in the first place.
+ if os.path.exists(base_path + self._get_suffix()):
+ return base_path + self._get_suffix()
+ return base_path + '.mjs'
def GetSuite(*args, **kwargs):
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index 12513b58af..028ce7083a 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -69,7 +69,7 @@ class InputProvider {
int NextInt32(int limit) {
if (current_ + sizeof(uint32_t) > end_) return 0;
int result =
- ReadLittleEndianValue<int>(reinterpret_cast<Address>(current_));
+ base::ReadLittleEndianValue<int>(reinterpret_cast<Address>(current_));
current_ += sizeof(uint32_t);
return result % limit;
}
@@ -85,31 +85,31 @@ MachineType RandomType(InputProvider* input) {
int index(MachineType type) { return static_cast<int>(type.representation()); }
-Node* Constant(RawMachineAssembler& m, MachineType type, int value) {
+Node* Constant(RawMachineAssembler* m, MachineType type, int value) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
- return m.Int32Constant(static_cast<int32_t>(value));
+ return m->Int32Constant(static_cast<int32_t>(value));
case MachineRepresentation::kWord64:
- return m.Int64Constant(static_cast<int64_t>(value));
+ return m->Int64Constant(static_cast<int64_t>(value));
case MachineRepresentation::kFloat32:
- return m.Float32Constant(static_cast<float>(value));
+ return m->Float32Constant(static_cast<float>(value));
case MachineRepresentation::kFloat64:
- return m.Float64Constant(static_cast<double>(value));
+ return m->Float64Constant(static_cast<double>(value));
default:
UNREACHABLE();
}
}
-Node* ToInt32(RawMachineAssembler& m, MachineType type, Node* a) {
+Node* ToInt32(RawMachineAssembler* m, MachineType type, Node* a) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return a;
case MachineRepresentation::kWord64:
- return m.TruncateInt64ToInt32(a);
+ return m->TruncateInt64ToInt32(a);
case MachineRepresentation::kFloat32:
- return m.TruncateFloat32ToInt32(a);
+ return m->TruncateFloat32ToInt32(a);
case MachineRepresentation::kFloat64:
- return m.RoundFloat64ToInt32(a);
+ return m->RoundFloat64ToInt32(a);
default:
UNREACHABLE();
}
@@ -222,7 +222,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
MachineType type = desc->GetReturnType(i);
// Find a random same-type parameter to return. Use a constant if none.
if (counts[index(type)] == 0) {
- returns[i] = Constant(callee, type, 42);
+ returns[i] = Constant(&callee, type, 42);
outputs[i] = 42;
} else {
int n = input.NextInt32(counts[index(type)]);
@@ -264,18 +264,18 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// WasmContext dummy.
params[1] = caller.PointerConstant(nullptr);
for (size_t i = 0; i < param_count; ++i) {
- params[i + 2] = Constant(caller, desc->GetParameterType(i + 1), inputs[i]);
+ params[i + 2] = Constant(&caller, desc->GetParameterType(i + 1), inputs[i]);
}
Node* call = caller.AddNode(caller.common()->Call(desc),
static_cast<int>(param_count + 2), params.get());
- Node* ret = Constant(caller, MachineType::Int32(), 0);
+ Node* ret = Constant(&caller, MachineType::Int32(), 0);
for (size_t i = 0; i < desc->ReturnCount(); ++i) {
// Skip roughly one third of the outputs.
if (input.NextInt8(3) == 0) continue;
Node* ret_i = (desc->ReturnCount() == 1)
? call
: caller.AddNode(caller.common()->Projection(i), call);
- ret = caller.Int32Add(ret, ToInt32(caller, desc->GetReturnType(i), ret_i));
+ ret = caller.Int32Add(ret, ToInt32(&caller, desc->GetReturnType(i), ret_i));
expect += outputs[i];
}
caller.Return(ret);
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index 08d68600c6..20cb024a1a 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -12,7 +12,7 @@
#include "include/v8.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "test/fuzzer/fuzzer-support.h"
// This is a hexdump of test/fuzzer/regexp_builtins/mjsunit.js generated using
@@ -61,9 +61,8 @@ enum RegExpBuiltin {
REGEXP_BUILTINS(CASE)
#undef CASE
-v8::Local<v8::String> v8_str(const char* s) {
- return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), s,
- v8::NewStringType::kNormal)
+v8::Local<v8::String> v8_str(v8::Isolate* isolate, const char* s) {
+ return v8::String::NewFromUtf8(isolate, s, v8::NewStringType::kNormal)
.ToLocalChecked();
}
@@ -71,7 +70,7 @@ v8::MaybeLocal<v8::Value> CompileRun(v8::Local<v8::Context> context,
const char* source) {
v8::Local<v8::Script> script;
v8::MaybeLocal<v8::Script> maybe_script =
- v8::Script::Compile(context, v8_str(source));
+ v8::Script::Compile(context, v8_str(context->GetIsolate(), source));
if (!maybe_script.ToLocal(&script)) return v8::MaybeLocal<v8::Value>();
return script->Run(context);
@@ -242,7 +241,7 @@ std::string PickLimitForSplit(FuzzerArgs* args) {
}
std::string GenerateRandomFlags(FuzzerArgs* args) {
- constexpr size_t kFlagCount = JSRegExp::FlagCount();
+ constexpr size_t kFlagCount = JSRegExp::kFlagCount;
CHECK_EQ(JSRegExp::kDotAll, 1 << (kFlagCount - 1));
STATIC_ASSERT((1 << kFlagCount) - 1 < 0xFF);
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
index e532af8d2d..c7b2f146d2 100644
--- a/deps/v8/test/fuzzer/regexp.cc
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -9,7 +9,7 @@
#include "include/v8.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "test/fuzzer/fuzzer-support.h"
namespace i = v8::internal;
@@ -19,8 +19,7 @@ void Test(v8::Isolate* isolate, i::Handle<i::JSRegExp> regexp,
i::Handle<i::RegExpMatchInfo> results_array) {
v8::TryCatch try_catch(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (i::RegExpImpl::Exec(i_isolate, regexp, subject, 0, results_array)
- .is_null()) {
+ if (i::RegExp::Exec(i_isolate, regexp, subject, 0, results_array).is_null()) {
i_isolate->OptionalRescheduleException(true);
}
}
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
index c888dd823c..b97f96a302 100644
--- a/deps/v8/test/fuzzer/wasm-code.cc
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -21,9 +21,9 @@ namespace fuzzer {
class WasmCodeFuzzer : public WasmExecutionFuzzer {
bool GenerateModule(
Isolate* isolate, Zone* zone, Vector<const uint8_t> data,
- ZoneBuffer& buffer, int32_t& num_args,
- std::unique_ptr<WasmValue[]>& interpreter_args,
- std::unique_ptr<Handle<Object>[]>& compiler_args) override {
+ ZoneBuffer* buffer, int32_t* num_args,
+ std::unique_ptr<WasmValue[]>* interpreter_args,
+ std::unique_ptr<Handle<Object>[]>* compiler_args) override {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
@@ -34,13 +34,14 @@ class WasmCodeFuzzer : public WasmExecutionFuzzer {
builder.SetMaxMemorySize(32);
builder.WriteTo(buffer);
- num_args = 3;
- interpreter_args.reset(
+ *num_args = 3;
+ interpreter_args->reset(
new WasmValue[3]{WasmValue(1), WasmValue(2), WasmValue(3)});
- compiler_args.reset(new Handle<Object>[3]{
- handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(2), isolate),
- handle(Smi::FromInt(3), isolate)});
+ compiler_args->reset(new Handle<Object>[3] {
+ handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(2), isolate),
+ handle(Smi::FromInt(3), isolate)
+ });
return true;
}
};
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index a373ca665e..29f2ebb02d 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -10,12 +10,13 @@
#include "include/v8.h"
#include "src/execution/isolate.h"
-#include "src/utils/ostreams.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
+#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
@@ -77,8 +78,8 @@ class DataRange {
DISALLOW_COPY_AND_ASSIGN(DataRange);
};
-ValueType GetValueType(DataRange& data) {
- switch (data.get<uint8_t>() % 4) {
+ValueType GetValueType(DataRange* data) {
+ switch (data->get<uint8_t>() % 4) {
case 0:
return kWasmI32;
case 1:
@@ -93,7 +94,7 @@ ValueType GetValueType(DataRange& data) {
class WasmGenerator {
template <WasmOpcode Op, ValueType... Args>
- void op(DataRange& data) {
+ void op(DataRange* data) {
Generate<Args...>(data);
builder_->Emit(Op);
}
@@ -118,13 +119,13 @@ class WasmGenerator {
};
template <ValueType T>
- void block(DataRange& data) {
+ void block(DataRange* data) {
BlockScope block_scope(this, kExprBlock, T, T);
Generate<T>(data);
}
template <ValueType T>
- void loop(DataRange& data) {
+ void loop(DataRange* data) {
// When breaking to a loop header, don't provide any input value (hence
// kWasmStmt).
BlockScope block_scope(this, kExprLoop, T, kWasmStmt);
@@ -134,7 +135,7 @@ class WasmGenerator {
enum IfType { kIf, kIfElse };
template <ValueType T, IfType type>
- void if_(DataRange& data) {
+ void if_(DataRange* data) {
static_assert(T == kWasmStmt || type == kIfElse,
"if without else cannot produce a value");
Generate<kWasmI32>(data);
@@ -146,10 +147,10 @@ class WasmGenerator {
}
}
- void br(DataRange& data) {
+ void br(DataRange* data) {
// There is always at least the block representing the function body.
DCHECK(!blocks_.empty());
- const uint32_t target_block = data.get<uint32_t>() % blocks_.size();
+ const uint32_t target_block = data->get<uint32_t>() % blocks_.size();
const ValueType break_type = blocks_[target_block];
Generate(break_type, data);
@@ -158,10 +159,10 @@ class WasmGenerator {
}
template <ValueType wanted_type>
- void br_if(DataRange& data) {
+ void br_if(DataRange* data) {
// There is always at least the block representing the function body.
DCHECK(!blocks_.empty());
- const uint32_t target_block = data.get<uint32_t>() % blocks_.size();
+ const uint32_t target_block = data->get<uint32_t>() % blocks_.size();
const ValueType break_type = blocks_[target_block];
Generate(break_type, data);
@@ -207,9 +208,9 @@ class WasmGenerator {
}
template <WasmOpcode memory_op, ValueType... arg_types>
- void memop(DataRange& data) {
- const uint8_t align = data.get<uint8_t>() % (max_alignment(memory_op) + 1);
- const uint32_t offset = data.get<uint32_t>();
+ void memop(DataRange* data) {
+ const uint8_t align = data->get<uint8_t>() % (max_alignment(memory_op) + 1);
+ const uint32_t offset = data->get<uint32_t>();
// Generate the index and the arguments, if any.
Generate<kWasmI32, arg_types...>(data);
@@ -219,13 +220,13 @@ class WasmGenerator {
builder_->EmitU32V(offset);
}
- void drop(DataRange& data) {
+ void drop(DataRange* data) {
Generate(GetValueType(data), data);
builder_->Emit(kExprDrop);
}
template <ValueType wanted_type>
- void call(DataRange& data) {
+ void call(DataRange* data) {
call(data, wanted_type);
}
@@ -257,7 +258,7 @@ class WasmGenerator {
builder_->Emit(kConvertOpcodes[arr_idx]);
}
- void ConvertOrGenerate(ValueType src, ValueType dst, DataRange& data) {
+ void ConvertOrGenerate(ValueType src, ValueType dst, DataRange* data) {
if (src == dst) return;
if (src == kWasmStmt && dst != kWasmStmt) {
Generate(dst, data);
@@ -268,8 +269,8 @@ class WasmGenerator {
}
}
- void call(DataRange& data, ValueType wanted_type) {
- int func_index = data.get<uint8_t>() % functions_.size();
+ void call(DataRange* data, ValueType wanted_type) {
+ int func_index = data->get<uint8_t>() % functions_.size();
FunctionSig* sig = functions_[func_index];
// Generate arguments.
for (size_t i = 0; i < sig->parameter_count(); ++i) {
@@ -300,19 +301,19 @@ class WasmGenerator {
bool is_valid() const { return type != kWasmStmt; }
};
- Var GetRandomLocal(DataRange& data) {
+ Var GetRandomLocal(DataRange* data) {
uint32_t num_params =
static_cast<uint32_t>(builder_->signature()->parameter_count());
uint32_t num_locals = static_cast<uint32_t>(locals_.size());
if (num_params + num_locals == 0) return {};
- uint32_t index = data.get<uint8_t>() % (num_params + num_locals);
+ uint32_t index = data->get<uint8_t>() % (num_params + num_locals);
ValueType type = index < num_params ? builder_->signature()->GetParam(index)
: locals_[index - num_params];
return {index, type};
}
template <ValueType wanted_type>
- void local_op(DataRange& data, WasmOpcode opcode) {
+ void local_op(DataRange* data, WasmOpcode opcode) {
Var local = GetRandomLocal(data);
// If there are no locals and no parameters, just generate any value (if a
// value is needed), or do nothing.
@@ -329,43 +330,43 @@ class WasmGenerator {
}
template <ValueType wanted_type>
- void get_local(DataRange& data) {
+ void get_local(DataRange* data) {
static_assert(wanted_type != kWasmStmt, "illegal type");
local_op<wanted_type>(data, kExprGetLocal);
}
- void set_local(DataRange& data) { local_op<kWasmStmt>(data, kExprSetLocal); }
+ void set_local(DataRange* data) { local_op<kWasmStmt>(data, kExprSetLocal); }
template <ValueType wanted_type>
- void tee_local(DataRange& data) {
+ void tee_local(DataRange* data) {
local_op<wanted_type>(data, kExprTeeLocal);
}
template <size_t num_bytes>
- void i32_const(DataRange& data) {
- builder_->EmitI32Const(data.get<int32_t, num_bytes>());
+ void i32_const(DataRange* data) {
+ builder_->EmitI32Const(data->get<int32_t, num_bytes>());
}
template <size_t num_bytes>
- void i64_const(DataRange& data) {
- builder_->EmitI64Const(data.get<int64_t, num_bytes>());
+ void i64_const(DataRange* data) {
+ builder_->EmitI64Const(data->get<int64_t, num_bytes>());
}
- Var GetRandomGlobal(DataRange& data, bool ensure_mutable) {
+ Var GetRandomGlobal(DataRange* data, bool ensure_mutable) {
uint32_t index;
if (ensure_mutable) {
if (mutable_globals_.empty()) return {};
- index = mutable_globals_[data.get<uint8_t>() % mutable_globals_.size()];
+ index = mutable_globals_[data->get<uint8_t>() % mutable_globals_.size()];
} else {
if (globals_.empty()) return {};
- index = data.get<uint8_t>() % globals_.size();
+ index = data->get<uint8_t>() % globals_.size();
}
ValueType type = globals_[index];
return {index, type};
}
template <ValueType wanted_type>
- void global_op(DataRange& data) {
+ void global_op(DataRange* data) {
constexpr bool is_set = wanted_type == kWasmStmt;
Var global = GetRandomGlobal(data, is_set);
// If there are no globals, just generate any value (if a value is needed),
@@ -384,13 +385,13 @@ class WasmGenerator {
}
template <ValueType wanted_type>
- void get_global(DataRange& data) {
+ void get_global(DataRange* data) {
static_assert(wanted_type != kWasmStmt, "illegal type");
global_op<wanted_type>(data);
}
template <ValueType select_type>
- void select_with_type(DataRange& data) {
+ void select_with_type(DataRange* data) {
static_assert(select_type != kWasmStmt, "illegal type for select");
Generate<select_type, select_type, kWasmI32>(data);
// num_types is always 1.
@@ -399,26 +400,26 @@ class WasmGenerator {
ValueTypes::ValueTypeCodeFor(select_type));
}
- void set_global(DataRange& data) { global_op<kWasmStmt>(data); }
+ void set_global(DataRange* data) { global_op<kWasmStmt>(data); }
template <ValueType... Types>
- void sequence(DataRange& data) {
+ void sequence(DataRange* data) {
Generate<Types...>(data);
}
- void current_memory(DataRange& data) {
+ void current_memory(DataRange* data) {
builder_->EmitWithU8(kExprMemorySize, 0);
}
- void grow_memory(DataRange& data);
+ void grow_memory(DataRange* data);
- using generate_fn = void (WasmGenerator::*const)(DataRange&);
+ using generate_fn = void (WasmGenerator::*const)(DataRange*);
template <size_t N>
- void GenerateOneOf(generate_fn (&alternates)[N], DataRange& data) {
+ void GenerateOneOf(generate_fn (&alternates)[N], DataRange* data) {
static_assert(N < std::numeric_limits<uint8_t>::max(),
"Too many alternates. Replace with a bigger type if needed.");
- const auto which = data.get<uint8_t>();
+ const auto which = data->get<uint8_t>();
generate_fn alternate = alternates[which % N];
(this->*alternate)(data);
@@ -440,7 +441,7 @@ class WasmGenerator {
WasmGenerator(WasmFunctionBuilder* fn,
const std::vector<FunctionSig*>& functions,
const std::vector<ValueType>& globals,
- const std::vector<uint8_t>& mutable_globals, DataRange& data)
+ const std::vector<uint8_t>& mutable_globals, DataRange* data)
: builder_(fn),
functions_(functions),
globals_(globals),
@@ -450,23 +451,23 @@ class WasmGenerator {
blocks_.push_back(sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0));
constexpr uint32_t kMaxLocals = 32;
- locals_.resize(data.get<uint8_t>() % kMaxLocals);
+ locals_.resize(data->get<uint8_t>() % kMaxLocals);
for (ValueType& local : locals_) {
local = GetValueType(data);
fn->AddLocal(local);
}
}
- void Generate(ValueType type, DataRange& data);
+ void Generate(ValueType type, DataRange* data);
template <ValueType T>
- void Generate(DataRange& data);
+ void Generate(DataRange* data);
template <ValueType T1, ValueType T2, ValueType... Ts>
- void Generate(DataRange& data) {
+ void Generate(DataRange* data) {
// TODO(clemensh): Implement a more even split.
- auto first_data = data.split();
- Generate<T1>(first_data);
+ auto first_data = data->split();
+ Generate<T1>(&first_data);
Generate<T2, Ts...>(data);
}
@@ -487,9 +488,9 @@ class WasmGenerator {
};
template <>
-void WasmGenerator::Generate<kWasmStmt>(DataRange& data) {
+void WasmGenerator::Generate<kWasmStmt>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
- if (recursion_limit_reached() || data.size() == 0) return;
+ if (recursion_limit_reached() || data->size() == 0) return;
constexpr generate_fn alternates[] = {
&WasmGenerator::sequence<kWasmStmt, kWasmStmt>,
@@ -524,10 +525,10 @@ void WasmGenerator::Generate<kWasmStmt>(DataRange& data) {
}
template <>
-void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
+void WasmGenerator::Generate<kWasmI32>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
- if (recursion_limit_reached() || data.size() <= 1) {
- builder_->EmitI32Const(data.get<uint32_t>());
+ if (recursion_limit_reached() || data->size() <= 1) {
+ builder_->EmitI32Const(data->get<uint32_t>());
return;
}
@@ -622,10 +623,10 @@ void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
}
template <>
-void WasmGenerator::Generate<kWasmI64>(DataRange& data) {
+void WasmGenerator::Generate<kWasmI64>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
- if (recursion_limit_reached() || data.size() <= 1) {
- builder_->EmitI64Const(data.get<int64_t>());
+ if (recursion_limit_reached() || data->size() <= 1) {
+ builder_->EmitI64Const(data->get<int64_t>());
return;
}
@@ -690,10 +691,10 @@ void WasmGenerator::Generate<kWasmI64>(DataRange& data) {
}
template <>
-void WasmGenerator::Generate<kWasmF32>(DataRange& data) {
+void WasmGenerator::Generate<kWasmF32>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
- if (recursion_limit_reached() || data.size() <= sizeof(float)) {
- builder_->EmitF32Const(data.get<float>());
+ if (recursion_limit_reached() || data->size() <= sizeof(float)) {
+ builder_->EmitF32Const(data->get<float>());
return;
}
@@ -725,10 +726,10 @@ void WasmGenerator::Generate<kWasmF32>(DataRange& data) {
}
template <>
-void WasmGenerator::Generate<kWasmF64>(DataRange& data) {
+void WasmGenerator::Generate<kWasmF64>(DataRange* data) {
GeneratorRecursionScope rec_scope(this);
- if (recursion_limit_reached() || data.size() <= sizeof(double)) {
- builder_->EmitF64Const(data.get<double>());
+ if (recursion_limit_reached() || data->size() <= sizeof(double)) {
+ builder_->EmitF64Const(data->get<double>());
return;
}
@@ -759,12 +760,12 @@ void WasmGenerator::Generate<kWasmF64>(DataRange& data) {
GenerateOneOf(alternates, data);
}
-void WasmGenerator::grow_memory(DataRange& data) {
+void WasmGenerator::grow_memory(DataRange* data) {
Generate<kWasmI32>(data);
builder_->EmitWithU8(kExprMemoryGrow, 0);
}
-void WasmGenerator::Generate(ValueType type, DataRange& data) {
+void WasmGenerator::Generate(ValueType type, DataRange* data) {
switch (type) {
case kWasmStmt:
return Generate<kWasmStmt>(data);
@@ -781,11 +782,11 @@ void WasmGenerator::Generate(ValueType type, DataRange& data) {
}
}
-FunctionSig* GenerateSig(Zone* zone, DataRange& data) {
+FunctionSig* GenerateSig(Zone* zone, DataRange* data) {
// Generate enough parameters to spill some to the stack.
constexpr int kMaxParameters = 15;
- int num_params = int{data.get<uint8_t>()} % (kMaxParameters + 1);
- bool has_return = data.get<bool>();
+ int num_params = int{data->get<uint8_t>()} % (kMaxParameters + 1);
+ bool has_return = data->get<bool>();
FunctionSig::Builder builder(zone, has_return ? 1 : 0, num_params);
if (has_return) builder.AddReturn(GetValueType(data));
@@ -798,9 +799,9 @@ FunctionSig* GenerateSig(Zone* zone, DataRange& data) {
class WasmCompileFuzzer : public WasmExecutionFuzzer {
bool GenerateModule(
Isolate* isolate, Zone* zone, Vector<const uint8_t> data,
- ZoneBuffer& buffer, int32_t& num_args,
- std::unique_ptr<WasmValue[]>& interpreter_args,
- std::unique_ptr<Handle<Object>[]>& compiler_args) override {
+ ZoneBuffer* buffer, int32_t* num_args,
+ std::unique_ptr<WasmValue[]>* interpreter_args,
+ std::unique_ptr<Handle<Object>[]>* compiler_args) override {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
@@ -813,7 +814,7 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
int num_functions = 1 + (range.get<uint8_t>() % kMaxFunctions);
for (int i = 1; i < num_functions; ++i) {
- function_signatures.push_back(GenerateSig(zone, range));
+ function_signatures.push_back(GenerateSig(zone, &range));
}
int num_globals = range.get<uint8_t>() % (kMaxGlobals + 1);
@@ -823,11 +824,10 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
mutable_globals.reserve(num_globals);
for (int i = 0; i < num_globals; ++i) {
- ValueType type = GetValueType(range);
- const bool exported = range.get<bool>();
+ ValueType type = GetValueType(&range);
// 1/8 of globals are immutable.
const bool mutability = (range.get<uint8_t>() % 8) != 0;
- builder.AddGlobal(type, exported, mutability, WasmInitExpr());
+ builder.AddGlobal(type, mutability, WasmInitExpr());
globals.push_back(type);
if (mutability) mutable_globals.push_back(static_cast<uint8_t>(i));
}
@@ -840,10 +840,10 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
WasmFunctionBuilder* f = builder.AddFunction(sig);
WasmGenerator gen(f, function_signatures, globals, mutable_globals,
- function_range);
+ &function_range);
ValueType return_type =
sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0);
- gen.Generate(return_type, function_range);
+ gen.Generate(return_type, &function_range);
f->Emit(kExprEnd);
if (i == 0) builder.AddExport(CStrVector("main"), f);
@@ -852,19 +852,21 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
builder.SetMaxMemorySize(32);
builder.WriteTo(buffer);
- num_args = 3;
- interpreter_args.reset(
+ *num_args = 3;
+ interpreter_args->reset(
new WasmValue[3]{WasmValue(1), WasmValue(2), WasmValue(3)});
- compiler_args.reset(new Handle<Object>[3]{
- handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(2), isolate),
- handle(Smi::FromInt(3), isolate)});
+ compiler_args->reset(new Handle<Object>[3] {
+ handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(2), isolate),
+ handle(Smi::FromInt(3), isolate)
+ });
return true;
}
};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
constexpr bool require_valid = true;
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
WasmCompileFuzzer().FuzzWasmModule({data, size}, require_valid);
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index b35d2ee2a6..5a60eb63aa 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -278,8 +278,8 @@ void WasmExecutionFuzzer::FuzzWasmModule(Vector<const uint8_t> data,
// compiled with Turbofan and which one with Liftoff.
uint8_t tier_mask = data.empty() ? 0 : data[0];
if (!data.empty()) data += 1;
- if (!GenerateModule(i_isolate, &zone, data, buffer, num_args,
- interpreter_args, compiler_args)) {
+ if (!GenerateModule(i_isolate, &zone, data, &buffer, &num_args,
+ &interpreter_args, &compiler_args)) {
return;
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
index 34b6e58479..8ab802a702 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.h
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -37,9 +37,9 @@ class WasmExecutionFuzzer {
protected:
virtual bool GenerateModule(
Isolate* isolate, Zone* zone, Vector<const uint8_t> data,
- ZoneBuffer& buffer, int32_t& num_args,
- std::unique_ptr<WasmValue[]>& interpreter_args,
- std::unique_ptr<Handle<Object>[]>& compiler_args) = 0;
+ ZoneBuffer* buffer, int32_t* num_args,
+ std::unique_ptr<WasmValue[]>* interpreter_args,
+ std::unique_ptr<Handle<Object>[]>* compiler_args) = 0;
};
} // namespace fuzzer
diff --git a/deps/v8/test/inspector/DEPS b/deps/v8/test/inspector/DEPS
index 1b6dd06238..622048015f 100644
--- a/deps/v8/test/inspector/DEPS
+++ b/deps/v8/test/inspector/DEPS
@@ -4,6 +4,7 @@ include_rules = [
"+src/base/macros.h",
"+src/base/platform/platform.h",
"+src/flags/flags.h",
+ "+src/heap/read-only-heap.h",
"+src/inspector/test-interface.h",
"+src/utils/locked-queue-inl.h",
"+src/utils/utils.h",
diff --git a/deps/v8/test/inspector/OWNERS b/deps/v8/test/inspector/OWNERS
index b1ddebddfc..eef15ad6d8 100644
--- a/deps/v8/test/inspector/OWNERS
+++ b/deps/v8/test/inspector/OWNERS
@@ -1,5 +1,3 @@
-alph@chromium.org
-dgozman@chromium.org
-kozyatinskiy@chromium.org
+file://src/inspector/OWNERS
# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt b/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt
index 755be67cba..4be4e96efe 100644
--- a/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/class-fields-scopes-expected.txt
@@ -90,6 +90,26 @@ Running test: testScopesPaused
scopeChain : [
[0] : {
endLocation : {
+ columnNumber : 13
+ lineNumber : 14
+ scriptId : <scriptId>
+ }
+ name : run
+ object : {
+ className : Object
+ description : Object
+ objectId : <objectId>
+ type : object
+ }
+ startLocation : {
+ columnNumber : 4
+ lineNumber : 12
+ scriptId : <scriptId>
+ }
+ type : local
+ }
+ [1] : {
+ endLocation : {
columnNumber : 3
lineNumber : 15
scriptId : <scriptId>
@@ -108,7 +128,7 @@ Running test: testScopesPaused
}
type : block
}
- [1] : {
+ [2] : {
endLocation : {
columnNumber : 1
lineNumber : 19
@@ -128,7 +148,7 @@ Running test: testScopesPaused
}
type : local
}
- [2] : {
+ [3] : {
object : {
className : global
description : global
diff --git a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt
index dfca60e0c2..359282b851 100644
--- a/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt
+++ b/deps/v8/test/inspector/debugger/evaluate-on-call-frame-return-values-expected.txt
@@ -105,7 +105,7 @@ Running test: testConsoleLog
callFrames : [
[0] : {
columnNumber : 8
- functionName :
+ functionName : eval
lineNumber : 0
scriptId : <scriptId>
url :
@@ -308,7 +308,7 @@ ReleaseObjectGroup with invalid params
Running test: testEvaluateSyntaxError
{
className : SyntaxError
- description : SyntaxError: Unexpected token ] at <anonymous>:1:1
+ description : SyntaxError: Unexpected token ']' at <anonymous>:1:1
objectId : <objectId>
subtype : error
type : object
@@ -351,4 +351,4 @@ Running test: testNullExpression
message : Invalid parameters
}
id : <messageId>
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt b/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
index 2e6e589b25..6f18b7b3e3 100644
--- a/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
+++ b/deps/v8/test/inspector/debugger/resource-name-to-url-expected.txt
@@ -96,7 +96,7 @@ Test runtime stack trace:
}
[1] : {
columnNumber : 0
- functionName :
+ functionName : eval
lineNumber : 0
scriptId : <scriptId>
url : boo.js
diff --git a/deps/v8/test/inspector/debugger/restart-frame-expected.txt b/deps/v8/test/inspector/debugger/restart-frame-expected.txt
new file mode 100644
index 0000000000..3f6efb8e62
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/restart-frame-expected.txt
@@ -0,0 +1,10 @@
+Checks that Debugger.restartFrame works
+Paused at debugger:
+function foo() { #debugger; }; foo();
+
+Call restart and dump location of restart:
+function foo() { debugger; }; #foo();
+
+Location after restart:
+function foo() { #debugger; }; foo();
+
diff --git a/deps/v8/test/inspector/debugger/restart-frame.js b/deps/v8/test/inspector/debugger/restart-frame.js
new file mode 100644
index 0000000000..480ab492fc
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/restart-frame.js
@@ -0,0 +1,33 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {session, Protocol} =
+ InspectorTest.start('Checks that Debugger.restartFrame works');
+
+session.setupScriptMap();
+
+(async function test() {
+ Protocol.Debugger.enable();
+ const evalPromise = Protocol.Runtime.evaluate({
+ expression: 'function foo() { debugger; }; foo();'
+ });
+ InspectorTest.log('Paused at debugger:');
+ const { params: { callFrames: before } } =
+ await Protocol.Debugger.oncePaused();
+ await session.logSourceLocation(before[0].location);
+ InspectorTest.log('Call restart and dump location of restart:');
+ const { result: { callFrames: restart }} =
+ await Protocol.Debugger.restartFrame({
+ callFrameId: before[0].callFrameId
+ });
+ await session.logSourceLocation(restart[0].location);
+ InspectorTest.log('Location after restart:');
+ Protocol.Debugger.resume();
+ const { params: { callFrames: after } } =
+ await Protocol.Debugger.oncePaused();
+ await session.logSourceLocation(after[0].location);
+ Protocol.Debugger.resume();
+ await evalPromise;
+ InspectorTest.completeTest();
+})()
diff --git a/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt b/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt
new file mode 100644
index 0000000000..fba9bb4cf2
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-clone-module-expected.txt
@@ -0,0 +1,5 @@
+Tests that cloning a module notifies the debugger
+Got URL: wasm://wasm/wasm-cae8f226/wasm-cae8f226-0
+Got URL: wasm://wasm/wasm-cae8f226/wasm-cae8f226-0
+Got URL: wasm://wasm/wasm-cae8f226/wasm-cae8f226-0
+Done!
diff --git a/deps/v8/test/inspector/debugger/wasm-clone-module.js b/deps/v8/test/inspector/debugger/wasm-clone-module.js
new file mode 100644
index 0000000000..ac027a2f8b
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-clone-module.js
@@ -0,0 +1,40 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Tests that cloning a module notifies the debugger');
+
+utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_v_v).addBody([]).exportAs('f');
+let moduleBytes = JSON.stringify(builder.toArray());
+
+contextGroup.addScript(`
+ function test(moduleBytes) {
+ let wireBytes = new Uint8Array(moduleBytes);
+ let module = new WebAssembly.Module(wireBytes.buffer);
+ let serialized = %SerializeWasmModule(module);
+ let module2 = %DeserializeWasmModule(serialized, wireBytes);
+ let module3 = %CloneWasmModule(module);
+ }
+`);
+
+let scriptsSeen = 0;
+
+Protocol.Debugger.onScriptParsed(msg => {
+ let url = msg.params.url;
+ if (url.startsWith('wasm://')) {
+ InspectorTest.log(`Got URL: ${url}`);
+ if (++scriptsSeen == 3) {
+ InspectorTest.log('Done!');
+ InspectorTest.completeTest();
+ }
+ }
+});
+
+Protocol.Debugger.enable();
+Protocol.Runtime.evaluate({expression: `test(${moduleBytes});`});
diff --git a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
index 073c8af942..d67d49742c 100644
--- a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
@@ -18,8 +18,8 @@ end
Getting current stack trace via "new Error().stack".
Error
at v8://test/getStack:1:1
- at func (wasm-function[0]:1)
- at main (wasm-function[1]:1)
+ at func (wasm-function[0]:0x21)
+ at main (wasm-function[1]:0x2f)
at v8://test/runWasm:1:22
exports.main returned.
Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
index bba3009f3e..1d6f51b997 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
@@ -12,8 +12,8 @@ Result of evaluate (string):
Error: this is your stack trace:
-- skipped --
at call_debugger (<anonymous>:3:5)
- at call_func (wasm-function[1]:1)
- at main (wasm-function[2]:3)
+ at call_func (wasm-function[1]:0x37)
+ at main (wasm-function[2]:0x3e)
at testFunction (<anonymous>:15:20)
at <anonymous>:1:1
Finished!
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 4321edccac..0a4cd48e92 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -15,6 +15,7 @@
#include "src/base/platform/platform.h"
#include "src/flags/flags.h"
+#include "src/heap/read-only-heap.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
@@ -1070,6 +1071,7 @@ int main(int argc, char* argv[]) {
printf("Embedding script '%s'\n", argv[i]);
startup_data = i::CreateSnapshotDataBlobInternal(
v8::SnapshotCreator::FunctionCodeHandling::kClear, argv[i], nullptr);
+ v8::internal::ReadOnlyHeap::ClearSharedHeapForTest();
argv[i] = nullptr;
}
}
@@ -1077,8 +1079,9 @@ int main(int argc, char* argv[]) {
{
IsolateData::SetupGlobalTasks frontend_extensions;
frontend_extensions.emplace_back(new UtilsExtension());
- TaskRunner frontend_runner(std::move(frontend_extensions), true,
- &ready_semaphore, nullptr, false);
+ TaskRunner frontend_runner(
+ std::move(frontend_extensions), true, &ready_semaphore,
+ startup_data.data ? &startup_data : nullptr, false);
ready_semaphore.Wait();
int frontend_context_group_id = 0;
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index bfc7934d31..ae41264072 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -260,7 +260,7 @@ void IsolateData::DumpAsyncTaskStacksStateForTest() {
// static
int IsolateData::HandleMessage(v8::Local<v8::Message> message,
v8::Local<v8::Value> exception) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = message->GetIsolate();
v8::Local<v8::Context> context = isolate->GetEnteredOrMicrotaskContext();
if (context.IsEmpty()) return 0;
v8_inspector::V8Inspector* inspector =
@@ -304,7 +304,7 @@ void IsolateData::MessageHandler(v8::Local<v8::Message> message,
// static
void IsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Isolate* isolate = data.GetPromise()->GetIsolate();
v8::Local<v8::Context> context = isolate->GetEnteredOrMicrotaskContext();
if (context.IsEmpty()) return;
v8::Local<v8::Promise> promise = data.GetPromise();
@@ -370,12 +370,11 @@ std::vector<int> IsolateData::GetSessionIds(int context_group_id) {
}
bool IsolateData::formatAccessorsAsProperties(v8::Local<v8::Value> object) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Context> context = isolate()->GetCurrentContext();
v8::Local<v8::Private> shouldFormatAccessorsPrivate = v8::Private::ForApi(
- isolate, v8::String::NewFromUtf8(isolate, "allowAccessorFormatting",
- v8::NewStringType::kNormal)
- .ToLocalChecked());
+ isolate(), v8::String::NewFromUtf8(isolate(), "allowAccessorFormatting",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked());
CHECK(object->IsObject());
return object.As<v8::Object>()
->HasPrivate(context, shouldFormatAccessorsPrivate)
@@ -383,11 +382,10 @@ bool IsolateData::formatAccessorsAsProperties(v8::Local<v8::Value> object) {
}
bool IsolateData::isInspectableHeapObject(v8::Local<v8::Object> object) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Context> context = isolate()->GetCurrentContext();
v8::MicrotasksScope microtasks_scope(
- isolate, v8::MicrotasksScope::kDoNotRunMicrotasks);
- return !object->HasPrivate(context, not_inspectable_private_.Get(isolate))
+ isolate(), v8::MicrotasksScope::kDoNotRunMicrotasks);
+ return !object->HasPrivate(context, not_inspectable_private_.Get(isolate()))
.FromMaybe(false);
}
@@ -455,7 +453,7 @@ void IsolateData::maxAsyncCallStackDepthChanged(int depth) {
}
void IsolateData::SetResourceNamePrefix(v8::Local<v8::String> prefix) {
- resource_name_prefix_.Reset(v8::Isolate::GetCurrent(), prefix);
+ resource_name_prefix_.Reset(isolate(), prefix);
}
namespace {
@@ -475,10 +473,10 @@ class StringBufferImpl : public v8_inspector::StringBuffer {
std::unique_ptr<v8_inspector::StringBuffer> IsolateData::resourceNameToUrl(
const v8_inspector::StringView& resourceName) {
if (resource_name_prefix_.IsEmpty()) return nullptr;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::String> name = ToString(isolate, resourceName);
- v8::Local<v8::String> prefix = resource_name_prefix_.Get(isolate);
- v8::Local<v8::String> url = v8::String::Concat(isolate, prefix, name);
- return std::unique_ptr<StringBufferImpl>(new StringBufferImpl(isolate, url));
+ v8::HandleScope handle_scope(isolate());
+ v8::Local<v8::String> name = ToString(isolate(), resourceName);
+ v8::Local<v8::String> prefix = resource_name_prefix_.Get(isolate());
+ v8::Local<v8::String> url = v8::String::Concat(isolate(), prefix, name);
+ return std::unique_ptr<StringBufferImpl>(
+ new StringBufferImpl(isolate(), url));
}
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
index 930999bb2b..1a64b576c3 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
@@ -43,7 +43,7 @@ Running test: testSyntaxErrorInFunction
columnNumber : 2
exception : {
className : SyntaxError
- description : SyntaxError: Unexpected token }
+ description : SyntaxError: Unexpected token '}'
objectId : <objectId>
subtype : error
type : object
@@ -55,7 +55,7 @@ Running test: testSyntaxErrorInFunction
}
result : {
className : SyntaxError
- description : SyntaxError: Unexpected token }
+ description : SyntaxError: Unexpected token '}'
objectId : <objectId>
subtype : error
type : object
diff --git a/deps/v8/test/inspector/runtime/es6-module-expected.txt b/deps/v8/test/inspector/runtime/es6-module-expected.txt
index 25ba52e034..65c543e45f 100644
--- a/deps/v8/test/inspector/runtime/es6-module-expected.txt
+++ b/deps/v8/test/inspector/runtime/es6-module-expected.txt
@@ -39,13 +39,13 @@ Checks basic ES6 modules support.
method : Debugger.scriptParsed
params : {
endColumn : 0
- endLine : 8
+ endLine : 11
executionContextId : <executionContextId>
hasSourceURL : false
- hash : 2eda454b04ad45b62a894c7590954c5074ca4569
+ hash : 2e8186096446efdc472a6e0559ea22216a664cb5
isLiveEdit : false
isModule : true
- length : 191
+ length : 286
scriptId : <scriptId>
sourceMapURL :
startColumn : 0
@@ -55,17 +55,17 @@ Checks basic ES6 modules support.
}
console.log(module1)
foo (module1:2:10)
-(anonymous) (module3:3:12)
+(anonymous) (module3:5:12)
console.log(42)
-(anonymous) (module3:3:8)
+(anonymous) (module3:5:8)
console.log(module2)
foo (module2:2:10)
-(anonymous) (module3:4:12)
+(anonymous) (module3:6:12)
console.log(239)
-(anonymous) (module3:4:8)
+(anonymous) (module3:6:8)
{
method : Debugger.paused
@@ -81,14 +81,14 @@ console.log(239)
functionName :
location : {
columnNumber : 0
- lineNumber : 7
+ lineNumber : 9
scriptId : <scriptId>
}
scopeChain : [
[0] : {
endLocation : {
columnNumber : 0
- lineNumber : 8
+ lineNumber : 11
scriptId : <scriptId>
}
object : {
@@ -159,6 +159,19 @@ console.log(239)
configurable : true
enumerable : true
isOwn : true
+ name : foo3
+ value : {
+ className : Module
+ description : Module
+ objectId : <objectId>
+ type : object
+ }
+ writable : true
+ }
+ [3] : {
+ configurable : true
+ enumerable : true
+ isOwn : true
name : a1
value : {
description : 1
@@ -167,7 +180,7 @@ console.log(239)
}
writable : true
}
- [3] : {
+ [4] : {
configurable : true
enumerable : true
isOwn : true
@@ -206,21 +219,21 @@ console.log(239)
columnNumber : 0
exception : {
className : SyntaxError
- description : SyntaxError: Unexpected token }
+ description : SyntaxError: Unexpected token '}'
objectId : <objectId>
preview : {
- description : SyntaxError: Unexpected token }
+ description : SyntaxError: Unexpected token '}'
overflow : false
properties : [
[0] : {
name : stack
type : string
- value : SyntaxError: Unexpected token }
+ value : SyntaxError: Unexpected token '}'
}
[1] : {
name : message
type : string
- value : Unexpected token }
+ value : Unexpected token '}'
}
]
subtype : error
@@ -233,9 +246,10 @@ console.log(239)
executionContextId : <executionContextId>
lineNumber : 0
scriptId : <scriptId>
- text : Uncaught SyntaxError: Unexpected token }
+ text : Uncaught SyntaxError: Unexpected token '}'
url : module4
}
timestamp : <timestamp>
}
}
+queryObjects returns Array(N)
diff --git a/deps/v8/test/inspector/runtime/es6-module.js b/deps/v8/test/inspector/runtime/es6-module.js
index 6d9e43486b..7108ba5aaf 100644
--- a/deps/v8/test/inspector/runtime/es6-module.js
+++ b/deps/v8/test/inspector/runtime/es6-module.js
@@ -21,41 +21,59 @@ export let a2 = 2`;
var module3 = `
import { foo as foo1 } from 'module1';
import { foo as foo2 } from 'module2';
+// check that queryObjects works with JSModuleNamespace
+import * as foo3 from 'module2';
console.log(foo1());
console.log(foo2());
import { a1 } from 'module1';
import { a2 } from 'module2';
debugger;
+foo3;
`;
var module4 = '}';
-session.setupScriptMap();
-// We get scriptParsed events for modules ..
-Protocol.Debugger.onScriptParsed(InspectorTest.logMessage);
-// .. scriptFailed to parse for modules with syntax error ..
-Protocol.Debugger.onScriptFailedToParse(InspectorTest.logMessage);
-// .. API messages from modules contain correct stack trace ..
-Protocol.Runtime.onConsoleAPICalled(message => {
- InspectorTest.log(`console.log(${message.params.args[0].value})`);
- session.logCallFrames(message.params.stackTrace.callFrames);
- InspectorTest.log('');
-});
-// .. we could break inside module and scope contains correct list of variables ..
-Protocol.Debugger.onPaused(message => {
- InspectorTest.logMessage(message);
- Protocol.Runtime.getProperties({ objectId: message.params.callFrames[0].scopeChain[0].object.objectId})
- .then(InspectorTest.logMessage)
- .then(() => Protocol.Debugger.resume());
-});
-// .. we process uncaught errors from modules correctly.
-Protocol.Runtime.onExceptionThrown(InspectorTest.logMessage);
-
-Protocol.Runtime.enable();
-Protocol.Debugger.enable()
- .then(() => contextGroup.addModule(module1, "module1"))
- .then(() => contextGroup.addModule(module2, "module2"))
- .then(() => contextGroup.addModule(module3, "module3"))
- .then(() => contextGroup.addModule(module4, "module4"))
- .then(() => InspectorTest.waitForPendingTasks())
- .then(InspectorTest.completeTest);
+(async function test() {
+ session.setupScriptMap();
+ // We get scriptParsed events for modules ..
+ Protocol.Debugger.onScriptParsed(InspectorTest.logMessage);
+ // .. scriptFailed to parse for modules with syntax error ..
+ Protocol.Debugger.onScriptFailedToParse(InspectorTest.logMessage);
+ // .. API messages from modules contain correct stack trace ..
+ Protocol.Runtime.onConsoleAPICalled(message => {
+ InspectorTest.log(`console.log(${message.params.args[0].value})`);
+ session.logCallFrames(message.params.stackTrace.callFrames);
+ InspectorTest.log('');
+ });
+ // .. we could break inside module and scope contains correct list of variables ..
+ Protocol.Debugger.onPaused(message => {
+ InspectorTest.logMessage(message);
+ Protocol.Runtime.getProperties({ objectId: message.params.callFrames[0].scopeChain[0].object.objectId})
+ .then(InspectorTest.logMessage)
+ .then(() => Protocol.Debugger.resume());
+ });
+
+ // .. we process uncaught errors from modules correctly.
+ Protocol.Runtime.onExceptionThrown(InspectorTest.logMessage);
+
+ Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ await contextGroup.addModule(module1, 'module1');
+ await contextGroup.addModule(module2, 'module2');
+ await contextGroup.addModule(module3, 'module3');
+ await contextGroup.addModule(module4, 'module4');
+ await InspectorTest.waitForPendingTasks();
+
+ Protocol.Debugger.onScriptParsed(null);
+ Protocol.Runtime.evaluate({
+ includeCommandLineAPI: true,
+ expression: 'queryObjects(Function)'
+ });
+ const {params:{object:{objectId}}} = await Protocol.Runtime.onceInspectRequested();
+ const {result:{objects}} = await Protocol.Runtime.queryObjects({
+ prototypeObjectId: objectId
+ });
+ InspectorTest.log('queryObjects returns ' + objects.description.replace(/\d+/, 'N'));
+
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/evaluate-async-expected.txt b/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
index ea47520ef8..95d2955252 100644
--- a/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
@@ -89,7 +89,7 @@ Running test: testRejectedPromiseWithSyntaxError
columnNumber : 5
exception : {
className : SyntaxError
- description : SyntaxError: Unexpected token } at foo (<anonymous>:21:5) at throwSyntaxError (<anonymous>:23:3)
+ description : SyntaxError: Unexpected token '}' at foo (<anonymous>:21:5) at throwSyntaxError (<anonymous>:23:3)
objectId : <objectId>
subtype : error
type : object
@@ -115,11 +115,11 @@ Running test: testRejectedPromiseWithSyntaxError
}
]
}
- text : Uncaught (in promise) SyntaxError: Unexpected token }
+ text : Uncaught (in promise) SyntaxError: Unexpected token '}'
}
result : {
className : SyntaxError
- description : SyntaxError: Unexpected token } at foo (<anonymous>:21:5) at throwSyntaxError (<anonymous>:23:3)
+ description : SyntaxError: Unexpected token '}' at foo (<anonymous>:21:5) at throwSyntaxError (<anonymous>:23:3)
objectId : <objectId>
subtype : error
type : object
diff --git a/deps/v8/test/inspector/runtime/exception-thrown-expected.txt b/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
index 7c58a0421e..916ed61b68 100644
--- a/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
+++ b/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
@@ -50,21 +50,21 @@ Check that exceptionThrown is supported by test runner.
columnNumber : 1
exception : {
className : SyntaxError
- description : SyntaxError: Unexpected token }
+ description : SyntaxError: Unexpected token '}'
objectId : <objectId>
preview : {
- description : SyntaxError: Unexpected token }
+ description : SyntaxError: Unexpected token '}'
overflow : false
properties : [
[0] : {
name : stack
type : string
- value : SyntaxError: Unexpected token }
+ value : SyntaxError: Unexpected token '}'
}
[1] : {
name : message
type : string
- value : Unexpected token }
+ value : Unexpected token '}'
}
]
subtype : error
@@ -77,7 +77,7 @@ Check that exceptionThrown is supported by test runner.
executionContextId : <executionContextId>
lineNumber : 0
scriptId : <scriptId>
- text : Uncaught SyntaxError: Unexpected token }
+ text : Uncaught SyntaxError: Unexpected token '}'
}
timestamp : <timestamp>
}
diff --git a/deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt b/deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt
index 4bcde813fc..976eaa4d4f 100644
--- a/deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt
+++ b/deps/v8/test/inspector/runtime/exceptionthrown-on-connect-expected.txt
@@ -7,7 +7,7 @@ Enabling Runtime Domain.
columnNumber : 12
exception : {
className : SyntaxError
- description : SyntaxError: Unexpected token ;
+ description : SyntaxError: Unexpected token ';'
objectId : <objectId>
subtype : error
type : object
@@ -16,7 +16,7 @@ Enabling Runtime Domain.
executionContextId : <executionContextId>
lineNumber : 1
scriptId : <scriptId>
- text : Uncaught SyntaxError: Unexpected token ;
+ text : Uncaught SyntaxError: Unexpected token ';'
url : syntaxError.js
}
timestamp : <timestamp>
diff --git a/deps/v8/test/inspector/runtime/query-objects-expected.txt b/deps/v8/test/inspector/runtime/query-objects-expected.txt
index db16402986..c11f021101 100644
--- a/deps/v8/test/inspector/runtime/query-objects-expected.txt
+++ b/deps/v8/test/inspector/runtime/query-objects-expected.txt
@@ -95,6 +95,9 @@ Dump each object constructor name.
[1] : Object,object
]
+Running test: testQueryObjectsWithFeedbackVector
+Before/After difference: 1
+
Running test: testWithObjectGroup
Query for Array.prototype 3 times
Results since initial: 0
diff --git a/deps/v8/test/inspector/runtime/query-objects.js b/deps/v8/test/inspector/runtime/query-objects.js
index f37f1ed581..63a26deaef 100644
--- a/deps/v8/test/inspector/runtime/query-objects.js
+++ b/deps/v8/test/inspector/runtime/query-objects.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
let {session, contextGroup, Protocol} =
InspectorTest.start('Checks Runtime.queryObjects');
@@ -118,6 +120,33 @@ InspectorTest.runAsyncTestSuite([
session.disconnect();
},
+ async function testQueryObjectsWithFeedbackVector() {
+ let contextGroup = new InspectorTest.ContextGroup();
+ let session = contextGroup.connect();
+ let Protocol = session.Protocol;
+
+ let {result:{result:{objectId}}} = await Protocol.Runtime.evaluate({
+ expression: 'Object.prototype',
+ });
+ let countBefore = await countObjects(session, objectId);
+ await Protocol.Runtime.evaluate({
+ returnByValue: true,
+ expression: `
+ global.dummyFunction = () => {
+ [42];
+ {foo: 'bar'};
+ [1,2,3];
+ }
+ %EnsureFeedbackVectorForFunction(dummyFunction);
+ dummyFunction();
+ `
+ });
+ let countAfter = await countObjects(session, objectId);
+ // Difference should be 1 since |dummyFunction| is retained.
+ InspectorTest.log('Before/After difference: ' + (countAfter - countBefore));
+ session.disconnect();
+ },
+
async function testWithObjectGroup() {
let contextGroup = new InspectorTest.ContextGroup();
let session = contextGroup.connect();
@@ -174,3 +203,18 @@ async function queryObjects(sesion, prototypeObjectId, name) {
InspectorTest.log('Dump each object constructor name.');
InspectorTest.logMessage(value);
}
+
+async function countObjects(session, prototypeObjectId) {
+ let {result:{objects}} = await session.Protocol.Runtime.queryObjects({
+ prototypeObjectId
+ });
+ let {result:{result:{value}}} = await session.Protocol.Runtime.callFunctionOn({
+ objectId: objects.objectId,
+ functionDeclaration: `function() { return this.length; }`,
+ returnByValue: true
+ });
+ await session.Protocol.Runtime.releaseObject({
+ objectId: objects.objectId,
+ });
+ return value;
+}
diff --git a/deps/v8/test/inspector/runtime/run-script-async-expected.txt b/deps/v8/test/inspector/runtime/run-script-async-expected.txt
index 29b9c526dd..8befa1399c 100644
--- a/deps/v8/test/inspector/runtime/run-script-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/run-script-async-expected.txt
@@ -24,7 +24,7 @@ Running test: testSyntaxErrorInScript
columnNumber : 1
exception : {
className : SyntaxError
- description : SyntaxError: Unexpected token }
+ description : SyntaxError: Unexpected token '}'
objectId : <objectId>
subtype : error
type : object
@@ -45,7 +45,7 @@ Running test: testSyntaxErrorInEvalInScript
columnNumber : 0
exception : {
className : SyntaxError
- description : SyntaxError: Unexpected token } at boo.js:2:2
+ description : SyntaxError: Unexpected token '}' at boo.js:2:2
objectId : <objectId>
subtype : error
type : object
@@ -68,7 +68,7 @@ Running test: testSyntaxErrorInEvalInScript
}
result : {
className : SyntaxError
- description : SyntaxError: Unexpected token } at boo.js:2:2
+ description : SyntaxError: Unexpected token '}' at boo.js:2:2
objectId : <objectId>
subtype : error
type : object
diff --git a/deps/v8/test/intl/general/supported-locales-of.js b/deps/v8/test/intl/general/supported-locales-of.js
index 5cc0095614..eb5c426f07 100644
--- a/deps/v8/test/intl/general/supported-locales-of.js
+++ b/deps/v8/test/intl/general/supported-locales-of.js
@@ -86,8 +86,10 @@ for (const service of services) {
privateuseLocale2 = service.supportedLocalesOf("x-twain");
assertEquals(undefined, privateuseLocale2[0]);
- grandfatheredLocale = service.supportedLocalesOf("art-lojban");
- assertEquals(undefined, grandfatheredLocale[0]);
+ if (service != Intl.PluralRules) {
+ grandfatheredLocale = service.supportedLocalesOf("art-lojban");
+ assertEquals(undefined, grandfatheredLocale[0]);
+ }
grandfatheredLocale2 = service.supportedLocalesOf("i-pwn");
assertEquals(undefined, grandfatheredLocale2[0]);
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index 81950f13b9..ba54743d67 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -34,6 +34,9 @@
'collator/check-kf-option': [FAIL],
'collator/check-kn-option': [FAIL],
+ # https://code.google.com/p/v8/issues/detail?id=9312
+ 'regress-9312': [FAIL],
+
# Slow tests.
'regress-903566': [PASS, SLOW],
}], # ALWAYS
diff --git a/deps/v8/test/intl/number-format/property-override.js b/deps/v8/test/intl/number-format/property-override.js
index 1fbe78e080..590b1c2e4b 100644
--- a/deps/v8/test/intl/number-format/property-override.js
+++ b/deps/v8/test/intl/number-format/property-override.js
@@ -40,6 +40,8 @@
// Update both number-format.js and number-format.cc so they have the same
// list of properties.
+// Flags: --noharmony-intl-numberformat-unified
+
// First get supported properties.
var properties = [];
// Some properties are optional and won't show up in resolvedOptions if
diff --git a/deps/v8/test/intl/number-format/unified/constructor-order.js b/deps/v8/test/intl/number-format/unified/constructor-order.js
index 266426c7d4..be716371f5 100644
--- a/deps/v8/test/intl/number-format/unified/constructor-order.js
+++ b/deps/v8/test/intl/number-format/unified/constructor-order.js
@@ -34,26 +34,26 @@ new Intl.NumberFormat(['en-US'], {
get unitDisplay() {
assertEquals(6, getCount++);
},
- // End of new options
- get minimumIntegerDigits() {
+ get notation() {
assertEquals(7, getCount++);
},
- get minimumFractionDigits() {
+ // End of new options
+ get minimumIntegerDigits() {
assertEquals(8, getCount++);
},
- get maximumFractionDigits() {
+ get minimumFractionDigits() {
assertEquals(9, getCount++);
},
- get minimumSignificantDigits() {
+ get maximumFractionDigits() {
assertEquals(10, getCount++);
},
- get maximumSignificantDigits() {
+ get minimumSignificantDigits() {
assertEquals(11, getCount++);
},
- // Begin of new options
- get notation() {
+ get maximumSignificantDigits() {
assertEquals(12, getCount++);
},
+ // Begin of new options
get compactDisplay() {
assertEquals(13, getCount++);
},
diff --git a/deps/v8/test/intl/number-format/unified/currency-display.js b/deps/v8/test/intl/number-format/unified/currency-display.js
index 4f7acb97c8..effd026778 100644
--- a/deps/v8/test/intl/number-format/unified/currency-display.js
+++ b/deps/v8/test/intl/number-format/unified/currency-display.js
@@ -27,7 +27,7 @@ const testData = [
["name", "123.00 New Taiwan dollars"],
["code", "TWD 123.00"],
["symbol", "NT$123.00"],
- ["narrow-symbol", "$123.00"], // new
+ ["narrowSymbol", "$123.00"], // new
];
for (const [currencyDisplay, expectation] of testData) {
diff --git a/deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js b/deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js
new file mode 100644
index 0000000000..280771b2e7
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/notation-engineering-formatToParts.js
@@ -0,0 +1,175 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Test notation: "engineering" with formatToParts.
+
+const nf = Intl.NumberFormat("en", {notation: "engineering"});
+
+let parts = nf.formatToParts(123.456);
+// [{type: "integer", value: "123"}, {type: "decimal", value: "."},
+// {type: "fraction", value: "456"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "0"}]
+assertEquals("integer", parts[0].type);
+assertEquals("123", parts[0].value);
+assertEquals("decimal", parts[1].type);
+assertEquals(".", parts[1].value);
+assertEquals("fraction", parts[2].type);
+assertEquals("456", parts[2].value);
+assertEquals("exponentSeparator", parts[3].type);
+assertEquals("E", parts[3].value);
+assertEquals("exponentInteger", parts[4].type);
+assertEquals("0", parts[4].value);
+assertEquals(5, parts.length);
+
+parts = nf.formatToParts(-123.456);
+// [{type: "minusSign", value: "-"}, {type: "integer", value: "123"},
+// {type: "decimal", value: "."}, {type: "fraction", value: "456"},
+// {type: "exponentSeparator", value: "E"}, {type: "exponentInteger", value: "0"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("integer", parts[1].type);
+assertEquals("123", parts[1].value);
+assertEquals("decimal", parts[2].type);
+assertEquals(".", parts[2].value);
+assertEquals("fraction", parts[3].type);
+assertEquals("456", parts[3].value);
+assertEquals("exponentSeparator", parts[4].type);
+assertEquals("E", parts[4].value);
+assertEquals("exponentInteger", parts[5].type);
+assertEquals("0", parts[5].value);
+assertEquals(6, parts.length);
+
+parts = nf.formatToParts(12345678901234567890);
+// [{type: "integer", value: "12"}, {type: "decimal", value: "."},
+// {type: "fraction", value: "346"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "18"}]
+assertEquals("integer", parts[0].type);
+assertEquals("12", parts[0].value);
+assertEquals("decimal", parts[1].type);
+assertEquals(".", parts[1].value);
+assertEquals("fraction", parts[2].type);
+assertEquals("346", parts[2].value);
+assertEquals("exponentSeparator", parts[3].type);
+assertEquals("E", parts[3].value);
+assertEquals("exponentInteger", parts[4].type);
+assertEquals("18", parts[4].value);
+assertEquals(5, parts.length);
+
+parts = nf.formatToParts(-12345678901234567890);
+// [{type: "minusSign", value: "-"}, {type: "integer", value: "12"},
+// {type: "decimal", value: "."}, {type: "fraction", value: "346"},
+// {type: "exponentSeparator", value: "E"}, {type: "exponentInteger", value: "18"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("integer", parts[1].type);
+assertEquals("12", parts[1].value);
+assertEquals("decimal", parts[2].type);
+assertEquals(".", parts[2].value);
+assertEquals("fraction", parts[3].type);
+assertEquals("346", parts[3].value);
+assertEquals("exponentSeparator", parts[4].type);
+assertEquals("E", parts[4].value);
+assertEquals("exponentInteger", parts[5].type);
+assertEquals("18", parts[5].value);
+assertEquals(6, parts.length);
+
+parts = nf.formatToParts(0.000000000000123456);
+// [{type: "integer", value: "123"}, {type: "decimal", value: "."},
+// {type: "fraction", value: "456"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentMinusSign", value: "-"}, {type: "exponentInteger", value: "15"}]
+assertEquals("integer", parts[0].type);
+assertEquals("123", parts[0].value);
+assertEquals("decimal", parts[1].type);
+assertEquals(".", parts[1].value);
+assertEquals("fraction", parts[2].type);
+assertEquals("456", parts[2].value);
+assertEquals("exponentSeparator", parts[3].type);
+assertEquals("E", parts[3].value);
+assertEquals("exponentMinusSign", parts[4].type);
+assertEquals("-", parts[4].value);
+assertEquals("exponentInteger", parts[5].type);
+assertEquals("15", parts[5].value);
+assertEquals(6, parts.length);
+
+parts = nf.formatToParts(-0.000000000000123456);
+// [{type: "minusSign", value: "-"}, {type: "integer", value: "123"},
+// {type: "decimal", value: "."}, {type: "fraction", value: "456"},
+// {type: "exponentSeparator", value: "E"},
+// {type: "exponentMinusSign", value: "-"}, {type: "exponentInteger", value: "15"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("integer", parts[1].type);
+assertEquals("123", parts[1].value);
+assertEquals("decimal", parts[2].type);
+assertEquals(".", parts[2].value);
+assertEquals("fraction", parts[3].type);
+assertEquals("456", parts[3].value);
+assertEquals("exponentSeparator", parts[4].type);
+assertEquals("E", parts[4].value);
+assertEquals("exponentMinusSign", parts[5].type);
+assertEquals("-", parts[5].value);
+assertEquals("exponentInteger", parts[6].type);
+assertEquals("15", parts[6].value);
+assertEquals(7, parts.length);
+
+parts = nf.formatToParts(0);
+// [{type: "integer", value: "0"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "0"}]
+assertEquals("integer", parts[0].type);
+assertEquals("0", parts[0].value);
+assertEquals("exponentSeparator", parts[1].type);
+assertEquals("E", parts[1].value);
+assertEquals("exponentInteger", parts[2].type);
+assertEquals("0", parts[2].value);
+assertEquals(3, parts.length);
+
+parts = nf.formatToParts(-0);
+// [{type: "minusSign", value: "-"}, {type: "integer", value: "0"},
+// {type: "exponentSeparator", value: "E"}, {type: "exponentInteger", value: "0"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("integer", parts[1].type);
+assertEquals("0", parts[1].value);
+assertEquals("exponentSeparator", parts[2].type);
+assertEquals("E", parts[2].value);
+assertEquals("exponentInteger", parts[3].type);
+assertEquals("0", parts[3].value);
+assertEquals(4, parts.length);
+
+parts = nf.formatToParts(Infinity);
+// [{type: "infinity", value: "∞"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "0"}]
+assertEquals("infinity", parts[0].type);
+assertEquals("∞", parts[0].value);
+assertEquals("exponentSeparator", parts[1].type);
+assertEquals("E", parts[1].value);
+assertEquals("exponentInteger", parts[2].type);
+assertEquals("0", parts[2].value);
+assertEquals(3, parts.length);
+
+parts = nf.formatToParts(-Infinity);
+// [{type: "minusSign", value: "-"}, {type: "infinity", value: "∞"},
+// {type: "exponentSeparator", value: "E"}, {type: "exponentInteger", value: "0"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("infinity", parts[1].type);
+assertEquals("∞", parts[1].value);
+assertEquals("exponentSeparator", parts[2].type);
+assertEquals("E", parts[2].value);
+assertEquals("exponentInteger", parts[3].type);
+assertEquals("0", parts[3].value);
+assertEquals(4, parts.length);
+
+parts = nf.formatToParts(NaN);
+// [{type: "nan", value: "NaN"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "0"}]
+assertEquals("nan", parts[0].type);
+assertEquals("NaN", parts[0].value);
+assertEquals("exponentSeparator", parts[1].type);
+assertEquals("E", parts[1].value);
+assertEquals("exponentInteger", parts[2].type);
+assertEquals("0", parts[2].value);
+assertEquals(3, parts.length);
diff --git a/deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js b/deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js
new file mode 100644
index 0000000000..9ffd5f8709
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/notation-scientific-formatToParts.js
@@ -0,0 +1,177 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Test notation: "scientific" with formatToParts.
+
+const nf = Intl.NumberFormat("en", {notation: "scientific"});
+
+let parts = nf.formatToParts(123.456);
+// [{type: "integer", value: "1"}, {type: "decimal", value: "."},
+// {type: "fraction", value: "235"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "2"}]
+assertEquals("integer", parts[0].type);
+assertEquals("1", parts[0].value);
+assertEquals("decimal", parts[1].type);
+assertEquals(".", parts[1].value);
+assertEquals("fraction", parts[2].type);
+assertEquals("235", parts[2].value);
+assertEquals("exponentSeparator", parts[3].type);
+assertEquals("E", parts[3].value);
+assertEquals("exponentInteger", parts[4].type);
+assertEquals("2", parts[4].value);
+assertEquals(5, parts.length);
+
+parts = nf.formatToParts(-123.456);
+// [{type: "minusSign", value: "-"},i
+// {type: "integer", value: "1"}, {type: "decimal", value: "."},
+// {type: "fraction", value: "235"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "2"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("integer", parts[1].type);
+assertEquals("1", parts[1].value);
+assertEquals("decimal", parts[2].type);
+assertEquals(".", parts[2].value);
+assertEquals("fraction", parts[3].type);
+assertEquals("235", parts[3].value);
+assertEquals("exponentSeparator", parts[4].type);
+assertEquals("E", parts[4].value);
+assertEquals("exponentInteger", parts[5].type);
+assertEquals("2", parts[5].value);
+assertEquals(6, parts.length);
+
+parts = nf.formatToParts(12345678901234567890);
+// [{type: "integer", value: "1"}, {type: "decimal", value: "."},
+// {type: "fraction", value: "235"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "19"}]
+assertEquals("integer", parts[0].type);
+assertEquals("1", parts[0].value);
+assertEquals("decimal", parts[1].type);
+assertEquals(".", parts[1].value);
+assertEquals("fraction", parts[2].type);
+assertEquals("235", parts[2].value);
+assertEquals("exponentSeparator", parts[3].type);
+assertEquals("E", parts[3].value);
+assertEquals("exponentInteger", parts[4].type);
+assertEquals("19", parts[4].value);
+assertEquals(5, parts.length);
+
+parts = nf.formatToParts(-12345678901234567890);
+// [{type: "minusSign", value: "-"},
+// {type: "integer", value: "1"}, {type: "decimal", value: "."},
+// {type: "fraction", value: "235"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "19"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("integer", parts[1].type);
+assertEquals("1", parts[1].value);
+assertEquals("decimal", parts[2].type);
+assertEquals(".", parts[2].value);
+assertEquals("fraction", parts[3].type);
+assertEquals("235", parts[3].value);
+assertEquals("exponentSeparator", parts[4].type);
+assertEquals("E", parts[4].value);
+assertEquals("exponentInteger", parts[5].type);
+assertEquals("19", parts[5].value);
+assertEquals(6, parts.length);
+
+parts = nf.formatToParts(0.000000000000123456);
+// [{type: "integer", value: "1"}, {type: "decimal", value: "."},
+// {type: "fraction", value: "235"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentMinusSign", value: "-"}, {type: "exponentInteger", value: "13"}]
+assertEquals("integer", parts[0].type);
+assertEquals("1", parts[0].value);
+assertEquals("decimal", parts[1].type);
+assertEquals(".", parts[1].value);
+assertEquals("fraction", parts[2].type);
+assertEquals("235", parts[2].value);
+assertEquals("exponentSeparator", parts[3].type);
+assertEquals("E", parts[3].value);
+assertEquals("exponentMinusSign", parts[4].type);
+assertEquals("-", parts[4].value);
+assertEquals("exponentInteger", parts[5].type);
+assertEquals("13", parts[5].value);
+assertEquals(6, parts.length);
+
+parts = nf.formatToParts(-0.000000000000123456);
+// [{type: "minusSign", value: "-"}, {type: "integer", value: "1"},
+// {type: "decimal", value: "."}, {type: "fraction", value: "235"},
+// {type: "exponentSeparator", value: "E"},
+// {type: "exponentMinusSign", value: "-"}, {type: "exponentInteger", value: "13"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("integer", parts[1].type);
+assertEquals("1", parts[1].value);
+assertEquals("decimal", parts[2].type);
+assertEquals(".", parts[2].value);
+assertEquals("fraction", parts[3].type);
+assertEquals("235", parts[3].value);
+assertEquals("exponentSeparator", parts[4].type);
+assertEquals("E", parts[4].value);
+assertEquals("exponentMinusSign", parts[5].type);
+assertEquals("-", parts[5].value);
+assertEquals("exponentInteger", parts[6].type);
+assertEquals("13", parts[6].value);
+assertEquals(7, parts.length);
+
+parts = nf.formatToParts(0);
+// [{type: "integer", value: "0"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "0"}]
+assertEquals("integer", parts[0].type);
+assertEquals("0", parts[0].value);
+assertEquals("exponentSeparator", parts[1].type);
+assertEquals("E", parts[1].value);
+assertEquals("exponentInteger", parts[2].type);
+assertEquals("0", parts[2].value);
+assertEquals(3, parts.length);
+
+parts = nf.formatToParts(-0);
+// [{type: "minusSign", value: "-"}, {type: "integer", value: "0"},
+// {type: "exponentSeparator", value: "E"}, {type: "exponentInteger", value: "0"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("integer", parts[1].type);
+assertEquals("0", parts[1].value);
+assertEquals("exponentSeparator", parts[2].type);
+assertEquals("E", parts[2].value);
+assertEquals("exponentInteger", parts[3].type);
+assertEquals("0", parts[3].value);
+assertEquals(4, parts.length);
+
+parts = nf.formatToParts(Infinity);
+// [{type: "infinity", value: "∞"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "0"}]
+assertEquals("infinity", parts[0].type);
+assertEquals("∞", parts[0].value);
+assertEquals("exponentSeparator", parts[1].type);
+assertEquals("E", parts[1].value);
+assertEquals("exponentInteger", parts[2].type);
+assertEquals("0", parts[2].value);
+assertEquals(3, parts.length);
+
+parts = nf.formatToParts(-Infinity);
+// [{type: "minusSign", value: "-"}, {type: "infinity", value: "∞"},
+// {type: "exponentSeparator", value: "E"}, {type: "exponentInteger", value: "0"}]
+assertEquals("minusSign", parts[0].type);
+assertEquals("-", parts[0].value);
+assertEquals("infinity", parts[1].type);
+assertEquals("∞", parts[1].value);
+assertEquals("exponentSeparator", parts[2].type);
+assertEquals("E", parts[2].value);
+assertEquals("exponentInteger", parts[3].type);
+assertEquals("0", parts[3].value);
+assertEquals(4, parts.length);
+
+parts = nf.formatToParts(NaN);
+// [{type: "nan", value: "NaN"}, {type: "exponentSeparator", value: "E"},
+// {type: "exponentInteger", value: "0"}]
+assertEquals("nan", parts[0].type);
+assertEquals("NaN", parts[0].value);
+assertEquals("exponentSeparator", parts[1].type);
+assertEquals("E", parts[1].value);
+assertEquals("exponentInteger", parts[2].type);
+assertEquals("0", parts[2].value);
+assertEquals(3, parts.length);
diff --git a/deps/v8/test/intl/number-format/unified/notation.js b/deps/v8/test/intl/number-format/unified/notation.js
index 9c451773bd..b26ee01f5c 100644
--- a/deps/v8/test/intl/number-format/unified/notation.js
+++ b/deps/v8/test/intl/number-format/unified/notation.js
@@ -19,9 +19,9 @@ const testData = [
["standard", undefined, "987,654,321"],
["scientific", undefined, "9.877E8"],
["engineering", undefined, "987.654E6"],
- ["compact", undefined, "987.654M"],
- ["compact", "short", "987.654M"],
- ["compact", "long", "987.654 million"],
+ ["compact", undefined, "988M"],
+ ["compact", "short", "988M"],
+ ["compact", "long", "988 million"],
];
for (const [notation, compactDisplay, expect1] of testData) {
@@ -40,50 +40,50 @@ for (const [notation, compactDisplay, expect1] of testData) {
// Test Germany which has different decimal marks.
let notation = "compact";
nf = new Intl.NumberFormat("de", {notation, compactDisplay: "short"});
-assertEquals("987,654 Mio.", nf.format(987654321));
-assertEquals("98,765 Mio.", nf.format(98765432));
+assertEquals("988 Mio.", nf.format(987654321));
+assertEquals("99 Mio.", nf.format(98765432));
assertEquals("98.765", nf.format(98765));
assertEquals("9876", nf.format(9876));
nf = new Intl.NumberFormat("de", {notation, compactDisplay: "long"});
-assertEquals("987,654 Millionen", nf.format(987654321));
-assertEquals("98,765 Millionen", nf.format(98765432));
-assertEquals("98,765 Tausend", nf.format(98765));
-assertEquals("9,876 Tausend", nf.format(9876));
+assertEquals("988 Millionen", nf.format(987654321));
+assertEquals("99 Millionen", nf.format(98765432));
+assertEquals("99 Tausend", nf.format(98765));
+assertEquals("9,9 Tausend", nf.format(9876));
// Test Chinese, Japanese and Korean, which group by 4 digits.
nf = new Intl.NumberFormat("zh-TW", {notation, compactDisplay: "short"});
-assertEquals("9.877億", nf.format(987654321));
-assertEquals("9876.543萬", nf.format(98765432));
-assertEquals("9.877萬", nf.format(98765));
+assertEquals("9.9億", nf.format(987654321));
+assertEquals("9877萬", nf.format(98765432));
+assertEquals("9.9萬", nf.format(98765));
assertEquals("9876", nf.format(9876));
nf = new Intl.NumberFormat("zh-TW", {notation, compactDisplay: "long"});
-assertEquals("9.877億", nf.format(987654321));
-assertEquals("9876.543萬", nf.format(98765432));
-assertEquals("9.877萬", nf.format(98765));
+assertEquals("9.9億", nf.format(987654321));
+assertEquals("9877萬", nf.format(98765432));
+assertEquals("9.9萬", nf.format(98765));
assertEquals("9876", nf.format(9876));
// Test Japanese with compact.
nf = new Intl.NumberFormat("ja", {notation, compactDisplay: "short"});
-assertEquals("9.877億", nf.format(987654321));
-assertEquals("9876.543万", nf.format(98765432));
-assertEquals("9.877万", nf.format(98765));
+assertEquals("9.9億", nf.format(987654321));
+assertEquals("9877万", nf.format(98765432));
+assertEquals("9.9万", nf.format(98765));
assertEquals("9876", nf.format(9876));
nf = new Intl.NumberFormat("ja", {notation, compactDisplay: "long"});
-assertEquals("9.877億", nf.format(987654321));
-assertEquals("9876.543万", nf.format(98765432));
-assertEquals("9.877万", nf.format(98765));
+assertEquals("9.9億", nf.format(987654321));
+assertEquals("9877万", nf.format(98765432));
+assertEquals("9.9万", nf.format(98765));
assertEquals("9876", nf.format(9876));
// Test Korean with compact.
nf = new Intl.NumberFormat("ko", {notation, compactDisplay: "short"});
-assertEquals("9.877억", nf.format(987654321));
-assertEquals("9876.543만", nf.format(98765432));
-assertEquals("9.877만", nf.format(98765));
-assertEquals("9.876천", nf.format(9876));
+assertEquals("9.9억", nf.format(987654321));
+assertEquals("9877만", nf.format(98765432));
+assertEquals("9.9만", nf.format(98765));
+assertEquals("9.9천", nf.format(9876));
assertEquals("987", nf.format(987));
nf = new Intl.NumberFormat("ko", {notation, compactDisplay: "long"});
-assertEquals("9.877억", nf.format(987654321));
-assertEquals("9876.543만", nf.format(98765432));
-assertEquals("9.877만", nf.format(98765));
-assertEquals("9.876천", nf.format(9876));
+assertEquals("9.9억", nf.format(987654321));
+assertEquals("9877만", nf.format(98765432));
+assertEquals("9.9만", nf.format(98765));
+assertEquals("9.9천", nf.format(9876));
assertEquals("987", nf.format(987));
diff --git a/deps/v8/test/intl/number-format/unified/percent.js b/deps/v8/test/intl/number-format/unified/percent.js
new file mode 100644
index 0000000000..9918210ec7
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/percent.js
@@ -0,0 +1,65 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+//
+// Test the handling of "percent" w/ "unit"
+
+let nf1 = new Intl.NumberFormat("en-US", {
+ style: "percent",
+ unitDisplay: "long" // Read, but ignored.
+});
+
+let resolved1 = nf1.resolvedOptions();
+assertEquals("percent", resolved1.style);
+assertEquals(undefined, resolved1.unit);
+assertEquals(undefined, resolved1.unitDisplay);
+
+let parts1 = nf1.formatToParts(100);
+assertEquals(4, parts1.length);
+assertEquals("integer", parts1[0].type);
+assertEquals("10", parts1[0].value);
+assertEquals("group", parts1[1].type);
+assertEquals(",", parts1[1].value);
+assertEquals("integer", parts1[2].type);
+assertEquals("000", parts1[2].value);
+assertEquals("percentSign", parts1[3].type);
+assertEquals("%", parts1[3].value);
+
+let nf2 = new Intl.NumberFormat("en-US", {
+ style: "unit",
+ unit: "percent",
+ unitDisplay: "long" // This is OK
+});
+
+let resolved2 = nf2.resolvedOptions();
+assertEquals("unit", resolved2.style);
+assertEquals("percent", resolved2.unit);
+assertEquals("long", resolved2.unitDisplay);
+
+let parts2 = nf2.formatToParts(100);
+assertEquals(3, parts2.length);
+assertEquals("integer", parts2[0].type);
+assertEquals("100", parts2[0].value);
+assertEquals("literal", parts2[1].type);
+assertEquals(" ", parts2[1].value);
+assertEquals("unit", parts2[2].type);
+assertEquals("percent", parts2[2].value);
+
+let nf3 = new Intl.NumberFormat("en-US", {
+ style: "unit",
+ unit: "percent"
+});
+
+let resolved3 = nf3.resolvedOptions();
+assertEquals("unit", resolved3.style);
+assertEquals("percent", resolved3.unit);
+assertEquals("short", resolved3.unitDisplay);
+
+let parts3 = nf3.formatToParts(100);
+assertEquals(2, parts3.length);
+assertEquals("integer", parts3[0].type);
+assertEquals("100", parts3[0].value);
+assertEquals("unit", parts3[1].type);
+assertEquals("%", parts3[1].value);
diff --git a/deps/v8/test/intl/number-format/unified/sign-display.js b/deps/v8/test/intl/number-format/unified/sign-display.js
index cdd7de0061..c71f57e67c 100644
--- a/deps/v8/test/intl/number-format/unified/sign-display.js
+++ b/deps/v8/test/intl/number-format/unified/sign-display.js
@@ -15,7 +15,7 @@ const testData = [
["auto", "-123", "-0", "0", "123"],
["always", "-123", "-0", "+0", "+123"],
["never", "123", "0", "0", "123"],
- ["except-zero", "-123", "-0", "0", "+123"],
+ ["exceptZero", "-123", "-0", "0", "+123"],
];
for (const [signDisplay, neg, negZero, zero, pos] of testData) {
diff --git a/deps/v8/test/intl/number-format/unified/style-unit.js b/deps/v8/test/intl/number-format/unified/style-unit.js
index af35618fda..b88af0fb76 100644
--- a/deps/v8/test/intl/number-format/unified/style-unit.js
+++ b/deps/v8/test/intl/number-format/unified/style-unit.js
@@ -19,7 +19,7 @@ nf = new Intl.NumberFormat("en", {style: 'currency', currency: 'TWD'});
assertEquals(undefined, nf.resolvedOptions().unit);
nf = new Intl.NumberFormat("en", {style: 'percent'});
-assertEquals('percent', nf.resolvedOptions().unit);
+assertEquals(undefined, nf.resolvedOptions().unit);
assertThrows(() => new Intl.NumberFormat("en", {style: 'unit'}), TypeError);
diff --git a/deps/v8/test/intl/number-format/unified/unit-display.js b/deps/v8/test/intl/number-format/unified/unit-display.js
index eeb2c69ece..d4d814d70e 100644
--- a/deps/v8/test/intl/number-format/unified/unit-display.js
+++ b/deps/v8/test/intl/number-format/unified/unit-display.js
@@ -21,7 +21,7 @@ nf = new Intl.NumberFormat("en", {style: 'unit', unit: "meter"});
assertEquals("short", nf.resolvedOptions().unitDisplay);
nf = new Intl.NumberFormat("en", {style: 'percent'});
-assertEquals("short", nf.resolvedOptions().unitDisplay);
+assertEquals(undefined, nf.resolvedOptions().unitDisplay);
const testData = [
["short"],
diff --git a/deps/v8/test/intl/regress-8866.js b/deps/v8/test/intl/regress-8866.js
new file mode 100644
index 0000000000..ed5b4a0ce4
--- /dev/null
+++ b/deps/v8/test/intl/regress-8866.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Regression test to ensure Intl.PluralRules respect minimumFractionDigits
+
+assertEquals('other',
+ new Intl.PluralRules("en", {minimumFractionDigits: 2}).select(1));
+
+assertEquals('zero',
+ new Intl.PluralRules("lv", {minimumFractionDigits: 2}).select(1.13));
diff --git a/deps/v8/test/intl/regress-9312.js b/deps/v8/test/intl/regress-9312.js
new file mode 100644
index 0000000000..7bcd241001
--- /dev/null
+++ b/deps/v8/test/intl/regress-9312.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test the locales in src/third_party/icu/source/data/locales/ which
+// has %%ALIAS output the same as what it alias to instead of root.
+const aliases = new Map([
+ ['sh', 'sr-Latn'],
+ ['in', 'id'],
+ ['mo', 'ro'],
+ ['iw', 'he'],
+ ['no', 'nb'],
+ ['tl', 'fil'],
+ ['iw-IL', 'he-IL'],
+ ['sr-CS', 'sr-Cyrl-RS'],
+]);
+
+const date = new Date();
+const number = 123456789.123456789;
+for (const [from, to] of aliases) {
+ const fromDTF = new Intl.DateTimeFormat(from, {month: 'long', weekday: 'long'});
+ const toDTF = new Intl.DateTimeFormat(to, {month: 'long', weekday: 'long'});
+ for (let m = 0; m < 12; m++) {
+ date.setMonth(m);
+ assertEquals(fromDTF.format(date), toDTF.format(date),
+ `Expected to see the same output from "${from}" and "${to}".`);
+ }
+ const fromNF = new Intl.NumberFormat(from);
+ const toNF = new Intl.NumberFormat(to);
+ assertEquals(fromNF.format(number), toNF.format(number),
+ `Expected to see the same output from "${from}" and "${to}".`);
+}
diff --git a/deps/v8/test/intl/regress-9408.js b/deps/v8/test/intl/regress-9408.js
new file mode 100644
index 0000000000..88883981f3
--- /dev/null
+++ b/deps/v8/test/intl/regress-9408.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+// Test precision of compact-rounding
+
+let compact = {notation: "compact"};
+assertEquals("1.2K", (1234).toLocaleString("en", compact));
+assertEquals("12K", (12345).toLocaleString("en", compact));
+assertEquals("123K", (123456).toLocaleString("en", compact));
+assertEquals("1.2M", (1234567).toLocaleString("en", compact));
+
+assertEquals("54K", (54321).toLocaleString("en", compact));
+
+let compact_no_rounding = {notation: "compact", minimumFractionDigits: 0};
+assertEquals("1.234K", (1234).toLocaleString("en", compact_no_rounding));
+assertEquals("12.345K", (12345).toLocaleString("en", compact_no_rounding));
+assertEquals("123.456K", (123456).toLocaleString("en", compact_no_rounding));
+assertEquals("1.235M", (1234567).toLocaleString("en", compact_no_rounding));
+
+assertEquals("54.321K", (54321).toLocaleString("en", compact_no_rounding));
+
+let fmt = new Intl.NumberFormat("en", compact_no_rounding);
+assertEquals("1", fmt.format(1));
+assertEquals("1K", fmt.format(1000));
+assertEquals("1.234K", fmt.format(1234));
+assertEquals("1.25K", fmt.format(1250));
diff --git a/deps/v8/test/intl/regress-9513.js b/deps/v8/test/intl/regress-9513.js
new file mode 100644
index 0000000000..e23b5cf77e
--- /dev/null
+++ b/deps/v8/test/intl/regress-9513.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+// Test Infinity, -Infinity, NaN won't crash with any notation in formatToParts.
+
+let validNotations = [
+ "standard",
+ "compact",
+ "engineering",
+ "scientific",
+];
+
+let tests = [
+ 123,
+ Infinity,
+ -Infinity,
+ NaN
+];
+
+for (const notation of validNotations) {
+ let nf = new Intl.NumberFormat("en", {notation});
+ for (const test of tests) {
+ assertDoesNotThrow(() => nf.format(test));
+ assertDoesNotThrow(() => nf.formatToParts(test));
+ }
+}
diff --git a/deps/v8/test/intl/relative-time-format/format-en.js b/deps/v8/test/intl/relative-time-format/format-en.js
index a365749f0a..d802902587 100644
--- a/deps/v8/test/intl/relative-time-format/format-en.js
+++ b/deps/v8/test/intl/relative-time-format/format-en.js
@@ -23,8 +23,8 @@ assertEquals('in 345 seconds', longAuto.format(345, 'second'));
assertEquals('3 minutes ago', longAuto.format(-3, 'minute'));
assertEquals('2 minutes ago', longAuto.format(-2, 'minute'));
assertEquals('1 minute ago', longAuto.format(-1, 'minute'));
-assertEquals('in 0 minutes', longAuto.format(0, 'minute'));
-assertEquals('0 minutes ago', longAuto.format(-0, 'minute'));
+assertEquals('this minute', longAuto.format(0, 'minute'));
+assertEquals('this minute', longAuto.format(-0, 'minute'));
assertEquals('in 1 minute', longAuto.format(1, 'minute'));
assertEquals('in 2 minutes', longAuto.format(2, 'minute'));
assertEquals('in 345 minutes', longAuto.format(345, 'minute'));
@@ -32,8 +32,8 @@ assertEquals('in 345 minutes', longAuto.format(345, 'minute'));
assertEquals('3 hours ago', longAuto.format(-3, 'hour'));
assertEquals('2 hours ago', longAuto.format(-2, 'hour'));
assertEquals('1 hour ago', longAuto.format(-1, 'hour'));
-assertEquals('in 0 hours', longAuto.format(0, 'hour'));
-assertEquals('0 hours ago', longAuto.format(-0, 'hour'));
+assertEquals('this hour', longAuto.format(0, 'hour'));
+assertEquals('this hour', longAuto.format(-0, 'hour'));
assertEquals('in 1 hour', longAuto.format(1, 'hour'));
assertEquals('in 2 hours', longAuto.format(2, 'hour'));
assertEquals('in 345 hours', longAuto.format(345, 'hour'));
@@ -98,8 +98,8 @@ assertEquals('in 345 sec.', shortAuto.format(345, 'second'));
assertEquals('3 min. ago', shortAuto.format(-3, 'minute'));
assertEquals('2 min. ago', shortAuto.format(-2, 'minute'));
assertEquals('1 min. ago', shortAuto.format(-1, 'minute'));
-assertEquals('in 0 min.', shortAuto.format(0, 'minute'));
-assertEquals('0 min. ago', shortAuto.format(-0, 'minute'));
+assertEquals('this minute', shortAuto.format(0, 'minute'));
+assertEquals('this minute', shortAuto.format(-0, 'minute'));
assertEquals('in 1 min.', shortAuto.format(1, 'minute'));
assertEquals('in 2 min.', shortAuto.format(2, 'minute'));
assertEquals('in 345 min.', shortAuto.format(345, 'minute'));
@@ -107,8 +107,8 @@ assertEquals('in 345 min.', shortAuto.format(345, 'minute'));
assertEquals('3 hr. ago', shortAuto.format(-3, 'hour'));
assertEquals('2 hr. ago', shortAuto.format(-2, 'hour'));
assertEquals('1 hr. ago', shortAuto.format(-1, 'hour'));
-assertEquals('in 0 hr.', shortAuto.format(0, 'hour'));
-assertEquals('0 hr. ago', shortAuto.format(-0, 'hour'));
+assertEquals('this hour', shortAuto.format(0, 'hour'));
+assertEquals('this hour', shortAuto.format(-0, 'hour'));
assertEquals('in 1 hr.', shortAuto.format(1, 'hour'));
assertEquals('in 2 hr.', shortAuto.format(2, 'hour'));
assertEquals('in 345 hr.', shortAuto.format(345, 'hour'));
@@ -174,8 +174,8 @@ assertEquals('in 345 sec.', narrowAuto.format(345, 'second'));
assertEquals('3 min. ago', narrowAuto.format(-3, 'minute'));
assertEquals('2 min. ago', narrowAuto.format(-2, 'minute'));
assertEquals('1 min. ago', narrowAuto.format(-1, 'minute'));
-assertEquals('in 0 min.', narrowAuto.format(0, 'minute'));
-assertEquals('0 min. ago', narrowAuto.format(-0, 'minute'));
+assertEquals('this minute', narrowAuto.format(0, 'minute'));
+assertEquals('this minute', narrowAuto.format(-0, 'minute'));
assertEquals('in 1 min.', narrowAuto.format(1, 'minute'));
assertEquals('in 2 min.', narrowAuto.format(2, 'minute'));
assertEquals('in 345 min.', narrowAuto.format(345, 'minute'));
@@ -183,8 +183,8 @@ assertEquals('in 345 min.', narrowAuto.format(345, 'minute'));
assertEquals('3 hr. ago', narrowAuto.format(-3, 'hour'));
assertEquals('2 hr. ago', narrowAuto.format(-2, 'hour'));
assertEquals('1 hr. ago', narrowAuto.format(-1, 'hour'));
-assertEquals('in 0 hr.', narrowAuto.format(0, 'hour'));
-assertEquals('0 hr. ago', narrowAuto.format(-0, 'hour'));
+assertEquals('this hour', narrowAuto.format(0, 'hour'));
+assertEquals('this hour', narrowAuto.format(-0, 'hour'));
assertEquals('in 1 hr.', narrowAuto.format(1, 'hour'));
assertEquals('in 2 hr.', narrowAuto.format(2, 'hour'));
assertEquals('in 345 hr.', narrowAuto.format(345, 'hour'));
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 66da4c77b5..f04bf19c8c 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -58,6 +58,9 @@ class TestCase(testcase.D8TestCase):
def _parse_source_env(self, source):
env_match = ENV_PATTERN.search(source)
+ # https://crbug.com/v8/8845
+ if 'LC_ALL' in os.environ:
+ del os.environ['LC_ALL']
env = {}
if env_match:
for env_pair in env_match.group(1).strip().split():
diff --git a/deps/v8/test/js-perf-test/BigInt/add.js b/deps/v8/test/js-perf-test/BigInt/add.js
new file mode 100644
index 0000000000..f0397f2393
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BigInt/add.js
@@ -0,0 +1,142 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+load('bigint-util.js');
+
+let initial_sum = 0n;
+let a = 0n;
+let random_bigints = [];
+
+// This dummy ensures that the feedback for benchmark.run() in the Measure
+// function from base.js is not monomorphic, thereby preventing the benchmarks
+// below from being inlined. This ensures consistent behavior and comparable
+// results.
+new BenchmarkSuite('Prevent-Inline-Dummy', [10000], [
+ new Benchmark('Prevent-Inline-Dummy', true, false, 0, () => {})
+]);
+
+
+new BenchmarkSuite('Add-TypeError', [10000], [
+ new Benchmark('Add-TypeError', true, false, 0, TestAddTypeError,
+ SetUpTestAddTypeError)
+]);
+
+
+new BenchmarkSuite('Add-Zero', [1000], [
+ new Benchmark('Add-Zero', true, false, 0, TestAddZero, SetUpTestAddZero)
+]);
+
+
+BITS_CASES.forEach((d) => {
+ new BenchmarkSuite(`Add-SameSign-${d}`, [1000], [
+ new Benchmark(`Add-SameSign-${d}`, true, false, 0, TestAddSameSign,
+ () => SetUpTestAddSameSign(d))
+ ]);
+});
+
+
+BITS_CASES.forEach((d) => {
+ new BenchmarkSuite(`Add-DifferentSign-${d}`, [1000], [
+ new Benchmark(`Add-DifferentSign-${d}`, true, false, 0,
+ TestAddDifferentSign, () => SetUpTestAddDifferentSign(d))
+ ]);
+});
+
+
+new BenchmarkSuite('Add-Random', [1000], [
+ new Benchmark('Add-Random', true, false, 0, TestAddRandom,
+ SetUpTestAddRandom)
+]);
+
+
+function SetUpTestAddTypeError() {
+ initial_sum = 42n;
+}
+
+function TestAddTypeError() {
+ let sum = initial_sum;
+ for (let i = 0; i < SLOW_TEST_ITERATIONS; ++i) {
+ try {
+ sum = 0 + sum;
+ }
+ catch(e) {
+ }
+ }
+ return sum;
+}
+
+
+function SetUpTestAddZero() {
+ initial_sum = 42n;
+}
+
+function TestAddZero() {
+ let sum = initial_sum;
+
+ for (let i = 0; i < TEST_ITERATIONS; ++i) {
+ sum = 0n + sum;
+ }
+
+ return sum;
+}
+
+
+function SetUpTestAddSameSign(bits) {
+ // Add two small random positive values to make sure the sum does not grow
+ // in digits.
+ initial_sum = SmallRandomBigIntWithBits(bits);
+ a = SmallRandomBigIntWithBits(bits);
+}
+
+function TestAddSameSign() {
+ let sum = initial_sum;
+
+ for (let i = 0; i < TEST_ITERATIONS; ++i) {
+ sum = a + sum;
+ }
+
+ return sum;
+}
+
+
+function SetUpTestAddDifferentSign(bits) {
+ // Add a small random negative value to a large positive one to make sure the
+ // sum does not shrink in digits.
+ initial_sum = MaxBigIntWithBits(bits);
+ a = -SmallRandomBigIntWithBits(bits);
+}
+
+function TestAddDifferentSign() {
+ let sum = initial_sum;
+
+ for (let i = 0; i < TEST_ITERATIONS; ++i) {
+ sum = a + sum;
+ }
+
+ return sum;
+}
+
+
+function SetUpTestAddRandom() {
+ random_bigints = [];
+ // RandomBigIntWithBits needs multiples of 4 bits.
+ const max_in_4bits = RANDOM_BIGINTS_MAX_BITS / 4;
+ for (let i = 0; i < TEST_ITERATIONS; ++i) {
+ const bits = Math.floor(Math.random() * max_in_4bits) * 4;
+ const bigint = RandomBigIntWithBits(bits);
+ random_bigints.push(Math.random() < 0.5 ? -bigint : bigint);
+ }
+}
+
+function TestAddRandom() {
+ let sum = 0n;
+
+ for (let i = 0; i < TEST_ITERATIONS; ++i) {
+ sum = random_bigints[i] + sum;
+ }
+
+ return sum;
+}
diff --git a/deps/v8/test/js-perf-test/BigInt/as-uint-n.js b/deps/v8/test/js-perf-test/BigInt/as-uint-n.js
new file mode 100644
index 0000000000..f69d4978db
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BigInt/as-uint-n.js
@@ -0,0 +1,86 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+load('bigint-util.js');
+
+// This dummy ensures that the feedback for benchmark.run() in the Measure
+// function from base.js is not monomorphic, thereby preventing the benchmarks
+// below from being inlined. This ensures consistent behavior and comparable
+// results.
+new BenchmarkSuite('Prevent-Inline-Dummy', [10000], [
+ new Benchmark('Prevent-Inline-Dummy', true, false, 0, () => {})
+]);
+
+
+[32, 64, 128, 256].forEach((d) => {
+ new BenchmarkSuite(`AsUint64-${d}`, [1000], [
+ new Benchmark(`AsUint64-${d}`, true, false, 0, TestAsUint64,
+ () => SetUpTestAsUintN(d))
+ ]);
+});
+
+
+[32, 64, 128, 256].forEach((d) => {
+ new BenchmarkSuite(`AsUint32-${d}`, [1000], [
+ new Benchmark(`AsUint32-${d}`, true, false, 0, TestAsUint32,
+ () => SetUpTestAsUintN(d))
+ ]);
+});
+
+
+[32, 64, 128, 256].forEach((d) => {
+ new BenchmarkSuite(`AsUint8-${d}`, [1000], [
+ new Benchmark(`AsUint8-${d}`, true, false, 0, TestAsUint8,
+ () => SetUpTestAsUintN(d))
+ ]);
+});
+
+
+function SetUpTestAsUintN(d) {
+ random_bigints = [
+ RandomBigIntWithBits(d),
+ RandomBigIntWithBits(d),
+ RandomBigIntWithBits(d),
+ RandomBigIntWithBits(d),
+ RandomBigIntWithBits(d),
+ RandomBigIntWithBits(d),
+ RandomBigIntWithBits(d),
+ RandomBigIntWithBits(d)
+ ];
+}
+
+
+function TestAsUint64() {
+ let result = 0n;
+
+ for (let i = 0; i < TEST_ITERATIONS; ++i) {
+ result = BigInt.asUintN(64, random_bigints[i % 8]);
+ }
+
+ return result;
+}
+
+
+function TestAsUint32() {
+ let result = 0n;
+
+ for (let i = 0; i < TEST_ITERATIONS; ++i) {
+ result = BigInt.asUintN(32, random_bigints[i % 8]);
+ }
+
+ return result;
+}
+
+
+function TestAsUint8() {
+ let result = 0n;
+
+ for (let i = 0; i < TEST_ITERATIONS; ++i) {
+ result = BigInt.asUintN(8, random_bigints[i % 8]);
+ }
+
+ return result;
+}
diff --git a/deps/v8/test/js-perf-test/BigInt/bigint-util.js b/deps/v8/test/js-perf-test/BigInt/bigint-util.js
new file mode 100644
index 0000000000..7e426060c1
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BigInt/bigint-util.js
@@ -0,0 +1,65 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+
+function RandomHexDigit(allow_zero) {
+ const chars = allow_zero ? '0123456789ABCDEF' : '123456789ABCDEF';
+ return chars.charAt(Math.floor(Math.random() * chars.length));
+}
+
+
+// Some benchmarks shall compute sums but the result must not grow in terms
+// of digits. These can use "small" BigInts, which are BigInts where the most
+// significant bits of a digit are 0, so it does not overflow.
+function SmallRandomBigIntWithBits(bits) {
+ console.assert(bits % 4 === 0);
+ if (bits <= 0) {
+ return 0n;
+ }
+
+ // Make sure it does not start with four 0-bits.
+ let s = "0x" + RandomHexDigit(false);
+ bits -= 4;
+ // Digits are at least 32 bits long, so we round down to the next smaller
+ // multiple of 32 to keep the most significant digit small.
+ bits = Math.floor(bits / 32) * 32;
+ for (; bits > 0; bits -= 4) {
+ s += RandomHexDigit(true);
+ }
+ return BigInt(s);
+}
+
+
+function MaxBigIntWithBits(bits) {
+ console.assert(bits % 4 === 0);
+ if (bits <= 0) {
+ return 0n;
+ }
+
+ let s = "0x";
+ for (; bits > 0; bits -= 4) {
+ s += "F";
+ }
+ return BigInt(s);
+}
+
+
+// Generates a random BigInt between 2^(bits-4) and 2^bits-1 (for bits > 0).
+function RandomBigIntWithBits(bits) {
+ console.assert(bits % 4 === 0);
+ if (bits <= 0) {
+ return 0n;
+ }
+
+ // Make sure it does not start with four 0-bits.
+ let s = "0x" + RandomHexDigit(false);
+ bits -= 4;
+ // Randomly generate remaining bits.
+ for (; bits > 0; bits -= 4) {
+ s += RandomHexDigit(true);
+ }
+ return BigInt(s);
+}
diff --git a/deps/v8/test/js-perf-test/BigInt/run.js b/deps/v8/test/js-perf-test/BigInt/run.js
index 8589f20f34..2f2cc30285 100644
--- a/deps/v8/test/js-perf-test/BigInt/run.js
+++ b/deps/v8/test/js-perf-test/BigInt/run.js
@@ -5,7 +5,10 @@
"use strict";
load('../base.js');
+load('test-config.js');
load('to-boolean.js');
+load('add.js');
+load('as-uint-n.js');
var success = true;
diff --git a/deps/v8/test/js-perf-test/BigInt/test-config.js b/deps/v8/test/js-perf-test/BigInt/test-config.js
new file mode 100644
index 0000000000..8d77e70431
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BigInt/test-config.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+const TEST_ITERATIONS = 1000;
+const SLOW_TEST_ITERATIONS = 50;
+const BITS_CASES = [32, 64, 128, 256, 512, 1024, 2048, 4096, 8192];
+const RANDOM_BIGINTS_MAX_BITS = 64 * 100;
diff --git a/deps/v8/test/js-perf-test/BytecodeHandlers/LdaKeyedProperty.js b/deps/v8/test/js-perf-test/BytecodeHandlers/LdaKeyedProperty.js
new file mode 100644
index 0000000000..b0da481a4c
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BytecodeHandlers/LdaKeyedProperty.js
@@ -0,0 +1,40 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function addBenchmark(name, test) {
+ new BenchmarkSuite(name, [1000],
+ [
+ new Benchmark(name, false, false, 0, test)
+ ]);
+}
+
+function objectLookupStringConstant() {
+ const x = { 1: "foo" };
+
+ for (var i = 0; i < 1000; ++i) {
+ const p = x["1"];
+ }
+}
+
+function objectLookupIndexNumber() {
+ const x = { 1: "foo" };
+ const a = 1;
+
+ for (var i = 0; i < 1000; ++i) {
+ const p = x[a];
+ }
+}
+
+function objectLookupIndexString() {
+ const x = { 1: "foo" };
+ const a = "1";
+
+ for (var i = 0; i < 1000; ++i) {
+ const p = x[a];
+ }
+}
+
+addBenchmark('Object-Lookup-String-Constant', objectLookupStringConstant);
+addBenchmark('Object-Lookup-Index-Number', objectLookupIndexNumber);
+addBenchmark('Object-Lookup-Index-String', objectLookupIndexString);
diff --git a/deps/v8/test/js-perf-test/BytecodeHandlers/LdaNamedProperty.js b/deps/v8/test/js-perf-test/BytecodeHandlers/LdaNamedProperty.js
new file mode 100644
index 0000000000..c924db84c6
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BytecodeHandlers/LdaNamedProperty.js
@@ -0,0 +1,60 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function addBenchmark(name, test) {
+ new BenchmarkSuite(name, [1000],
+ [
+ new Benchmark(name, false, false, 0, test)
+ ]);
+}
+
+addBenchmark('Smi-Value', smiValue);
+addBenchmark('Prototype-Chain-Value', functionOnPrototypeValue);
+
+// TODO(all): might be a good idea to also do with receivers: double, HeapObject
+// with map, HeapObject, tagged, empty getter / accessor.
+// Also, element keyed loads (in another file, probably?) both with strings and
+// numbers for normal, cons,and sliced strings (for named properties).
+// Also, monomorphic vs poly vs mega.
+
+function smiValue() {
+ function constructSmi() {
+ this.smi = 0;
+ }
+ let o = new constructSmi();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi; o.smi;
+ }
+}
+
+function functionOnPrototypeValue() {
+ function objectWithPrototypeChain() {
+ }
+ objectWithPrototypeChain.prototype.__proto__ =
+ {__proto__:{__proto__:{__proto__:{f(){}}}}};
+ let o = new objectWithPrototypeChain();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f; o.f;
+ }
+}
diff --git a/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/arguments.js b/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/arguments.js
new file mode 100644
index 0000000000..2855fd44f8
--- /dev/null
+++ b/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/arguments.js
@@ -0,0 +1,119 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function addBenchmark(name, test) {
+ new BenchmarkSuite(name, [1000],
+ [
+ new Benchmark(name, false, false, 0, test)
+ ]);
+}
+
+addBenchmark('Calls-No-Argument', callsNoArgument);
+addBenchmark('Calls-One-Argument', callsOneArgument);
+addBenchmark('Calls-Six-Arguments', callsSixArguments);
+addBenchmark('Calls-With-Receiver', callsWithReceiver);
+
+function callsNoArgument() {
+ function f() {
+ return 0;
+ }
+
+ for (var i = 0; i < 1000; ++i) {
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ }
+}
+
+function callsOneArgument() {
+ function f(a) {
+ return a;
+ }
+ let z = 0;
+
+ for (var i = 0; i < 1000; ++i) {
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ f(z); f(z); f(z); f(z); f(z); f(z); f(z); f(z);
+ }
+}
+
+function callsSixArguments() {
+ function g(a,b,c,d,e,f) {
+ return c;
+ }
+ let a = 0;
+ let b = 1;
+ let c = 2;
+ let d = 3;
+ let e = 4;
+ let f = 5;
+
+ for (var i = 0; i < 1000; ++i) {
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f); g(a,b,c,d,e,f);
+ }
+}
+
+function callsWithReceiver() {
+ function constructObject() {
+ this.f = function() {
+ return 0;
+ };
+ }
+ let o = new constructObject();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ }
+}
diff --git a/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/locals.js b/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/locals.js
new file mode 100644
index 0000000000..533cd6be0c
--- /dev/null
+++ b/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/locals.js
@@ -0,0 +1,326 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function addBenchmark(name, test) {
+ new BenchmarkSuite(name, [1000],
+ [
+ new Benchmark(name, false, false, 0, test)
+ ]);
+}
+
+addBenchmark('Calls-No-Argument', callsNoArgument);
+addBenchmark('Calls-No-Argument-1-Local', callsNoArgument_OneLocal);
+addBenchmark('Calls-No-Argument-2-Locals', callsNoArgument_2Locals);
+addBenchmark('Calls-No-Argument-3-Locals', callsNoArgument_3Locals);
+addBenchmark('Calls-No-Argument-4-Locals', callsNoArgument_4Locals);
+addBenchmark('Calls-No-Argument-5-Locals', callsNoArgument_5Locals);
+addBenchmark('Calls-No-Argument-10-Locals', callsNoArgument_10Locals);
+addBenchmark('Calls-No-Argument-100-Locals', callsNoArgument_100Locals);
+
+
+function callsNoArgument() {
+ function f() {
+ return 0;
+ }
+
+ for (var i = 0; i < 1000; ++i) {
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ f(); f(); f(); f(); f(); f(); f(); f(); f();
+ }
+}
+
+function callsNoArgument_OneLocal() {
+ function constructObject() {
+ this.f = function() {
+ var x1 = 1;
+ return 0;
+ };
+ }
+ let o = new constructObject();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ }
+}
+
+function callsNoArgument_2Locals() {
+ function constructObject() {
+ this.f = function() {
+ var x1 = 1;
+ var x2 = 2;
+ return 0;
+ };
+ }
+ let o = new constructObject();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ }
+}
+
+function callsNoArgument_3Locals() {
+ function constructObject() {
+ this.f = function() {
+ var x1 = 1;
+ var x2 = 2;
+ var x3 = 3;
+ return 0;
+ };
+ }
+ let o = new constructObject();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ }
+}
+
+function callsNoArgument_4Locals() {
+ function constructObject() {
+ this.f = function() {
+ var x1 = 1;
+ var x2 = 2;
+ var x3 = 3;
+ var x4 = 4;
+ return 0;
+ };
+ }
+ let o = new constructObject();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ }
+}
+
+function callsNoArgument_5Locals() {
+ function constructObject() {
+ this.f = function() {
+ var x1 = 1;
+ var x2 = 2;
+ var x3 = 3;
+ var x4 = 4;
+ var x5 = 5;
+ return 0;
+ };
+ }
+ let o = new constructObject();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ }
+}
+
+function callsNoArgument_10Locals() {
+ function constructObject() {
+ this.f = function() {
+ var x1 = 1;
+ var x2 = 2;
+ var x3 = 3;
+ var x4 = 4;
+ var x5 = 5;
+ var x6 = 6;
+ var x7 = 7;
+ var x8 = 8;
+ var x9 = 9;
+ var x10 = 10;
+ return 0;
+ };
+ }
+ let o = new constructObject();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ }
+}
+
+function callsNoArgument_100Locals() {
+ function constructObject() {
+ this.f = function() {
+ var x1 = 1;
+ var x2 = 2;
+ var x3 = 3;
+ var x4 = 4;
+ var x5 = 5;
+ var x6 = 6;
+ var x7 = 7;
+ var x8 = 8;
+ var x9 = 9;
+ var x10 = 10;
+ var x11 = 11;
+ var x12 = 12;
+ var x13 = 13;
+ var x14 = 14;
+ var x15 = 15;
+ var x16 = 16;
+ var x17 = 17;
+ var x18 = 18;
+ var x19 = 19;
+ var x20 = 20;
+ var x21 = 21;
+ var x22 = 22;
+ var x23 = 23;
+ var x24 = 24;
+ var x25 = 25;
+ var x26 = 26;
+ var x27 = 27;
+ var x28 = 28;
+ var x29 = 29;
+ var x30 = 30;
+ var x31 = 31;
+ var x32 = 32;
+ var x33 = 33;
+ var x34 = 34;
+ var x35 = 35;
+ var x36 = 36;
+ var x37 = 37;
+ var x38 = 38;
+ var x39 = 39;
+ var x40 = 40;
+ var x41 = 41;
+ var x42 = 42;
+ var x43 = 43;
+ var x44 = 44;
+ var x45 = 45;
+ var x46 = 46;
+ var x47 = 47;
+ var x48 = 48;
+ var x49 = 49;
+ var x50 = 50;
+ var x51 = 51;
+ var x52 = 52;
+ var x53 = 53;
+ var x54 = 54;
+ var x55 = 55;
+ var x56 = 56;
+ var x57 = 57;
+ var x58 = 58;
+ var x59 = 59;
+ var x60 = 60;
+ var x61 = 61;
+ var x62 = 62;
+ var x63 = 63;
+ var x64 = 64;
+ var x65 = 65;
+ var x66 = 66;
+ var x67 = 67;
+ var x68 = 68;
+ var x69 = 69;
+ var x70 = 70;
+ var x71 = 71;
+ var x72 = 72;
+ var x73 = 73;
+ var x74 = 74;
+ var x75 = 75;
+ var x76 = 76;
+ var x77 = 77;
+ var x78 = 78;
+ var x79 = 79;
+ var x80 = 80;
+ var x81 = 81;
+ var x82 = 82;
+ var x83 = 83;
+ var x84 = 84;
+ var x85 = 85;
+ var x86 = 86;
+ var x87 = 87;
+ var x88 = 88;
+ var x89 = 89;
+ var x90 = 90;
+ var x91 = 91;
+ var x92 = 92;
+ var x93 = 93;
+ var x94 = 94;
+ var x95 = 95;
+ var x96 = 96;
+ var x97 = 97;
+ var x98 = 98;
+ var x99 = 99;
+ var x100 = 100;
+ return 0;
+ };
+ }
+ let o = new constructObject();
+
+ for (var i = 0; i < 1000; ++i) {
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f(); o.f();
+ }
+}
diff --git a/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/run.js b/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/run.js
new file mode 100644
index 0000000000..bd792c70c3
--- /dev/null
+++ b/deps/v8/test/js-perf-test/InterpreterEntryTrampoline/run.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('../base.js');
+load(arguments[0] + '.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-InterpreterEntryTrampoline(Score): ' + result);
+}
+
+function PrintStep(name) {}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError,
+ NotifyStep: PrintStep });
diff --git a/deps/v8/test/js-perf-test/JSTests1.json b/deps/v8/test/js-perf-test/JSTests1.json
index afec057821..9ac44cdfb8 100644
--- a/deps/v8/test/js-perf-test/JSTests1.json
+++ b/deps/v8/test/js-perf-test/JSTests1.json
@@ -50,14 +50,51 @@
"path": ["BigInt"],
"main": "run.js",
"resources": [
- "to-boolean.js"
+ "bigint-util.js",
+ "test-config.js",
+ "to-boolean.js",
+ "add.js",
+ "as-uint-n.js"
],
"results_regexp": "^%s\\-BigInt\\(Score\\): (.+)$",
"tests": [
{ "name": "BigInt-ToBoolean" },
{ "name": "BigInt-BooleanConstructor" },
- { "name": "BigInt-NewBooleanConstructor" }
- ]
+ { "name": "BigInt-NewBooleanConstructor" },
+ { "name": "Add-TypeError" },
+ { "name": "Add-Zero" },
+ { "name": "Add-SameSign-32" },
+ { "name": "Add-DifferentSign-32" },
+ { "name": "Add-SameSign-64" },
+ { "name": "Add-DifferentSign-64" },
+ { "name": "Add-SameSign-128" },
+ { "name": "Add-DifferentSign-128" },
+ { "name": "Add-SameSign-256" },
+ { "name": "Add-DifferentSign-256" },
+ { "name": "Add-SameSign-512" },
+ { "name": "Add-DifferentSign-512" },
+ { "name": "Add-SameSign-1024" },
+ { "name": "Add-DifferentSign-1024" },
+ { "name": "Add-SameSign-2048" },
+ { "name": "Add-DifferentSign-2048" },
+ { "name": "Add-SameSign-4096" },
+ { "name": "Add-DifferentSign-4096" },
+ { "name": "Add-SameSign-8192" },
+ { "name": "Add-DifferentSign-8192" },
+ { "name": "Add-Random" },
+ { "name": "AsUint64-32" },
+ { "name": "AsUint64-64" },
+ { "name": "AsUint64-128" },
+ { "name": "AsUint64-256" },
+ { "name": "AsUint32-32" },
+ { "name": "AsUint32-64" },
+ { "name": "AsUint32-128" },
+ { "name": "AsUint32-256" },
+ { "name": "AsUint8-32" },
+ { "name": "AsUint8-64" },
+ { "name": "AsUint8-128" },
+ { "name": "AsUint8-256" }
+ ]
},
{
"name": "BigInt-Jitless",
diff --git a/deps/v8/test/js-perf-test/JSTests3.json b/deps/v8/test/js-perf-test/JSTests3.json
index d9a2735d04..598e9fc6d1 100644
--- a/deps/v8/test/js-perf-test/JSTests3.json
+++ b/deps/v8/test/js-perf-test/JSTests3.json
@@ -348,6 +348,66 @@
{"name": "Smi-Constant-ShiftRight"},
{"name": "Smi-Constant-ShiftRightLogical"}
]
+ },
+ {
+ "name": "LdaNamedProperty",
+ "main": "run.js",
+ "resources": [ "LdaNamedProperty.js" ],
+ "test_flags": [ "LdaNamedProperty" ],
+ "results_regexp": "^%s\\-BytecodeHandler\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Smi-Value"},
+ {"name": "Prototype-Chain-Value"}
+ ]
+ },
+ {
+ "name": "LdaKeyedProperty",
+ "main": "run.js",
+ "resources": [ "LdaKeyedProperty.js" ],
+ "test_flags": [ "LdaKeyedProperty" ],
+ "results_regexp": "^%s\\-BytecodeHandler\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Object-Lookup-String-Constant"},
+ {"name": "Object-Lookup-Index-Number"},
+ {"name": "Object-Lookup-Index-String"}
+ ]
+ }
+ ]
+ },
+ {
+ "name": "InterpreterEntryTrampoline",
+ "path": ["InterpreterEntryTrampoline"],
+ "flags": ["--no-opt"],
+ "tests": [
+ {
+ "name": "Arguments",
+ "main": "run.js",
+ "resources": [ "arguments.js" ],
+ "test_flags": [ "arguments" ],
+ "results_regexp": "^%s\\-InterpreterEntryTrampoline\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Calls-No-Argument"},
+ {"name": "Calls-One-Argument"},
+ {"name": "Calls-Six-Arguments"},
+ {"name": "Calls-With-Receiver"}
+ ]
+ },
+ {
+ "name": "Locals",
+ "main": "run.js",
+ "resources": [ "locals.js" ],
+ "test_flags": [ "locals" ],
+ "results_regexp": "^%s\\-InterpreterEntryTrampoline\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Calls-No-Argument"},
+ {"name": "Calls-No-Argument-1-Local"},
+ {"name": "Calls-No-Argument-2-Locals"},
+ {"name": "Calls-No-Argument-3-Locals"},
+ {"name": "Calls-No-Argument-4-Locals"},
+ {"name": "Calls-No-Argument-5-Locals"},
+ {"name": "Calls-No-Argument-10-Locals"},
+ {"name": "Calls-No-Argument-100-Locals"}
+ ]
}
]
}
diff --git a/deps/v8/test/js-perf-test/JSTests4.json b/deps/v8/test/js-perf-test/JSTests4.json
index 5d070c0de7..42cbaba720 100644
--- a/deps/v8/test/js-perf-test/JSTests4.json
+++ b/deps/v8/test/js-perf-test/JSTests4.json
@@ -33,7 +33,15 @@
{"name": "SetIndexWithTrap"},
{"name": "SetSymbolWithoutTrap"},
{"name": "SetSymbolWithTrap"},
- {"name": "HasInIdiom"}
+ {"name": "HasInIdiom"},
+ {"name": "IsExtensibleWithoutTrap"},
+ {"name": "IsExtensibleWithTrap"},
+ {"name": "PreventExtensionsWithoutTrap"},
+ {"name": "PreventExtensionsWithTrap"},
+ {"name": "GetPrototypeOfWithoutTrap"},
+ {"name": "GetPrototypeOfWithTrap"},
+ {"name": "SetPrototypeOfWithoutTrap"},
+ {"name": "SetPrototypeOfWithTrap"}
]
},
{
diff --git a/deps/v8/test/js-perf-test/JSTests5.json b/deps/v8/test/js-perf-test/JSTests5.json
index 376a8cae27..66504bf575 100644
--- a/deps/v8/test/js-perf-test/JSTests5.json
+++ b/deps/v8/test/js-perf-test/JSTests5.json
@@ -570,7 +570,10 @@
"resources": [
"array-indexof-includes.js",
"spread-call.js",
- "tagged-template.js"
+ "tagged-template.js",
+ "has-own-property.js",
+ "array-map.js",
+ "array-reduce.js"
],
"results_regexp": "^%s\\-Numbers\\(Score\\): (.+)$",
"tests": [
@@ -580,7 +583,11 @@
{"name": "ArrayIncludes"},
{"name": "ApplySpreadLiteral"},
{"name": "SpreadCall"},
- {"name": "SpreadCallSpreadLiteral"}
+ {"name": "SpreadCallSpreadLiteral"},
+ {"name": "HasOwnProperty"},
+ {"name": "ArrayMap"},
+ {"name": "ArrayReduce"},
+ {"name": "ArrayReduceRight"}
]
},
{
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/array-map.js b/deps/v8/test/js-perf-test/ObjectFreeze/array-map.js
new file mode 100644
index 0000000000..4a5651c209
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/array-map.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function setupArrayMap(length) {
+ var a = new Array(length);
+ for (var i=0;i<length;i++) {
+ a[i] = ''+i;
+ }
+ return Object.freeze(a);
+}
+
+const frozenArrayMap = setupArrayMap(200);
+
+function driverArrayMap(n) {
+ let result = 0;
+ for (var i=0;i<n;i++) {
+ result = frozenArrayMap.map(Number);
+ }
+ return result;
+}
+
+function ArrayMap() {
+ driverArrayMap(1e3);
+}
+
+function ArrayMapWarmUp() {
+ driverArrayMap(1e1);
+ driverArrayMap(1e2);
+}
+
+createSuite('ArrayMap', 10, ArrayMap, ArrayMapWarmUp);
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/array-reduce.js b/deps/v8/test/js-perf-test/ObjectFreeze/array-reduce.js
new file mode 100644
index 0000000000..1acf179e13
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/array-reduce.js
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function setupArrayReduce(length) {
+ let a = new Array(length);
+ for (let i=0;i<length;i++) {
+ a[i] = ''+i;
+ }
+ return Object.freeze(a);
+}
+
+const kArraylength = 200;
+const frozenArrayReduce = setupArrayReduce(kArraylength);
+
+const reducer = (accumulator, currentValue) => accumulator + Number(currentValue);
+
+function driverArrayReduce(n) {
+ let result = 0;
+ for (let i=0;i<n;i++) {
+ result = frozenArrayReduce.reduce(reducer);
+ }
+ return result;
+}
+
+const kIterations = 1e3;
+
+function ArrayReduce() {
+ driverArrayReduce(kIterations);
+}
+
+const kIterationsWarmUp = [1e1, 1e2];
+
+function ArrayReduceWarmUp() {
+ driverArrayReduce(kIterationsWarmUp[0]);
+ driverArrayReduce(kIterationsWarmUp[1]);
+}
+
+const kRun = 1e1;
+createSuite('ArrayReduce', kRun, ArrayReduce, ArrayReduceWarmUp);
+
+function driverArrayReduceRight(n) {
+ let result = 0;
+ for (let i=0;i<n;i++) {
+ result = frozenArrayReduce.reduceRight(reducer);
+ }
+ return result;
+}
+
+function ArrayReduceRight() {
+ driverArrayReduceRight(kIterations);
+}
+
+function ArrayReduceRightWarmUp() {
+ driverArrayReduceRight(kIterationsWarmUp[0]);
+ driverArrayReduceRight(kIterationsWarmUp[1]);
+}
+
+createSuite('ArrayReduceRight', kRun, ArrayReduceRight, ArrayReduceRightWarmUp);
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/has-own-property.js b/deps/v8/test/js-perf-test/ObjectFreeze/has-own-property.js
new file mode 100644
index 0000000000..5f17c64ebf
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/has-own-property.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function driver(n) {
+ let result = 0;
+ for (var i=0;i<n;i++) {
+ result += frozenArray.hasOwnProperty(''+i)==-1?0:1;
+ }
+ return result;
+}
+
+function HasOwnProperty() {
+ driver(1e4);
+}
+
+function HasOwnPropertyWarmUp() {
+ driver(1e1);
+ driver(1e2);
+ driver(1e3);
+}
+
+createSuite('HasOwnProperty', 10, HasOwnProperty, HasOwnPropertyWarmUp);
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/run.js b/deps/v8/test/js-perf-test/ObjectFreeze/run.js
index adc1a475b4..a8e31a8715 100644
--- a/deps/v8/test/js-perf-test/ObjectFreeze/run.js
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/run.js
@@ -5,6 +5,9 @@ load('../base.js');
load('tagged-template.js');
load('array-indexof-includes.js');
load('spread-call.js');
+load('has-own-property.js');
+load('array-map.js');
+load('array-reduce.js');
function PrintResult(name, result) {
console.log(name);
diff --git a/deps/v8/test/js-perf-test/Proxies/proxies.js b/deps/v8/test/js-perf-test/Proxies/proxies.js
index 6d5e808449..cb8167a634 100644
--- a/deps/v8/test/js-perf-test/Proxies/proxies.js
+++ b/deps/v8/test/js-perf-test/Proxies/proxies.js
@@ -506,3 +506,161 @@ newBenchmark("HasInIdiom", {
return result === 20 * SOME_NUMBER;
}
});
+
+// ----------------------------------------------------------------------------
+
+obj = {};
+value = false;
+
+newBenchmark("IsExtensibleWithoutTrap", {
+ setup() {
+ p = new Proxy(obj, {});
+ },
+ run() {
+ for(var i = 0; i < ITERATIONS; i++) {
+ value = Object.isExtensible(p);
+ }
+ return value;
+ },
+ teardown() {
+ return value === true;
+ }
+});
+
+// ----------------------------------------------------------------------------
+
+obj = {};
+value = false;
+
+newBenchmark("IsExtensibleWithTrap", {
+ setup() {
+ p = new Proxy(obj, {
+ isExtensible: function(target) {
+ return true;
+ }
+ });
+ },
+ run() {
+ for(var i = 0; i < ITERATIONS; i++) {
+ value = Object.isExtensible(p);
+ }
+ return value;
+ },
+ teardown() {
+ return value === true;
+ }
+});
+
+// ----------------------------------------------------------------------------
+
+obj = {};
+value = false;
+
+newBenchmark("PreventExtensionsWithoutTrap", {
+ setup() {
+ p = new Proxy(obj, {});
+ },
+ run() {
+ for(var i = 0; i < ITERATIONS; i++) {
+ value = Object.preventExtensions(p);
+ }
+ return value;
+ },
+ teardown() {}
+});
+
+// ----------------------------------------------------------------------------
+
+obj = {};
+value = false;
+
+newBenchmark("PreventExtensionsWithTrap", {
+ setup() {
+ p = new Proxy(obj, {
+ preventExtensions: function(target) {
+ Object.preventExtensions(target);
+ return true;
+ }
+ });
+ },
+ run() {
+ for(var i = 0; i < ITERATIONS; i++) {
+ value = Object.preventExtensions(p);
+ }
+ return value;
+ },
+ teardown() {}
+});
+
+// ----------------------------------------------------------------------------
+
+newBenchmark("GetPrototypeOfWithoutTrap", {
+ setup() {
+ p = new Proxy({}, {});
+ },
+ run() {
+ for(var i = 0; i < ITERATIONS; i++) {
+ value = Object.getPrototypeOf(p);
+ }
+ return value;
+ },
+ teardown() {}
+});
+
+// ----------------------------------------------------------------------------
+
+newBenchmark("GetPrototypeOfWithTrap", {
+ setup() {
+ p = new Proxy({}, {
+ getPrototypeOf: function(target) {
+ return Array.prototype;
+ }
+ });
+ },
+ run() {
+ for(var i = 0; i < ITERATIONS; i++) {
+ value = Object.getPrototypeOf(p);
+ }
+ return value;
+ },
+ teardown() {}
+});
+
+// ----------------------------------------------------------------------------
+
+newBenchmark("SetPrototypeOfWithoutTrap", {
+ setup() {
+ var obj = { x: 1 };
+ obj.__proto__ = {};
+ p = new Proxy(obj, {});
+ },
+ run() {
+ for(var i = 0; i < ITERATIONS; i++) {
+ value = Object.setPrototypeOf(p, [1]);
+ }
+ return value;
+ },
+ teardown() {}
+});
+
+// ----------------------------------------------------------------------------
+
+newBenchmark("SetPrototypeOfWithTrap", {
+ setup() {
+ var obj = { x: 1 };
+ obj.__proto__ = {};
+ p = new Proxy(obj, {
+ setPrototypeOf: function(target, proto) {
+ Object.setPrototypeOf(target, proto);
+ return true;
+ }
+ });
+ },
+ run() {
+ for(var i = 0; i < ITERATIONS; i++) {
+ value = Object.setPrototypeOf(p, [1]);
+ }
+ return value;
+ },
+ teardown() {}
+});
diff --git a/deps/v8/test/js-perf-test/RegExp.json b/deps/v8/test/js-perf-test/RegExp.json
index 33f908d97a..a5ee41b399 100644
--- a/deps/v8/test/js-perf-test/RegExp.json
+++ b/deps/v8/test/js-perf-test/RegExp.json
@@ -24,6 +24,7 @@
"base_test.js",
"base.js",
"case_test.js",
+ "complex_case_test.js",
"ctor.js",
"exec.js",
"flags.js",
@@ -44,6 +45,7 @@
"results_regexp": "^%s\\-RegExp\\(Score\\): (.+)$",
"tests": [
{"name": "CaseInsensitiveTest"},
+ {"name": "ComplexCaseInsensitiveTest"},
{"name": "Ctor"},
{"name": "Exec"},
{"name": "Flags"},
diff --git a/deps/v8/test/js-perf-test/RegExp/RegExpTests.json b/deps/v8/test/js-perf-test/RegExp/RegExpTests.json
index c408ea648f..cea4393a1a 100644
--- a/deps/v8/test/js-perf-test/RegExp/RegExpTests.json
+++ b/deps/v8/test/js-perf-test/RegExp/RegExpTests.json
@@ -14,6 +14,7 @@
"main": "run.js",
"resources": [
"case_test.js",
+ "complex_case_test.js",
"base_ctor.js",
"base_exec.js",
"base_flags.js",
@@ -43,6 +44,7 @@
"results_regexp": "^%s\\-RegExp\\(Score\\): (.+)$",
"tests": [
{"name": "CaseInsensitiveTest"},
+ {"name": "ComplexCaseInsensitiveTest"},
{"name": "Ctor"},
{"name": "Exec"},
{"name": "Flags"},
diff --git a/deps/v8/test/js-perf-test/RegExp/complex_case_test.js b/deps/v8/test/js-perf-test/RegExp/complex_case_test.js
new file mode 100644
index 0000000000..f51a646c7f
--- /dev/null
+++ b/deps/v8/test/js-perf-test/RegExp/complex_case_test.js
@@ -0,0 +1,46 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The following RegExp regression case from
+// https://bugs.chromium.org/p/chromium/issues/detail?id=977003
+let g = "[\\t\\n\\f ]";
+let W = g + "*";
+let h = "[\\ud800-\\udbff][\\udc00-\\udfff]";
+let k = "[\\u0080-\\ud7ff\\ue000-\\ufffd]|" + h;
+let U = "[0-9a-fA-F]{1,6}" + g + "?";
+let E = "(?:" + U + "|[\\u0020-\\u007e\\u0080-\\ud7ff\\ue000\\ufffd]|" + h + ")";
+let m = "\\\\" + E;
+let o = "(?:[\\t\\x21\\x23-\\x26\\x28-\\x5b\\x5d-\\x7e]|" + k + "|" + m + ")";
+let p = '[^\'"\\n\\f\\\\]|\\\\[\\s\\S]';
+let q = '"(?:\'|' + p + ')*"' + '|\'(?:\"|' + p + ')*\'';
+let r = "[-+]?(?:[0-9]+(?:[.][0-9]+)?|[.][0-9]+)";
+let t = "(?:[a-zA-Z_]|" + k + "|" + m + ")";
+let u = "(?:[a-zA-Z0-9_-]|" + k + "|" + m + ")";
+let v = u + "+";
+let I = "-?" + t + u + "*";
+let x = "(?:@?-?" + t + "|#)" + u + "*";
+let y = r + "(?:%|" + I + ")?";
+let z = "url[(]" + W + "(?:" + q + "|" + o + "*)" + W + "[)]";
+let B = "U[+][0-9A-F?]{1,6}(?:-[0-9A-F]{1,6})?";
+let C = "<\!--";
+let F = "-->";
+let S = g + "+";
+let G = "/(?:[*][^*]*[*]+(?:[^/][^*]*[*]+)*/|/[^\\n\\f]*)";
+let J = "(?!url[(])" + I + "[(]";
+let R = "[~|^$*]=";
+let T = '[^"\'\\\\/]|/(?![/*])';
+let V = "\\uFEFF";
+let Y = [V, B, z, J, x, q, y, C, F, S, G, R, T].join("|");
+
+function ComplexGlobalCaseInsensitiveMatch() {
+ // keep the RegExp in the measurement but not string concat nor join.
+ let X = new RegExp(Y, "gi");
+ "abcſABCβκς".match(X);
+ "color:".match(X);
+}
+
+benchmarks = [ [ComplexGlobalCaseInsensitiveMatch, () => {}],
+ ];
+
+createBenchmarkSuite("ComplexCaseInsensitiveTest");
diff --git a/deps/v8/test/js-perf-test/RegExp/run.js b/deps/v8/test/js-perf-test/RegExp/run.js
index 8c96a74cb7..aedb5e9a10 100644
--- a/deps/v8/test/js-perf-test/RegExp/run.js
+++ b/deps/v8/test/js-perf-test/RegExp/run.js
@@ -9,6 +9,7 @@ load('ctor.js');
load('exec.js');
load('flags.js');
load('inline_test.js')
+load('complex_case_test.js');
load('case_test.js');
load('match.js');
load('replace.js');
diff --git a/deps/v8/test/message/fail/arrow-bare-rest-param.out b/deps/v8/test/message/fail/arrow-bare-rest-param.out
index 76a25a455d..18e5cb825b 100644
--- a/deps/v8/test/message/fail/arrow-bare-rest-param.out
+++ b/deps/v8/test/message/fail/arrow-bare-rest-param.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token ...
+*%(basename)s:7: SyntaxError: Unexpected token '...'
...x => 10
^^^
-SyntaxError: Unexpected token ...
+SyntaxError: Unexpected token '...'
diff --git a/deps/v8/test/message/fail/arrow-missing.out b/deps/v8/test/message/fail/arrow-missing.out
index bad6157a0a..1784ada6a5 100644
--- a/deps/v8/test/message/fail/arrow-missing.out
+++ b/deps/v8/test/message/fail/arrow-missing.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token )
+*%(basename)s:7: SyntaxError: Unexpected token ')'
function foo() { return(); }
^
-SyntaxError: Unexpected token )
+SyntaxError: Unexpected token ')'
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-in-module.js b/deps/v8/test/message/fail/class-fields-private-throw-in-module.mjs
index 5e75c72086..21fb882b9d 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-in-module.js
+++ b/deps/v8/test/message/fail/class-fields-private-throw-in-module.mjs
@@ -1,8 +1,6 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
class X {
constructor() {
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-in-module.out b/deps/v8/test/message/fail/class-fields-private-throw-in-module.out
index 2c0df1acef..7fa9da0951 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-in-module.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-in-module.out
@@ -1,4 +1,4 @@
-*%(basename)s:9: SyntaxError: Private field '#x' must be declared in an enclosing class
+*%(basename)s:7: SyntaxError: Private field '#x' must be declared in an enclosing class
this.#x = 1;
^
SyntaxError: Private field '#x' must be declared in an enclosing class \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-methods-private-throw-write.js b/deps/v8/test/message/fail/class-methods-private-throw-write.js
new file mode 100644
index 0000000000..3181fea1b6
--- /dev/null
+++ b/deps/v8/test/message/fail/class-methods-private-throw-write.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+class C {
+ #a() {}
+ constructor() {
+ this.#a = 1;
+ }
+}
+new C;
diff --git a/deps/v8/test/message/fail/class-methods-private-throw-write.out b/deps/v8/test/message/fail/class-methods-private-throw-write.out
new file mode 100644
index 0000000000..2aadad9c3c
--- /dev/null
+++ b/deps/v8/test/message/fail/class-methods-private-throw-write.out
@@ -0,0 +1,6 @@
+*%(basename)s:10: TypeError: Private method '#a' is not writable
+ this.#a = 1;
+ ^
+TypeError: Private method '#a' is not writable
+ at new C (*%(basename)s:10:13)
+ at *%(basename)s:13:1 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-spread-property.out b/deps/v8/test/message/fail/class-spread-property.out
index df15e50262..cfdefc425d 100644
--- a/deps/v8/test/message/fail/class-spread-property.out
+++ b/deps/v8/test/message/fail/class-spread-property.out
@@ -1,4 +1,4 @@
-*%(basename)s:5: SyntaxError: Unexpected token ...
+*%(basename)s:5: SyntaxError: Unexpected token '...'
class C { ...[] }
^^^
-SyntaxError: Unexpected token ... \ No newline at end of file
+SyntaxError: Unexpected token '...'
diff --git a/deps/v8/test/message/fail/export-duplicate-as.js b/deps/v8/test/message/fail/export-duplicate-as.mjs
index 416180b093..1d3dc51853 100644
--- a/deps/v8/test/message/fail/export-duplicate-as.js
+++ b/deps/v8/test/message/fail/export-duplicate-as.mjs
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
var a, b, c;
export { a as c };
diff --git a/deps/v8/test/message/fail/export-duplicate-as.out b/deps/v8/test/message/fail/export-duplicate-as.out
index 729de8a904..42eb9b166d 100644
--- a/deps/v8/test/message/fail/export-duplicate-as.out
+++ b/deps/v8/test/message/fail/export-duplicate-as.out
@@ -1,7 +1,7 @@
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:9: SyntaxError: Duplicate export of 'c'
+*%(basename)s:7: SyntaxError: Duplicate export of 'c'
export { a, b as c, c, b };
^
SyntaxError: Duplicate export of 'c'
diff --git a/deps/v8/test/message/fail/export-duplicate-default.js b/deps/v8/test/message/fail/export-duplicate-default.mjs
index de1a8807c1..d0630ed084 100644
--- a/deps/v8/test/message/fail/export-duplicate-default.js
+++ b/deps/v8/test/message/fail/export-duplicate-default.mjs
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
export default function f() {};
export default 42;
diff --git a/deps/v8/test/message/fail/export-duplicate-default.out b/deps/v8/test/message/fail/export-duplicate-default.out
index 685e289891..8c21693272 100644
--- a/deps/v8/test/message/fail/export-duplicate-default.out
+++ b/deps/v8/test/message/fail/export-duplicate-default.out
@@ -1,7 +1,7 @@
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:9: SyntaxError: Duplicate export of 'default'
+*%(basename)s:7: SyntaxError: Duplicate export of 'default'
export default class C {};
^^^^^^^
SyntaxError: Duplicate export of 'default'
diff --git a/deps/v8/test/message/fail/export-duplicate.js b/deps/v8/test/message/fail/export-duplicate.mjs
index 93011f0c1c..6274f0a1af 100644
--- a/deps/v8/test/message/fail/export-duplicate.js
+++ b/deps/v8/test/message/fail/export-duplicate.mjs
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
var a, b, c;
export { a };
diff --git a/deps/v8/test/message/fail/export-duplicate.out b/deps/v8/test/message/fail/export-duplicate.out
index 9811cb122c..86b4c6a507 100644
--- a/deps/v8/test/message/fail/export-duplicate.out
+++ b/deps/v8/test/message/fail/export-duplicate.out
@@ -1,7 +1,7 @@
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-*%(basename)s:10: SyntaxError: Duplicate export of 'b'
+*%(basename)s:8: SyntaxError: Duplicate export of 'b'
export { b, c };
^
SyntaxError: Duplicate export of 'b'
diff --git a/deps/v8/test/message/fail/formal-parameters-trailing-comma.out b/deps/v8/test/message/fail/formal-parameters-trailing-comma.out
index 5c46552628..9f44ea8573 100644
--- a/deps/v8/test/message/fail/formal-parameters-trailing-comma.out
+++ b/deps/v8/test/message/fail/formal-parameters-trailing-comma.out
@@ -1,4 +1,4 @@
-*%(basename)s:5: SyntaxError: Unexpected token ,
+*%(basename)s:5: SyntaxError: Unexpected token ','
function foo(b, a, a,,) { return a }
^
-SyntaxError: Unexpected token ,
+SyntaxError: Unexpected token ','
diff --git a/deps/v8/test/message/fail/import-as-eval.js b/deps/v8/test/message/fail/import-as-eval.mjs
index 66adc32cbe..9ff6196de0 100644
--- a/deps/v8/test/message/fail/import-as-eval.js
+++ b/deps/v8/test/message/fail/import-as-eval.mjs
@@ -1,7 +1,5 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
import { foo as eval } from "mod";
diff --git a/deps/v8/test/message/fail/import-as-eval.out b/deps/v8/test/message/fail/import-as-eval.out
index 622f7fe9e1..7a48c786fa 100644
--- a/deps/v8/test/message/fail/import-as-eval.out
+++ b/deps/v8/test/message/fail/import-as-eval.out
@@ -1,7 +1,4 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-*%(basename)s:7: SyntaxError: Unexpected eval or arguments in strict mode
+*%(basename)s:5: SyntaxError: Unexpected eval or arguments in strict mode
import { foo as eval } from "mod";
^^^^
SyntaxError: Unexpected eval or arguments in strict mode
diff --git a/deps/v8/test/message/fail/import-as-redeclaration.js b/deps/v8/test/message/fail/import-as-redeclaration.mjs
index 43bf278d1b..6bc4032f74 100644
--- a/deps/v8/test/message/fail/import-as-redeclaration.js
+++ b/deps/v8/test/message/fail/import-as-redeclaration.mjs
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
let foo = 42;
import { bar as foo } from "mod";
diff --git a/deps/v8/test/message/fail/import-as-redeclaration.out b/deps/v8/test/message/fail/import-as-redeclaration.out
index 51c4c032dc..bbdc21c3bf 100644
--- a/deps/v8/test/message/fail/import-as-redeclaration.out
+++ b/deps/v8/test/message/fail/import-as-redeclaration.out
@@ -1,7 +1,4 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-*%(basename)s:8: SyntaxError: Identifier 'foo' has already been declared
+*%(basename)s:6: SyntaxError: Identifier 'foo' has already been declared
import { bar as foo } from "mod";
^^^
SyntaxError: Identifier 'foo' has already been declared
diff --git a/deps/v8/test/message/fail/import-as-reserved-word.js b/deps/v8/test/message/fail/import-as-reserved-word.mjs
index 562699d45f..ba13f8a63f 100644
--- a/deps/v8/test/message/fail/import-as-reserved-word.js
+++ b/deps/v8/test/message/fail/import-as-reserved-word.mjs
@@ -1,7 +1,5 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
import { foo as import } from "mod";
diff --git a/deps/v8/test/message/fail/import-as-reserved-word.out b/deps/v8/test/message/fail/import-as-reserved-word.out
index 1ee8d41c1a..1f75ea9b63 100644
--- a/deps/v8/test/message/fail/import-as-reserved-word.out
+++ b/deps/v8/test/message/fail/import-as-reserved-word.out
@@ -1,7 +1,4 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-*%(basename)s:7: SyntaxError: Unexpected reserved word
+*%(basename)s:5: SyntaxError: Unexpected reserved word
import { foo as import } from "mod";
^^^^^^
SyntaxError: Unexpected reserved word
diff --git a/deps/v8/test/message/fail/import-blah-module.mjs b/deps/v8/test/message/fail/import-blah-module.mjs
new file mode 100644
index 0000000000..3af9956ea7
--- /dev/null
+++ b/deps/v8/test/message/fail/import-blah-module.mjs
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(import{blah})
diff --git a/deps/v8/test/message/fail/import-blah-module.out b/deps/v8/test/message/fail/import-blah-module.out
new file mode 100644
index 0000000000..573a3e3991
--- /dev/null
+++ b/deps/v8/test/message/fail/import-blah-module.out
@@ -0,0 +1,4 @@
+*%(basename)s:5: SyntaxError: Unexpected token '{'
+(import{blah})
+ ^
+SyntaxError: Unexpected token '{'
diff --git a/deps/v8/test/message/fail/import-blah-script.js b/deps/v8/test/message/fail/import-blah-script.js
new file mode 100644
index 0000000000..3af9956ea7
--- /dev/null
+++ b/deps/v8/test/message/fail/import-blah-script.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(import{blah})
diff --git a/deps/v8/test/message/fail/import-blah-script.out b/deps/v8/test/message/fail/import-blah-script.out
new file mode 100644
index 0000000000..a7cc4e88c5
--- /dev/null
+++ b/deps/v8/test/message/fail/import-blah-script.out
@@ -0,0 +1,7 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:5: SyntaxError: Cannot use import statement outside a module
+(import{blah})
+ ^^^^^^
+SyntaxError: Cannot use import statement outside a module
diff --git a/deps/v8/test/message/fail/import-eval.js b/deps/v8/test/message/fail/import-eval.mjs
index 8ab35baef6..b9d6fc8c32 100644
--- a/deps/v8/test/message/fail/import-eval.js
+++ b/deps/v8/test/message/fail/import-eval.mjs
@@ -1,7 +1,5 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
import { eval } from "mod";
diff --git a/deps/v8/test/message/fail/import-eval.out b/deps/v8/test/message/fail/import-eval.out
index 148662a28c..e06f050978 100644
--- a/deps/v8/test/message/fail/import-eval.out
+++ b/deps/v8/test/message/fail/import-eval.out
@@ -1,7 +1,4 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-*%(basename)s:7: SyntaxError: Unexpected eval or arguments in strict mode
+*%(basename)s:5: SyntaxError: Unexpected eval or arguments in strict mode
import { eval } from "mod";
^^^^
SyntaxError: Unexpected eval or arguments in strict mode
diff --git a/deps/v8/test/message/fail/import-redeclaration.js b/deps/v8/test/message/fail/import-redeclaration.mjs
index 27b0cdccef..861a9e6a8d 100644
--- a/deps/v8/test/message/fail/import-redeclaration.js
+++ b/deps/v8/test/message/fail/import-redeclaration.mjs
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
let foo = 42;
import { foo } from "mod";
diff --git a/deps/v8/test/message/fail/import-redeclaration.out b/deps/v8/test/message/fail/import-redeclaration.out
index 641948810f..daa4c93e31 100644
--- a/deps/v8/test/message/fail/import-redeclaration.out
+++ b/deps/v8/test/message/fail/import-redeclaration.out
@@ -1,7 +1,4 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-*%(basename)s:8: SyntaxError: Identifier 'foo' has already been declared
+*%(basename)s:6: SyntaxError: Identifier 'foo' has already been declared
import { foo } from "mod";
^^^
SyntaxError: Identifier 'foo' has already been declared
diff --git a/deps/v8/test/message/fail/import-reserved-word.js b/deps/v8/test/message/fail/import-reserved-word.mjs
index 1fd7ba291e..aabbf65cad 100644
--- a/deps/v8/test/message/fail/import-reserved-word.js
+++ b/deps/v8/test/message/fail/import-reserved-word.mjs
@@ -1,7 +1,5 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
import { import } from "mod";
diff --git a/deps/v8/test/message/fail/import-reserved-word.out b/deps/v8/test/message/fail/import-reserved-word.out
index 5b990e9e59..daaf6a38cd 100644
--- a/deps/v8/test/message/fail/import-reserved-word.out
+++ b/deps/v8/test/message/fail/import-reserved-word.out
@@ -1,7 +1,4 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-*%(basename)s:7: SyntaxError: Unexpected reserved word
+*%(basename)s:5: SyntaxError: Unexpected reserved word
import { import } from "mod";
^^^^^^
SyntaxError: Unexpected reserved word
diff --git a/deps/v8/test/message/fail/import-script.js b/deps/v8/test/message/fail/import-script.js
new file mode 100644
index 0000000000..4964fb650c
--- /dev/null
+++ b/deps/v8/test/message/fail/import-script.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+import { test } from "mod";
diff --git a/deps/v8/test/message/fail/import-script.out b/deps/v8/test/message/fail/import-script.out
new file mode 100644
index 0000000000..d18bf5c727
--- /dev/null
+++ b/deps/v8/test/message/fail/import-script.out
@@ -0,0 +1,7 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:6: SyntaxError: Cannot use import statement outside a module
+import { test } from "mod";
+^^^^^^
+SyntaxError: Cannot use import statement outside a module \ No newline at end of file
diff --git a/deps/v8/test/message/fail/invalid-spread.out b/deps/v8/test/message/fail/invalid-spread.out
index 5694ad6e88..3710f60317 100644
--- a/deps/v8/test/message/fail/invalid-spread.out
+++ b/deps/v8/test/message/fail/invalid-spread.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token ...
+*%(basename)s:7: SyntaxError: Unexpected token '...'
(x, ...y)
^^^
-SyntaxError: Unexpected token ...
+SyntaxError: Unexpected token '...'
diff --git a/deps/v8/test/message/fail/modules-cycle1.js b/deps/v8/test/message/fail/modules-cycle1.mjs
index e3497cace8..49fd3839fd 100644
--- a/deps/v8/test/message/fail/modules-cycle1.js
+++ b/deps/v8/test/message/fail/modules-cycle1.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {a} from "modules-cycle1.js";
+import {a} from "modules-cycle1.mjs";
export {a};
diff --git a/deps/v8/test/message/fail/modules-cycle1.out b/deps/v8/test/message/fail/modules-cycle1.out
index ef6b57f074..d82bed8d0d 100644
--- a/deps/v8/test/message/fail/modules-cycle1.out
+++ b/deps/v8/test/message/fail/modules-cycle1.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a' in 'modules-cycle1.js'
-import {a} from "modules-cycle1.js";
+*%(basename)s:5: SyntaxError: Detected cycle while resolving name 'a' in 'modules-cycle1.mjs'
+import {a} from "modules-cycle1.mjs";
^
-SyntaxError: Detected cycle while resolving name 'a' in 'modules-cycle1.js'
+SyntaxError: Detected cycle while resolving name 'a' in 'modules-cycle1.mjs'
diff --git a/deps/v8/test/message/fail/modules-cycle2.js b/deps/v8/test/message/fail/modules-cycle2.mjs
index 1121c3098f..d4ab1fd16a 100644
--- a/deps/v8/test/message/fail/modules-cycle2.js
+++ b/deps/v8/test/message/fail/modules-cycle2.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {a} from "modules-skip-cycle2.js";
+import {a} from "modules-skip-cycle2.mjs";
export {a as b};
diff --git a/deps/v8/test/message/fail/modules-cycle2.out b/deps/v8/test/message/fail/modules-cycle2.out
index c0d785fe6c..9817de9d78 100644
--- a/deps/v8/test/message/fail/modules-cycle2.out
+++ b/deps/v8/test/message/fail/modules-cycle2.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle2.js'
-import {a} from "modules-skip-cycle2.js";
+*%(basename)s:5: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle2.mjs'
+import {a} from "modules-skip-cycle2.mjs";
^
-SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle2.js'
+SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle2.mjs'
diff --git a/deps/v8/test/message/fail/modules-cycle3.mjs b/deps/v8/test/message/fail/modules-cycle3.mjs
new file mode 100644
index 0000000000..b43d086771
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-cycle3.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export {a as x} from "modules-skip-cycle3.mjs";
+export {b as y} from "modules-skip-cycle3.mjs";
diff --git a/deps/v8/test/message/fail/modules-cycle3.out b/deps/v8/test/message/fail/modules-cycle3.out
index 51bf9078cc..00a97c93a3 100644
--- a/deps/v8/test/message/fail/modules-cycle3.out
+++ b/deps/v8/test/message/fail/modules-cycle3.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.js'
-export {a as x} from "modules-skip-cycle3.js";
+*%(basename)s:5: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.mjs'
+export {a as x} from "modules-skip-cycle3.mjs";
^^^^^^
-SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.js'
+SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.mjs'
diff --git a/deps/v8/test/message/fail/modules-skip-cycle2.js b/deps/v8/test/message/fail/modules-cycle4.mjs
index 8b5ea93468..2ec0fb222b 100644
--- a/deps/v8/test/message/fail/modules-skip-cycle2.js
+++ b/deps/v8/test/message/fail/modules-cycle4.mjs
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export {b as a} from "modules-cycle2.js";
+import {x} from "modules-cycle3.mjs";
diff --git a/deps/v8/test/message/fail/modules-cycle4.out b/deps/v8/test/message/fail/modules-cycle4.out
index 6e27ced3ad..881c4ba6ef 100644
--- a/deps/v8/test/message/fail/modules-cycle4.out
+++ b/deps/v8/test/message/fail/modules-cycle4.out
@@ -1,5 +1,4 @@
-*modules-cycle3.js:7: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.js'
-export {a as x} from "modules-skip-cycle3.js";
+*modules-cycle3.mjs:5: SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.mjs'
+export {a as x} from "modules-skip-cycle3.mjs";
^^^^^^
-SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.js'
-
+SyntaxError: Detected cycle while resolving name 'a' in 'modules-skip-cycle3.mjs'
diff --git a/deps/v8/test/message/fail/modules-cycle5.mjs b/deps/v8/test/message/fail/modules-cycle5.mjs
new file mode 100644
index 0000000000..205922b59c
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-cycle5.mjs
@@ -0,0 +1,6 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "modules-skip-cycle5.mjs";
+export {foo} from "modules-cycle5.mjs";
diff --git a/deps/v8/test/message/fail/modules-cycle5.out b/deps/v8/test/message/fail/modules-cycle5.out
index c97cc8d0a4..dc0aab48f3 100644
--- a/deps/v8/test/message/fail/modules-cycle5.out
+++ b/deps/v8/test/message/fail/modules-cycle5.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: SyntaxError: Detected cycle while resolving name 'foo' in 'modules-cycle5.js'
-export {foo} from "modules-cycle5.js";
+*%(basename)s:6: SyntaxError: Detected cycle while resolving name 'foo' in 'modules-cycle5.mjs'
+export {foo} from "modules-cycle5.mjs";
^^^
-SyntaxError: Detected cycle while resolving name 'foo' in 'modules-cycle5.js'
+SyntaxError: Detected cycle while resolving name 'foo' in 'modules-cycle5.mjs'
diff --git a/deps/v8/test/message/fail/modules-cycle6.js b/deps/v8/test/message/fail/modules-cycle6.js
deleted file mode 100644
index 3043e1d73e..0000000000
--- a/deps/v8/test/message/fail/modules-cycle6.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import "modules-skip-cycle6.js";
-export * from "modules-cycle6.js";
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-4.js b/deps/v8/test/message/fail/modules-cycle6.mjs
index 761153671c..f4935caddc 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-4.js
+++ b/deps/v8/test/message/fail/modules-cycle6.mjs
@@ -2,5 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export * from 'modules-skip-1.js';
-export * from 'modules-skip-3.js';
+import "modules-skip-cycle6.mjs";
+export * from "modules-cycle6.mjs";
diff --git a/deps/v8/test/message/fail/modules-cycle6.out b/deps/v8/test/message/fail/modules-cycle6.out
index ed0e409d5c..7e2513d3a7 100644
--- a/deps/v8/test/message/fail/modules-cycle6.out
+++ b/deps/v8/test/message/fail/modules-cycle6.out
@@ -1,5 +1,4 @@
-*modules-skip-cycle6.js:5: SyntaxError: The requested module 'modules-cycle6.js' does not provide an export named 'foo'
-export {foo} from "modules-cycle6.js";
+*modules-skip-cycle6.mjs:5: SyntaxError: The requested module 'modules-cycle6.mjs' does not provide an export named 'foo'
+export {foo} from "modules-cycle6.mjs";
^^^
-SyntaxError: The requested module 'modules-cycle6.js' does not provide an export named 'foo'
-
+SyntaxError: The requested module 'modules-cycle6.mjs' does not provide an export named 'foo'
diff --git a/deps/v8/test/message/fail/modules-duplicate-export2.js b/deps/v8/test/message/fail/modules-duplicate-export1.mjs
index 3aec862341..26204ce00f 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export2.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export1.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-export let x = 42;
export {x};
+export let x = 42;
diff --git a/deps/v8/test/message/fail/modules-duplicate-export1.out b/deps/v8/test/message/fail/modules-duplicate-export1.out
index 5b2478b3a2..e501d53b74 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export1.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export1.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Duplicate export of 'x'
+*%(basename)s:6: SyntaxError: Duplicate export of 'x'
export let x = 42;
^^^^^^^^^^^
SyntaxError: Duplicate export of 'x'
diff --git a/deps/v8/test/message/fail/modules-duplicate-export1.js b/deps/v8/test/message/fail/modules-duplicate-export2.mjs
index 0ba421a0b4..360a5d051d 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export1.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export2.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-export {x};
export let x = 42;
+export {x};
diff --git a/deps/v8/test/message/fail/modules-duplicate-export2.out b/deps/v8/test/message/fail/modules-duplicate-export2.out
index 17e831886c..5515897796 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export2.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export2.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Duplicate export of 'x'
+*%(basename)s:6: SyntaxError: Duplicate export of 'x'
export {x};
^
SyntaxError: Duplicate export of 'x'
diff --git a/deps/v8/test/message/fail/modules-duplicate-export3.js b/deps/v8/test/message/fail/modules-duplicate-export3.mjs
index 36fc27b6c0..db2508ee6a 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export3.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export3.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
export let x = 42;
let y;
diff --git a/deps/v8/test/message/fail/modules-duplicate-export3.out b/deps/v8/test/message/fail/modules-duplicate-export3.out
index 3913a75c02..73652e600d 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export3.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export3.out
@@ -1,4 +1,4 @@
-*%(basename)s:9: SyntaxError: Duplicate export of 'x'
+*%(basename)s:7: SyntaxError: Duplicate export of 'x'
export {y as x};
^^^^^^
SyntaxError: Duplicate export of 'x'
diff --git a/deps/v8/test/message/fail/modules-duplicate-export4.js b/deps/v8/test/message/fail/modules-duplicate-export4.mjs
index 1bc60dad60..15ce0c1f7c 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export4.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export4.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
export let x = 42;
let y;
diff --git a/deps/v8/test/message/fail/modules-duplicate-export4.out b/deps/v8/test/message/fail/modules-duplicate-export4.out
index 73e0fdcc91..e4514caa99 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export4.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export4.out
@@ -1,4 +1,4 @@
-*%(basename)s:9: SyntaxError: Duplicate export of 'x'
+*%(basename)s:7: SyntaxError: Duplicate export of 'x'
export {y as z, y as x, y};
^^^^^^
SyntaxError: Duplicate export of 'x'
diff --git a/deps/v8/test/message/fail/modules-duplicate-export5.js b/deps/v8/test/message/fail/modules-duplicate-export5.mjs
index 82fbca1e01..e936914eab 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export5.js
+++ b/deps/v8/test/message/fail/modules-duplicate-export5.mjs
@@ -1,9 +1,8 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
+
// Flags: --harmony-namespace-exports
export let foo = 42;
-export * as foo from "./doesnt-even-matter.js";
+export * as foo from "./doesnt-even-matter.mjs";
diff --git a/deps/v8/test/message/fail/modules-duplicate-export5.out b/deps/v8/test/message/fail/modules-duplicate-export5.out
index 85cd76c68c..2efaef10c4 100644
--- a/deps/v8/test/message/fail/modules-duplicate-export5.out
+++ b/deps/v8/test/message/fail/modules-duplicate-export5.out
@@ -1,5 +1,4 @@
-*%(basename)s:9: SyntaxError: Duplicate export of 'foo'
-export * as foo from "./doesnt-even-matter.js";
+*%(basename)s:8: SyntaxError: Duplicate export of 'foo'
+export * as foo from "./doesnt-even-matter.mjs";
^^^
SyntaxError: Duplicate export of 'foo'
-
diff --git a/deps/v8/test/message/fail/modules-export-illformed-class.js b/deps/v8/test/message/fail/modules-export-illformed-class.mjs
index 9a5fc05f63..e770ca6b3a 100644
--- a/deps/v8/test/message/fail/modules-export-illformed-class.js
+++ b/deps/v8/test/message/fail/modules-export-illformed-class.mjs
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
export class foo {[]};
diff --git a/deps/v8/test/message/fail/modules-export-illformed-class.out b/deps/v8/test/message/fail/modules-export-illformed-class.out
index cf26e55134..8fa01b48b7 100644
--- a/deps/v8/test/message/fail/modules-export-illformed-class.out
+++ b/deps/v8/test/message/fail/modules-export-illformed-class.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: Unexpected token ]
+*%(basename)s:5: SyntaxError: Unexpected token ']'
export class foo {[]};
^
-SyntaxError: Unexpected token ]
+SyntaxError: Unexpected token ']'
diff --git a/deps/v8/test/message/fail/modules-import-redeclare1.js b/deps/v8/test/message/fail/modules-import-redeclare1.js
deleted file mode 100644
index 22e1ce35a9..0000000000
--- a/deps/v8/test/message/fail/modules-import-redeclare1.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-export let x = 42;
-import {x} from "modules-import-redeclare1.js";
diff --git a/deps/v8/test/message/fail/modules-import-redeclare1.mjs b/deps/v8/test/message/fail/modules-import-redeclare1.mjs
new file mode 100644
index 0000000000..0a5b70b8f5
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-redeclare1.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export let x = 42;
+import {x} from "modules-import-redeclare1.mjs";
diff --git a/deps/v8/test/message/fail/modules-import-redeclare1.out b/deps/v8/test/message/fail/modules-import-redeclare1.out
index 09b5e8b713..b84da61a9b 100644
--- a/deps/v8/test/message/fail/modules-import-redeclare1.out
+++ b/deps/v8/test/message/fail/modules-import-redeclare1.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: SyntaxError: Identifier 'x' has already been declared
-import {x} from "modules-import-redeclare1.js";
+*%(basename)s:6: SyntaxError: Identifier 'x' has already been declared
+import {x} from "modules-import-redeclare1.mjs";
^
SyntaxError: Identifier 'x' has already been declared
diff --git a/deps/v8/test/message/fail/modules-import-redeclare2.js b/deps/v8/test/message/fail/modules-import-redeclare2.js
deleted file mode 100644
index af7ec2b4d2..0000000000
--- a/deps/v8/test/message/fail/modules-import-redeclare2.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-export let x = 42;
-import {y, x, z} from "modules-import-redeclare1.js";
diff --git a/deps/v8/test/message/fail/modules-import-redeclare2.mjs b/deps/v8/test/message/fail/modules-import-redeclare2.mjs
new file mode 100644
index 0000000000..be7d3dbb3f
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-redeclare2.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export let x = 42;
+import {y, x, z} from "modules-import-redeclare1.mjs";
diff --git a/deps/v8/test/message/fail/modules-import-redeclare2.out b/deps/v8/test/message/fail/modules-import-redeclare2.out
index c972a382e5..32f2c03a9e 100644
--- a/deps/v8/test/message/fail/modules-import-redeclare2.out
+++ b/deps/v8/test/message/fail/modules-import-redeclare2.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: SyntaxError: Identifier 'x' has already been declared
-import {y, x, z} from "modules-import-redeclare1.js";
+*%(basename)s:6: SyntaxError: Identifier 'x' has already been declared
+import {y, x, z} from "modules-import-redeclare1.mjs";
^
SyntaxError: Identifier 'x' has already been declared
diff --git a/deps/v8/test/message/fail/modules-import-redeclare3.js b/deps/v8/test/message/fail/modules-import-redeclare3.js
deleted file mode 100644
index 60ae6f20e5..0000000000
--- a/deps/v8/test/message/fail/modules-import-redeclare3.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import {y, x, z} from "modules-import-redeclare1.js";
-export let x = 42;
diff --git a/deps/v8/test/message/fail/modules-import-redeclare3.mjs b/deps/v8/test/message/fail/modules-import-redeclare3.mjs
new file mode 100644
index 0000000000..4b8fd02cdc
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import-redeclare3.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {y, x, z} from "modules-import-redeclare1.mjs";
+export let x = 42;
diff --git a/deps/v8/test/message/fail/modules-import-redeclare3.out b/deps/v8/test/message/fail/modules-import-redeclare3.out
index 3a1080736d..9775875814 100644
--- a/deps/v8/test/message/fail/modules-import-redeclare3.out
+++ b/deps/v8/test/message/fail/modules-import-redeclare3.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Identifier 'x' has already been declared
+*%(basename)s:6: SyntaxError: Identifier 'x' has already been declared
export let x = 42;
^
SyntaxError: Identifier 'x' has already been declared
diff --git a/deps/v8/test/message/fail/modules-import1.mjs b/deps/v8/test/message/fail/modules-import1.mjs
new file mode 100644
index 0000000000..6ec6ec02e4
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import1.mjs
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {a} from "modules-import1.mjs";
diff --git a/deps/v8/test/message/fail/modules-import1.out b/deps/v8/test/message/fail/modules-import1.out
index ce023698f8..3f8959cc44 100644
--- a/deps/v8/test/message/fail/modules-import1.out
+++ b/deps/v8/test/message/fail/modules-import1.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module 'modules-import1.js' does not provide an export named 'a'
-import {a} from "modules-import1.js";
+*%(basename)s:5: SyntaxError: The requested module 'modules-import1.mjs' does not provide an export named 'a'
+import {a} from "modules-import1.mjs";
^
-SyntaxError: The requested module 'modules-import1.js' does not provide an export named 'a'
+SyntaxError: The requested module 'modules-import1.mjs' does not provide an export named 'a'
diff --git a/deps/v8/test/message/fail/modules-import2.js b/deps/v8/test/message/fail/modules-import2.js
deleted file mode 100644
index 8a719ace18..0000000000
--- a/deps/v8/test/message/fail/modules-import2.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import {a as b} from "modules-import2.js";
diff --git a/deps/v8/test/message/fail/modules-import2.mjs b/deps/v8/test/message/fail/modules-import2.mjs
new file mode 100644
index 0000000000..ed0c7ba811
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import2.mjs
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {a as b} from "modules-import2.mjs";
diff --git a/deps/v8/test/message/fail/modules-import2.out b/deps/v8/test/message/fail/modules-import2.out
index 485419721d..57e4885a1b 100644
--- a/deps/v8/test/message/fail/modules-import2.out
+++ b/deps/v8/test/message/fail/modules-import2.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module 'modules-import2.js' does not provide an export named 'a'
-import {a as b} from "modules-import2.js";
+*%(basename)s:5: SyntaxError: The requested module 'modules-import2.mjs' does not provide an export named 'a'
+import {a as b} from "modules-import2.mjs";
^
-SyntaxError: The requested module 'modules-import2.js' does not provide an export named 'a'
+SyntaxError: The requested module 'modules-import2.mjs' does not provide an export named 'a'
diff --git a/deps/v8/test/message/fail/modules-import3.js b/deps/v8/test/message/fail/modules-import3.js
deleted file mode 100644
index 7e93ff3155..0000000000
--- a/deps/v8/test/message/fail/modules-import3.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import foo from "modules-import3.js";
diff --git a/deps/v8/test/message/fail/modules-import3.mjs b/deps/v8/test/message/fail/modules-import3.mjs
new file mode 100644
index 0000000000..602dee20a0
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import3.mjs
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import foo from "modules-import3.mjs";
diff --git a/deps/v8/test/message/fail/modules-import3.out b/deps/v8/test/message/fail/modules-import3.out
index fd6904d003..e65efda18a 100644
--- a/deps/v8/test/message/fail/modules-import3.out
+++ b/deps/v8/test/message/fail/modules-import3.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module 'modules-import3.js' does not provide an export named 'default'
-import foo from "modules-import3.js";
+*%(basename)s:5: SyntaxError: The requested module 'modules-import3.mjs' does not provide an export named 'default'
+import foo from "modules-import3.mjs";
^^^
-SyntaxError: The requested module 'modules-import3.js' does not provide an export named 'default'
+SyntaxError: The requested module 'modules-import3.mjs' does not provide an export named 'default'
diff --git a/deps/v8/test/message/fail/modules-import4.js b/deps/v8/test/message/fail/modules-import4.js
deleted file mode 100644
index 0410e38ec8..0000000000
--- a/deps/v8/test/message/fail/modules-import4.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import {a as b} from "modules-import4.js";
-export {c as a} from "modules-import4.js";
diff --git a/deps/v8/test/message/fail/modules-import4.mjs b/deps/v8/test/message/fail/modules-import4.mjs
new file mode 100644
index 0000000000..508b76d4e7
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-import4.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {a as b} from "modules-import4.mjs";
+export {c as a} from "modules-import4.mjs";
diff --git a/deps/v8/test/message/fail/modules-import4.out b/deps/v8/test/message/fail/modules-import4.out
index 8193d9e4c8..f574c4c2e5 100644
--- a/deps/v8/test/message/fail/modules-import4.out
+++ b/deps/v8/test/message/fail/modules-import4.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: SyntaxError: The requested module 'modules-import4.js' does not provide an export named 'c'
-export {c as a} from "modules-import4.js";
+*%(basename)s:6: SyntaxError: The requested module 'modules-import4.mjs' does not provide an export named 'c'
+export {c as a} from "modules-import4.mjs";
^^^^^^
-SyntaxError: The requested module 'modules-import4.js' does not provide an export named 'c'
+SyntaxError: The requested module 'modules-import4.mjs' does not provide an export named 'c'
diff --git a/deps/v8/test/message/fail/modules-import5.js b/deps/v8/test/message/fail/modules-import5.js
deleted file mode 100644
index d4cb6559bd..0000000000
--- a/deps/v8/test/message/fail/modules-import5.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import {a as b} from "modules-import5.js";
-export {c as a} from "modules-import5.js";
-import {c} from "modules-import5.js";
diff --git a/deps/v8/test/message/fail/modules-cycle3.js b/deps/v8/test/message/fail/modules-import5.mjs
index 133d203886..4402ae0919 100644
--- a/deps/v8/test/message/fail/modules-cycle3.js
+++ b/deps/v8/test/message/fail/modules-import5.mjs
@@ -1,8 +1,7 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-export {a as x} from "modules-skip-cycle3.js";
-export {b as y} from "modules-skip-cycle3.js";
+import {a as b} from "modules-import5.mjs";
+export {c as a} from "modules-import5.mjs";
+import {c} from "modules-import5.mjs";
diff --git a/deps/v8/test/message/fail/modules-import5.out b/deps/v8/test/message/fail/modules-import5.out
index 3efb69258e..c852ea42e9 100644
--- a/deps/v8/test/message/fail/modules-import5.out
+++ b/deps/v8/test/message/fail/modules-import5.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: SyntaxError: The requested module 'modules-import5.js' does not provide an export named 'c'
-export {c as a} from "modules-import5.js";
+*%(basename)s:6: SyntaxError: The requested module 'modules-import5.mjs' does not provide an export named 'c'
+export {c as a} from "modules-import5.mjs";
^^^^^^
-SyntaxError: The requested module 'modules-import5.js' does not provide an export named 'c'
+SyntaxError: The requested module 'modules-import5.mjs' does not provide an export named 'c'
diff --git a/deps/v8/test/message/fail/modules-import6.js b/deps/v8/test/message/fail/modules-import6.mjs
index f625a342b1..43a5438a5a 100644
--- a/deps/v8/test/message/fail/modules-import6.js
+++ b/deps/v8/test/message/fail/modules-import6.mjs
@@ -1,9 +1,7 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {a as b} from "modules-import6.js";
+import {a as b} from "modules-import6.mjs";
export {c as a};
-import {c} from "modules-import6.js";
+import {c} from "modules-import6.mjs";
diff --git a/deps/v8/test/message/fail/modules-import6.out b/deps/v8/test/message/fail/modules-import6.out
index 43e81135c4..e86370b006 100644
--- a/deps/v8/test/message/fail/modules-import6.out
+++ b/deps/v8/test/message/fail/modules-import6.out
@@ -1,5 +1,5 @@
-*%(basename)s:9: SyntaxError: The requested module 'modules-import6.js' does not provide an export named 'c'
-import {c} from "modules-import6.js";
+*%(basename)s:7: SyntaxError: The requested module 'modules-import6.mjs' does not provide an export named 'c'
+import {c} from "modules-import6.mjs";
^
-SyntaxError: The requested module 'modules-import6.js' does not provide an export named 'c'
+SyntaxError: The requested module 'modules-import6.mjs' does not provide an export named 'c'
diff --git a/deps/v8/test/message/fail/modules-skip-cycle2.mjs b/deps/v8/test/message/fail/modules-skip-cycle2.mjs
new file mode 100644
index 0000000000..16d0c964ef
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-skip-cycle2.mjs
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export {b as a} from "modules-cycle2.mjs";
diff --git a/deps/v8/test/message/fail/modules-skip-cycle3.js b/deps/v8/test/message/fail/modules-skip-cycle3.js
deleted file mode 100644
index a63a660c16..0000000000
--- a/deps/v8/test/message/fail/modules-skip-cycle3.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-export {y as a} from "modules-cycle3.js";
-export {x as b} from "modules-cycle3.js";
diff --git a/deps/v8/test/message/fail/modules-skip-cycle3.mjs b/deps/v8/test/message/fail/modules-skip-cycle3.mjs
new file mode 100644
index 0000000000..6c97de1938
--- /dev/null
+++ b/deps/v8/test/message/fail/modules-skip-cycle3.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export {y as a} from "modules-cycle3.mjs";
+export {x as b} from "modules-cycle3.mjs";
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-10.js b/deps/v8/test/message/fail/modules-skip-cycle5.mjs
index fff168a581..d678c66755 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-10.js
+++ b/deps/v8/test/message/fail/modules-skip-cycle5.mjs
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {x} from "./modules-skip-10.js"
+export {foo} from "modules-cycle5.mjs";
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-9.js b/deps/v8/test/message/fail/modules-skip-cycle6.mjs
index c474988833..f5c13d827f 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-9.js
+++ b/deps/v8/test/message/fail/modules-skip-cycle6.mjs
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import x from 'modules-skip-empty.js';
+export {foo} from "modules-cycle6.mjs";
diff --git a/deps/v8/test/message/fail/modules-star-conflict1.js b/deps/v8/test/message/fail/modules-star-conflict1.js
deleted file mode 100644
index 4281f7c085..0000000000
--- a/deps/v8/test/message/fail/modules-star-conflict1.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import {a} from "../../mjsunit/modules-skip-7.js";
diff --git a/deps/v8/test/message/fail/modules-import1.js b/deps/v8/test/message/fail/modules-star-conflict1.mjs
index fbfe907995..d1b2ecedb5 100644
--- a/deps/v8/test/message/fail/modules-import1.js
+++ b/deps/v8/test/message/fail/modules-star-conflict1.mjs
@@ -1,7 +1,5 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {a} from "modules-import1.js";
+import {a} from "../../mjsunit/modules-skip-7.mjs";
diff --git a/deps/v8/test/message/fail/modules-star-conflict1.out b/deps/v8/test/message/fail/modules-star-conflict1.out
index db8671165b..d6b46ab935 100644
--- a/deps/v8/test/message/fail/modules-star-conflict1.out
+++ b/deps/v8/test/message/fail/modules-star-conflict1.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module '../../mjsunit/modules-skip-7.js' contains conflicting star exports for name 'a'
-import {a} from "../../mjsunit/modules-skip-7.js";
+*%(basename)s:5: SyntaxError: The requested module '../../mjsunit/modules-skip-7.mjs' contains conflicting star exports for name 'a'
+import {a} from "../../mjsunit/modules-skip-7.mjs";
^
-SyntaxError: The requested module '../../mjsunit/modules-skip-7.js' contains conflicting star exports for name 'a'
+SyntaxError: The requested module '../../mjsunit/modules-skip-7.mjs' contains conflicting star exports for name 'a'
diff --git a/deps/v8/test/message/fail/modules-star-conflict2.js b/deps/v8/test/message/fail/modules-star-conflict2.mjs
index 6235851ee1..e63e5791fe 100644
--- a/deps/v8/test/message/fail/modules-star-conflict2.js
+++ b/deps/v8/test/message/fail/modules-star-conflict2.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-export * from "../../mjsunit/modules-skip-star-exports-conflict.js";
-export * from "../../mjsunit/modules-skip-6.js";
+export * from "../../mjsunit/modules-skip-star-exports-conflict.mjs";
+export * from "../../mjsunit/modules-skip-6.mjs";
-import {a} from "modules-star-conflict2.js";
+import {a} from "modules-star-conflict2.mjs";
diff --git a/deps/v8/test/message/fail/modules-star-conflict2.out b/deps/v8/test/message/fail/modules-star-conflict2.out
index d6decf733d..c3fe80b6a9 100644
--- a/deps/v8/test/message/fail/modules-star-conflict2.out
+++ b/deps/v8/test/message/fail/modules-star-conflict2.out
@@ -1,5 +1,5 @@
-*%(basename)s:7: SyntaxError: The requested module '../../mjsunit/modules-skip-star-exports-conflict.js' contains conflicting star exports for name 'a'
-export * from "../../mjsunit/modules-skip-star-exports-conflict.js";
+*%(basename)s:5: SyntaxError: The requested module '../../mjsunit/modules-skip-star-exports-conflict.mjs' contains conflicting star exports for name 'a'
+export * from "../../mjsunit/modules-skip-star-exports-conflict.mjs";
^
-SyntaxError: The requested module '../../mjsunit/modules-skip-star-exports-conflict.js' contains conflicting star exports for name 'a'
+SyntaxError: The requested module '../../mjsunit/modules-skip-star-exports-conflict.mjs' contains conflicting star exports for name 'a'
diff --git a/deps/v8/test/message/fail/modules-star-default.js b/deps/v8/test/message/fail/modules-star-default.mjs
index 30bc8f271a..532cdb7677 100644
--- a/deps/v8/test/message/fail/modules-star-default.js
+++ b/deps/v8/test/message/fail/modules-star-default.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
// Star exports do not propagate a default export.
-import a from "modules-import4.js";
+import a from "modules-import4.mjs";
diff --git a/deps/v8/test/message/fail/modules-star-default.out b/deps/v8/test/message/fail/modules-star-default.out
index 1524a7cba6..e6e60b45b7 100644
--- a/deps/v8/test/message/fail/modules-star-default.out
+++ b/deps/v8/test/message/fail/modules-star-default.out
@@ -1,5 +1,5 @@
-*modules-import4.js:8: SyntaxError: The requested module 'modules-import4.js' does not provide an export named 'c'
-export {c as a} from "modules-import4.js";
+*modules-import4.mjs:6: SyntaxError: The requested module 'modules-import4.mjs' does not provide an export named 'c'
+export {c as a} from "modules-import4.mjs";
^^^^^^
-SyntaxError: The requested module 'modules-import4.js' does not provide an export named 'c'
+SyntaxError: The requested module 'modules-import4.mjs' does not provide an export named 'c'
diff --git a/deps/v8/test/message/fail/modules-undefined-export1.js b/deps/v8/test/message/fail/modules-undefined-export1.mjs
index ddedbaaded..070b73987d 100644
--- a/deps/v8/test/message/fail/modules-undefined-export1.js
+++ b/deps/v8/test/message/fail/modules-undefined-export1.mjs
@@ -1,7 +1,5 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
export {x};
diff --git a/deps/v8/test/message/fail/modules-undefined-export1.out b/deps/v8/test/message/fail/modules-undefined-export1.out
index 66b862c167..e34186b35b 100644
--- a/deps/v8/test/message/fail/modules-undefined-export1.out
+++ b/deps/v8/test/message/fail/modules-undefined-export1.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Export 'x' is not defined in module
+*%(basename)s:5: SyntaxError: Export 'x' is not defined in module
export {x};
^
SyntaxError: Export 'x' is not defined in module
diff --git a/deps/v8/test/message/fail/modules-undefined-export2.js b/deps/v8/test/message/fail/modules-undefined-export2.mjs
index bf8c2a0c94..4b0047571c 100644
--- a/deps/v8/test/message/fail/modules-undefined-export2.js
+++ b/deps/v8/test/message/fail/modules-undefined-export2.mjs
@@ -1,7 +1,5 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
export {x as y};
diff --git a/deps/v8/test/message/fail/modules-undefined-export2.out b/deps/v8/test/message/fail/modules-undefined-export2.out
index fd036f190a..1f1b1b6e45 100644
--- a/deps/v8/test/message/fail/modules-undefined-export2.out
+++ b/deps/v8/test/message/fail/modules-undefined-export2.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Export 'x' is not defined in module
+*%(basename)s:5: SyntaxError: Export 'x' is not defined in module
export {x as y};
^^^^^^
SyntaxError: Export 'x' is not defined in module
diff --git a/deps/v8/test/message/fail/new-target-assignment.out b/deps/v8/test/message/fail/new-target-assignment.out
index bc1492ca92..e868c4c82d 100644
--- a/deps/v8/test/message/fail/new-target-assignment.out
+++ b/deps/v8/test/message/fail/new-target-assignment.out
@@ -1,4 +1,4 @@
-*%(basename)s:5: ReferenceError: Invalid left-hand side in assignment
+*%(basename)s:5: SyntaxError: Invalid left-hand side in assignment
function f() { new.target = 5 }
^^^^^^^^^^
-ReferenceError: Invalid left-hand side in assignment
+SyntaxError: Invalid left-hand side in assignment
diff --git a/deps/v8/test/message/fail/new-target-postfix-op.out b/deps/v8/test/message/fail/new-target-postfix-op.out
index 1e8ef6be21..840f316cf9 100644
--- a/deps/v8/test/message/fail/new-target-postfix-op.out
+++ b/deps/v8/test/message/fail/new-target-postfix-op.out
@@ -1,4 +1,4 @@
-*%(basename)s:5: ReferenceError: Invalid left-hand side expression in postfix operation
+*%(basename)s:5: SyntaxError: Invalid left-hand side expression in postfix operation
function f() { new.target++ }
^^^^^^^^^^
-ReferenceError: Invalid left-hand side expression in postfix operation
+SyntaxError: Invalid left-hand side expression in postfix operation
diff --git a/deps/v8/test/message/fail/new-target-prefix-op.out b/deps/v8/test/message/fail/new-target-prefix-op.out
index a444087ec9..450e7af28a 100644
--- a/deps/v8/test/message/fail/new-target-prefix-op.out
+++ b/deps/v8/test/message/fail/new-target-prefix-op.out
@@ -1,4 +1,4 @@
-*%(basename)s:5: ReferenceError: Invalid left-hand side expression in prefix operation
+*%(basename)s:5: SyntaxError: Invalid left-hand side expression in prefix operation
function f() { ++new.target }
^^^^^^^^^^
-ReferenceError: Invalid left-hand side expression in prefix operation
+SyntaxError: Invalid left-hand side expression in prefix operation
diff --git a/deps/v8/test/message/fail/redeclaration5.js b/deps/v8/test/message/fail/redeclaration5.mjs
index d47593ef08..107ad2e309 100644
--- a/deps/v8/test/message/fail/redeclaration5.js
+++ b/deps/v8/test/message/fail/redeclaration5.mjs
@@ -2,7 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
function foo() {}
function foo() {}
diff --git a/deps/v8/test/message/fail/redeclaration5.out b/deps/v8/test/message/fail/redeclaration5.out
index 241582e0ee..ac81dae089 100644
--- a/deps/v8/test/message/fail/redeclaration5.out
+++ b/deps/v8/test/message/fail/redeclaration5.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Identifier 'foo' has already been declared
+*%(basename)s:6: SyntaxError: Identifier 'foo' has already been declared
function foo() {}
^
SyntaxError: Identifier 'foo' has already been declared
diff --git a/deps/v8/test/message/fail/wasm-exception-rethrow.out b/deps/v8/test/message/fail/wasm-exception-rethrow.out
index 98fdbc9376..0133ee6bd7 100644
--- a/deps/v8/test/message/fail/wasm-exception-rethrow.out
+++ b/deps/v8/test/message/fail/wasm-exception-rethrow.out
@@ -1,4 +1,4 @@
wasm-function[0]:5: RuntimeError: wasm exception
RuntimeError: wasm exception
- at rethrow0 (wasm-function[0]:5)
+ at rethrow0 (wasm-function[0]:0x32)
at *%(basename)s:21:18
diff --git a/deps/v8/test/message/fail/wasm-exception-throw.out b/deps/v8/test/message/fail/wasm-exception-throw.out
index 65083190c6..0c302cb3c4 100644
--- a/deps/v8/test/message/fail/wasm-exception-throw.out
+++ b/deps/v8/test/message/fail/wasm-exception-throw.out
@@ -1,4 +1,4 @@
wasm-function[0]:3: RuntimeError: wasm exception
RuntimeError: wasm exception
- at throw0 (wasm-function[0]:3)
+ at throw0 (wasm-function[0]:0x2e)
at *%(basename)s:17:18
diff --git a/deps/v8/test/message/fail/wasm-function-name.out b/deps/v8/test/message/fail/wasm-function-name.out
index ff9b04eaab..19770670c2 100644
--- a/deps/v8/test/message/fail/wasm-function-name.out
+++ b/deps/v8/test/message/fail/wasm-function-name.out
@@ -1,5 +1,5 @@
wasm-function[0]:1: RuntimeError: unreachable
RuntimeError: unreachable
- at main (wasm-function[0]:1)
+ at main (wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-module-and-function-name.out b/deps/v8/test/message/fail/wasm-module-and-function-name.out
index 0bff25cfd0..d77049d633 100644
--- a/deps/v8/test/message/fail/wasm-module-and-function-name.out
+++ b/deps/v8/test/message/fail/wasm-module-and-function-name.out
@@ -1,4 +1,4 @@
wasm-function[0]:1: RuntimeError: unreachable
RuntimeError: unreachable
- at test-module.main (wasm-function[0]:1)
+ at test-module.main (wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-module-name.out b/deps/v8/test/message/fail/wasm-module-name.out
index 9163088efe..5ed57721e4 100644
--- a/deps/v8/test/message/fail/wasm-module-name.out
+++ b/deps/v8/test/message/fail/wasm-module-name.out
@@ -1,5 +1,5 @@
wasm-function[0]:1: RuntimeError: unreachable
RuntimeError: unreachable
- at test-module (wasm-function[0]:1)
+ at test-module (wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-no-name.out b/deps/v8/test/message/fail/wasm-no-name.out
index 90d068d557..cc68ab0f8f 100644
--- a/deps/v8/test/message/fail/wasm-no-name.out
+++ b/deps/v8/test/message/fail/wasm-no-name.out
@@ -1,5 +1,5 @@
wasm-function[0]:1: RuntimeError: unreachable
RuntimeError: unreachable
- at wasm-function[0]:1
+ at wasm-function[0]:0x22
at *%(basename)s:{NUMBER}:31
diff --git a/deps/v8/test/message/fail/wasm-trap.out b/deps/v8/test/message/fail/wasm-trap.out
index 41c017f100..7f72baee90 100644
--- a/deps/v8/test/message/fail/wasm-trap.out
+++ b/deps/v8/test/message/fail/wasm-trap.out
@@ -1,5 +1,5 @@
wasm-function[0]:5: RuntimeError: divide by zero
RuntimeError: divide by zero
- at main (wasm-function[0]:5)
+ at main (wasm-function[0]:0x26)
at *%(basename)s:{NUMBER}:16
diff --git a/deps/v8/test/message/fail/weak-refs-unregister.js b/deps/v8/test/message/fail/weak-refs-unregister.js
new file mode 100644
index 0000000000..e0cf0282d2
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-unregister.js
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs
+
+let fg = new FinalizationGroup(() => {});
+fg.unregister(1);
diff --git a/deps/v8/test/message/fail/weak-refs-unregister.out b/deps/v8/test/message/fail/weak-refs-unregister.out
new file mode 100644
index 0000000000..938665157d
--- /dev/null
+++ b/deps/v8/test/message/fail/weak-refs-unregister.out
@@ -0,0 +1,6 @@
+*%(basename)s:8: TypeError: unregisterToken ('1') must be an object
+fg.unregister(1);
+ ^
+TypeError: unregisterToken ('1') must be an object
+ at FinalizationGroup.unregister (<anonymous>)
+ at *%(basename)s:8:4
diff --git a/deps/v8/test/message/mjsunit/fail/assert_not_same.js b/deps/v8/test/message/mjsunit/fail/assert_not_same.js
new file mode 100644
index 0000000000..0f4241abb7
--- /dev/null
+++ b/deps/v8/test/message/mjsunit/fail/assert_not_same.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/mjsunit.js");
+
+assertNotSame(1, 1);
diff --git a/deps/v8/test/message/mjsunit/fail/assert_not_same.out b/deps/v8/test/message/mjsunit/fail/assert_not_same.out
new file mode 100644
index 0000000000..f0000f5ed3
--- /dev/null
+++ b/deps/v8/test/message/mjsunit/fail/assert_not_same.out
@@ -0,0 +1,10 @@
+test/mjsunit/mjsunit.js:{NUMBER}: Failure: expected <not same as 1> found <1>
+
+Stack: MjsUnitAssertionError
+ at assertNotSame *mjsunit.js {NUMBER}:{NUMBER}
+ at *%(basename)s 7:1
+ throw new MjsUnitAssertionError(message);
+ ^
+MjsUnitAssertionError
+ at assertNotSame *mjsunit.js {NUMBER}:{NUMBER}
+ at *%(basename)s 7:1
diff --git a/deps/v8/test/message/regress/fail/regress-8409.out b/deps/v8/test/message/regress/fail/regress-8409.out
index 3ca7690366..4387259420 100644
--- a/deps/v8/test/message/regress/fail/regress-8409.out
+++ b/deps/v8/test/message/regress/fail/regress-8409.out
@@ -1,4 +1,4 @@
-*%(basename)s:5: SyntaxError: Unexpected token )
+*%(basename)s:5: SyntaxError: Unexpected token ')'
[().x] = 1
^
-SyntaxError: Unexpected token )
+SyntaxError: Unexpected token ')'
diff --git a/deps/v8/test/message/regress/fail/regress-900383.js b/deps/v8/test/message/regress/fail/regress-900383.mjs
index b35bc32bb1..1e1d4abe30 100644
--- a/deps/v8/test/message/regress/fail/regress-900383.js
+++ b/deps/v8/test/message/regress/fail/regress-900383.mjs
@@ -1,8 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
export default x = 1;
export default x = 1;
diff --git a/deps/v8/test/message/regress/fail/regress-900383.out b/deps/v8/test/message/regress/fail/regress-900383.out
index f718b4f46d..e81e4730e5 100644
--- a/deps/v8/test/message/regress/fail/regress-900383.out
+++ b/deps/v8/test/message/regress/fail/regress-900383.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Identifier '*default*' has already been declared
+*%(basename)s:6: SyntaxError: Identifier '*default*' has already been declared
export default x = 1;
^^^^^^^^^^^^^
SyntaxError: Identifier '*default*' has already been declared
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 74c26b8525..f99c377661 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -35,7 +35,6 @@ from testrunner.outproc import message
INVALID_FLAGS = ["--enable-slow-asserts"]
-MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class TestSuite(testsuite.TestSuite):
@@ -56,9 +55,7 @@ class TestCase(testcase.D8TestCase):
def _parse_source_files(self, source):
files = []
- if MODULE_PATTERN.search(source):
- files.append("--module")
- files.append(os.path.join(self.suite.root, self.path + ".js"))
+ files.append(self._get_source_path())
return files
def _expected_fail(self):
@@ -81,7 +78,13 @@ class TestCase(testcase.D8TestCase):
return self._source_flags
def _get_source_path(self):
- return os.path.join(self.suite.root, self.path + self._get_suffix())
+ base_path = os.path.join(self.suite.root, self.path)
+ # Try .js first, and fall back to .mjs.
+ # TODO(v8:9406): clean this up by never separating the path from
+ # the extension in the first place.
+ if os.path.exists(base_path + self._get_suffix()):
+ return base_path + self._get_suffix()
+ return base_path + '.mjs'
def skip_predictable(self):
# Message tests expected to fail don't print allocation output for
diff --git a/deps/v8/test/message/wasm-function-name-async.out b/deps/v8/test/message/wasm-function-name-async.out
index b025f650c6..a298b07414 100644
--- a/deps/v8/test/message/wasm-function-name-async.out
+++ b/deps/v8/test/message/wasm-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at main (wasm-function[0]:1)
+ at main (wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-function-name-streaming.out b/deps/v8/test/message/wasm-function-name-streaming.out
index f5dde3dd87..e880c3c322 100644
--- a/deps/v8/test/message/wasm-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at main (wasm-function[0]:1)
+ at main (wasm-function[0]:0x22)
at test/message/wasm-function-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-async.out b/deps/v8/test/message/wasm-module-and-function-name-async.out
index e1ca097e64..0cb61c4a6e 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-async.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module.main (wasm-function[0]:1)
+ at test-module.main (wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-and-function-name-streaming.out b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
index 4afaa66bbe..2367c6f3cf 100644
--- a/deps/v8/test/message/wasm-module-and-function-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-and-function-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module.main (wasm-function[0]:1)
+ at test-module.main (wasm-function[0]:0x22)
at test/message/wasm-module-and-function-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-async.out b/deps/v8/test/message/wasm-module-name-async.out
index 9163b31277..a1c9f69b2b 100644
--- a/deps/v8/test/message/wasm-module-name-async.out
+++ b/deps/v8/test/message/wasm-module-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module (wasm-function[0]:1)
+ at test-module (wasm-function[0]:0x22)
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-module-name-streaming.out b/deps/v8/test/message/wasm-module-name-streaming.out
index 98fba539db..c9e029c250 100644
--- a/deps/v8/test/message/wasm-module-name-streaming.out
+++ b/deps/v8/test/message/wasm-module-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at test-module (wasm-function[0]:1)
+ at test-module (wasm-function[0]:0x22)
at test/message/wasm-module-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-async.out b/deps/v8/test/message/wasm-no-name-async.out
index 4c622a7583..55a722dcb1 100644
--- a/deps/v8/test/message/wasm-no-name-async.out
+++ b/deps/v8/test/message/wasm-no-name-async.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at wasm-function[0]:1
+ at wasm-function[0]:0x22
at *%(basename)s:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/message/wasm-no-name-streaming.out b/deps/v8/test/message/wasm-no-name-streaming.out
index 59e070b0b8..b117074f78 100644
--- a/deps/v8/test/message/wasm-no-name-streaming.out
+++ b/deps/v8/test/message/wasm-no-name-streaming.out
@@ -1,4 +1,4 @@
RuntimeError: unreachable
- at wasm-function[0]:1
+ at wasm-function[0]:0x22
at test/message/wasm-no-name-async.js:{NUMBER}:27
at test/mjsunit/mjsunit.js:*
diff --git a/deps/v8/test/mjsunit/allocation-folding.js b/deps/v8/test/mjsunit/allocation-folding.js
index fcf909d6af..ffb4bd0ecf 100644
--- a/deps/v8/test/mjsunit/allocation-folding.js
+++ b/deps/v8/test/mjsunit/allocation-folding.js
@@ -38,6 +38,7 @@ function f() {
return elem2;
}
+%PrepareFunctionForOptimization(f);
f(); f(); f();
%OptimizeFunctionOnNextCall(f);
var result = f();
@@ -54,6 +55,7 @@ function doubles() {
return elem2;
}
+%PrepareFunctionForOptimization(doubles);
doubles(); doubles(); doubles();
%OptimizeFunctionOnNextCall(doubles);
result = doubles();
@@ -70,6 +72,7 @@ function doubles_int() {
return elem2;
}
+%PrepareFunctionForOptimization(doubles_int);
doubles_int(); doubles_int(); doubles_int();
%OptimizeFunctionOnNextCall(doubles_int);
result = doubles_int();
@@ -91,6 +94,7 @@ function branch_int(left) {
return elem2;
}
+%PrepareFunctionForOptimization(branch_int);
branch_int(1); branch_int(1); branch_int(1);
%OptimizeFunctionOnNextCall(branch_int);
result = branch_int(1);
@@ -110,6 +114,7 @@ function boom() {
return [ a1, a2, a3 ];
}
+%PrepareFunctionForOptimization(boom);
boom(); boom(); boom();
%OptimizeFunctionOnNextCall(boom);
boom();
diff --git a/deps/v8/test/mjsunit/arguments-apply-deopt.js b/deps/v8/test/mjsunit/arguments-apply-deopt.js
index b7251af5aa..48d2966374 100644
--- a/deps/v8/test/mjsunit/arguments-apply-deopt.js
+++ b/deps/v8/test/mjsunit/arguments-apply-deopt.js
@@ -36,6 +36,7 @@
}
var y = invoker(m, selfOf);
+ %PrepareFunctionForOptimization(y);
function selfOf(c) {
var sssss = c.self_;
@@ -67,6 +68,7 @@
return f.apply(deopt(), arguments);
};
+ %PrepareFunctionForOptimization(XXXXX);
XXXXX();
XXXXX();
XXXXX();
diff --git a/deps/v8/test/mjsunit/arguments-deopt.js b/deps/v8/test/mjsunit/arguments-deopt.js
index 192739fdba..996249df9e 100644
--- a/deps/v8/test/mjsunit/arguments-deopt.js
+++ b/deps/v8/test/mjsunit/arguments-deopt.js
@@ -116,6 +116,7 @@
args.push(i);
if (i%10 == 0) %OptimizeFunctionOnNextCall(sum1);
assertEquals(i*(i+1)/2, sum1(...args));
+ if (i%10 == 0) %PrepareFunctionForOptimization(sum1);
}
})();
@@ -136,6 +137,7 @@
args.push(i);
if (i%10 == 0) %OptimizeFunctionOnNextCall(sum2);
assertEquals(i*(i+1)/2, sum2(...args));
+ if (i%10 == 0) %PrepareFunctionForOptimization(sum2);
}
})();
@@ -156,6 +158,7 @@
args.push(i);
if (i%10 == 0) %OptimizeFunctionOnNextCall(sum3);
assertEquals(i*(i+1)/2, sum3(...args));
+ if (i%10 == 0) %PrepareFunctionForOptimization(sum3);
}
})();
@@ -176,6 +179,7 @@
args.push(i);
if (i%10 == 0) %OptimizeFunctionOnNextCall(sum4);
assertEquals(i*(i+1)/2-1, sum4(...args));
+ if (i%10 == 0) %PrepareFunctionForOptimization(sum4);
}
})();
@@ -192,5 +196,6 @@
args.push(i);
if (i%10 == 0) %OptimizeFunctionOnNextCall(read);
assertEquals(i, read(...args));
+ if (i%10 == 0) %PrepareFunctionForOptimization(read);
}
})();
diff --git a/deps/v8/test/mjsunit/array-bounds-check-removal.js b/deps/v8/test/mjsunit/array-bounds-check-removal.js
index 17be095143..f2625c4590 100644
--- a/deps/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/deps/v8/test/mjsunit/array-bounds-check-removal.js
@@ -45,6 +45,7 @@ function test_do_not_assert_on_non_int32(vector, base) {
}
return r;
}
+%PrepareFunctionForOptimization(test_do_not_assert_on_non_int32);
test_do_not_assert_on_non_int32(v,1);
test_do_not_assert_on_non_int32(v,1);
test_do_not_assert_on_non_int32(v,"a");
@@ -86,7 +87,7 @@ function check_test_base(a, base, condition) {
}
}
-
+%PrepareFunctionForOptimization(test_base);
test_base(a, 1, true);
test_base(a, 2, true);
test_base(a, 1, false);
@@ -100,6 +101,7 @@ check_test_base(a, 3, false);
// Test that we deopt on failed bounds checks.
var dictionary_map_array = new Int32Array(128);
test_base(dictionary_map_array, 5, true);
+%PrepareFunctionForOptimization(test_base);
test_base(dictionary_map_array, 6, true);
test_base(dictionary_map_array, 5, false);
test_base(dictionary_map_array, 6, false);
@@ -109,6 +111,7 @@ assertUnoptimized(test_base);
// Forget about the dictionary_map_array's map.
%ClearFunctionFeedback(test_base);
+%PrepareFunctionForOptimization(test_base);
test_base(a, 5, true);
test_base(a, 6, true);
@@ -153,6 +156,7 @@ function check_test_minus(base,cond) {
}
}
+%PrepareFunctionForOptimization(test_minus);
test_minus(5,true);
test_minus(6,true);
%OptimizeFunctionOnNextCall(test_minus);
@@ -168,6 +172,7 @@ function short_test(a, i) {
a[i + 9] = 0;
a[i - 10] = 0;
}
+%PrepareFunctionForOptimization(short_test);
short_test(short_a, 50);
short_test(short_a, 50);
%OptimizeFunctionOnNextCall(short_test);
@@ -190,6 +195,7 @@ function test_phi(a, base, check) {
result += a[index - 1];
return result;
}
+%PrepareFunctionForOptimization(test_phi);
var result_phi = 0;
result_phi = test_phi(data_phi, 3, true);
assertEquals(12, result_phi);
@@ -218,6 +224,7 @@ function test_composition(a, base0, check) {
return result;
}
+%PrepareFunctionForOptimization(test_composition);
var result_composition = 0;
result_composition = test_composition(data_composition_long, 2);
assertEquals(19, result_composition);
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index f26781ee95..b3c65124c7 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -126,7 +126,8 @@ function assertKind(expected, obj, name_opt) {
assertTrue(a instanceof Array);
var contextB = Realm.create();
- Realm.eval(contextB, "function bar2() { return new Array(); };");
+ Realm.eval(contextB,
+ "function bar2() { return new Array(); }; %PrepareFunctionForOptimization(bar2)");
Realm.eval(contextB, "bar2(); bar2();");
Realm.eval(contextB, "%OptimizeFunctionOnNextCall(bar2);");
Realm.eval(contextB, "bar2();");
diff --git a/deps/v8/test/mjsunit/array-non-smi-length.js b/deps/v8/test/mjsunit/array-non-smi-length.js
index 11277aff60..346ce19c4f 100644
--- a/deps/v8/test/mjsunit/array-non-smi-length.js
+++ b/deps/v8/test/mjsunit/array-non-smi-length.js
@@ -32,6 +32,8 @@ function TestNonSmiArrayLength() {
return a.length+1;
}
+ %PrepareFunctionForOptimization(f);
+
var a = [];
a.length = 0xFFFF;
assertSame(0x10000, f(a));
diff --git a/deps/v8/test/mjsunit/array-pop.js b/deps/v8/test/mjsunit/array-pop.js
index 17771839ab..47e882f6b4 100644
--- a/deps/v8/test/mjsunit/array-pop.js
+++ b/deps/v8/test/mjsunit/array-pop.js
@@ -131,6 +131,7 @@
return v;
}
+ %PrepareFunctionForOptimization(f);
var a = [true, true, true, true]
assertEquals(1, f(a, false));
assertEquals(1, f(a, false));
diff --git a/deps/v8/test/mjsunit/array-push.js b/deps/v8/test/mjsunit/array-push.js
index ece3319bc1..cb95c93e05 100644
--- a/deps/v8/test/mjsunit/array-push.js
+++ b/deps/v8/test/mjsunit/array-push.js
@@ -135,7 +135,7 @@
function f(a, i) {
a.push(i);
}
-
+ %PrepareFunctionForOptimization(f);
var a = [1,2,3];
a.f = function() { return 10; }
f(a, 4);
diff --git a/deps/v8/test/mjsunit/array-push4.js b/deps/v8/test/mjsunit/array-push4.js
index 678873fb3a..a206fda2a3 100644
--- a/deps/v8/test/mjsunit/array-push4.js
+++ b/deps/v8/test/mjsunit/array-push4.js
@@ -42,6 +42,7 @@ v = 0;
function push_wrapper_1(array, value) {
array.push(value);
}
+%PrepareFunctionForOptimization(push_wrapper_1);
array = [];
array.__proto__ = my_array_proto;
push_wrapper_1(array, 100);
diff --git a/deps/v8/test/mjsunit/array-push6.js b/deps/v8/test/mjsunit/array-push6.js
index 336b43d8d8..bce9c54f00 100644
--- a/deps/v8/test/mjsunit/array-push6.js
+++ b/deps/v8/test/mjsunit/array-push6.js
@@ -8,6 +8,8 @@ function push_wrapper(array, value) {
array.push(value);
}
+%PrepareFunctionForOptimization(push_wrapper);
+
// Test that optimization of Array.push() for non-Arrays works correctly.
var object = { x : 8, length: 3 };
object[18] = 5;
diff --git a/deps/v8/test/mjsunit/array-push8.js b/deps/v8/test/mjsunit/array-push8.js
index b1a87669d9..cd90573da2 100644
--- a/deps/v8/test/mjsunit/array-push8.js
+++ b/deps/v8/test/mjsunit/array-push8.js
@@ -7,9 +7,12 @@
function push_wrapper(array, value) {
array.push(value);
}
+%PrepareFunctionForOptimization(push_wrapper);
+
function pop_wrapper(array) {
return array.pop();
}
+%PrepareFunctionForOptimization(pop_wrapper);
// Test the frzon arrays throw an exception if you try to push to them, both in
// optimized and non-optimized code.
diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js
index f9e8aeee22..5d431b024d 100644
--- a/deps/v8/test/mjsunit/array-reduce.js
+++ b/deps/v8/test/mjsunit/array-reduce.js
@@ -571,7 +571,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
let f = (a,current) => a + current;
let g = function(a) {
return a.reduce(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
let a = [1,2,3,4,5,6,7,8,9,10];
g(a); g(a);
let total = g(a);
@@ -583,7 +584,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
let f = (a,current) => a + current;
let g = function(a) {
return a.reduce(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
let a = [1,2,3,4,5,6,7,8,9,10];
g(a); g(a); g(a);
%OptimizeFunctionOnNextCall(g);
@@ -596,7 +598,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + current; };
let g = function(a) {
return a.reduce(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
let a = [1,2,3,4,5,6,7,8,9,10];
g(a); g(a);
let total = g(a);
@@ -614,7 +617,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
};
let g = function(a) {
return a.reduce(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
let a = [11,22,33,45,56,6,77,84,93,101];
g(a); g(a);
let total = g(a);
@@ -633,13 +637,15 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
};
let g = function() {
return array.reduce(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
g();
deopt = true;
g();
+ %PrepareFunctionForOptimization(g);
deopt = false;
array = [11,22,33,45,56,6,77,84,93,101];
%OptimizeFunctionOnNextCall(g);
@@ -657,13 +663,15 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
};
let g = function() {
return array.reduce(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
g();
deopt = true;
g();
+ %PrepareFunctionForOptimization(g);
deopt = false;
array = [11,22,33,45,56,6,77,84,93,101];
%OptimizeFunctionOnNextCall(g);
@@ -675,7 +683,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
(function TriggerReduceRightPreLoopDeopt() {
function f(a) {
a.reduceRight((x) => { return x + 1 });
- }
+ };
+ %PrepareFunctionForOptimization(f);
f([1,2,]);
f([1,2,]);
%OptimizeFunctionOnNextCall(f);
@@ -691,13 +700,15 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
};
let g = function() {
return array.reduceRight(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
g();
deopt = true;
g();
+ %PrepareFunctionForOptimization(g);
deopt = false;
array = [11,22,33,45,56,6,77,84,93,101];
%OptimizeFunctionOnNextCall(g);
@@ -715,7 +726,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
return array.reduce(f);
} catch (e) {
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -737,7 +749,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} catch (e) {
return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -746,6 +759,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -768,7 +782,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} catch (e) {
return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -777,6 +792,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -799,7 +815,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} finally {
if (done) return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -808,6 +825,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -831,7 +849,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} finally {
if (done) return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -840,6 +859,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -856,7 +876,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
let array = [1,2,3];
let g = function() {
return array.reduce(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -882,7 +903,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} catch (e) {
if (done) return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -891,6 +913,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -916,7 +939,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} finally {
if (done) return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -925,6 +949,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -939,7 +964,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
let g = function(a) {
count = 0;
return a.reduceRight(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
let a = [1,2,3,4,5,6,7,8,9,10];
g(a); g(a);
let total = g(a);
@@ -953,7 +979,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
let g = function(a) {
count = 0;
return a.reduceRight(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
let a = [1,2,3,4,5,6,7,8,9,10];
g(a); g(a); g(a);
%OptimizeFunctionOnNextCall(g);
@@ -966,7 +993,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + current; };
let g = function(a) {
return a.reduceRight(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
let a = [1,2,3,4,5,6,7,8,9,10];
g(a); g(a);
let total = g(a);
@@ -984,7 +1012,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
};
let g = function(a) {
return a.reduceRight(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
let a = [11,22,33,45,56,6,77,84,93,101];
g(a); g(a);
let total = g(a);
@@ -1003,13 +1032,15 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
};
let g = function() {
return array.reduceRight(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
g();
deopt = true;
g();
+ %PrepareFunctionForOptimization(g);
deopt = false;
array = [11,22,33,45,56,6,77,84,93,101];
%OptimizeFunctionOnNextCall(g);
@@ -1027,7 +1058,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
return array.reduceRight(f);
} catch (e) {
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -1049,7 +1081,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} catch (e) {
return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -1058,6 +1091,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -1080,7 +1114,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} catch (e) {
return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -1089,6 +1124,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -1111,7 +1147,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} finally {
if (done) return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -1120,6 +1157,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -1143,7 +1181,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} finally {
if (done) return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -1152,6 +1191,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -1168,7 +1208,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
let array = [1,2,3];
let g = function() {
return array.reduceRight(f);
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -1194,7 +1235,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} catch (e) {
if (done) return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -1203,6 +1245,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -1228,7 +1271,8 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
} finally {
if (done) return null;
}
- }
+ };
+ %PrepareFunctionForOptimization(g);
g(); g();
let total = g();
%OptimizeFunctionOnNextCall(g);
@@ -1237,6 +1281,7 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
done = true;
assertEquals(null, g());
done = false;
+ %PrepareFunctionForOptimization(g);
g(); g();
%OptimizeFunctionOnNextCall(g);
g();
@@ -1246,71 +1291,76 @@ assertEquals(undefined, arr.reduceRight(function(val) { return val }));
})();
(function ReduceHoleyArrayWithDefaultAccumulator() {
- var __v_12258 = new Array(10);
- function __f_3253(a) {
- let __f_3252 = function(accumulator, currentValue) {
+ var holey = new Array(10);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
return currentValue;
- }
- return a.reduce(__f_3252, 13);
- }
- assertEquals(13, __f_3253(__v_12258));
- assertEquals(13, __f_3253(__v_12258));
- assertEquals(13, __f_3253(__v_12258));
- %OptimizeFunctionOnNextCall(__f_3253);
- assertEquals(13, __f_3253(__v_12258));
+ };
+ return a.reduce(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(13, reduce(holey));
})();
(function ReduceRightHoleyArrayWithDefaultAccumulator() {
- var __v_12258 = new Array(10);
- function __f_3253(a) {
- let __f_3252 = function(accumulator, currentValue) {
+ var holey = new Array(10);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
return currentValue;
- }
- return a.reduceRight(__f_3252, 13);
- }
- assertEquals(13, __f_3253(__v_12258));
- assertEquals(13, __f_3253(__v_12258));
- assertEquals(13, __f_3253(__v_12258));
- %OptimizeFunctionOnNextCall(__f_3253);
- assertEquals(13, __f_3253(__v_12258));
+ };
+ return a.reduceRight(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(13, reduce(holey));
})();
(function ReduceHoleyArrayOneElementWithDefaultAccumulator() {
- var __v_12258 = new Array(10);
- __v_12258[1] = 5;
- function __f_3253(a) {
- let __f_3252 = function(accumulator, currentValue) {
+ var holey = new Array(10);
+ holey[1] = 5;
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
return currentValue + accumulator;
- }
- return a.reduce(__f_3252, 13);
- }
- assertEquals(18, __f_3253(__v_12258));
- assertEquals(18, __f_3253(__v_12258));
- assertEquals(18, __f_3253(__v_12258));
- %OptimizeFunctionOnNextCall(__f_3253);
- assertEquals(18, __f_3253(__v_12258));
+ };
+ return a.reduce(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(18, reduce(holey));
})();
(function ReduceRightHoleyArrayOneElementWithDefaultAccumulator() {
- var __v_12258 = new Array(10);
- __v_12258[1] = 5;
- function __f_3253(a) {
- let __f_3252 = function(accumulator, currentValue) {
+ var holey = new Array(10);
+ holey[1] = 5;
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
return currentValue + accumulator;
- }
- return a.reduceRight(__f_3252, 13);
- }
- assertEquals(18, __f_3253(__v_12258));
- assertEquals(18, __f_3253(__v_12258));
- assertEquals(18, __f_3253(__v_12258));
- %OptimizeFunctionOnNextCall(__f_3253);
- assertEquals(18, __f_3253(__v_12258));
+ };
+ return a.reduceRight(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(18, reduce(holey));
})();
(function ReduceMixedHoleyArrays() {
function r(a) {
return a.reduce((acc, i) => {acc[0]});
- }
+ };
+ %PrepareFunctionForOptimization(r);
r([[0]]);
r([[0]]);
r([0,,]);
diff --git a/deps/v8/test/mjsunit/array-shift3.js b/deps/v8/test/mjsunit/array-shift3.js
index 3a0afc596b..199d76a4f2 100644
--- a/deps/v8/test/mjsunit/array-shift3.js
+++ b/deps/v8/test/mjsunit/array-shift3.js
@@ -8,8 +8,9 @@ Array.prototype[1] = "element 1";
function test(a) {
a.shift();
return a;
-}
-assertEquals(["element 1",{}], test([0,,{}]));
-assertEquals(["element 1",10], test([9,,10]));
+};
+%PrepareFunctionForOptimization(test);
+assertEquals(['element 1', {}], test([0, , {}]));
+assertEquals(['element 1', 10], test([9, , 10]));
%OptimizeFunctionOnNextCall(test);
-assertEquals(["element 1",10], test([9,,10]));
+assertEquals(['element 1', 10], test([9, , 10]));
diff --git a/deps/v8/test/mjsunit/array-shift5.js b/deps/v8/test/mjsunit/array-shift5.js
index a097e2f797..ebf510dda4 100644
--- a/deps/v8/test/mjsunit/array-shift5.js
+++ b/deps/v8/test/mjsunit/array-shift5.js
@@ -5,61 +5,69 @@
// Flags: --allow-natives-syntax
(function() {
- function doShift(a) { return a.shift(); }
+function doShift(a) {
+ return a.shift();
+};
+%PrepareFunctionForOptimization(doShift);
+function test() {
+ var a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
+ assertEquals(0, doShift(a));
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], a);
+}
- function test() {
- var a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16];
- assertEquals(0, doShift(a));
- assertEquals([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16], a);
- }
-
- test();
- test();
- %OptimizeFunctionOnNextCall(doShift);
- test();
+test();
+test();
+%OptimizeFunctionOnNextCall(doShift);
+test();
})();
(function() {
- function doShift(a) { return a.shift(); }
-
- function test() {
- var a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.1];
- assertEquals(0, doShift(a));
- assertEquals([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.1], a);
- }
+function doShift(a) {
+ return a.shift();
+};
+%PrepareFunctionForOptimization(doShift);
+function test() {
+ var a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16.1];
+ assertEquals(0, doShift(a));
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16.1], a);
+}
- test();
- test();
- %OptimizeFunctionOnNextCall(doShift);
- test();
+test();
+test();
+%OptimizeFunctionOnNextCall(doShift);
+test();
})();
(function() {
- function doShift(a) { return a.shift(); }
+function doShift(a) {
+ return a.shift();
+};
+%PrepareFunctionForOptimization(doShift);
+function test() {
+ var a = [, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16.1];
+ assertEquals(undefined, doShift(a));
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16.1], a);
+}
- function test() {
- var a = [,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.1];
- assertEquals(undefined, doShift(a));
- assertEquals([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.1], a);
- }
-
- test();
- test();
- %OptimizeFunctionOnNextCall(doShift);
- test();
+test();
+test();
+%OptimizeFunctionOnNextCall(doShift);
+test();
})();
(function() {
- function doShift(a) { return a.shift(); }
-
- function test() {
- var a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,"16"];
- assertEquals(0, doShift(a));
- assertEquals([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,"16"], a);
- }
+function doShift(a) {
+ return a.shift();
+};
+%PrepareFunctionForOptimization(doShift);
+function test() {
+ var a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, '16'];
+ assertEquals(0, doShift(a));
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, '16'], a);
+}
- test();
- test();
- %OptimizeFunctionOnNextCall(doShift);
- test();
+test();
+test();
+%OptimizeFunctionOnNextCall(doShift);
+test();
})();
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index 6db875947a..d791cbebf4 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -559,11 +559,24 @@ function TestPrototypeHoles() {
assertEquals(19, xs[9]);
}
- test(true);
test(false);
+ // Expect a TypeError when trying to delete the accessor.
+ assertThrows(() => test(true), TypeError);
}
TestPrototypeHoles();
+// The following test ensures that [[Delete]] is called and it throws.
+function TestArrayWithAccessorThrowsOnDelete() {
+ let array = [5, 4, 1, /*hole*/, /*hole*/];
+
+ Object.defineProperty(array, '4', {
+ get: () => array.foo,
+ set: (val) => array.foo = val
+ });
+ assertThrows(() => array.sort((a, b) => a - b), TypeError);
+}
+TestArrayWithAccessorThrowsOnDelete();
+
// The following test ensures that elements on the prototype are also copied
// for JSArrays and not only JSObjects.
function TestArrayPrototypeHasElements() {
diff --git a/deps/v8/test/mjsunit/asm/regress-9531.js b/deps/v8/test/mjsunit/asm/regress-9531.js
new file mode 100644
index 0000000000..f52e531fd3
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/regress-9531.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+function Module(stdlib, ffi, buffer) {
+ "use asm";
+ var MEM8 = new stdlib.Uint8Array(buffer);
+ function foo() { return MEM8[0] | 0; }
+ return { foo: foo };
+}
+
+
+function RunOnce() {
+ let buffer = new ArrayBuffer(4096);
+ let ffi = {};
+ let stdlib = {Uint8Array: Uint8Array};
+ let module = Module(stdlib, ffi, buffer);
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(0, module.foo());
+}
+
+(function RunTest() {
+ for (let i = 0; i < 3000; i++) {
+ RunOnce();
+ }
+})();
diff --git a/deps/v8/test/mjsunit/async-stack-traces-realms.js b/deps/v8/test/mjsunit/async-stack-traces-realms.js
new file mode 100644
index 0000000000..9145b93377
--- /dev/null
+++ b/deps/v8/test/mjsunit/async-stack-traces-realms.js
@@ -0,0 +1,115 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --async-stack-traces
+
+// Basic test with an explicit throw.
+(function() {
+ const realm = Realm.createAllowCrossRealmAccess();
+
+ async function one(x) {
+ await two(x);
+ }
+
+ const two = Realm.eval(realm, `(async function two(x) {
+ await x;
+ throw new Error();
+ })`);
+
+ async function test(f) {
+ try {
+ await f(new Promise(resolve => setTimeout(resolve)));
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Realm.global(realm).Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ Realm.dispose(realm);
+ })());
+})();
+
+// Basic test with an implicit throw (via ToNumber on Symbol).
+(function() {
+ const realm = Realm.createAllowCrossRealmAccess();
+
+ async function one(x) {
+ return await two(x);
+ }
+
+ const two = Realm.eval(realm, `(async function two(x) {
+ await x;
+ return +Symbol(); // This will raise a TypeError.
+ })`);
+
+ async function test(f) {
+ try {
+ await f(new Promise(resolve => setTimeout(resolve)));
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Realm.global(realm).TypeError);
+ assertMatches(/TypeError.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async() => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ Realm.dispose(realm);
+ })());
+})();
+
+// Basic test with async functions and promises chained via
+// Promise.prototype.then(), which should still work following
+// the generic chain upwards.
+(function() {
+ const realm = Realm.createAllowCrossRealmAccess();
+
+ async function one(x) {
+ return await two(x).then(x => x);
+ }
+
+ const two = Realm.eval(realm, `(async function two(x) {
+ await x.then(x => x);
+ throw new Error();
+ })`);
+
+ async function test(f) {
+ try {
+ await f(new Promise(resolve => setTimeout(resolve)));
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, Realm.global(realm).Error);
+ assertMatches(/Error.+at two.+at async one.+at async test/ms, e.stack);
+ }
+ }
+
+ assertPromiseResult((async() => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
+ await test(one);
+ await test(one);
+ %OptimizeFunctionOnNextCall(two);
+ await test(one);
+ %OptimizeFunctionOnNextCall(one);
+ await test(one);
+ Realm.dispose(realm);
+ })());
+})();
diff --git a/deps/v8/test/mjsunit/bounds-checks-elimination.js b/deps/v8/test/mjsunit/bounds-checks-elimination.js
index 622e791fa5..e2073ccf1c 100644
--- a/deps/v8/test/mjsunit/bounds-checks-elimination.js
+++ b/deps/v8/test/mjsunit/bounds-checks-elimination.js
@@ -8,6 +8,7 @@ var a = []
for (var i = 0; i < 9; i++) a[i] = i + 1;
function test(f, arg1, arg2, expected) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f(arg1));
f(arg2);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/closures.js b/deps/v8/test/mjsunit/closures.js
index 5ce26ca747..6f482e03e3 100644
--- a/deps/v8/test/mjsunit/closures.js
+++ b/deps/v8/test/mjsunit/closures.js
@@ -35,9 +35,10 @@ function test(n) {
function MyFunction() {
var result = n * 2 + arguments.length;
return result;
- }
+ };
+ %PrepareFunctionForOptimization(MyFunction);
for (var i = 0; i < 5; ++i) MyFunction();
- %OptimizeFunctionOnNextCall(MyFunction)
+ %OptimizeFunctionOnNextCall(MyFunction);
runner(MyFunction, n * 2);
}
diff --git a/deps/v8/test/mjsunit/code-coverage-block-noopt.js b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
index 9865e6ee27..301a05ad5d 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-noopt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-noopt.js
@@ -13,10 +13,11 @@ TestCoverage(
`
function g() { if (true) nop(); } // 0000
function f() { g(); g(); } // 0050
-f(); f(); %OptimizeFunctionOnNextCall(f); // 0100
-f(); f(); f(); f(); f(); f(); // 0150
+%PrepareFunctionForOptimization(f); // 0100
+f(); f(); %OptimizeFunctionOnNextCall(f); // 0150
+f(); f(); f(); f(); f(); f(); // 0200
`,
-[{"start":0,"end":199,"count":1},
+[{"start":0,"end":249,"count":1},
{"start":0,"end":33,"count":16},
{"start":50,"end":76,"count":8}]
);
@@ -29,11 +30,12 @@ TestCoverage("Partial coverage collection",
function f(x) { // 0050
if (x) { nop(); } else { nop(); } // 0100
} // 0150
- f(true); f(true); // 0200
- %OptimizeFunctionOnNextCall(f); // 0250
- %DebugCollectCoverage(); // 0300
- f(false); // 0350
-}(); // 0400
+ %PrepareFunctionForOptimization(f); // 0200
+ f(true); f(true); // 0250
+ %OptimizeFunctionOnNextCall(f); // 0300
+ %DebugCollectCoverage(); // 0350
+ f(false); // 0400
+}(); // 0450
`,
[{"start":52,"end":153,"count":1},
{"start":111,"end":121,"count":0}]
diff --git a/deps/v8/test/mjsunit/code-coverage-block-opt.js b/deps/v8/test/mjsunit/code-coverage-block-opt.js
index 3901fad0ed..7c87dd4113 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-opt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-opt.js
@@ -18,10 +18,11 @@ TestCoverage(
`
function g() { if (true) nop(); } // 0000
function f() { g(); g(); } // 0050
-f(); f(); %OptimizeFunctionOnNextCall(f); // 0100
-f(); f(); f(); f(); f(); f(); // 0150
+%PrepareFunctionForOptimization(f); // 0100
+f(); f(); %OptimizeFunctionOnNextCall(f); // 0150
+f(); f(); f(); f(); f(); f(); // 0200
`,
-[{"start":0,"end":199,"count":1},
+[{"start":0,"end":249,"count":1},
{"start":0,"end":33,"count":16},
{"start":50,"end":76,"count":8}]
);
@@ -37,11 +38,12 @@ TestCoverage("Partial coverage collection",
function f(x) { // 0050
if (x) { nop(); } else { nop(); } // 0100
} // 0150
- f(true); f(true); // 0200
- %OptimizeFunctionOnNextCall(f); // 0250
- %DebugCollectCoverage(); // 0300
- f(false); // 0350
-}(); // 0400
+ %PrepareFunctionForOptimization(f); // 0200
+ f(true); f(true); // 0250
+ %OptimizeFunctionOnNextCall(f); // 0300
+ %DebugCollectCoverage(); // 0350
+ f(false); // 0400
+}(); // 0450
`,
[{"start":52,"end":153,"count":1},
{"start":111,"end":121,"count":0}]
diff --git a/deps/v8/test/mjsunit/compare-objects.js b/deps/v8/test/mjsunit/compare-objects.js
index fb31203b74..4cf17603b0 100644
--- a/deps/v8/test/mjsunit/compare-objects.js
+++ b/deps/v8/test/mjsunit/compare-objects.js
@@ -59,6 +59,7 @@ function test(a, b, less, greater) {
assertEquals(lt(b, a), greater);
assertEquals(gt(b, a), less);
}
+%PrepareFunctionForOptimization(test);
var obj1 = {toString: function() {return "1";}};
var obj2 = {toString: function() {return "2";}};
@@ -88,6 +89,7 @@ var greater = obj1 > obj2;
test(obj1, obj2, less, greater);
test(obj1, obj2, less, greater);
test(obj1, obj2, less, greater);
+%PrepareFunctionForOptimization(test);
%OptimizeFunctionOnNextCall(test);
test(obj1, obj2, less, greater);
test(obj1, obj2, less, greater);
@@ -96,6 +98,7 @@ obj1.toString = function() {return "1"};
var less = obj1 < obj2;
var greater = obj1 > obj2;
test(obj1, obj2, less, greater);
+%PrepareFunctionForOptimization(test);
%OptimizeFunctionOnNextCall(test);
test(obj1, obj2, less, greater);
diff --git a/deps/v8/test/mjsunit/comparison-ops-and-undefined.js b/deps/v8/test/mjsunit/comparison-ops-and-undefined.js
index 06db076580..04bf13d340 100644
--- a/deps/v8/test/mjsunit/comparison-ops-and-undefined.js
+++ b/deps/v8/test/mjsunit/comparison-ops-and-undefined.js
@@ -35,6 +35,7 @@ function test_helper_for_ics(func, b1, b2, b3, b4) {
}
function test_helper_for_crankshaft(func, b1, b2, b3, b4) {
+ %PrepareFunctionForOptimization(func);
assertEquals(b1, func(.5, .5));
%OptimizeFunctionOnNextCall(func);
assertEquals(b1, func(.5, .5));
diff --git a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
index 8f386d06ef..1a50b8cd52 100644
--- a/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
+++ b/deps/v8/test/mjsunit/compiler/array-multiple-receiver-maps.js
@@ -7,8 +7,8 @@
let id = 0;
-function runTest(f, message, mkICTraining, deoptArg) {
- function test(f, message, ictraining, deoptArg) {
+function runTest(f, message, mkICTraining, deoptArg, speculationCheck) {
+ function test(f, message, ictraining, deoptArg, speculationCheck) {
// Train the call ic to the maps.
let t = ictraining;
@@ -41,15 +41,22 @@ function runTest(f, message, mkICTraining, deoptArg) {
// Trigger deopt, causing no-speculation bit to be set.
let a1 = deoptArg;
let a2 = deoptArg;
+ let a3 = deoptArg;
message += " for args " + JSON.stringify(a1);
message_unoptimized = message + " should have been unoptimized"
- message_optimized = message + " should have been unoptimized"
- f(a1.arr, () => a1.el);
+ message_optimized = message + " should have been optimized"
+ f(a1.darr, () => a1.del);
assertUnoptimized(f, undefined, message_unoptimized);
+ if (speculationCheck) {
+ %PrepareFunctionForOptimization(f);
+ %OptimizeFunctionOnNextCall(f);
+ f(a2.darr, () => a2.del);
+ assertUnoptimized(f, undefined, message_unoptimized);
+ }
%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
// No speculation should protect against further deopts.
- f(a2.arr, () => a2.el);
+ f(a3.darr, () => a3.del);
assertOptimized(f, undefined, message_optimized);
}
}
@@ -64,6 +71,8 @@ function runTest(f, message, mkICTraining, deoptArg) {
testString = testString.replace(new RegExp("ictraining", 'g'), mkICTraining.toString());
testString = testString.replace(new RegExp("deoptArg", 'g'),
deoptArg ? JSON.stringify(deoptArg).replace(/"/g,'') : "undefined");
+ testString = testString.replace(new RegExp("speculationCheck", 'g'),
+ speculationCheck ? JSON.stringify(deoptArg).replace(/"/g,'') : "undefined");
// Make field names unique to avoid learning of types.
id = id + 1;
@@ -71,16 +80,17 @@ function runTest(f, message, mkICTraining, deoptArg) {
testString = testString.replace(/el:/g, 'el' + id + ':');
testString = testString.replace(/[.]arr/g, '.arr' + id);
testString = testString.replace(/arr:/g, 'arr' + id + ':');
+ testString = testString.replace(/[.]del/g, '.del' + id);
+ testString = testString.replace(/[.]darr/g, '.darr' + id);
var modTest = new Function("message", testString);
- //print(modTest);
modTest(message);
}
let checks = {
smiReceiver:
{ mkTrainingArguments : () => [{arr:[1], el:3}],
- deoptingArguments : [{arr:[0.1], el:1}, {arr:[{}], el:1}]
+ deoptingArguments : [{darr:[0.1], del:1}, {darr:[{}], del:1}]
},
objectReceiver:
{ mkTrainingArguments : () => [{arr:[{}], el:0.1}],
@@ -88,30 +98,53 @@ let checks = {
},
multipleSmiReceivers:
{ mkTrainingArguments : () => { let b = [1]; b.x=3; return [{arr:[1], el:3}, {arr:b, el:3}] },
- deoptingArguments : [{arr:[0.1], el:1}, {arr:[{}], el:1}]
+ deoptingArguments : [{darr:[0.1], del:1}, {darr:[{}], del:1}]
},
multipleSmiReceiversPackedUnpacked:
{ mkTrainingArguments : () => { let b = [1]; b[100] = 3; return [{arr:[1], el:3}, {arr:b, el:3}] },
- deoptingArguments : [{arr:[0.1], el:1}, {arr:[{}], el:1}]
+ deoptingArguments : [{darr:[0.1], del:1}, {darr:[{}], del:1}]
},
multipleDoubleReceivers:
{ mkTrainingArguments : () => { let b = [0.1]; b.x=0.3; return [{arr:[0.1], el:0.3}, {arr:b, el:0.3}] },
- deoptingArguments : [{arr:[{}], el:true}, {arr:[1], el:true}]
+ deoptingArguments : [{darr:[{}], del:true}, {darr:[1], del: 1}]
},
multipleDoubleReceiversPackedUnpacked:
{ mkTrainingArguments : () => { let b = [0.1]; b[100] = 0.3; return [{arr:[0.1], el:0.3}, {arr:b, el:0.3}] },
- deoptingArguments : [{arr:[{}], el:true}, {arr:[1], el:true}]
+ deoptingArguments : [{darr:[{}], del:true}, {darr:[1], del: 1}]
},
multipleMixedReceivers:
- { mkTrainingArguments : () => { let b = [0.1]; b.x=0.3; return [{arr:[1], el:0.3}, {arr:[{}], el:true}, {arr:b, el:0.3}] },
+ { mkTrainingArguments : () => { let b = [0.1]; b.x=0.3; return [{arr:[1], el:1}, {arr:[{}], el:true}, {arr:b, el:0.3}] },
deoptingArguments : []
},
multipleMixedReceiversPackedUnpacked:
- { mkTrainingArguments : () => { let b = [0.1]; b[100] = 0.3; return [{arr:[1], el:0.3}, {arr:[{}], el:true}, {arr:b, el:0.3}] },
+ { mkTrainingArguments : () => { let b = [0.1]; b[100] = 0.3; return [{arr:[1], el:1}, {arr:[{}], el:true}, {arr:b, el:0.3}] },
deoptingArguments : []
},
};
+let no_speculation_checks = {
+ smiReceiver:
+ { mkTrainingArguments : () => [{arr:[1], el:3}],
+ deoptingArguments : [{darr:[0.1], del:true}]
+ },
+ multipleSmiReceivers:
+ { mkTrainingArguments : () => { let b = [1]; b.x=3; return [{arr:[1], el:3}, {arr:[1], el:3}] },
+ deoptingArguments : [{darr:[0.1], del:true}]
+ },
+ multipleSmiReceiversPackedUnpacked:
+ { mkTrainingArguments : () => { let b = [1]; b[100] = 3; return [{arr:[1], el:3}, {arr:b, el:3}] },
+ deoptingArguments : [{darr:[0.1], del:true}]
+ },
+ multipleDoubleReceivers:
+ { mkTrainingArguments : () => { let b = [0.1]; b.x=0.3; return [{arr:[0.1], el:0.3}, {arr:b, el:0.3}] },
+ deoptingArguments : [{darr:[1], del:true}]
+ },
+ multipleDoubleReceiversPackedUnpacked:
+ { mkTrainingArguments : () => { let b = [0.1]; b[100] = 0.3; return [{arr:[0.1], el:0.3}, {arr:b, el:0.3}] },
+ deoptingArguments : [{darr:[1], del:true}]
+ },
+};
+
const functions = {
push_reliable: (a,g) => { let b = g(); return a.push(2, b); },
push_unreliable: (a,g) => { return a.push(2, g()); },
@@ -121,6 +154,11 @@ const functions = {
shift_unreliable: (a,g) => { return a.shift(2, g()); }
}
+const push_functions = {
+ push_reliable: (a,g) => { let b = g(); return a.push(2, b); },
+ push_unreliable: (a,g) => { return a.push(2, g()); },
+}
+
Object.keys(checks).forEach(
key => {
let check = checks[key];
@@ -134,3 +172,17 @@ Object.keys(checks).forEach(
}
}
);
+
+Object.keys(no_speculation_checks).forEach(
+ key => {
+ let check = no_speculation_checks[key];
+
+ for (fnc in push_functions) {
+ runTest(functions[fnc], "test-spec-check-" + fnc + "-" + key, check.mkTrainingArguments);
+ // Test each deopting arg separately.
+ for (let deoptArg of check.deoptingArguments) {
+ runTest(functions[fnc], "testDeopt-spec-check-" + fnc + "-" + key, check.mkTrainingArguments, deoptArg, true);
+ }
+ }
+ }
+);
diff --git a/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js b/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js
new file mode 100644
index 0000000000..1390acc356
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-add-no-deopt-loop.js
@@ -0,0 +1,36 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+
+const big = 2n ** BigInt((2 ** 30)-1);
+
+
+function testAdd(x, y) {
+ return x + y;
+}
+
+
+%PrepareFunctionForOptimization(testAdd);
+testAdd(3n, 7n);
+testAdd(17n, -54n);
+%OptimizeFunctionOnNextCall(testAdd);
+assertEquals(testAdd(6n, 2n), 8n);
+assertOptimized(testAdd);
+
+assertThrows(() => testAdd(big, big), RangeError);
+assertUnoptimized(testAdd);
+
+%PrepareFunctionForOptimization(testAdd);
+testAdd(30n, -50n);
+testAdd(23n, 5n);
+%OptimizeFunctionOnNextCall(testAdd);
+assertEquals(testAdd(-7n, -12n), -19n);
+assertOptimized(testAdd);
+
+assertThrows(() => testAdd(big, big), RangeError);
+assertOptimized(testAdd);
+assertThrows(() => testAdd(big, big), RangeError);
+assertOptimized(testAdd);
diff --git a/deps/v8/test/mjsunit/compiler/bigint-add.js b/deps/v8/test/mjsunit/compiler/bigint-add.js
new file mode 100644
index 0000000000..0db3f49c1f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-add.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+{
+ let a = 0n;
+ a = 3n;
+
+ function TestAdd() {
+ let sum = 0n;
+
+ for (let i = 0; i < 3; ++i) {
+ sum = a + sum;
+ }
+
+ return sum;
+ }
+
+ %PrepareFunctionForOptimization(TestAdd);
+ TestAdd();
+ TestAdd();
+ %OptimizeFunctionOnNextCall(TestAdd);
+ TestAdd();
+ TestAdd();
+}
diff --git a/deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js b/deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js
new file mode 100644
index 0000000000..f669c17c29
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-int64-lowered.js
@@ -0,0 +1,82 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestAsUintN() {
+ assertEquals(0n, BigInt.asUintN(64, 0n));
+ assertEquals(0n, BigInt.asUintN(8, 0n));
+ assertEquals(0n, BigInt.asUintN(1, 0n));
+ assertEquals(0n, BigInt.asUintN(0, 0n));
+ assertEquals(0n, BigInt.asUintN(100, 0n));
+
+ assertEquals(123n, BigInt.asUintN(64, 123n));
+ assertEquals(123n, BigInt.asUintN(32, 123n));
+ assertEquals(123n, BigInt.asUintN(8, 123n));
+ assertEquals(59n, BigInt.asUintN(6, 123n));
+ assertEquals(27n, BigInt.asUintN(5, 123n));
+ assertEquals(11n, BigInt.asUintN(4, 123n));
+ assertEquals(1n, BigInt.asUintN(1, 123n));
+ assertEquals(0n, BigInt.asUintN(0, 123n));
+ assertEquals(123n, BigInt.asUintN(72, 123n));
+
+ assertEquals(BigInt("0xFFFFFFFFFFFFFF85"), BigInt.asUintN(64, -123n));
+ assertEquals(BigInt("0xFFFFFF85"), BigInt.asUintN(32, -123n));
+ assertEquals(BigInt("0x85"), BigInt.asUintN(8, -123n));
+ assertEquals(5n, BigInt.asUintN(6, -123n));
+ assertEquals(5n, BigInt.asUintN(5, -123n));
+ assertEquals(5n, BigInt.asUintN(4, -123n));
+ assertEquals(1n, BigInt.asUintN(1, -123n));
+ assertEquals(0n, BigInt.asUintN(0, -123n));
+ assertEquals(BigInt("0xFFFFFFFFFFFFFFFF85"), BigInt.asUintN(72, -123n));
+}
+
+function TestInt64LoweredOperations() {
+ assertEquals(0n, BigInt.asUintN(64, -0n));
+ assertEquals(0n, BigInt.asUintN(64, 15n + -15n));
+ assertEquals(0n, BigInt.asUintN(64, 0n + 0n));
+ assertEquals(14n, BigInt.asUintN(32, 8n + 6n));
+ assertEquals(813n, BigInt.asUintN(10, 1013n + -200n));
+ assertEquals(15n, BigInt.asUintN(4, -319n + 302n));
+
+ for (let i = 0; i < 2; ++i) {
+ let x = 32n; // x = 32n
+ if (i === 1) {
+ x = BigInt.asUintN(64, x + 3n); // x = 35n
+ const y = x + -8n + x; // x = 35n, y = 62n
+ x = BigInt.asUintN(6, y + x); // x = 33n, y = 62n
+ x = -9n + y + -x; // x = 20n
+ x = BigInt.asUintN(10000 * i, x); // x = 20n
+ } else {
+ x = x + 400n; // x = 432n
+ x = -144n + BigInt.asUintN(8, 500n) + x; // x = 532n
+ }
+ assertEquals(20n, BigInt.asUintN(8, x));
+ }
+
+ let x = 7n;
+ for (let i = 0; i < 10; ++i) {
+ x = x + 5n;
+ }
+ assertEquals(57n, BigInt.asUintN(8, x));
+
+ let y = 7n;
+ for(let i = 0; i < 10; ++i) {
+ y = BigInt.asUintN(4, y + 16n);
+ }
+ assertEquals(7n, y);
+}
+
+function OptimizeAndTest(fn) {
+ %PrepareFunctionForOptimization(fn);
+ fn();
+ fn();
+ %OptimizeFunctionOnNextCall(fn);
+ fn();
+ assertOptimized(fn);
+ fn();
+}
+
+OptimizeAndTest(TestAsUintN);
+OptimizeAndTest(TestInt64LoweredOperations);
diff --git a/deps/v8/test/mjsunit/compiler/bigint-negate.js b/deps/v8/test/mjsunit/compiler/bigint-negate.js
new file mode 100644
index 0000000000..616f74ce12
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/bigint-negate.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function TestNegate() {
+ assertEquals(0n, -0n);
+
+ const x = 15n;
+ assertEquals(-15n, -x);
+ assertEquals(15n, - -x);
+ assertEquals(30n, -(-x + -x));
+}
+
+function OptimizeAndTest(fn) {
+ %PrepareFunctionForOptimization(fn);
+ fn();
+ fn();
+ %OptimizeFunctionOnNextCall(fn);
+ fn();
+ assertOptimized(fn);
+ fn();
+}
+
+OptimizeAndTest(TestNegate);
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js b/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
index cdeb7f2ffc..e824cabda6 100644
--- a/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --no-assert-types
// Check that constant-folding of arithmetic results in identical nodes.
(function() {
- function foo(x) {
- %TurbofanStaticAssert(1 * x == x + 0);
- }
- foo(121);
- foo(122);
- %OptimizeFunctionOnNextCall(foo);
- foo(123);
+function foo(x) {
+ %TurbofanStaticAssert(1 * x == x + 0);
+};
+%PrepareFunctionForOptimization(foo);
+foo(121);
+foo(122);
+%OptimizeFunctionOnNextCall(foo);
+foo(123);
})();
diff --git a/deps/v8/test/mjsunit/compiler/dataview-deopt.js b/deps/v8/test/mjsunit/compiler/dataview-deopt.js
index cf8132d3ee..3868fbbd2c 100644
--- a/deps/v8/test/mjsunit/compiler/dataview-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/dataview-deopt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --opt --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt --no-stress-flush-bytecode
// Check that there are no deopt loops for DataView methods.
diff --git a/deps/v8/test/mjsunit/compiler/generator-jump-targets.js b/deps/v8/test/mjsunit/compiler/generator-jump-targets.js
new file mode 100644
index 0000000000..85251b5d19
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/generator-jump-targets.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var gaga = 42;
+
+function* foo(x, b) {
+ if (b) return;
+ x.p;
+ while (true) {
+ gaga;
+ yield;
+ }
+}
+%PrepareFunctionForOptimization(foo);
+foo({p:42}, true);
+foo({p:42}, true);
+%OptimizeFunctionOnNextCall(foo);
+const g = foo({p:42}, false);
diff --git a/deps/v8/test/mjsunit/compiler/keyed-load-on-string.js b/deps/v8/test/mjsunit/compiler/keyed-load-on-string.js
index 4c3f6d8c74..e0cd397fc1 100644
--- a/deps/v8/test/mjsunit/compiler/keyed-load-on-string.js
+++ b/deps/v8/test/mjsunit/compiler/keyed-load-on-string.js
@@ -8,15 +8,17 @@ var s = "hello";
function foo() {
return s[4];
-}
-assertTrue("o" === foo());
+};
+%PrepareFunctionForOptimization(foo);
+assertTrue('o' === foo());
assertTrue("o" === foo());
%OptimizeFunctionOnNextCall(foo);
assertTrue("o" === foo());
function bar() {
return s[5];
-}
+};
+%PrepareFunctionForOptimization(bar);
assertSame(undefined, bar());
assertSame(undefined, bar());
%OptimizeFunctionOnNextCall(bar);
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
index e873dd0e55..4d1425272a 100644
--- a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
@@ -28,6 +28,7 @@
%TurbofanStaticAssert(Object.is(v2, 123));
}
+ %PrepareFunctionForOptimization(lit_const_smi);
lit_const_smi(); lit_const_smi();
%OptimizeFunctionOnNextCall(lit_const_smi); lit_const_smi();
@@ -43,6 +44,7 @@
%TurbofanStaticAssert(Object.is(v2, o));
}
+ %PrepareFunctionForOptimization(lit_const_object);
lit_const_object(); lit_const_object();
%OptimizeFunctionOnNextCall(lit_const_object); lit_const_object();
@@ -58,6 +60,7 @@
%TurbofanStaticAssert(Object.is(v2, kk));
}
+ %PrepareFunctionForOptimization(lit_computed_smi);
lit_computed_smi(1); lit_computed_smi(2);
%OptimizeFunctionOnNextCall(lit_computed_smi); lit_computed_smi(3);
@@ -76,6 +79,7 @@
%TurbofanStaticAssert(Object.is(v2, k));
}
+ %PrepareFunctionForOptimization(lit_param_object);
lit_param_object({x: 1}); lit_param_object({x: 2});
%OptimizeFunctionOnNextCall(lit_param_object); lit_param_object({x: 3});
@@ -90,6 +94,7 @@
%TurbofanStaticAssert(Object.is(v2, k));
}
+ %PrepareFunctionForOptimization(nested_lit_param);
nested_lit_param(1); nested_lit_param(2);
%OptimizeFunctionOnNextCall(nested_lit_param); nested_lit_param(3);
@@ -108,12 +113,12 @@
%TurbofanStaticAssert(Object.is(v2, k));
}
+ %PrepareFunctionForOptimization(nested_lit_param_object);
nested_lit_param_object({x: 1}); nested_lit_param_object({x: 2});
%OptimizeFunctionOnNextCall(nested_lit_param_object);
nested_lit_param_object({x: 3});
- %EnsureFeedbackVectorForFunction(inst_param);
function inst_param(k) {
let b = new B(k);
maybe_sideeffect(b);
@@ -124,6 +129,8 @@
%TurbofanStaticAssert(Object.is(v2, k));
}
+ %EnsureFeedbackVectorForFunction(B);
+ %PrepareFunctionForOptimization(inst_param);
inst_param(1); inst_param(2);
%OptimizeFunctionOnNextCall(inst_param); inst_param(3);
@@ -132,11 +139,11 @@
// inst_param(1.1); inst_param(2.2);
// %OptimizeFunctionOnNextCall(inst_param); inst_param(3.3);
+ %PrepareFunctionForOptimization(inst_param);
inst_param({x: 1}); inst_param({x: 2});
%OptimizeFunctionOnNextCall(inst_param); inst_param({x: 3});
- %EnsureFeedbackVectorForFunction(inst_computed);
function inst_computed(k) {
let kk = 2 * k;
let b = new B(kk);
@@ -148,9 +155,12 @@
%TurbofanStaticAssert(Object.is(v2, kk));
}
+ %EnsureFeedbackVectorForFunction(B);
+ %PrepareFunctionForOptimization(inst_computed);
inst_computed(1); inst_computed(2);
%OptimizeFunctionOnNextCall(inst_computed); inst_computed(3);
+ %PrepareFunctionForOptimization(inst_computed);
inst_computed(1.1); inst_computed(2.2);
%OptimizeFunctionOnNextCall(inst_computed); inst_computed(3.3);
})();
diff --git a/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
index 19b4d2a0f1..65ac39bd18 100644
--- a/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
+++ b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
@@ -38,14 +38,14 @@ function f(x) {
var xx = x * x;
var xxstr = xx.toString();
return xxstr.length;
-}
-
+};
+%PrepareFunctionForOptimization(f);
function g(x) {
var xxx = Math.sqrt(x) | 0;
var xxxstr = xxx.toString();
return xxxstr.length;
-}
-
+};
+%PrepareFunctionForOptimization(g);
function k(x) {
return x * x;
}
@@ -59,11 +59,11 @@ assertUnoptimized(g);
%OptimizeFunctionOnNextCall(g, "concurrent");
f(g(3)); // Kick off recompilation.
-assertUnoptimized(f, "no sync"); // Not yet optimized since recompilation
-assertUnoptimized(g, "no sync"); // is still blocked.
+assertUnoptimized(f, 'no sync'); // Not yet optimized since recompilation
+assertUnoptimized(g, 'no sync'); // is still blocked.
// Let concurrent recompilation proceed.
%UnblockConcurrentRecompilation();
-assertOptimized(f, "sync"); // Optimized once we sync with the
-assertOptimized(g, "sync"); // background thread.
+assertOptimized(f, 'sync'); // Optimized once we sync with the
+assertOptimized(g, 'sync'); // background thread.
diff --git a/deps/v8/test/mjsunit/compiler/named-store.js b/deps/v8/test/mjsunit/compiler/named-store.js
index 037d8593e5..629f2f1cfd 100644
--- a/deps/v8/test/mjsunit/compiler/named-store.js
+++ b/deps/v8/test/mjsunit/compiler/named-store.js
@@ -28,13 +28,16 @@ for (var i = 0.25; i < 6.25; i++) {
// stirngs
for (var i = 0; i < 6; i++) {
- var f = new Foo(i + "", (i + 2) + "");
- assertEquals((i + "") + ((i + 2) + ""), f.x);
+ var f = new Foo(i + '', i + 2 + '');
+ assertEquals(i + '' + (i + 2 + ''), f.x);
}
{
- function Global(i) { this.bla = i }
+ function Global(i) {
+ this.bla = i;
+ };
+ %PrepareFunctionForOptimization(Global);
Global(0);
Global(1);
%OptimizeFunctionOnNextCall(Global);
@@ -44,14 +47,20 @@ for (var i = 0; i < 6; i++) {
{
- function access(obj) { obj.bla = 42 }
+ function access(obj) {
+ obj.bla = 42;
+ }
access({a: 0});
access({b: 0});
access({c: 0});
access({d: 0});
access({e: 0});
var global = this;
- function foo() { access(global) };
+ function foo() {
+ access(global);
+ };
+ %PrepareFunctionForOptimization(foo);
+ ;
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-for-in.js b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
index 70c45cecfe..e3a0bb0c98 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-for-in.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
@@ -266,6 +266,7 @@ function osr_inner(t, limit) {
}
r += x;
}
+ %PrepareFunctionForOptimization(osr_inner);
}
return r;
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-alignment.js b/deps/v8/test/mjsunit/compiler/osr-alignment.js
index 7a348ca377..97a65351c8 100644
--- a/deps/v8/test/mjsunit/compiler/osr-alignment.js
+++ b/deps/v8/test/mjsunit/compiler/osr-alignment.js
@@ -38,7 +38,6 @@ function f1() {
}
return sum;
}
-%PrepareFunctionForOptimization(f1);
function f2() {
var sum = 0;
@@ -51,7 +50,6 @@ function f2() {
}
return sum;
}
-%PrepareFunctionForOptimization(f2);
function f3() {
var sum = 0;
@@ -64,11 +62,11 @@ function f3() {
}
return sum;
}
-%PrepareFunctionForOptimization(f3);
function test1() {
var j = 11;
for (var i = 0; i < 2; i++) {
+ %PrepareFunctionForOptimization(f1);
assertEquals(509500, f1());
}
}
@@ -76,6 +74,7 @@ function test1() {
function test2() {
for (var i = 0; i < 2; i++) {
var j = 11, k = 12;
+ %PrepareFunctionForOptimization(f2);
assertEquals(509500, f2());
}
}
@@ -83,6 +82,7 @@ function test2() {
function test3() {
for (var i = 0; i < 2; i++) {
var j = 11, k = 13, m = 14;
+ %PrepareFunctionForOptimization(f3);
assertEquals(509500, f3());
}
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-arguments.js b/deps/v8/test/mjsunit/compiler/osr-arguments.js
index 14a769fc44..68e5feab28 100644
--- a/deps/v8/test/mjsunit/compiler/osr-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/osr-arguments.js
@@ -13,5 +13,6 @@ function f1() {
return sum;
}
+%PrepareFunctionForOptimization(f1);
let result = f1(1, 1, 2, 3);
assertEquals(7000, result);
diff --git a/deps/v8/test/mjsunit/compiler/osr-array-len.js b/deps/v8/test/mjsunit/compiler/osr-array-len.js
index f691380e6a..2a1386d3fd 100644
--- a/deps/v8/test/mjsunit/compiler/osr-array-len.js
+++ b/deps/v8/test/mjsunit/compiler/osr-array-len.js
@@ -12,12 +12,14 @@ function fastaRandom(n, table) {
%OptimizeOsr();
line[0] = n;
n--;
+ %PrepareFunctionForOptimization(fastaRandom);
}
}
-%PrepareFunctionForOptimization(fastaRandom);
print("---BEGIN 1");
+%PrepareFunctionForOptimization(fastaRandom);
assertEquals(undefined, fastaRandom(6, null));
print("---BEGIN 2");
+%PrepareFunctionForOptimization(fastaRandom);
assertEquals(undefined, fastaRandom(6, null));
print("---END");
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js b/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js
index 7c41f54074..c7107c795a 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js
@@ -22,6 +22,9 @@ function foo() {
return result;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(4950, foo()());
+%PrepareFunctionForOptimization(foo);
assertEquals(4950, foo()());
+%PrepareFunctionForOptimization(foo);
assertEquals(4950, foo()());
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
index ed5bbf1b57..2d7f558c0d 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
@@ -28,6 +28,7 @@ function foo() {
function check() {
+ %PrepareFunctionForOptimization(foo);
var r = foo();
assertEquals(45, r.pop());
for (var i = 9; i >= 0; i--) {
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope.js b/deps/v8/test/mjsunit/compiler/osr-block-scope.js
index 7593909673..cb4a290a09 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope.js
@@ -14,13 +14,21 @@ function nest(body, name, depth) {
body = body + "}"
}
- return body.replace(new RegExp("function " + name + "\\(\\) {"),
- "function " + name + "_" + x + "() {\n" + header);
+ // Replace function name
+ var new_func = body.replace(new RegExp("function " + name + "\\(\\) {"),
+ "function " + name + "_" + x + "() {\n" + header);
+
+ // Replace PrepareForOptimize
+ return new_func.replace(new RegExp("%PrepareFunctionForOptimization\\(" + name + "\\);"),
+ "%PrepareFunctionForOptimization(" + name + "_" + x + ");");
}
function test(expected, func, depth) {
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
var orig = func.toString();
@@ -28,10 +36,12 @@ function test(expected, func, depth) {
for (var depth = 1; depth < 4; depth++) {
var body = nest(orig, name, depth);
func = eval("(" + body + ")");
- %PrepareFunctionForOptimization(func);
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
}
}
@@ -43,6 +53,7 @@ function foo() {
for (var i = 0; i < 10; i++) {
%OptimizeOsr();
sum += i;
+ %PrepareFunctionForOptimization(foo);
}
result = sum;
}
@@ -57,6 +68,7 @@ function bar() {
for (var i = 0; i < 10; i++) {
%OptimizeOsr();
sum += i;
+ %PrepareFunctionForOptimization(bar);
}
return sum;
}
@@ -87,6 +99,7 @@ function row() {
%OptimizeOsr();
sum = i;
i = i + 1 | 0;
+ %PrepareFunctionForOptimization(row);
}
}
return 11;
@@ -100,6 +113,7 @@ function nub() {
while (i < 2) {
%OptimizeOsr();
i++;
+ %PrepareFunctionForOptimization(nub);
}
return i;
}
@@ -115,6 +129,7 @@ function kub() {
%OptimizeOsr();
i++;
result = x;
+ %PrepareFunctionForOptimization(kub);
}
return result;
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-for-let.js b/deps/v8/test/mjsunit/compiler/osr-for-let.js
index 3ecbe04ed2..024097b7ee 100644
--- a/deps/v8/test/mjsunit/compiler/osr-for-let.js
+++ b/deps/v8/test/mjsunit/compiler/osr-for-let.js
@@ -7,8 +7,11 @@
"use strict";
function test(expected, func) {
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
}
@@ -31,24 +34,24 @@ test(4005, bar);
function baz() {
let sum = 0;
for (let i = 0; i < 2; i++) {
+ %PrepareFunctionForOptimization(baz);
sum = 2;
%OptimizeOsr();
}
return sum;
}
-%PrepareFunctionForOptimization(baz);
test(2, baz);
function qux() {
var result = 0;
for (let i = 0; i < 2; i++) {
+ %PrepareFunctionForOptimization(qux);
result = i;
%OptimizeOsr();
}
return result;
}
-%PrepareFunctionForOptimization(qux);
test(1, qux);
@@ -56,6 +59,7 @@ function nux() {
var result = 0;
for (let i = 0; i < 2; i++) {
{
+ %PrepareFunctionForOptimization(nux);
let sum = i;
%OptimizeOsr();
result = sum;
@@ -63,7 +67,6 @@ function nux() {
}
return result;
}
-%PrepareFunctionForOptimization(nux);
test(1, nux);
diff --git a/deps/v8/test/mjsunit/compiler/osr-forin-nested.js b/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
index d3e0c19721..a079457d10 100644
--- a/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
+++ b/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
@@ -14,6 +14,7 @@ function foo(t) {
for (var x in t) {
for (var i = 0; i < 2; i++) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(foo);
}
}
return 5;
@@ -28,6 +29,7 @@ function bar(t) {
for (var i = 0; i < 2; i++) {
%OptimizeOsr();
sum += t[x];
+ %PrepareFunctionForOptimization(bar);
}
}
return sum;
diff --git a/deps/v8/test/mjsunit/compiler/osr-infinite.js b/deps/v8/test/mjsunit/compiler/osr-infinite.js
index bfd4b7e3c8..51b3a3e871 100644
--- a/deps/v8/test/mjsunit/compiler/osr-infinite.js
+++ b/deps/v8/test/mjsunit/compiler/osr-infinite.js
@@ -19,6 +19,7 @@ function thrower() {
function test(func) {
for (var i = 0; i < 3; i++) {
global_counter = 0;
+ %PrepareFunctionForOptimization(func);
assertThrows(func);
}
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-labeled.js b/deps/v8/test/mjsunit/compiler/osr-labeled.js
index ba905f394e..d566cd9913 100644
--- a/deps/v8/test/mjsunit/compiler/osr-labeled.js
+++ b/deps/v8/test/mjsunit/compiler/osr-labeled.js
@@ -8,6 +8,7 @@ function foo() {
var sum = 0;
A: for (var i = 0; i < 5; i++) {
B: for (var j = 0; j < 5; j++) {
+ %PrepareFunctionForOptimization(foo);
C: for (var k = 0; k < 10; k++) {
if (k === 5) %OptimizeOsr();
if (k === 6) break B;
@@ -18,14 +19,15 @@ function foo() {
return sum;
}
%PrepareFunctionForOptimization(foo);
-
assertEquals(30, foo());
+%PrepareFunctionForOptimization(foo);
assertEquals(30, foo());
function bar(a) {
var sum = 0;
A: for (var i = 0; i < 5; i++) {
B: for (var j = 0; j < 5; j++) {
+ %PrepareFunctionForOptimization(bar);
C: for (var k = 0; k < 10; k++) {
sum++;
%OptimizeOsr();
@@ -38,12 +40,16 @@ function bar(a) {
return sum;
}
%PrepareFunctionForOptimization(bar);
-
assertEquals(1, bar(1));
+%PrepareFunctionForOptimization(bar);
assertEquals(1, bar(1));
+%PrepareFunctionForOptimization(bar);
assertEquals(5, bar(2));
+%PrepareFunctionForOptimization(bar);
assertEquals(5, bar(2));
+%PrepareFunctionForOptimization(bar);
assertEquals(25, bar(3));
+%PrepareFunctionForOptimization(bar);
assertEquals(25, bar(3));
diff --git a/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js b/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
index 4b6fd766f7..f1f317420f 100644
--- a/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
+++ b/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
@@ -8,6 +8,7 @@ function mod() {
function f0() {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f0);
}
return {blah: i};
}
@@ -16,6 +17,7 @@ function mod() {
function f1(a) {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f1);
}
return {blah: i};
}
@@ -24,6 +26,7 @@ function mod() {
function f2(a,b) {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f2);
}
return {blah: i};
}
@@ -32,6 +35,7 @@ function mod() {
function f3(a,b,c) {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f3);
}
return {blah: i};
}
@@ -40,6 +44,7 @@ function mod() {
function f4(a,b,c,d) {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f4);
}
return {blah: i};
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-literals.js b/deps/v8/test/mjsunit/compiler/osr-literals.js
index c5179d0e18..9d609f1db7 100644
--- a/deps/v8/test/mjsunit/compiler/osr-literals.js
+++ b/deps/v8/test/mjsunit/compiler/osr-literals.js
@@ -8,6 +8,7 @@ function mod() {
function f0() {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f0);
}
return {blah: i};
}
@@ -16,6 +17,7 @@ function mod() {
function f1(a) {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f1);
}
return {blah: i};
}
@@ -24,6 +26,7 @@ function mod() {
function f2(a,b) {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f2);
}
return {blah: i};
}
@@ -32,6 +35,7 @@ function mod() {
function f3(a,b,c) {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f3);
}
return {blah: i};
}
@@ -40,6 +44,7 @@ function mod() {
function f4(a,b,c,d) {
for (var i = 0; i < 3; i = i + 1 | 0) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(f4);
}
return {blah: i};
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-manual1.js b/deps/v8/test/mjsunit/compiler/osr-manual1.js
index 0a24ba6d3e..e98d84c26f 100644
--- a/deps/v8/test/mjsunit/compiler/osr-manual1.js
+++ b/deps/v8/test/mjsunit/compiler/osr-manual1.js
@@ -9,7 +9,7 @@ var counter = 111;
function gen(w) { // defeat compiler cache.
var num = counter++;
var Z = [ "", "", "", ];
- Z[w] = "%OptimizeOsr()";
+ Z[w] = "%OptimizeOsr(); %PrepareFunctionForOptimization(f" + num + ")";
var src =
"function f" + num + "(a,b,c) {" +
" var x = 0;" +
diff --git a/deps/v8/test/mjsunit/compiler/osr-manual2.js b/deps/v8/test/mjsunit/compiler/osr-manual2.js
index 3359e83d2e..f4a8a663b0 100644
--- a/deps/v8/test/mjsunit/compiler/osr-manual2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-manual2.js
@@ -9,7 +9,7 @@ var counter = 188;
function gen(w) { // defeat compiler cache.
var num = counter++;
var Z = [ "", "", "", ];
- Z[w] = "%OptimizeOsr()";
+ Z[w] = "%OptimizeOsr(); %PrepareFunctionForOptimization(f" + num + ")";
var src =
"function f" + num + "(a,b,c) {" +
" var x = 0;" +
diff --git a/deps/v8/test/mjsunit/compiler/osr-maze1.js b/deps/v8/test/mjsunit/compiler/osr-maze1.js
index 2f51d3de21..b2b105db0c 100644
--- a/deps/v8/test/mjsunit/compiler/osr-maze1.js
+++ b/deps/v8/test/mjsunit/compiler/osr-maze1.js
@@ -8,35 +8,44 @@ function bar(goal) {
var count = 0;
var sum = 11;
var i = 35;
+ %PrepareFunctionForOptimization(bar);
while (i-- > 33) {
if (count++ == goal) %OptimizeOsr();
sum = sum + i;
}
+ %PrepareFunctionForOptimization(bar);
while (i-- > 31) {
if (count++ == goal) %OptimizeOsr();
j = 9;
+ %PrepareFunctionForOptimization(bar);
while (j-- > 7) {
if (count++ == goal) %OptimizeOsr();
sum = sum + j * 3;
}
+ %PrepareFunctionForOptimization(bar);
while (j-- > 5) {
if (count++ == goal) %OptimizeOsr();
sum = sum + j * 5;
}
}
while (i-- > 29) {
+ %PrepareFunctionForOptimization(bar);
if (count++ == goal) %OptimizeOsr();
while (j-- > 3) {
var k = 10;
+ %PrepareFunctionForOptimization(bar);
if (count++ == goal) %OptimizeOsr();
while (k-- > 8) {
+ %PrepareFunctionForOptimization(bar);
if (count++ == goal) %OptimizeOsr();
sum = sum + k * 11;
}
}
while (j-- > 1) {
+ %PrepareFunctionForOptimization(bar);
if (count++ == goal) %OptimizeOsr();
while (k-- > 6) {
+ %PrepareFunctionForOptimization(bar);
if (count++ == goal) %OptimizeOsr();
sum = sum + j * 13;
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-maze2.js b/deps/v8/test/mjsunit/compiler/osr-maze2.js
index 5255dab191..1dbefe1ac1 100644
--- a/deps/v8/test/mjsunit/compiler/osr-maze2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-maze2.js
@@ -51,7 +51,7 @@ function gen(i) {
body = body.replace(new RegExp("bar"), "bar" + i);
for (var j = 1; j < 10; j++) {
var r = new RegExp("LOOP" + j + "\\(\\);");
- if (i == j) body = body.replace(r, "%OptimizeOsr();");
+ if (i == j) body = body.replace(r, "%OptimizeOsr(); %PrepareFunctionForOptimization(bar" + i +");");
else body = body.replace(r, "");
}
return eval("(" + body + ")");
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested.js b/deps/v8/test/mjsunit/compiler/osr-nested.js
index 7add40ed53..9775f6563b 100644
--- a/deps/v8/test/mjsunit/compiler/osr-nested.js
+++ b/deps/v8/test/mjsunit/compiler/osr-nested.js
@@ -30,6 +30,7 @@
function f() {
var sum = 0;
for (var i = 0; i < 10; i++) {
+ %PrepareFunctionForOptimization(f);
for (var j = 0; j < 10; j++) {
var x = i + 2;
var y = x + 5;
diff --git a/deps/v8/test/mjsunit/compiler/osr-regex-id.js b/deps/v8/test/mjsunit/compiler/osr-regex-id.js
index 1f66297ccd..243a7804d3 100644
--- a/deps/v8/test/mjsunit/compiler/osr-regex-id.js
+++ b/deps/v8/test/mjsunit/compiler/osr-regex-id.js
@@ -10,6 +10,7 @@ function foo(a) {
var r = /\0/;
for (var i = 0; i < 10; i++) {
if (a) %OptimizeOsr();
+ %PrepareFunctionForOptimization(foo);
}
return r;
}
@@ -18,6 +19,7 @@ function foo(a) {
function bar(a) {
for (var i = 0; i < 10; i++) {
if (a) %OptimizeOsr();
+ %PrepareFunctionForOptimization(bar);
var r = /\0/;
}
return r;
@@ -27,6 +29,7 @@ function bar(a) {
function baz(a) {
for (var i = 0; i < 10; i++) {
if (a) %OptimizeOsr();
+ %PrepareFunctionForOptimization(baz);
}
return /\0/;
}
@@ -36,6 +39,7 @@ function qux(a) {
for (var i = 0; i < 10; i++) {
if (i > 5 && a) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(qux);
} else {
var r = /\0/;
}
@@ -47,8 +51,11 @@ function qux(a) {
function test(f) {
// Test the reference equality of regex's created in OSR'd function.
var x = f(false);
+ %PrepareFunctionForOptimization(f);
assertEquals(x, f(true));
+ %PrepareFunctionForOptimization(f);
assertEquals(x, f(true));
+ %PrepareFunctionForOptimization(f);
assertEquals(x, f(true));
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-simple.js b/deps/v8/test/mjsunit/compiler/osr-simple.js
index c8585f2991..c26cfe3f15 100644
--- a/deps/v8/test/mjsunit/compiler/osr-simple.js
+++ b/deps/v8/test/mjsunit/compiler/osr-simple.js
@@ -15,9 +15,9 @@ function f() {
}
return sum;
}
-%PrepareFunctionForOptimization(f);
for (var i = 0; i < 2; i++) {
+ %PrepareFunctionForOptimization(f);
assertEquals(509500, f());
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-top1.js b/deps/v8/test/mjsunit/compiler/osr-top1.js
index 742b71d86e..6f0fac6580 100644
--- a/deps/v8/test/mjsunit/compiler/osr-top1.js
+++ b/deps/v8/test/mjsunit/compiler/osr-top1.js
@@ -4,13 +4,17 @@
// Flags: --use-osr --allow-natives-syntax
-var sum = 0;
-for (var i = 0; i < 10000; i++) {
- if (i == 100) %OptimizeOsr();
- var x = i + 2;
- var y = x + 5;
- var z = y + 3;
- sum += z;
-}
+function f() {
+ var sum = 0;
+ for (var i = 0; i < 10000; i++) {
+ if (i == 100) %OptimizeOsr();
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
-assertEquals(50095000, sum);
+ assertEquals(50095000, sum);
+}
+%PrepareFunctionForOptimization(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/osr-top2.js b/deps/v8/test/mjsunit/compiler/osr-top2.js
index a15aa15d04..8bf7fc075f 100644
--- a/deps/v8/test/mjsunit/compiler/osr-top2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-top2.js
@@ -4,16 +4,21 @@
// Flags: --use-osr --allow-natives-syntax
-for (var j = 0; j < 3; j++) {
- var sum = 0;
- for (var i = 0; i < 1000; i++) {
- if (i == 100) %OptimizeOsr();
- var x = i + 2;
- var y = x + 5;
- var z = y + 3;
- sum += z;
+function f() {
+ for (var j = 0; j < 3; j++) {
+ var sum = 0;
+ for (var i = 0; i < 1000; i++) {
+ if (i == 100) %OptimizeOsr();
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ assertEquals(509500, sum);
+ %PrepareFunctionForOptimization(f);
}
+
assertEquals(509500, sum);
}
-
-assertEquals(509500, sum);
+%PrepareFunctionForOptimization(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/osr-top3.js b/deps/v8/test/mjsunit/compiler/osr-top3.js
index 4c4a364be0..3bf89e20c9 100644
--- a/deps/v8/test/mjsunit/compiler/osr-top3.js
+++ b/deps/v8/test/mjsunit/compiler/osr-top3.js
@@ -4,19 +4,24 @@
// Flags: --use-osr --allow-natives-syntax
-for (var k = 0; k < 2; k++) {
- for (var j = 0; j < 3; j++) {
- var sum = 0;
- for (var i = 0; i < 1000; i++) {
- if (i == 100) %OptimizeOsr();
- var x = i + 2;
- var y = x + 5;
- var z = y + 3;
- sum += z;
+function f() {
+ for (var k = 0; k < 2; k++) {
+ for (var j = 0; j < 3; j++) {
+ var sum = 0;
+ for (var i = 0; i < 1000; i++) {
+ if (i == 100) %OptimizeOsr();
+ var x = i + 2;
+ var y = x + 5;
+ var z = y + 3;
+ sum += z;
+ }
+ assertEquals(509500, sum);
+ %PrepareFunctionForOptimization(f);
}
assertEquals(509500, sum);
}
+
assertEquals(509500, sum);
}
-
-assertEquals(509500, sum);
+%PrepareFunctionForOptimization(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/osr-try-catch.js b/deps/v8/test/mjsunit/compiler/osr-try-catch.js
index 79389fd8ca..d641a4ebc5 100644
--- a/deps/v8/test/mjsunit/compiler/osr-try-catch.js
+++ b/deps/v8/test/mjsunit/compiler/osr-try-catch.js
@@ -16,6 +16,7 @@ function SingleLoop() {
catch(e) {}
for (var b = 0; b < 1; b++) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(SingleLoop);
}
}
}
@@ -37,6 +38,7 @@ function EmptyBody() {
if (a == 1) break;
a++;
+ %PrepareFunctionForOptimization(EmptyBody);
}
}
%PrepareFunctionForOptimization(EmptyBody);
@@ -52,6 +54,7 @@ function NestedLoops() {
if (b == 1) break;
b++;
}
+ %PrepareFunctionForOptimization(NestedLoops);
}
}
%PrepareFunctionForOptimization(NestedLoops);
diff --git a/deps/v8/test/mjsunit/compiler/osr-two.js b/deps/v8/test/mjsunit/compiler/osr-two.js
index 4409dc3bae..6469c1a062 100644
--- a/deps/v8/test/mjsunit/compiler/osr-two.js
+++ b/deps/v8/test/mjsunit/compiler/osr-two.js
@@ -17,6 +17,7 @@ function f(x) {
}
}
outer--;
+ %PrepareFunctionForOptimization(f);
}
return sum;
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-while-let.js b/deps/v8/test/mjsunit/compiler/osr-while-let.js
index 906e92bb21..eefde50d7f 100644
--- a/deps/v8/test/mjsunit/compiler/osr-while-let.js
+++ b/deps/v8/test/mjsunit/compiler/osr-while-let.js
@@ -26,6 +26,7 @@ function foo() {
for (; flag == 1; (flag = 0, temp_x = x)) {
if (x < 2) {
result = x; %OptimizeOsr();
+ %PrepareFunctionForOptimization(foo);
} else {
break outer;
}
@@ -48,6 +49,7 @@ function smo() {
let y = x;
for (var i = 0; i < 5; i++) {
%OptimizeOsr();
+ %PrepareFunctionForOptimization(smo);
if (i) break outer;
else result = y;
}
diff --git a/deps/v8/test/mjsunit/compiler/promise-constructor.js b/deps/v8/test/mjsunit/compiler/promise-constructor.js
index 7cbae22705..ab2d720755 100644
--- a/deps/v8/test/mjsunit/compiler/promise-constructor.js
+++ b/deps/v8/test/mjsunit/compiler/promise-constructor.js
@@ -82,6 +82,7 @@ failWithMessage = (msg) => %AbortJS(msg);
function foo() {
return new Promise((a, b) => { throw new Error(); });
}
+ %PrepareFunctionForOptimization(foo);
function bar(i) {
let error = null;
diff --git a/deps/v8/test/mjsunit/compiler/regress-607493.js b/deps/v8/test/mjsunit/compiler/regress-607493.js
index adfef8a6ee..8e4721ce76 100644
--- a/deps/v8/test/mjsunit/compiler/regress-607493.js
+++ b/deps/v8/test/mjsunit/compiler/regress-607493.js
@@ -10,7 +10,10 @@
function g() {
for (var x in a) {
try {
- for (var i = 0; i < 10; i++) { %OptimizeOsr(); }
+ for (var i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(g);
+ }
return;
} catch(e) {
continue;
@@ -28,7 +31,10 @@
function g() {
for (var x in a) {
if (x) {
- for (var i = 0; i < 10; i++) { %OptimizeOsr(); }
+ for (var i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(g);
+ }
}
continue;
}
diff --git a/deps/v8/test/mjsunit/compiler/regress-645851.js b/deps/v8/test/mjsunit/compiler/regress-645851.js
index f79a942405..a0d7276737 100644
--- a/deps/v8/test/mjsunit/compiler/regress-645851.js
+++ b/deps/v8/test/mjsunit/compiler/regress-645851.js
@@ -11,6 +11,7 @@ function f() {
sum = sum + 1;
%OptimizeOsr();
if (sum == 2) return;
+ %PrepareFunctionForOptimization(f);
}
}
return sum;
diff --git a/deps/v8/test/mjsunit/compiler/regress-9041.js b/deps/v8/test/mjsunit/compiler/regress-9041.js
index 46d8755bde..d7a8e6d625 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9041.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9041.js
@@ -5,17 +5,19 @@
// Flags: --allow-natives-syntax
(function() {
- class A {}
+class A {}
- function foo(a, fn) {
- const C = a.constructor;
- fn(a);
- return a instanceof C;
- }
-
- assertTrue(foo(new A, (a) => {}));
- assertTrue(foo(new A, (a) => {}));
- %OptimizeFunctionOnNextCall(foo);
- assertTrue(foo(new A, (a) => {}));
- assertFalse(foo(new A, (a) => { a.__proto__ = {}; }));
+function foo(a, fn) {
+ const C = a.constructor;
+ fn(a);
+ return a instanceof C;
+};
+%PrepareFunctionForOptimization(foo);
+assertTrue(foo(new A(), a => {}));
+assertTrue(foo(new A(), a => {}));
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo(new A(), a => {}));
+assertFalse(foo(new A(), a => {
+ a.__proto__ = {};
+}));
})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-9087.js b/deps/v8/test/mjsunit/compiler/regress-9087.js
index 4220a2906c..ccb885aeb2 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9087.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9087.js
@@ -13,8 +13,8 @@ for (let i = 0; i < 1020; ++i) {
function foo() {
return obj instanceof constructor;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-9137-1.js b/deps/v8/test/mjsunit/compiler/regress-9137-1.js
index 32cbe32b15..5743847dfd 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9137-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9137-1.js
@@ -19,6 +19,7 @@ foo(function(){});
foo(function(){});
%OptimizeFunctionOnNextCall(foo);
foo(function(){});
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(function(){});
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-9137-2.js b/deps/v8/test/mjsunit/compiler/regress-9137-2.js
index ee4c157269..3642b06df4 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9137-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9137-2.js
@@ -23,6 +23,7 @@ foo([0, 1, 2]);
foo([0, 1, 2]);
%OptimizeFunctionOnNextCall(foo);
foo([0, 1, 2]);
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo([0, 1, 2]);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-935092.js b/deps/v8/test/mjsunit/compiler/regress-935092.js
index df4eb06dda..2a0e5e9480 100644
--- a/deps/v8/test/mjsunit/compiler/regress-935092.js
+++ b/deps/v8/test/mjsunit/compiler/regress-935092.js
@@ -5,14 +5,17 @@
// Flags: --allow-natives-syntax
function opt(g) {
- for (var X = 0; X < 1; X++) {
- (new(function() {
- this.y
- })).x;
- (g || (g && (((g || -N)(g && 0))))).y = 0
- }
- (function() { g })
-}
+ for (var X = 0; X < 1; X++) {
+ new function() {
+ this.y;
+ }().x;
+ (g || g && (g || -N)(g && 0)).y = 0;
+ }
+ (function() {
+ g;
+ });
+};
+%PrepareFunctionForOptimization(opt);
opt({});
%OptimizeFunctionOnNextCall(opt);
opt({});
diff --git a/deps/v8/test/mjsunit/compiler/regress-939316.js b/deps/v8/test/mjsunit/compiler/regress-939316.js
index 79a0079c46..c3326b6281 100644
--- a/deps/v8/test/mjsunit/compiler/regress-939316.js
+++ b/deps/v8/test/mjsunit/compiler/regress-939316.js
@@ -12,8 +12,8 @@
function g(i) {
f(i);
- }
-
+ };
+ %PrepareFunctionForOptimization(g);
g(0);
g(1);
%OptimizeFunctionOnNextCall(g);
@@ -25,13 +25,14 @@
function f() {
try {
const o = Reflect.construct(Array, arguments, parseInt);
- } catch(e) { }
+ } catch (e) {
+ }
}
function g() {
f();
- }
-
+ };
+ %PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/compiler/regress-944062-1.js b/deps/v8/test/mjsunit/compiler/regress-944062-1.js
index 268999c881..0367a9a96d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-944062-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-944062-1.js
@@ -9,14 +9,14 @@ let b = false;
function f() {
if (b) array[100000] = 4.2; // go to dictionary mode
- return 42
+ return 42;
};
%NeverOptimizeFunction(f);
function includes() {
return array.includes(f());
-}
-
+};
+%PrepareFunctionForOptimization(includes);
assertTrue(includes());
assertTrue(includes());
%OptimizeFunctionOnNextCall(includes);
diff --git a/deps/v8/test/mjsunit/compiler/regress-944062-2.js b/deps/v8/test/mjsunit/compiler/regress-944062-2.js
index 89f06b5452..aa03ce50a7 100644
--- a/deps/v8/test/mjsunit/compiler/regress-944062-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-944062-2.js
@@ -9,8 +9,9 @@ function includes(key, array) {
array.__defineSetter__(key, () => {});
// Will then read OOB.
return array.includes(1234);
-}
-includes("", []);
+};
+%PrepareFunctionForOptimization(includes);
+includes('', []);
includes("", []);
%OptimizeFunctionOnNextCall(includes);
includes("", []);
diff --git a/deps/v8/test/mjsunit/compiler/regress-945187.js b/deps/v8/test/mjsunit/compiler/regress-945187.js
index 7041042cd3..5d7b0dae96 100644
--- a/deps/v8/test/mjsunit/compiler/regress-945187.js
+++ b/deps/v8/test/mjsunit/compiler/regress-945187.js
@@ -5,10 +5,10 @@
// Flags: --allow-natives-syntax
function f() {
- const o = { get : Object };
+ const o = {get: Object};
Object.defineProperty(Object, 0, o);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
delete Object.fromEntries;
diff --git a/deps/v8/test/mjsunit/compiler/regress-945644.js b/deps/v8/test/mjsunit/compiler/regress-945644.js
index 2cb8839d86..03c0e00b9f 100644
--- a/deps/v8/test/mjsunit/compiler/regress-945644.js
+++ b/deps/v8/test/mjsunit/compiler/regress-945644.js
@@ -4,19 +4,22 @@
// Flags: --allow-natives-syntax
-function f(v5,v6) {
- const v16 = [1337,1337,-765470.5051836492];
+function f(v5, v6) {
+ const v16 = [1337, 1337, -765470.5051836492];
let v19 = 0;
do {
const v20 = v19 + 1;
const v22 = Math.fround(v20);
v19 = v22;
const v23 = [v20, v22];
- function v24() { v20; v22; }
+ function v24() {
+ v20;
+ v22;
+ }
const v33 = v16.indexOf(v19);
} while (v19 < 6);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
Array.prototype.push(8);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-946889.js b/deps/v8/test/mjsunit/compiler/regress-946889.js
index 0bdaef8168..a66b3b9720 100644
--- a/deps/v8/test/mjsunit/compiler/regress-946889.js
+++ b/deps/v8/test/mjsunit/compiler/regress-946889.js
@@ -10,8 +10,8 @@ function foo() {
var arr = [];
[...arr, 42, null];
arr.length = 1;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-949435.js b/deps/v8/test/mjsunit/compiler/regress-949435.js
index e78acad183..d9f989a52d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-949435.js
+++ b/deps/v8/test/mjsunit/compiler/regress-949435.js
@@ -9,8 +9,8 @@ function f() {
v6.POSITIVE_INFINITY = 1337;
const v8 = Object.seal(v6);
v8.POSITIVE_INFINITY = Object;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
-%OptimizeFunctionOnNextCall(f)
+%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-952586.js b/deps/v8/test/mjsunit/compiler/regress-952586.js
index 0a17ed7682..1fe67b5c32 100644
--- a/deps/v8/test/mjsunit/compiler/regress-952586.js
+++ b/deps/v8/test/mjsunit/compiler/regress-952586.js
@@ -8,8 +8,8 @@ a = new Int8Array(1);
function f(i) {
return i in a;
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertTrue(f(0));
%OptimizeFunctionOnNextCall(f);
assertFalse(f(-1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-957559.js b/deps/v8/test/mjsunit/compiler/regress-957559.js
index b32f6b85f6..a21ff34e71 100644
--- a/deps/v8/test/mjsunit/compiler/regress-957559.js
+++ b/deps/v8/test/mjsunit/compiler/regress-957559.js
@@ -9,11 +9,11 @@ const v0 = [];
function f(b) {
for (let v13 = 0; v13 <= 3; v13 = v13 + 2241165261) {
for (let i = 0; i < 8; i++) {}
- const v23 = Math.max(v13,-0.0,-2523259642);
+ const v23 = Math.max(v13, -0.0, -2523259642);
const v24 = v0[v23];
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-958021.js b/deps/v8/test/mjsunit/compiler/regress-958021.js
index 252ea84365..072a3e7f53 100644
--- a/deps/v8/test/mjsunit/compiler/regress-958021.js
+++ b/deps/v8/test/mjsunit/compiler/regress-958021.js
@@ -13,9 +13,10 @@ function v0() {
const v15 = v7 <= v14;
for (var i = 0; i < 10; i++) {}
}
- } catch(v20) {}
-}
-
+ } catch (v20) {
+ }
+};
+%PrepareFunctionForOptimization(v0);
v0();
v0();
%OptimizeFunctionOnNextCall(v0);
diff --git a/deps/v8/test/mjsunit/compiler/regress-958420.js b/deps/v8/test/mjsunit/compiler/regress-958420.js
index 1601abc6bb..dd7e58d50c 100644
--- a/deps/v8/test/mjsunit/compiler/regress-958420.js
+++ b/deps/v8/test/mjsunit/compiler/regress-958420.js
@@ -8,8 +8,8 @@ var a = [];
function foo() {
return a[Symbol.iterator]().next();
-}
-
+};
+%PrepareFunctionForOptimization(foo);
a.__proto__.push(5);
a.bla = {};
diff --git a/deps/v8/test/mjsunit/compiler/regress-958716.js b/deps/v8/test/mjsunit/compiler/regress-958716.js
index f2ede1002e..ee463af69d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-958716.js
+++ b/deps/v8/test/mjsunit/compiler/regress-958716.js
@@ -5,13 +5,13 @@
// Flags: --allow-natives-syntax
for (let i = 0; i < 2; i++) {
- (new String()).valueOf = Symbol;
+ new String().valueOf = Symbol;
}
function foo() {
Promise.resolve("");
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-961986.js b/deps/v8/test/mjsunit/compiler/regress-961986.js
index de8b5fe458..21e7bf4f6e 100644
--- a/deps/v8/test/mjsunit/compiler/regress-961986.js
+++ b/deps/v8/test/mjsunit/compiler/regress-961986.js
@@ -10,8 +10,8 @@ function foo() {
obj[1] = "";
proto[1];
proto.bla = 42;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-966560-1.js b/deps/v8/test/mjsunit/compiler/regress-966560-1.js
index d4d04f23dd..e9ccd365eb 100644
--- a/deps/v8/test/mjsunit/compiler/regress-966560-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-966560-1.js
@@ -10,11 +10,16 @@ async function __f_3() {
async function __f_4() {
await x.then();
throw new Error();
-}
+};
+%PrepareFunctionForOptimization(__f_4);
async function __f_5(f) {
try {
await f();
} catch (e) {
}
}
-(async() => {; %OptimizeFunctionOnNextCall(__f_4); await __f_5(__f_3); })();
+(async () => {
+ ;
+ %OptimizeFunctionOnNextCall(__f_4);
+ await __f_5(__f_3);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-966560-2.js b/deps/v8/test/mjsunit/compiler/regress-966560-2.js
index 33eff6020c..a4ccd8590e 100644
--- a/deps/v8/test/mjsunit/compiler/regress-966560-2.js
+++ b/deps/v8/test/mjsunit/compiler/regress-966560-2.js
@@ -10,5 +10,6 @@ function* get() {
get = [];
}
}
+%PrepareFunctionForOptimization(get);
%OptimizeFunctionOnNextCall(get);
get();
diff --git a/deps/v8/test/mjsunit/compiler/regress-977670.js b/deps/v8/test/mjsunit/compiler/regress-977670.js
new file mode 100644
index 0000000000..a87963d327
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-977670.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ var i;
+ for (i in 'xxxxxxxx') {
+ try { throw 42 } catch (e) {}
+ }
+ print(i);
+ i['' + 'length'] = 42;
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js b/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
index 2fdbbc3bce..86ab260cc5 100644
--- a/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
+++ b/deps/v8/test/mjsunit/compiler/regress-closures-with-eval.js
@@ -29,6 +29,7 @@
// Verifies that closures in presence of eval work fine.
function withEval(expr, filter) {
+ %PrepareFunctionForOptimization(filter);
function walk(v) {
for (var i in v) {
for (var i in v) {}
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-965513.js b/deps/v8/test/mjsunit/compiler/regress-crbug-965513.js
index d1cb0545e8..349f7bf31d 100644
--- a/deps/v8/test/mjsunit/compiler/regress-crbug-965513.js
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-965513.js
@@ -6,8 +6,8 @@
%EnsureFeedbackVectorForFunction(foo);
function foo(x) {
return x * (x == 1);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(0.5);
foo(1.5);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-974474.js b/deps/v8/test/mjsunit/compiler/regress-crbug-974474.js
new file mode 100644
index 0000000000..5f3292079d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-974474.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ const y = x == 42;
+ () => {y};
+ if (y) { Object(); }
+ [!!y];
+ return y;
+}
+
+%PrepareFunctionForOptimization(foo);
+foo(42); foo(42);
+%OptimizeFunctionOnNextCall(foo);
+foo(42);
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-974476.js b/deps/v8/test/mjsunit/compiler/regress-crbug-974476.js
new file mode 100644
index 0000000000..5672f9d764
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-974476.js
@@ -0,0 +1,34 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function use(x) { return x; }
+%NeverOptimizeFunction(use);
+
+function foo() {
+ let result = undefined;
+ (function () {
+ const a = {};
+ for (_ of [0]) {
+ const empty = [];
+ (function () {
+ result = 42;
+ for (_ of [0]) {
+ for (_ of [0]) {
+ use(empty);
+ }
+ }
+ result = a;
+ })();
+ }
+ })();
+ return result;
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-v8-9113.js b/deps/v8/test/mjsunit/compiler/regress-v8-9113.js
index b602da84e6..7dc765134f 100644
--- a/deps/v8/test/mjsunit/compiler/regress-v8-9113.js
+++ b/deps/v8/test/mjsunit/compiler/regress-v8-9113.js
@@ -4,14 +4,14 @@
// Flags: --allow-natives-syntax
-let dummy = { x : 0.1 };
+let dummy = {x: 0.1};
-let o = { x : 0 };
+let o = {x: 0};
function f(o, v) {
o.x = v;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(o, 0);
f(o, 0);
assertEquals(Infinity, 1 / o.x);
diff --git a/deps/v8/test/mjsunit/compiler/tagged-template.js b/deps/v8/test/mjsunit/compiler/tagged-template.js
new file mode 100644
index 0000000000..29a9454f55
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/tagged-template.js
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Case One: the template is already initialized before compilation.
+let saved_array;
+function tagged_function(a) {
+ saved_array = a;
+ return "something";
+}
+
+function foo(b) {
+ let a = tagged_function`hello ${b}`;
+ return a + " " + b;
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+
+// Case Two: the template hasn't been initialized at the point we
+// do optimized compile.
+function bar(b) {
+ if (b) {
+ let a = tagged_function`initialized late ${b}`;
+ b = a;
+ }
+ return b;
+}
+
+%PrepareFunctionForOptimization(bar);
+bar();
+bar();
+%OptimizeFunctionOnNextCall(bar);
+bar(true);
+
+let saved_array_from_optimized_call = saved_array;
+
+// Finally, ensure that if the function is deoptimized, the tagged-template
+// code still runs. This is useful to test because TurboFan doesn't cache
+// the tagged template in the feedback vector if it has to create it.
+%DeoptimizeFunction(bar);
+bar(true);
+
+// Furthermore, we want to ensure that the JSArray passed to the function
+// is the same.
+assertSame(saved_array_from_optimized_call, saved_array);
diff --git a/deps/v8/test/mjsunit/compiler/typedarray-keyed.js b/deps/v8/test/mjsunit/compiler/typedarray-keyed.js
index 5b4695fec2..d74fae99e5 100644
--- a/deps/v8/test/mjsunit/compiler/typedarray-keyed.js
+++ b/deps/v8/test/mjsunit/compiler/typedarray-keyed.js
@@ -8,7 +8,8 @@ var a = new Int8Array(100);
function has(i) {
return i in a;
-}
+};
+%PrepareFunctionForOptimization(has);
assertTrue(has(0));
assertTrue(has(0));
%OptimizeFunctionOnNextCall(has);
@@ -17,7 +18,8 @@ assertTrue(has(1));
function get(i) {
return a[i];
-}
+};
+%PrepareFunctionForOptimization(get);
assertEquals(0, get(0));
assertEquals(0, get(0));
%OptimizeFunctionOnNextCall(get);
@@ -26,9 +28,14 @@ assertEquals(0, get(1));
function set(i) {
return a[i] = 42 + i;
-}
-assertEquals(42, set(0)); assertEquals(42, a[0]);
-assertEquals(42, set(0)); assertEquals(42, a[0]);
+};
+%PrepareFunctionForOptimization(set);
+assertEquals(42, set(0));
+assertEquals(42, a[0]);
+assertEquals(42, set(0));
+assertEquals(42, a[0]);
%OptimizeFunctionOnNextCall(set);
-assertEquals(42, set(0)); assertEquals(42, a[0]);
-assertEquals(43, set(1)); assertEquals(43, a[1]);
+assertEquals(42, set(0));
+assertEquals(42, a[0]);
+assertEquals(43, set(1));
+assertEquals(43, a[1]);
diff --git a/deps/v8/test/mjsunit/constant-compare-nil-value.js b/deps/v8/test/mjsunit/constant-compare-nil-value.js
index 9f5b2adb06..13bef23080 100644
--- a/deps/v8/test/mjsunit/constant-compare-nil-value.js
+++ b/deps/v8/test/mjsunit/constant-compare-nil-value.js
@@ -28,14 +28,14 @@
// Flags: --allow-natives-syntax
function inlined() {
- return 1;
+ return 1;
}
function foo() {
- if ((inlined() + 0.5) == null) return "null";
- return "non-null";
-}
-
+ if (inlined() + 0.5 == null) return 'null';
+ return 'non-null';
+};
+%PrepareFunctionForOptimization(foo);
assertEquals("non-null", foo());
assertEquals("non-null", foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/constant-fold-control-instructions.js b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
index be3cdfdd91..6fb8afaa2b 100644
--- a/deps/v8/test/mjsunit/constant-fold-control-instructions.js
+++ b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
@@ -25,7 +25,7 @@ function test() {
assertFalse(%_IsJSReceiver(1));
}
-
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index f7c809bc1d..a0c0feda02 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -37,6 +37,7 @@ function test(f, iterations) {
for (let i = 0; i < n; i++) {
%OptimizeFunctionOnNextCall(f);
f();
+ %PrepareFunctionForOptimization(f);
}
// Assert that the function finally stabilized.
assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/cross-realm-global-prototype.js b/deps/v8/test/mjsunit/cross-realm-global-prototype.js
index 46e5a3a37f..130d1b36d4 100644
--- a/deps/v8/test/mjsunit/cross-realm-global-prototype.js
+++ b/deps/v8/test/mjsunit/cross-realm-global-prototype.js
@@ -8,6 +8,7 @@
var realm = Realm.create();
var test = Realm.eval(realm,
"() => { return Realm.global(0) instanceof Object }");
+%PrepareFunctionForOptimization(test);
assertFalse(test());
diff --git a/deps/v8/test/mjsunit/default-nospec.js b/deps/v8/test/mjsunit/default-nospec.js
index 0e3b6c1f55..98cef31390 100644
--- a/deps/v8/test/mjsunit/default-nospec.js
+++ b/deps/v8/test/mjsunit/default-nospec.js
@@ -8,10 +8,12 @@
function f(a, b, c) {
return String.prototype.indexOf.call(a, b, c);
}
+ %PrepareFunctionForOptimization(f);
f("abc", "de", 1);
f("abc", "de", 1);
%OptimizeFunctionOnNextCall(f);
f("abc", "de", {});
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("abc", "de", {});
assertOptimized(f);
@@ -21,10 +23,12 @@
function f(a, b, c) {
return String.prototype.indexOf.apply(a, [b, c]);
}
+ %PrepareFunctionForOptimization(f);
f("abc", "de", 1);
f("abc", "de", 1);
%OptimizeFunctionOnNextCall(f);
f("abc", {}, 1);
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("abc", {}, 1);
assertOptimized(f);
@@ -34,10 +38,13 @@
function f(a, b, c) {
return Reflect.apply(String.prototype.indexOf, a, [b, c]);
}
+ %PrepareFunctionForOptimization(f);
f("abc", "de", 1);
f("abc", "de", 1);
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f({}, "de", 1);
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f({}, "de", 1);
assertOptimized(f);
@@ -47,10 +54,12 @@
function f(a, b) {
return String.fromCharCode.call(a, b);
}
+ %PrepareFunctionForOptimization(f);
f("abc", 1);
f("abc", 1);
%OptimizeFunctionOnNextCall(f);
f("abc", {});
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f({}, {});
assertOptimized(f);
@@ -60,10 +69,12 @@
function f(a, b) {
return String.fromCharCode.apply(undefined, [b, {}]);
}
+ %PrepareFunctionForOptimization(f);
f("abc", 1);
f("abc", 1);
%OptimizeFunctionOnNextCall(f);
f("abc", {});
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("abc", {});
assertOptimized(f);
@@ -74,10 +85,12 @@
function f(a, b) {
return Reflect.apply(String.fromCharCode, a, [b, {}]);
}
+ %PrepareFunctionForOptimization(f);
f("abc", 1);
f("abc", 1);
%OptimizeFunctionOnNextCall(f);
f("abc", {});
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("abc", {});
assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/deopt-global-accessor.js b/deps/v8/test/mjsunit/deopt-global-accessor.js
index 5c544a0fa0..4e6e73de64 100644
--- a/deps/v8/test/mjsunit/deopt-global-accessor.js
+++ b/deps/v8/test/mjsunit/deopt-global-accessor.js
@@ -10,14 +10,18 @@ x = 3;
function f() {
return x;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
f();
%OptimizeFunctionOnNextCall(f);
f();
-Object.defineProperty(this, "x", {get:function() { return 100; }});
+Object.defineProperty(this, 'x', {
+ get: function() {
+ return 100;
+ }
+});
assertEquals(100, f());
diff --git a/deps/v8/test/mjsunit/deopt-minus-zero.js b/deps/v8/test/mjsunit/deopt-minus-zero.js
index 38795afa33..8555a70525 100644
--- a/deps/v8/test/mjsunit/deopt-minus-zero.js
+++ b/deps/v8/test/mjsunit/deopt-minus-zero.js
@@ -31,11 +31,13 @@ function mul (a, b) {
return a * b;
}
+%PrepareFunctionForOptimization(mul);
mul(-1, -1);
mul(0x80000001|0, -1);
mul(0x80000001|0, -1);
%OptimizeFunctionOnNextCall(mul);
mul(0, -1);
+%PrepareFunctionForOptimization(mul);
%OptimizeFunctionOnNextCall(mul);
mul(0, -1);
diff --git a/deps/v8/test/mjsunit/deopt-unlinked.js b/deps/v8/test/mjsunit/deopt-unlinked.js
index 06a5cc4041..422631450b 100644
--- a/deps/v8/test/mjsunit/deopt-unlinked.js
+++ b/deps/v8/test/mjsunit/deopt-unlinked.js
@@ -7,8 +7,8 @@
// bytecode is flushed, which --gc-interval can cause in stress modes.
// Flags: --noflush-bytecode --nostress-flush-bytecode
-function foo() {}
-
+function foo() {};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/deopt-with-outer-context.js b/deps/v8/test/mjsunit/deopt-with-outer-context.js
index 42a829d853..52c7a37ed3 100644
--- a/deps/v8/test/mjsunit/deopt-with-outer-context.js
+++ b/deps/v8/test/mjsunit/deopt-with-outer-context.js
@@ -8,13 +8,15 @@ function outer(y) {
function inner() {
var x = 10;
(function() {
- // Access x from inner function to force it to be context allocated.
- x = 20;
- %DeoptimizeFunction(inner);
+ // Access x from inner function to force it to be context allocated.
+ x = 20;
+ %DeoptimizeFunction(inner);
})();
// Variable y should be read from the outer context.
return y;
};
+ %PrepareFunctionForOptimization(inner);
+ ;
%OptimizeFunctionOnNextCall(inner);
return inner();
}
diff --git a/deps/v8/test/mjsunit/div-mod.js b/deps/v8/test/mjsunit/div-mod.js
index 08cee8cdd1..ccdac8de89 100644
--- a/deps/v8/test/mjsunit/div-mod.js
+++ b/deps/v8/test/mjsunit/div-mod.js
@@ -305,6 +305,7 @@ function lithium_integer_mod() {
}
+%PrepareFunctionForOptimization(lithium_integer_mod);
lithium_integer_mod();
%OptimizeFunctionOnNextCall(lithium_integer_mod)
lithium_integer_mod();
diff --git a/deps/v8/test/mjsunit/double-truncation.js b/deps/v8/test/mjsunit/double-truncation.js
index b43e1e6c63..753262993b 100644
--- a/deps/v8/test/mjsunit/double-truncation.js
+++ b/deps/v8/test/mjsunit/double-truncation.js
@@ -30,8 +30,8 @@
function RunOneTruncationTest(a, b) {
var temp = a | 0;
assertEquals(b, temp);
-}
-
+};
+%PrepareFunctionForOptimization(RunOneTruncationTest);
function RunAllTruncationTests() {
RunOneTruncationTest(0, 0);
RunOneTruncationTest(0.5, 0);
diff --git a/deps/v8/test/mjsunit/element-accessor.js b/deps/v8/test/mjsunit/element-accessor.js
index 94acc5c6c3..8d412ed12f 100644
--- a/deps/v8/test/mjsunit/element-accessor.js
+++ b/deps/v8/test/mjsunit/element-accessor.js
@@ -19,9 +19,14 @@
var set = 0;
- Object.defineProperty(o, "3", {
- get:function() { return 100; },
- set:function(v) { set = v; }});
+ Object.defineProperty(o, '3', {
+ get: function() {
+ return 100;
+ },
+ set: function(v) {
+ set = v;
+ }
+ });
store(o, 3, 1000);
assertEquals(1000, set);
@@ -32,25 +37,22 @@
var o = new Int32Array(1);
assertThrows(
() => Object.defineProperty(o, '0', {get: function() {}}), TypeError);
- assertEquals({
- value: 0,
- writable: true,
- enumerable: true,
- configurable: false
- }, Object.getOwnPropertyDescriptor(o, "0"));
+ assertEquals(
+ {value: 0, writable: true, enumerable: true, configurable: false},
+ Object.getOwnPropertyDescriptor(o, '0'));
})();
(function() {
- function f() {
- var a = new Array();
- a[1] = 1.5;
- return a;
- }
-
- f();
- f();
- %OptimizeFunctionOnNextCall(f);
- var a = f();
- a[2] = 2;
- assertEquals(3, a.length);
+function f() {
+ var a = new Array();
+ a[1] = 1.5;
+ return a;
+};
+%PrepareFunctionForOptimization(f);
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+var a = f();
+a[2] = 2;
+assertEquals(3, a.length);
})();
diff --git a/deps/v8/test/mjsunit/elements-kind-depends.js b/deps/v8/test/mjsunit/elements-kind-depends.js
index 539fbd0e42..2e550c24ce 100644
--- a/deps/v8/test/mjsunit/elements-kind-depends.js
+++ b/deps/v8/test/mjsunit/elements-kind-depends.js
@@ -34,6 +34,7 @@ function burn() {
a[2] = 20;
return a;
}
+%PrepareFunctionForOptimization(burn);
function check(a) {
assertEquals(10, a[0]);
@@ -56,6 +57,7 @@ function loop_test(x) {
x[i] = (i+1) * 0.5;
}
}
+%PrepareFunctionForOptimization(loop_test);
function check2(b) {
assertEquals(0.5, b[0]);
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index 54c5e33e9e..d98cedac33 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -233,12 +233,12 @@ convert_to_fast(smis);
convert_to_fast(doubles);
// Test transition chain SMI->DOUBLE->FAST (crankshafted function will
// transition to FAST directly).
-%EnsureFeedbackVectorForFunction(convert_mixed);
function convert_mixed(array, value, kind) {
array[1] = value;
assertKind(kind, array);
assertEquals(value, array[1]);
}
+%PrepareFunctionForOptimization(convert_mixed);
smis = construct_smis();
for (var i = 0; i < 3; i++) {
convert_mixed(smis, 1.5, elements_kind.fast_double);
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-2.js b/deps/v8/test/mjsunit/elide-double-hole-check-2.js
index 978abc3bb0..7ab201124a 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-2.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-2.js
@@ -29,13 +29,17 @@
function f(a, i) {
return a[i] + 0.5;
-}
-var arr = [0.0,,];
+};
+%PrepareFunctionForOptimization(f);
+var arr = [
+ 0.0,
+ ,
+];
assertEquals(0.5, f(arr, 0));
assertEquals(0.5, f(arr, 0));
%OptimizeFunctionOnNextCall(f);
assertEquals(0.5, f(arr, 0));
assertEquals(NaN, f(arr, 1));
-arr.__proto__ = [1.5,1.5,1.5];
+arr.__proto__ = [1.5, 1.5, 1.5];
assertEquals(2, f(arr, 1));
assertEquals(0.5, f(arr, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-3.js b/deps/v8/test/mjsunit/elide-double-hole-check-3.js
index f8179403ec..58f92b0a6e 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-3.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-3.js
@@ -29,8 +29,9 @@
function f(a, i) {
return a[i] + 0.5;
-}
-Array.prototype = [1.5,1.5,1.5];
+};
+%PrepareFunctionForOptimization(f);
+Array.prototype = [1.5, 1.5, 1.5];
var arr = [0.0,,];
assertEquals(0.5, f(arr, 0));
assertEquals(0.5, f(arr, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-9.js b/deps/v8/test/mjsunit/elide-double-hole-check-9.js
index bbcbfb2be6..dc857b7c44 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-9.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-9.js
@@ -37,9 +37,9 @@ function set_proto_elements() {
function f(a, i) {
set_proto_elements();
return a[i] + 0.5;
-}
-
-var arr = [0.0,,2.5];
+};
+%PrepareFunctionForOptimization(f);
+var arr = [0.0, , 2.5];
assertEquals(0.5, f(arr, 0));
assertEquals(0.5, f(arr, 0));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/error-stack.js b/deps/v8/test/mjsunit/error-stack.js
index b446a398d6..566bca4324 100644
--- a/deps/v8/test/mjsunit/error-stack.js
+++ b/deps/v8/test/mjsunit/error-stack.js
@@ -73,3 +73,31 @@
assertEquals(error[42], "Who needs stack traces anyway?");
assertEquals(error.stack, undefined); // No getter.
})();
+
+(function TestFormatStackPropertyInDictionaryMode() {
+ Error.prepareStackTrace = (error, frames) => {
+ return "<formatted stack trace>";
+ };
+ const error = new Error("foo");
+ error[%MaxSmi()] = 42;
+
+ assertTrue(%HasDictionaryElements(error));
+
+ // Check it twice.
+ assertEquals(error.stack, "<formatted stack trace>");
+ assertEquals(error.stack, "<formatted stack trace>");
+})();
+
+(function TestTransitionToDictionaryModeAfterFormatting() {
+ Error.prepareStackTrace = (error, frames) => {
+ return "<formatted stack trace>";
+ };
+ const error = new Error("foo");
+ assertFalse(%HasDictionaryElements(error));
+
+ assertEquals(error.stack, "<formatted stack trace>");
+
+ error[%MaxSmi()] = 42;
+ assertTrue(%HasDictionaryElements(error));
+ assertEquals(error.stack, "<formatted stack trace>");
+})();
diff --git a/deps/v8/test/mjsunit/es6/classes-accesors.js b/deps/v8/test/mjsunit/es6/classes-accesors.js
new file mode 100644
index 0000000000..fedb548d23
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-accesors.js
@@ -0,0 +1,97 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var b = 'b';
+
+(function TestOverwritingInstanceAccessors() {
+ var C, desc;
+ C = class {
+ [b]() { return 'B'; };
+ get b() { return 'get B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals('get B', desc.get());
+ assertEquals(undefined, desc.set);
+
+ C = class {
+ [b]() { return 'B'; };
+ set b(v) { return 'set B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals(undefined, desc.get);
+ assertEquals('set B', desc.set());
+
+ C = class {
+ set b(v) { return 'get B'; };
+ [b]() { return 'B'; };
+ get b() { return 'get B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals('get B', desc.get());
+ assertEquals(undefined, desc.set);
+
+ C = class {
+ get b() { return 'get B'; };
+ [b]() { return 'B'; };
+ set b(v) { return 'set B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals(undefined, desc.get);
+ assertEquals('set B', desc.set());
+})();
+
+(function TestOverwritingStaticAccessors() {
+ var C, desc;
+ C = class {
+ static [b]() { return 'B'; };
+ static get b() { return 'get B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals('get B', desc.get());
+ assertEquals(undefined, desc.set);
+
+ C = class {
+ static [b]() { return 'B'; };
+ static set b(v) { return 'set B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals(undefined, desc.get);
+ assertEquals('set B', desc.set());
+
+ C = class {
+ static set b(v) { return 'get B'; };
+ static [b]() { return 'B'; };
+ static get b() { return 'get B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals('get B', desc.get());
+ assertEquals(undefined, desc.set);
+
+ C = class {
+ static get b() { return 'get B'; };
+ static [b]() { return 'B'; };
+ static set b(v) { return 'set B'; };
+ };
+ desc = Object.getOwnPropertyDescriptor(C, 'b');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ assertEquals(undefined, desc.get);
+ assertEquals('set B', desc.set());
+})();
diff --git a/deps/v8/test/mjsunit/es6/classes-constructor.js b/deps/v8/test/mjsunit/es6/classes-constructor.js
new file mode 100644
index 0000000000..faf9404f07
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-constructor.js
@@ -0,0 +1,131 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestDefaultConstructorNoCrash() {
+ // Regression test for https://code.google.com/p/v8/issues/detail?id=3661
+ class C {}
+ assertThrows(function () {C();}, TypeError);
+ assertThrows(function () {C(1);}, TypeError);
+ assertTrue(new C() instanceof C);
+ assertTrue(new C(1) instanceof C);
+})();
+
+
+(function TestConstructorCall(){
+ var realmIndex = Realm.create();
+ var otherTypeError = Realm.eval(realmIndex, "TypeError");
+ var A = Realm.eval(realmIndex, '"use strict"; class A {}; A');
+ var instance = new A();
+ var constructor = instance.constructor;
+ var otherTypeError = Realm.eval(realmIndex, 'TypeError');
+ if (otherTypeError === TypeError) {
+ throw Error('Should not happen!');
+ }
+
+ // ES6 9.2.1[[Call]] throws a TypeError in the caller context/Realm when the
+ // called function is a classConstructor
+ assertThrows(function() { Realm.eval(realmIndex, "A()") }, otherTypeError);
+ assertThrows(function() { instance.constructor() }, TypeError);
+ assertThrows(function() { A() }, TypeError);
+
+ // ES6 9.3.1 call() first activates the callee context before invoking the
+ // method. The TypeError from the constructor is thus thrown in the other
+ // Realm.
+ assertThrows(function() { Realm.eval(realmIndex, "A.call()") },
+ otherTypeError);
+ assertThrows(function() { constructor.call() }, otherTypeError);
+ assertThrows(function() { A.call() }, otherTypeError);
+})();
+
+
+(function TestConstructorCallOptimized() {
+ class A { };
+
+ function invoke_constructor() { A() }
+ function call_constructor() { A.call() }
+ function apply_constructor() { A.apply() }
+ %PrepareFunctionForOptimization(invoke_constructor);
+ %PrepareFunctionForOptimization(call_constructor);
+ %PrepareFunctionForOptimization(apply_constructor);
+
+ for (var i=0; i<3; i++) {
+ assertThrows(invoke_constructor);
+ assertThrows(call_constructor);
+ assertThrows(apply_constructor);
+ }
+ // Make sure we still check for class constructors when calling optimized
+ // code.
+ %OptimizeFunctionOnNextCall(invoke_constructor);
+ assertThrows(invoke_constructor);
+ %OptimizeFunctionOnNextCall(call_constructor);
+ assertThrows(call_constructor);
+ %OptimizeFunctionOnNextCall(apply_constructor);
+ assertThrows(apply_constructor);
+})();
+
+
+(function TestDefaultConstructor() {
+ var calls = 0;
+ class Base {
+ constructor() {
+ calls++;
+ }
+ }
+ class Derived extends Base {}
+ var object = new Derived;
+ assertEquals(1, calls);
+
+ calls = 0;
+ assertThrows(function() { Derived(); }, TypeError);
+ assertEquals(0, calls);
+})();
+
+
+(function TestDefaultConstructorArguments() {
+ var args, self;
+ class Base {
+ constructor() {
+ self = this;
+ args = arguments;
+ }
+ }
+ class Derived extends Base {}
+
+ new Derived;
+ assertEquals(0, args.length);
+
+ new Derived(0, 1, 2);
+ assertEquals(3, args.length);
+ assertTrue(self instanceof Derived);
+
+ var arr = new Array(100);
+ var obj = {};
+ assertThrows(function() {Derived.apply(obj, arr);}, TypeError);
+})();
+
+
+(function TestDefaultConstructorArguments2() {
+ var args;
+ class Base {
+ constructor(x, y) {
+ args = arguments;
+ }
+ }
+ class Derived extends Base {}
+
+ new Derived;
+ assertEquals(0, args.length);
+
+ new Derived(1);
+ assertEquals(1, args.length);
+ assertEquals(1, args[0]);
+
+ new Derived(1, 2, 3);
+ assertEquals(3, args.length);
+ assertEquals(1, args[0]);
+ assertEquals(2, args[1]);
+ assertEquals(3, args[2]);
+})();
diff --git a/deps/v8/test/mjsunit/es6/classes-name-binding.js b/deps/v8/test/mjsunit/es6/classes-name-binding.js
new file mode 100644
index 0000000000..8dddcd7237
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-name-binding.js
@@ -0,0 +1,98 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestNameBindingConst() {
+ assertThrows('class C { constructor() { C = 42; } }; new C();', TypeError);
+ assertThrows('new (class C { constructor() { C = 42; } })', TypeError);
+ assertThrows('class C { m() { C = 42; } }; new C().m()', TypeError);
+ assertThrows('new (class C { m() { C = 42; } }).m()', TypeError);
+ assertThrows('class C { get x() { C = 42; } }; new C().x', TypeError);
+ assertThrows('(new (class C { get x() { C = 42; } })).x', TypeError);
+ assertThrows('class C { set x(_) { C = 42; } }; new C().x = 15;', TypeError);
+ assertThrows('(new (class C { set x(_) { C = 42; } })).x = 15;', TypeError);
+})();
+
+(function TestNameBinding() {
+ var C2;
+ class C {
+ constructor() {
+ C2 = C;
+ }
+ m() {
+ C2 = C;
+ }
+ get x() {
+ C2 = C;
+ }
+ set x(_) {
+ C2 = C;
+ }
+ }
+ new C();
+ assertEquals(C, C2);
+
+ C2 = undefined;
+ new C().m();
+ assertEquals(C, C2);
+
+ C2 = undefined;
+ new C().x;
+ assertEquals(C, C2);
+
+ C2 = undefined;
+ new C().x = 1;
+ assertEquals(C, C2);
+})();
+
+(function TestNameBindingExpression() {
+ var C3;
+ var C = class C2 {
+ constructor() {
+ assertEquals(C2, C);
+ C3 = C2;
+ }
+ m() {
+ assertEquals(C2, C);
+ C3 = C2;
+ }
+ get x() {
+ assertEquals(C2, C);
+ C3 = C2;
+ }
+ set x(_) {
+ assertEquals(C2, C);
+ C3 = C2;
+ }
+ }
+ new C();
+ assertEquals(C, C3);
+
+ C3 = undefined;
+ new C().m();
+ assertEquals(C, C3);
+
+ C3 = undefined;
+ new C().x;
+ assertEquals(C, C3);
+
+ C3 = undefined;
+ new C().x = 1;
+ assertEquals(C, C3);
+})();
+
+(function TestNameBindingInExtendsExpression() {
+ assertThrows(function() {
+ class x extends x {}
+ }, ReferenceError);
+
+ assertThrows(function() {
+ (class x extends x {});
+ }, ReferenceError);
+
+ assertThrows(function() {
+ var x = (class x extends x {});
+ }, ReferenceError);
+})();
diff --git a/deps/v8/test/mjsunit/es6/classes-proto.js b/deps/v8/test/mjsunit/es6/classes-proto.js
new file mode 100644
index 0000000000..1f224de1db
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-proto.js
@@ -0,0 +1,152 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function assertMethodDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertTrue(descr.writable);
+ assertEquals('function', typeof descr.value);
+ assertFalse('prototype' in descr.value);
+ assertEquals(name, descr.value.name);
+}
+
+
+function assertAccessorDescriptor(object, name) {
+ var descr = Object.getOwnPropertyDescriptor(object, name);
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertEquals('function', typeof descr.get);
+ assertEquals('function', typeof descr.set);
+ assertFalse('prototype' in descr.get);
+ assertFalse('prototype' in descr.set);
+ assertEquals("get " + name, descr.get.name);
+ assertEquals("set " + name, descr.set.name);
+}
+
+
+(function TestProto() {
+ class C {
+ __proto__() { return 1; }
+ }
+ assertMethodDescriptor(C.prototype, '__proto__');
+ assertEquals(1, new C().__proto__());
+})();
+
+
+(function TestProtoStatic() {
+ class C {
+ static __proto__() { return 1; }
+ }
+ assertMethodDescriptor(C, '__proto__');
+ assertEquals(1, C.__proto__());
+})();
+
+
+(function TestProtoAccessor() {
+ class C {
+ get __proto__() { return this._p; }
+ set __proto__(v) { this._p = v; }
+ }
+ assertAccessorDescriptor(C.prototype, '__proto__');
+ var c = new C();
+ c._p = 1;
+ assertEquals(1, c.__proto__);
+ c.__proto__ = 2;
+ assertEquals(2, c.__proto__);
+})();
+
+
+(function TestStaticProtoAccessor() {
+ class C {
+ static get __proto__() { return this._p; }
+ static set __proto__(v) { this._p = v; }
+ }
+ assertAccessorDescriptor(C, '__proto__');
+ C._p = 1;
+ assertEquals(1, C.__proto__);
+ C.__proto__ = 2;
+ assertEquals(2, C.__proto__);
+})();
+
+
+(function TestSettersOnProto() {
+ function Base() {}
+ Base.prototype = {
+ set constructor(_) {
+ assertUnreachable();
+ },
+ set m(_) {
+ assertUnreachable();
+ }
+ };
+ Object.defineProperty(Base, 'staticM', {
+ set: function() {
+ assertUnreachable();
+ }
+ });
+
+ class C extends Base {
+ m() {
+ return 1;
+ }
+ static staticM() {
+ return 2;
+ }
+ }
+
+ assertEquals(1, new C().m());
+ assertEquals(2, C.staticM());
+})();
+
+
+(function TestConstructableButNoPrototype() {
+ var Base = function() {}.bind();
+ assertThrows(function() {
+ class C extends Base {}
+ }, TypeError);
+})();
+
+
+(function TestPrototypeGetter() {
+ var calls = 0;
+ var Base = function() {}.bind();
+ Object.defineProperty(Base, 'prototype', {
+ get: function() {
+ calls++;
+ return null;
+ },
+ configurable: true
+ });
+ class C extends Base {}
+ assertEquals(1, calls);
+
+ calls = 0;
+ Object.defineProperty(Base, 'prototype', {
+ get: function() {
+ calls++;
+ return 42;
+ },
+ configurable: true
+ });
+ assertThrows(function() {
+ class C extends Base {}
+ }, TypeError);
+ assertEquals(1, calls);
+})();
+
+
+(function TestPrototypeSetter() {
+ var Base = function() {}.bind();
+ Object.defineProperty(Base, 'prototype', {
+ set: function() {
+ assertUnreachable();
+ }
+ });
+ assertThrows(function() {
+ class C extends Base {}
+ }, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/es6/classes-restricted-properties.js b/deps/v8/test/mjsunit/es6/classes-restricted-properties.js
new file mode 100644
index 0000000000..085f2f23f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-restricted-properties.js
@@ -0,0 +1,165 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestThisAccessRestriction() {
+ class Base {}
+ (function() {
+ class C extends Base {
+ constructor() {
+ var y;
+ super();
+ }
+ }; new C();
+ }());
+ assertThrows(function() {
+ class C extends Base {
+ constructor() {
+ super(this.x);
+ }
+ }; new C();
+ }, ReferenceError);
+ assertThrows(function() {
+ class C extends Base {
+ constructor() {
+ super(this);
+ }
+ }; new C();
+ }, ReferenceError);
+ assertThrows(function() {
+ class C extends Base {
+ constructor() {
+ super.method();
+ super(this);
+ }
+ }; new C();
+ }, ReferenceError);
+ assertThrows(function() {
+ class C extends Base {
+ constructor() {
+ super(super.method());
+ }
+ }; new C();
+ }, ReferenceError);
+ assertThrows(function() {
+ class C extends Base {
+ constructor() {
+ super(super());
+ }
+ }; new C();
+ }, ReferenceError);
+ assertThrows(function() {
+ class C extends Base {
+ constructor() {
+ super(1, 2, Object.getPrototypeOf(this));
+ }
+ }; new C();
+ }, ReferenceError);
+ (function() {
+ class C extends Base {
+ constructor() {
+ { super(1, 2); }
+ }
+ }; new C();
+ }());
+ (function() {
+ class C extends Base {
+ constructor() {
+ if (1) super();
+ }
+ }; new C();
+ }());
+
+ class C1 extends Object {
+ constructor() {
+ 'use strict';
+ super();
+ }
+ };
+ new C1();
+
+ class C2 extends Object {
+ constructor() {
+ ; 'use strict';;;;;
+ super();
+ }
+ };
+ new C2();
+
+ class C3 extends Object {
+ constructor() {
+ ; 'use strict';;;;;
+ // This is a comment.
+ super();
+ }
+ };
+ new C3();
+}());
+
+
+function testClassRestrictedProperties(C) {
+ assertEquals(false, C.hasOwnProperty("arguments"));
+ assertThrows(function() { return C.arguments; }, TypeError);
+ assertThrows(function() { C.arguments = {}; }, TypeError);
+
+ assertEquals(false, C.hasOwnProperty("caller"));
+ assertThrows(function() { return C.caller; }, TypeError);
+ assertThrows(function() { C.caller = {}; }, TypeError);
+
+ assertEquals(false, (new C).method.hasOwnProperty("arguments"));
+ assertThrows(function() { return new C().method.arguments; }, TypeError);
+ assertThrows(function() { new C().method.arguments = {}; }, TypeError);
+
+ assertEquals(false, (new C).method.hasOwnProperty("caller"));
+ assertThrows(function() { return new C().method.caller; }, TypeError);
+ assertThrows(function() { new C().method.caller = {}; }, TypeError);
+}
+
+
+(function testRestrictedPropertiesStrict() {
+ "use strict";
+ class ClassWithDefaultConstructor {
+ method() {}
+ }
+ class Class {
+ constructor() {}
+ method() {}
+ }
+ class DerivedClassWithDefaultConstructor extends Class {}
+ class DerivedClass extends Class { constructor() { super(); } }
+
+ testClassRestrictedProperties(ClassWithDefaultConstructor);
+ testClassRestrictedProperties(Class);
+ testClassRestrictedProperties(DerivedClassWithDefaultConstructor);
+ testClassRestrictedProperties(DerivedClass);
+ testClassRestrictedProperties(class { method() {} });
+ testClassRestrictedProperties(class { constructor() {} method() {} });
+ testClassRestrictedProperties(class extends Class { });
+ testClassRestrictedProperties(
+ class extends Class { constructor() { super(); } });
+})();
+
+
+(function testRestrictedPropertiesSloppy() {
+ class ClassWithDefaultConstructor {
+ method() {}
+ }
+ class Class {
+ constructor() {}
+ method() {}
+ }
+ class DerivedClassWithDefaultConstructor extends Class {}
+ class DerivedClass extends Class { constructor() { super(); } }
+
+ testClassRestrictedProperties(ClassWithDefaultConstructor);
+ testClassRestrictedProperties(Class);
+ testClassRestrictedProperties(DerivedClassWithDefaultConstructor);
+ testClassRestrictedProperties(DerivedClass);
+ testClassRestrictedProperties(class { method() {} });
+ testClassRestrictedProperties(class { constructor() {} method() {} });
+ testClassRestrictedProperties(class extends Class { });
+ testClassRestrictedProperties(
+ class extends Class { constructor() { super(); } });
+})();
diff --git a/deps/v8/test/mjsunit/es6/classes-test-super.js b/deps/v8/test/mjsunit/es6/classes-test-super.js
new file mode 100644
index 0000000000..92f18729a8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-test-super.js
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestSuperInMethods() {
+ class B {
+ method() {
+ return 1;
+ }
+ get x() {
+ return 2;
+ }
+ }
+ class C extends B {
+ method() {
+ assertEquals(2, super.x);
+ return super.method();
+ }
+ }
+ assertEquals(1, new C().method());
+})();
+
+
+(function TestSuperInGetter() {
+ class B {
+ method() {
+ return 1;
+ }
+ get x() {
+ return 2;
+ }
+ }
+ class C extends B {
+ get y() {
+ assertEquals(2, super.x);
+ return super.method();
+ }
+ }
+ assertEquals(1, new C().y);
+})();
+
+
+(function TestSuperInSetter() {
+ class B {
+ method() {
+ return 1;
+ }
+ get x() {
+ return 2;
+ }
+ }
+ class C extends B {
+ set y(v) {
+ assertEquals(3, v);
+ assertEquals(2, super.x);
+ assertEquals(1, super.method());
+ }
+ }
+ assertEquals(3, new C().y = 3);
+})();
+
+
+(function TestSuperInStaticMethods() {
+ class B {
+ static method() {
+ return 1;
+ }
+ static get x() {
+ return 2;
+ }
+ }
+ class C extends B {
+ static method() {
+ assertEquals(2, super.x);
+ return super.method();
+ }
+ }
+ assertEquals(1, C.method());
+})();
+
+
+(function TestSuperInStaticGetter() {
+ class B {
+ static method() {
+ return 1;
+ }
+ static get x() {
+ return 2;
+ }
+ }
+ class C extends B {
+ static get x() {
+ assertEquals(2, super.x);
+ return super.method();
+ }
+ }
+ assertEquals(1, C.x);
+})();
+
+
+(function TestSuperInStaticSetter() {
+ class B {
+ static method() {
+ return 1;
+ }
+ static get x() {
+ return 2;
+ }
+ }
+ class C extends B {
+ static set x(v) {
+ assertEquals(3, v);
+ assertEquals(2, super.x);
+ assertEquals(1, super.method());
+ }
+ }
+ assertEquals(3, C.x = 3);
+})();
diff --git a/deps/v8/test/mjsunit/es6/classes.js b/deps/v8/test/mjsunit/es6/classes.js
index 6c7a0fb869..eb25f07685 100644
--- a/deps/v8/test/mjsunit/es6/classes.js
+++ b/deps/v8/test/mjsunit/es6/classes.js
@@ -218,7 +218,6 @@
assertEquals('class E { x() { 42; } }', E.toString());
})();
-
function assertMethodDescriptor(object, name) {
var descr = Object.getOwnPropertyDescriptor(object, name);
assertTrue(descr.configurable);
@@ -376,247 +375,6 @@ function assertAccessorDescriptor(object, name) {
assertEquals(4, C.staticX );
})();
-
-(function TestProto() {
- class C {
- __proto__() { return 1; }
- }
- assertMethodDescriptor(C.prototype, '__proto__');
- assertEquals(1, new C().__proto__());
-})();
-
-
-(function TestProtoStatic() {
- class C {
- static __proto__() { return 1; }
- }
- assertMethodDescriptor(C, '__proto__');
- assertEquals(1, C.__proto__());
-})();
-
-
-(function TestProtoAccessor() {
- class C {
- get __proto__() { return this._p; }
- set __proto__(v) { this._p = v; }
- }
- assertAccessorDescriptor(C.prototype, '__proto__');
- var c = new C();
- c._p = 1;
- assertEquals(1, c.__proto__);
- c.__proto__ = 2;
- assertEquals(2, c.__proto__);
-})();
-
-
-(function TestStaticProtoAccessor() {
- class C {
- static get __proto__() { return this._p; }
- static set __proto__(v) { this._p = v; }
- }
- assertAccessorDescriptor(C, '__proto__');
- C._p = 1;
- assertEquals(1, C.__proto__);
- C.__proto__ = 2;
- assertEquals(2, C.__proto__);
-})();
-
-
-(function TestSettersOnProto() {
- function Base() {}
- Base.prototype = {
- set constructor(_) {
- assertUnreachable();
- },
- set m(_) {
- assertUnreachable();
- }
- };
- Object.defineProperty(Base, 'staticM', {
- set: function() {
- assertUnreachable();
- }
- });
-
- class C extends Base {
- m() {
- return 1;
- }
- static staticM() {
- return 2;
- }
- }
-
- assertEquals(1, new C().m());
- assertEquals(2, C.staticM());
-})();
-
-
-(function TestConstructableButNoPrototype() {
- var Base = function() {}.bind();
- assertThrows(function() {
- class C extends Base {}
- }, TypeError);
-})();
-
-
-(function TestPrototypeGetter() {
- var calls = 0;
- var Base = function() {}.bind();
- Object.defineProperty(Base, 'prototype', {
- get: function() {
- calls++;
- return null;
- },
- configurable: true
- });
- class C extends Base {}
- assertEquals(1, calls);
-
- calls = 0;
- Object.defineProperty(Base, 'prototype', {
- get: function() {
- calls++;
- return 42;
- },
- configurable: true
- });
- assertThrows(function() {
- class C extends Base {}
- }, TypeError);
- assertEquals(1, calls);
-})();
-
-
-(function TestPrototypeSetter() {
- var Base = function() {}.bind();
- Object.defineProperty(Base, 'prototype', {
- set: function() {
- assertUnreachable();
- }
- });
- assertThrows(function() {
- class C extends Base {}
- }, TypeError);
-})();
-
-
-(function TestSuperInMethods() {
- class B {
- method() {
- return 1;
- }
- get x() {
- return 2;
- }
- }
- class C extends B {
- method() {
- assertEquals(2, super.x);
- return super.method();
- }
- }
- assertEquals(1, new C().method());
-})();
-
-
-(function TestSuperInGetter() {
- class B {
- method() {
- return 1;
- }
- get x() {
- return 2;
- }
- }
- class C extends B {
- get y() {
- assertEquals(2, super.x);
- return super.method();
- }
- }
- assertEquals(1, new C().y);
-})();
-
-
-(function TestSuperInSetter() {
- class B {
- method() {
- return 1;
- }
- get x() {
- return 2;
- }
- }
- class C extends B {
- set y(v) {
- assertEquals(3, v);
- assertEquals(2, super.x);
- assertEquals(1, super.method());
- }
- }
- assertEquals(3, new C().y = 3);
-})();
-
-
-(function TestSuperInStaticMethods() {
- class B {
- static method() {
- return 1;
- }
- static get x() {
- return 2;
- }
- }
- class C extends B {
- static method() {
- assertEquals(2, super.x);
- return super.method();
- }
- }
- assertEquals(1, C.method());
-})();
-
-
-(function TestSuperInStaticGetter() {
- class B {
- static method() {
- return 1;
- }
- static get x() {
- return 2;
- }
- }
- class C extends B {
- static get x() {
- assertEquals(2, super.x);
- return super.method();
- }
- }
- assertEquals(1, C.x);
-})();
-
-
-(function TestSuperInStaticSetter() {
- class B {
- static method() {
- return 1;
- }
- static get x() {
- return 2;
- }
- }
- class C extends B {
- static set x(v) {
- assertEquals(3, v);
- assertEquals(2, super.x);
- assertEquals(1, super.method());
- }
- }
- assertEquals(3, C.x = 3);
-})();
-
-
(function TestNumericPropertyNames() {
class B {
1() { return 1; }
@@ -684,392 +442,6 @@ function assertAccessorDescriptor(object, name) {
assertEquals(2147483653, C[2147483653]);
})();
-
-(function TestDefaultConstructorNoCrash() {
- // Regression test for https://code.google.com/p/v8/issues/detail?id=3661
- class C {}
- assertThrows(function () {C();}, TypeError);
- assertThrows(function () {C(1);}, TypeError);
- assertTrue(new C() instanceof C);
- assertTrue(new C(1) instanceof C);
-})();
-
-
-(function TestConstructorCall(){
- var realmIndex = Realm.create();
- var otherTypeError = Realm.eval(realmIndex, "TypeError");
- var A = Realm.eval(realmIndex, '"use strict"; class A {}; A');
- var instance = new A();
- var constructor = instance.constructor;
- var otherTypeError = Realm.eval(realmIndex, 'TypeError');
- if (otherTypeError === TypeError) {
- throw Error('Should not happen!');
- }
-
- // ES6 9.2.1[[Call]] throws a TypeError in the caller context/Realm when the
- // called function is a classConstructor
- assertThrows(function() { Realm.eval(realmIndex, "A()") }, otherTypeError);
- assertThrows(function() { instance.constructor() }, TypeError);
- assertThrows(function() { A() }, TypeError);
-
- // ES6 9.3.1 call() first activates the callee context before invoking the
- // method. The TypeError from the constructor is thus thrown in the other
- // Realm.
- assertThrows(function() { Realm.eval(realmIndex, "A.call()") },
- otherTypeError);
- assertThrows(function() { constructor.call() }, otherTypeError);
- assertThrows(function() { A.call() }, otherTypeError);
-})();
-
-
-(function TestConstructorCallOptimized() {
- class A { };
-
- function invoke_constructor() { A() }
- function call_constructor() { A.call() }
- function apply_constructor() { A.apply() }
- %PrepareFunctionForOptimization(invoke_constructor);
- %PrepareFunctionForOptimization(call_constructor);
- %PrepareFunctionForOptimization(apply_constructor);
-
- for (var i=0; i<3; i++) {
- assertThrows(invoke_constructor);
- assertThrows(call_constructor);
- assertThrows(apply_constructor);
- }
- // Make sure we still check for class constructors when calling optimized
- // code.
- %OptimizeFunctionOnNextCall(invoke_constructor);
- assertThrows(invoke_constructor);
- %OptimizeFunctionOnNextCall(call_constructor);
- assertThrows(call_constructor);
- %OptimizeFunctionOnNextCall(apply_constructor);
- assertThrows(apply_constructor);
-})();
-
-
-(function TestDefaultConstructor() {
- var calls = 0;
- class Base {
- constructor() {
- calls++;
- }
- }
- class Derived extends Base {}
- var object = new Derived;
- assertEquals(1, calls);
-
- calls = 0;
- assertThrows(function() { Derived(); }, TypeError);
- assertEquals(0, calls);
-})();
-
-
-(function TestDefaultConstructorArguments() {
- var args, self;
- class Base {
- constructor() {
- self = this;
- args = arguments;
- }
- }
- class Derived extends Base {}
-
- new Derived;
- assertEquals(0, args.length);
-
- new Derived(0, 1, 2);
- assertEquals(3, args.length);
- assertTrue(self instanceof Derived);
-
- var arr = new Array(100);
- var obj = {};
- assertThrows(function() {Derived.apply(obj, arr);}, TypeError);
-})();
-
-
-(function TestDefaultConstructorArguments2() {
- var args;
- class Base {
- constructor(x, y) {
- args = arguments;
- }
- }
- class Derived extends Base {}
-
- new Derived;
- assertEquals(0, args.length);
-
- new Derived(1);
- assertEquals(1, args.length);
- assertEquals(1, args[0]);
-
- new Derived(1, 2, 3);
- assertEquals(3, args.length);
- assertEquals(1, args[0]);
- assertEquals(2, args[1]);
- assertEquals(3, args[2]);
-})();
-
-
-(function TestNameBindingConst() {
- assertThrows('class C { constructor() { C = 42; } }; new C();', TypeError);
- assertThrows('new (class C { constructor() { C = 42; } })', TypeError);
- assertThrows('class C { m() { C = 42; } }; new C().m()', TypeError);
- assertThrows('new (class C { m() { C = 42; } }).m()', TypeError);
- assertThrows('class C { get x() { C = 42; } }; new C().x', TypeError);
- assertThrows('(new (class C { get x() { C = 42; } })).x', TypeError);
- assertThrows('class C { set x(_) { C = 42; } }; new C().x = 15;', TypeError);
- assertThrows('(new (class C { set x(_) { C = 42; } })).x = 15;', TypeError);
-})();
-
-
-(function TestNameBinding() {
- var C2;
- class C {
- constructor() {
- C2 = C;
- }
- m() {
- C2 = C;
- }
- get x() {
- C2 = C;
- }
- set x(_) {
- C2 = C;
- }
- }
- new C();
- assertEquals(C, C2);
-
- C2 = undefined;
- new C().m();
- assertEquals(C, C2);
-
- C2 = undefined;
- new C().x;
- assertEquals(C, C2);
-
- C2 = undefined;
- new C().x = 1;
- assertEquals(C, C2);
-})();
-
-
-(function TestNameBindingExpression() {
- var C3;
- var C = class C2 {
- constructor() {
- assertEquals(C2, C);
- C3 = C2;
- }
- m() {
- assertEquals(C2, C);
- C3 = C2;
- }
- get x() {
- assertEquals(C2, C);
- C3 = C2;
- }
- set x(_) {
- assertEquals(C2, C);
- C3 = C2;
- }
- }
- new C();
- assertEquals(C, C3);
-
- C3 = undefined;
- new C().m();
- assertEquals(C, C3);
-
- C3 = undefined;
- new C().x;
- assertEquals(C, C3);
-
- C3 = undefined;
- new C().x = 1;
- assertEquals(C, C3);
-})();
-
-
-(function TestNameBindingInExtendsExpression() {
- assertThrows(function() {
- class x extends x {}
- }, ReferenceError);
-
- assertThrows(function() {
- (class x extends x {});
- }, ReferenceError);
-
- assertThrows(function() {
- var x = (class x extends x {});
- }, ReferenceError);
-})();
-
-
-(function TestThisAccessRestriction() {
- class Base {}
- (function() {
- class C extends Base {
- constructor() {
- var y;
- super();
- }
- }; new C();
- }());
- assertThrows(function() {
- class C extends Base {
- constructor() {
- super(this.x);
- }
- }; new C();
- }, ReferenceError);
- assertThrows(function() {
- class C extends Base {
- constructor() {
- super(this);
- }
- }; new C();
- }, ReferenceError);
- assertThrows(function() {
- class C extends Base {
- constructor() {
- super.method();
- super(this);
- }
- }; new C();
- }, ReferenceError);
- assertThrows(function() {
- class C extends Base {
- constructor() {
- super(super.method());
- }
- }; new C();
- }, ReferenceError);
- assertThrows(function() {
- class C extends Base {
- constructor() {
- super(super());
- }
- }; new C();
- }, ReferenceError);
- assertThrows(function() {
- class C extends Base {
- constructor() {
- super(1, 2, Object.getPrototypeOf(this));
- }
- }; new C();
- }, ReferenceError);
- (function() {
- class C extends Base {
- constructor() {
- { super(1, 2); }
- }
- }; new C();
- }());
- (function() {
- class C extends Base {
- constructor() {
- if (1) super();
- }
- }; new C();
- }());
-
- class C1 extends Object {
- constructor() {
- 'use strict';
- super();
- }
- };
- new C1();
-
- class C2 extends Object {
- constructor() {
- ; 'use strict';;;;;
- super();
- }
- };
- new C2();
-
- class C3 extends Object {
- constructor() {
- ; 'use strict';;;;;
- // This is a comment.
- super();
- }
- };
- new C3();
-}());
-
-
-function testClassRestrictedProperties(C) {
- assertEquals(false, C.hasOwnProperty("arguments"));
- assertThrows(function() { return C.arguments; }, TypeError);
- assertThrows(function() { C.arguments = {}; }, TypeError);
-
- assertEquals(false, C.hasOwnProperty("caller"));
- assertThrows(function() { return C.caller; }, TypeError);
- assertThrows(function() { C.caller = {}; }, TypeError);
-
- assertEquals(false, (new C).method.hasOwnProperty("arguments"));
- assertThrows(function() { return new C().method.arguments; }, TypeError);
- assertThrows(function() { new C().method.arguments = {}; }, TypeError);
-
- assertEquals(false, (new C).method.hasOwnProperty("caller"));
- assertThrows(function() { return new C().method.caller; }, TypeError);
- assertThrows(function() { new C().method.caller = {}; }, TypeError);
-}
-
-
-(function testRestrictedPropertiesStrict() {
- "use strict";
- class ClassWithDefaultConstructor {
- method() {}
- }
- class Class {
- constructor() {}
- method() {}
- }
- class DerivedClassWithDefaultConstructor extends Class {}
- class DerivedClass extends Class { constructor() { super(); } }
-
- testClassRestrictedProperties(ClassWithDefaultConstructor);
- testClassRestrictedProperties(Class);
- testClassRestrictedProperties(DerivedClassWithDefaultConstructor);
- testClassRestrictedProperties(DerivedClass);
- testClassRestrictedProperties(class { method() {} });
- testClassRestrictedProperties(class { constructor() {} method() {} });
- testClassRestrictedProperties(class extends Class { });
- testClassRestrictedProperties(
- class extends Class { constructor() { super(); } });
-})();
-
-
-(function testRestrictedPropertiesSloppy() {
- class ClassWithDefaultConstructor {
- method() {}
- }
- class Class {
- constructor() {}
- method() {}
- }
- class DerivedClassWithDefaultConstructor extends Class {}
- class DerivedClass extends Class { constructor() { super(); } }
-
- testClassRestrictedProperties(ClassWithDefaultConstructor);
- testClassRestrictedProperties(Class);
- testClassRestrictedProperties(DerivedClassWithDefaultConstructor);
- testClassRestrictedProperties(DerivedClass);
- testClassRestrictedProperties(class { method() {} });
- testClassRestrictedProperties(class { constructor() {} method() {} });
- testClassRestrictedProperties(class extends Class { });
- testClassRestrictedProperties(
- class extends Class { constructor() { super(); } });
-})();
-
-
(function testReturnFromClassLiteral() {
function usingYieldInBody() {
@@ -1095,198 +467,3 @@ function testClassRestrictedProperties(C) {
assertEquals(42, usingYieldInExtends());
})();
-
-
-(function testLargeClassesMethods() {
- const kLimit = 2000;
- let evalString = "(function(i) { " +
- "let clazz = class { " +
- " constructor(i) { this.value = i; } ";
- for (let i = 0; i < 2000; i++) {
- evalString += "property"+i+"() { return "+i+"; }; "
- }
- evalString += "};" +
- " return new clazz(i); })";
-
- let fn = eval(evalString);
- %PrepareFunctionForOptimization(fn);
- assertEquals(fn(1).value, 1);
- assertEquals(fn(2).value, 2);
- assertEquals(fn(3).value, 3);
- %OptimizeFunctionOnNextCall(fn);
- assertEquals(fn(4).value, 4);
-
- let instance = fn(1);
- assertEquals(Object.getOwnPropertyNames(instance).length, 1);
- assertEquals(Object.getOwnPropertyNames(instance.__proto__).length,
- kLimit + 1);
-
- // Call all instance functions.
- for (let i = 0; i < kLimit; i++) {
- const key = "property" + i;
- assertEquals(instance[key](), i);
- }
-})();
-
-
-(function testLargeClassesStaticMethods() {
- const kLimit = 2000;
- let evalString = "(function(i) { " +
- "let clazz = class { " +
- " constructor(i) { this.value = i; } ";
- for (let i = 0; i < kLimit; i++) {
- evalString += "static property"+i+"() { return "+i+" }; "
- }
- evalString += "};" +
- " return new clazz(i); })";
-
- let fn = eval(evalString);
-
- %PrepareFunctionForOptimization(fn);
- assertEquals(fn(1).value, 1);
- assertEquals(fn(2).value, 2);
- assertEquals(fn(3).value, 3);
- %OptimizeFunctionOnNextCall(fn);
- assertEquals(fn(4).value, 4);
-
- let instance = fn(1);
- assertEquals(Object.getOwnPropertyNames(instance).length, 1);
- assertEquals(instance.value, 1);
- instance.value = 10;
- assertEquals(instance.value, 10);
-
- // kLimit + nof default properties (length, prototype, name).
- assertEquals(Object.getOwnPropertyNames(instance.constructor).length,
- kLimit + 3);
-
- // Call all static properties.
- for (let i = 0; i < kLimit; i++) {
- const key = "property" + i;
- assertEquals(instance.constructor[key](), i);
- }
-})();
-
-
-(function testLargeClassesProperties(){
- const kLimit = 2000;
- let evalString = "(function(i) { " +
- "let clazz = class { " +
- " constructor(i) { this.value = i;";
- for (let i = 0; i < kLimit ; i++) {
- evalString += "this.property"+i +" = "+i+"; "
- }
- evalString += "}};" +
- " return (new clazz(i)); })";
-
- let fn = eval(evalString);
- %PrepareFunctionForOptimization(fn);
- assertEquals(fn(1).value, 1);
- assertEquals(fn(2).value, 2);
- assertEquals(fn(3).value, 3);
- %OptimizeFunctionOnNextCall(fn);
- assertEquals(fn(4).value, 4);
-
- let instance = fn(1);
- assertEquals(Object.getOwnPropertyNames(instance).length, kLimit+1);
-
- // Get and set all properties.
- for (let i = 0; i < kLimit; i++) {
- const key = "property" + i;
- assertEquals(instance[key], i);
- const value = "value"+i;
- instance[key] = value;
- assertEquals(instance[key], value);
- }
-})();
-
-var b = 'b';
-
-(function TestOverwritingInstanceAccessors() {
- var C, desc;
- C = class {
- [b]() { return 'B'; };
- get b() { return 'get B'; };
- };
- desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
- assertFalse(desc.enumerable);
- assertTrue(desc.configurable);
- assertEquals('get B', desc.get());
- assertEquals(undefined, desc.set);
-
- C = class {
- [b]() { return 'B'; };
- set b(v) { return 'set B'; };
- };
- desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
- assertFalse(desc.enumerable);
- assertTrue(desc.configurable);
- assertEquals(undefined, desc.get);
- assertEquals('set B', desc.set());
-
- C = class {
- set b(v) { return 'get B'; };
- [b]() { return 'B'; };
- get b() { return 'get B'; };
- };
- desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
- assertFalse(desc.enumerable);
- assertTrue(desc.configurable);
- assertEquals('get B', desc.get());
- assertEquals(undefined, desc.set);
-
- C = class {
- get b() { return 'get B'; };
- [b]() { return 'B'; };
- set b(v) { return 'set B'; };
- };
- desc = Object.getOwnPropertyDescriptor(C.prototype, 'b');
- assertFalse(desc.enumerable);
- assertTrue(desc.configurable);
- assertEquals(undefined, desc.get);
- assertEquals('set B', desc.set());
-})();
-
-(function TestOverwritingStaticAccessors() {
- var C, desc;
- C = class {
- static [b]() { return 'B'; };
- static get b() { return 'get B'; };
- };
- desc = Object.getOwnPropertyDescriptor(C, 'b');
- assertFalse(desc.enumerable);
- assertTrue(desc.configurable);
- assertEquals('get B', desc.get());
- assertEquals(undefined, desc.set);
-
- C = class {
- static [b]() { return 'B'; };
- static set b(v) { return 'set B'; };
- };
- desc = Object.getOwnPropertyDescriptor(C, 'b');
- assertFalse(desc.enumerable);
- assertTrue(desc.configurable);
- assertEquals(undefined, desc.get);
- assertEquals('set B', desc.set());
-
- C = class {
- static set b(v) { return 'get B'; };
- static [b]() { return 'B'; };
- static get b() { return 'get B'; };
- };
- desc = Object.getOwnPropertyDescriptor(C, 'b');
- assertFalse(desc.enumerable);
- assertTrue(desc.configurable);
- assertEquals('get B', desc.get());
- assertEquals(undefined, desc.set);
-
- C = class {
- static get b() { return 'get B'; };
- static [b]() { return 'B'; };
- static set b(v) { return 'set B'; };
- };
- desc = Object.getOwnPropertyDescriptor(C, 'b');
- assertFalse(desc.enumerable);
- assertTrue(desc.configurable);
- assertEquals(undefined, desc.get);
- assertEquals('set B', desc.set());
-})();
diff --git a/deps/v8/test/mjsunit/es6/iterator-close.js b/deps/v8/test/mjsunit/es6/iterator-close.js
index fd8f361e5e..1eb9124b61 100644
--- a/deps/v8/test/mjsunit/es6/iterator-close.js
+++ b/deps/v8/test/mjsunit/es6/iterator-close.js
@@ -115,21 +115,21 @@ function* g() { yield 42; return 88 };
}, TypeError);
- assertThrows(() => {
+ assertThrowsEquals(() => {
for (var x of g()) { throw 666; }
- }, TypeError);
+ }, 666);
- assertThrows(() => {
+ assertThrowsEquals(() => {
for (let x of g()) { throw 666; }
- }, TypeError);
+ }, 666);
- assertThrows(() => {
+ assertThrowsEquals(() => {
for (const x of g()) { throw 666; }
- }, TypeError);
+ }, 666);
- assertThrows(() => {
+ assertThrowsEquals(() => {
for (x of g()) { throw 666; }
- }, TypeError);
+ }, 666);
assertThrows(() => {
diff --git a/deps/v8/test/mjsunit/es6/large-classes-methods.js b/deps/v8/test/mjsunit/es6/large-classes-methods.js
new file mode 100644
index 0000000000..6f1e34a4ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/large-classes-methods.js
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function testLargeClassesMethods() {
+ // This is to test for dictionary mode when there more than
+ // kMaxNumberOfDescriptors (1024) properties.
+ const kLimit = 1030;
+ let evalString = "(function(i) { " +
+ "let clazz = class { " +
+ " constructor(i) { this.value = i; } ";
+ for (let i = 0; i < kLimit; i++) {
+ evalString += "property"+i+"() { return "+i+"; }; "
+ }
+ evalString += "};" +
+ " return new clazz(i); })";
+
+ let fn = eval(evalString);
+ %PrepareFunctionForOptimization(fn);
+ assertEquals(fn(1).value, 1);
+ assertEquals(fn(2).value, 2);
+ assertEquals(fn(3).value, 3);
+ %OptimizeFunctionOnNextCall(fn);
+ assertEquals(fn(4).value, 4);
+
+ let instance = fn(1);
+ assertEquals(Object.getOwnPropertyNames(instance).length, 1);
+ assertEquals(Object.getOwnPropertyNames(instance.__proto__).length,
+ kLimit + 1);
+
+ // Call all instance functions.
+ for (let i = 0; i < kLimit; i++) {
+ const key = "property" + i;
+ assertEquals(instance[key](), i);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/es6/large-classes-properties.js b/deps/v8/test/mjsunit/es6/large-classes-properties.js
new file mode 100644
index 0000000000..a670b0a907
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/large-classes-properties.js
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function testLargeClassesProperties(){
+ // This is to test for dictionary mode when there more than
+ // kMaxNumberOfDescriptors (1024) properties.
+ const kLimit = 1030;
+ let evalString = "(function(i) { " +
+ "let clazz = class { " +
+ " constructor(i) { this.value = i;";
+ for (let i = 0; i < kLimit ; i++) {
+ evalString += "this.property"+i +" = "+i+"; "
+ }
+ evalString += "}};" +
+ " return (new clazz(i)); })";
+
+ let fn = eval(evalString);
+ %PrepareFunctionForOptimization(fn);
+ assertEquals(fn(1).value, 1);
+ assertEquals(fn(2).value, 2);
+ assertEquals(fn(3).value, 3);
+ %OptimizeFunctionOnNextCall(fn);
+ assertEquals(fn(4).value, 4);
+
+ let instance = fn(1);
+ assertEquals(Object.getOwnPropertyNames(instance).length, kLimit+1);
+
+ // Get and set all properties.
+ for (let i = 0; i < kLimit; i++) {
+ const key = "property" + i;
+ assertEquals(instance[key], i);
+ const value = "value"+i;
+ instance[key] = value;
+ assertEquals(instance[key], value);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/es6/large-classes-static-methods.js b/deps/v8/test/mjsunit/es6/large-classes-static-methods.js
new file mode 100644
index 0000000000..c812aa5c96
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/large-classes-static-methods.js
@@ -0,0 +1,44 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function testLargeClassesStaticMethods() {
+ // This is to test for dictionary mode when there more than
+ // kMaxNumberOfDescriptors (1024) properties.
+ const kLimit = 1030;
+ let evalString = "(function(i) { " +
+ "let clazz = class { " +
+ " constructor(i) { this.value = i; } ";
+ for (let i = 0; i < kLimit; i++) {
+ evalString += "static property"+i+"() { return "+i+" }; "
+ }
+ evalString += "};" +
+ " return new clazz(i); })";
+
+ let fn = eval(evalString);
+
+ %PrepareFunctionForOptimization(fn);
+ assertEquals(fn(1).value, 1);
+ assertEquals(fn(2).value, 2);
+ assertEquals(fn(3).value, 3);
+ %OptimizeFunctionOnNextCall(fn);
+ assertEquals(fn(4).value, 4);
+
+ let instance = fn(1);
+ assertEquals(Object.getOwnPropertyNames(instance).length, 1);
+ assertEquals(instance.value, 1);
+ instance.value = 10;
+ assertEquals(instance.value, 10);
+
+ // kLimit + nof default properties (length, prototype, name).
+ assertEquals(Object.getOwnPropertyNames(instance.constructor).length,
+ kLimit + 3);
+
+ // Call all static properties.
+ for (let i = 0; i < kLimit; i++) {
+ const key = "property" + i;
+ assertEquals(instance.constructor[key](), i);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-8.js b/deps/v8/test/mjsunit/es6/map-iterator-8.js
index 01dacfb72e..caa2d516f4 100644
--- a/deps/v8/test/mjsunit/es6/map-iterator-8.js
+++ b/deps/v8/test/mjsunit/es6/map-iterator-8.js
@@ -25,7 +25,7 @@ assertEquals([], [...map.keys()]);
assertEquals([], [...map.values()]);
assertEquals([], [...iterator]);
-assertFalse(%SetIteratorProtector());
+assertTrue(%SetIteratorProtector());
assertEquals([1,2,3], [...set]);
assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
assertEquals([1,2,3], [...set.keys()]);
diff --git a/deps/v8/test/mjsunit/es6/map-iterator-9.js b/deps/v8/test/mjsunit/es6/map-iterator-9.js
index 2db159d80e..8670ba98a4 100644
--- a/deps/v8/test/mjsunit/es6/map-iterator-9.js
+++ b/deps/v8/test/mjsunit/es6/map-iterator-9.js
@@ -23,7 +23,7 @@ assertEquals([1,2,3], [...map.keys()]);
assertEquals([2,3,4], [...map.values()]);
assertEquals([], [...iterator]);
-assertFalse(%SetIteratorProtector());
+assertTrue(%SetIteratorProtector());
assertEquals([1,2,3], [...set]);
assertEquals([[1,1],[2,2],[3,3]], [...set.entries()]);
assertEquals([1,2,3], [...set.keys()]);
diff --git a/deps/v8/test/mjsunit/es6/math-trunc.js b/deps/v8/test/mjsunit/es6/math-trunc.js
index 9a79a1f2d4..b878faeaaf 100644
--- a/deps/v8/test/mjsunit/es6/math-trunc.js
+++ b/deps/v8/test/mjsunit/es6/math-trunc.js
@@ -32,10 +32,10 @@ var test_id = 0;
function testTrunc(expected, input) {
var test = new Function('n',
'"' + (test_id++) + '";return Math.trunc(n)');
+ %PrepareFunctionForOptimization(test);
assertEquals(expected, test(input));
assertEquals(expected, test(input));
assertEquals(expected, test(input));
- %PrepareFunctionForOptimization(test);
%OptimizeFunctionOnNextCall(test);
assertEquals(expected, test(input));
diff --git a/deps/v8/test/mjsunit/es6/new-target.js b/deps/v8/test/mjsunit/es6/new-target.js
index c77c153cc0..67ec96a9fd 100644
--- a/deps/v8/test/mjsunit/es6/new-target.js
+++ b/deps/v8/test/mjsunit/es6/new-target.js
@@ -401,13 +401,13 @@ function get_new_target() { return new.target; }
(function TestEarlyErrors() {
- assertThrows(function() { Function("new.target = 42"); }, ReferenceError);
- assertThrows(function() { Function("var foo = 1; new.target = foo = 42"); }, ReferenceError);
- assertThrows(function() { Function("var foo = 1; foo = new.target = 42"); }, ReferenceError);
- assertThrows(function() { Function("new.target--"); }, ReferenceError);
- assertThrows(function() { Function("--new.target"); }, ReferenceError);
- assertThrows(function() { Function("(new.target)++"); }, ReferenceError);
- assertThrows(function() { Function("++(new.target)"); }, ReferenceError);
+ assertThrows(function() { Function("new.target = 42"); }, SyntaxError);
+ assertThrows(function() { Function("var foo = 1; new.target = foo = 42"); }, SyntaxError);
+ assertThrows(function() { Function("var foo = 1; foo = new.target = 42"); }, SyntaxError);
+ assertThrows(function() { Function("new.target--"); }, SyntaxError);
+ assertThrows(function() { Function("--new.target"); }, SyntaxError);
+ assertThrows(function() { Function("(new.target)++"); }, SyntaxError);
+ assertThrows(function() { Function("++(new.target)"); }, SyntaxError);
assertThrows(function() { Function("for (new.target of {});"); }, SyntaxError);
})();
diff --git a/deps/v8/test/mjsunit/es6/proxies-prevent-extensions.js b/deps/v8/test/mjsunit/es6/proxies-prevent-extensions.js
index dc3c42ed12..f6b4d3e4e1 100644
--- a/deps/v8/test/mjsunit/es6/proxies-prevent-extensions.js
+++ b/deps/v8/test/mjsunit/es6/proxies-prevent-extensions.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
+// Reflect.
(function () {
// No trap.
@@ -17,7 +17,6 @@
assertFalse(Reflect.isExtensible(proxy));
})();
-
(function () {
// "Undefined" trap.
@@ -40,13 +39,13 @@
var handler = { preventExtensions: 42 };
var proxy = new Proxy(target, handler);
- assertThrows(() => {Reflect.preventExtensions(proxy)}, TypeError);
+ assertThrows(() => { Reflect.preventExtensions(proxy) }, TypeError);
})();
(function () {
var target = {};
- var handler = { isExtensible() {return "bla"} };
+ var handler = { isExtensible() { return "bla" } };
var proxy = new Proxy(target, handler);
// Trap returns trueish and target is extensible.
@@ -54,7 +53,7 @@
// Trap returns trueish but target is not extensible.
Reflect.preventExtensions(target);
- assertThrows(() => {Reflect.isExtensible(proxy)}, TypeError);
+ assertThrows(() => { Reflect.isExtensible(proxy) }, TypeError);
})();
@@ -62,7 +61,7 @@
// Trap returns falsish.
var target = {};
- var handler = { preventExtensions() {return 0} };
+ var handler = { preventExtensions() { return 0 } };
var proxy = new Proxy(target, handler);
assertFalse(Reflect.preventExtensions(proxy));
@@ -73,13 +72,132 @@
(function () {
var target = {};
- var handler = { preventExtensions() {return Symbol()} };
+ var handler = { preventExtensions() { return Symbol() } };
var proxy = new Proxy(target, handler);
// Trap returns trueish but target is extensible.
- assertThrows(() => {Reflect.preventExtensions(proxy)}, TypeError);
+ assertThrows(() => { Reflect.preventExtensions(proxy) }, TypeError);
// Trap returns trueish and target is not extensible.
Reflect.preventExtensions(target);
assertTrue(Reflect.preventExtensions(proxy));
})();
+
+
+(function () {
+ // Target is proxy
+ var object = {};
+ assertTrue(Reflect.preventExtensions(object));
+ var target = new Proxy(object, {});
+ var proxy = new Proxy(target, {});
+ assertFalse(Reflect.isExtensible(object));
+ assertFalse(Reflect.isExtensible(target));
+ assertFalse(Reflect.isExtensible(proxy));
+})();
+
+// Object.
+(function () {
+ // No trap.
+
+ var target = {};
+ var handler = {};
+ var proxy = new Proxy(target, handler);
+
+ assertTrue(Object.isExtensible(target));
+ assertTrue(Object.isExtensible(proxy));
+ assertSame(proxy, Object.preventExtensions(proxy));
+ assertFalse(Object.isExtensible(target));
+ assertFalse(Object.isExtensible(proxy));
+})();
+
+(function () {
+ // "Undefined" trap.
+
+ var target = {};
+ var handler = { preventExtensions: null };
+ var proxy = new Proxy(target, handler);
+
+ assertTrue(Object.isExtensible(target));
+ assertTrue(Object.isExtensible(proxy));
+ assertSame(proxy, Object.preventExtensions(proxy));
+ assertFalse(Object.isExtensible(target));
+ assertFalse(Object.isExtensible(proxy));
+})();
+
+
+(function () {
+ // Invalid trap.
+
+ var target = {};
+ var handler = { preventExtensions: 42 };
+ var proxy = new Proxy(target, handler);
+
+ assertThrows(() => { Object.preventExtensions(proxy) }, TypeError);
+})();
+
+
+(function () {
+ var target = {};
+ var handler = { isExtensible() { return "bla" } };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish and target is extensible.
+ assertTrue(Object.isExtensible(proxy));
+
+ // Trap returns trueish but target is not extensible.
+ assertSame(target, Object.preventExtensions(target));
+ assertThrows(() => { Object.isExtensible(proxy) }, TypeError);
+})();
+
+
+(function () {
+ // Trap returns falsish.
+
+ var target = {};
+ var handler = { isExtensible() { return false } };
+ var proxy = new Proxy(target, handler);
+
+ assertThrows(() => { Object.isExtensible(proxy) }, TypeError);
+ assertSame(target, Object.preventExtensions(target));
+ assertFalse(Object.isExtensible(proxy));
+})();
+
+
+(function () {
+ // Trap returns falsish.
+
+ var target = {};
+ var handler = { preventExtensions() { return 0 } };
+ var proxy = new Proxy(target, handler);
+
+ assertFalse(Reflect.preventExtensions(proxy));
+ assertSame(target, Object.preventExtensions(target));
+ assertFalse(Reflect.preventExtensions(proxy));
+ assertThrows(() => { Object.preventExtensions(proxy) }, TypeError);
+})();
+
+
+(function () {
+ var target = {};
+ var handler = { preventExtensions() { return Symbol() } };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish but target is extensible.
+ assertThrows(() => { Object.preventExtensions(proxy) }, TypeError);
+
+ // Trap returns trueish and target is not extensible.
+ assertSame(target, Object.preventExtensions(target));
+ assertTrue(Reflect.preventExtensions(proxy));
+})();
+
+
+(function () {
+ // Target is proxy
+ var object = {};
+ assertSame(object, Object.preventExtensions(object));
+ var target = new Proxy(object, {});
+ var proxy = new Proxy(target, {});
+ assertFalse(Object.isExtensible(object));
+ assertFalse(Object.isExtensible(target));
+ assertFalse(Object.isExtensible(proxy));
+})();
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-8.js b/deps/v8/test/mjsunit/es6/set-iterator-8.js
index 2328a7b737..12f43b9d64 100644
--- a/deps/v8/test/mjsunit/es6/set-iterator-8.js
+++ b/deps/v8/test/mjsunit/es6/set-iterator-8.js
@@ -18,7 +18,7 @@ assertTrue(%SetIteratorProtector());
var iterator = set.keys();
iterator.__proto__[Symbol.iterator] = () => ({next: () => ({done: true})});
-assertFalse(%MapIteratorProtector());
+assertTrue(%MapIteratorProtector());
assertEquals([[1,2], [2,3], [3,4]], [...map]);
assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
assertEquals([1,2,3], [...map.keys()]);
diff --git a/deps/v8/test/mjsunit/es6/set-iterator-9.js b/deps/v8/test/mjsunit/es6/set-iterator-9.js
index 42cbf3077a..1414a3f425 100644
--- a/deps/v8/test/mjsunit/es6/set-iterator-9.js
+++ b/deps/v8/test/mjsunit/es6/set-iterator-9.js
@@ -17,7 +17,7 @@ assertTrue(%SetIteratorProtector());
var iterator = set.keys();
iterator[Symbol.iterator] = () => ({next: () => ({done: true})});
-assertFalse(%MapIteratorProtector());
+assertTrue(%MapIteratorProtector());
assertEquals([[1,2], [2,3], [3,4]], [...map]);
assertEquals([[1,2], [2,3], [3,4]], [...map.entries()]);
assertEquals([1,2,3], [...map.keys()]);
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index 20e6416495..026507e4b7 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -43,8 +43,8 @@ function isValidSymbolString(s) {
function TestNew() {
function indirectSymbol() { return Symbol() }
function indirect() { return indirectSymbol() }
- %PrepareFunctionForOptimization(indirect);
for (var i = 0; i < 2; ++i) {
+ %PrepareFunctionForOptimization(indirect);
for (var j = 0; j < 5; ++j) {
symbols.push(Symbol())
symbols.push(Symbol(undefined))
diff --git a/deps/v8/test/mjsunit/es6/templates.js b/deps/v8/test/mjsunit/es6/templates.js
index 3e113cb829..3da06b2b0f 100644
--- a/deps/v8/test/mjsunit/es6/templates.js
+++ b/deps/v8/test/mjsunit/es6/templates.js
@@ -725,20 +725,20 @@ var global = this;
(function testTaggedTemplateInvalidAssignmentTargetStrict() {
"use strict";
function f() {}
- assertThrows(() => Function("++f`foo`"), ReferenceError);
- assertThrows(() => Function("f`foo`++"), ReferenceError);
- assertThrows(() => Function("--f`foo`"), ReferenceError);
- assertThrows(() => Function("f`foo`--"), ReferenceError);
- assertThrows(() => Function("f`foo` = 1"), ReferenceError);
+ assertThrows(() => Function("++f`foo`"), SyntaxError);
+ assertThrows(() => Function("f`foo`++"), SyntaxError);
+ assertThrows(() => Function("--f`foo`"), SyntaxError);
+ assertThrows(() => Function("f`foo`--"), SyntaxError);
+ assertThrows(() => Function("f`foo` = 1"), SyntaxError);
})();
(function testTaggedTemplateInvalidAssignmentTargetSloppy() {
function f() {}
- assertThrows(() => Function("++f`foo`"), ReferenceError);
- assertThrows(() => Function("f`foo`++"), ReferenceError);
- assertThrows(() => Function("--f`foo`"), ReferenceError);
- assertThrows(() => Function("f`foo`--"), ReferenceError);
- assertThrows(() => Function("f`foo` = 1"), ReferenceError);
+ assertThrows(() => Function("++f`foo`"), SyntaxError);
+ assertThrows(() => Function("f`foo`++"), SyntaxError);
+ assertThrows(() => Function("--f`foo`"), SyntaxError);
+ assertThrows(() => Function("f`foo`--"), SyntaxError);
+ assertThrows(() => Function("f`foo` = 1"), SyntaxError);
})();
// Disable eval caching if a tagged template occurs in a nested function
diff --git a/deps/v8/test/mjsunit/es7/exponentiation-operator.js b/deps/v8/test/mjsunit/es7/exponentiation-operator.js
index 9d934bdaac..2c504a9f1e 100644
--- a/deps/v8/test/mjsunit/es7/exponentiation-operator.js
+++ b/deps/v8/test/mjsunit/es7/exponentiation-operator.js
@@ -264,13 +264,13 @@ function TestOverrideMathPow() {
TestOverrideMathPow();
function TestBadAssignmentLHS() {
- assertThrows("if (false) { 17 **= 10; }", ReferenceError);
- assertThrows("if (false) { '17' **= 10; }", ReferenceError);
- assertThrows("if (false) { /17/ **= 10; }", ReferenceError);
+ assertThrows("if (false) { 17 **= 10; }", SyntaxError);
+ assertThrows("if (false) { '17' **= 10; }", SyntaxError);
+ assertThrows("if (false) { /17/ **= 10; }", SyntaxError);
assertThrows("if (false) { ({ valueOf() { return 17; } } **= 10); }",
- ReferenceError);
- // TODO(caitp): a Call expression as LHS should be an early ReferenceError!
- // assertThrows("if (false) { Array() **= 10; }", ReferenceError);
+ SyntaxError);
+ // TODO(caitp): a Call expression as LHS should be an early SyntaxError!
+ // assertThrows("if (false) { Array() **= 10; }", SyntaxError);
assertThrows(() => Array() **= 10, ReferenceError);
}
TestBadAssignmentLHS();
diff --git a/deps/v8/test/mjsunit/es7/regress/regress-5986.js b/deps/v8/test/mjsunit/es7/regress/regress-5986.js
index ca157f8026..bd3e9e0770 100644
--- a/deps/v8/test/mjsunit/es7/regress/regress-5986.js
+++ b/deps/v8/test/mjsunit/es7/regress/regress-5986.js
@@ -6,11 +6,9 @@
var array = [1.7, 1.7, 1.7];
var mutator = {
[Symbol.toPrimitive]() {
- Object.defineProperties(array, {
- 0: { get() { } },
- 1: { get() { } },
- 2: { get() { } },
- });
+ Object.defineProperties(
+ array, {0: {get() {}}, 1: {get() {}}, 2: {get() {}}});
+
return 0;
}
};
@@ -19,10 +17,14 @@ assertTrue(array.includes(undefined, mutator));
function search(array, searchElement, startIndex) {
return array.includes(searchElement, startIndex);
-}
-
+};
+%PrepareFunctionForOptimization(search);
array = [1.7, 1.7, 1.7];
-var not_mutator = { [Symbol.toPrimitive]() { return 0; } };
+var not_mutator = {
+ [Symbol.toPrimitive]() {
+ return 0;
+ }
+};
assertFalse(search(array, undefined, not_mutator));
assertFalse(search(array, undefined, not_mutator));
%OptimizeFunctionOnNextCall(search);
diff --git a/deps/v8/test/mjsunit/es9/object-spread-basic.js b/deps/v8/test/mjsunit/es9/object-spread-basic.js
index a0769b3a66..a4de924899 100644
--- a/deps/v8/test/mjsunit/es9/object-spread-basic.js
+++ b/deps/v8/test/mjsunit/es9/object-spread-basic.js
@@ -11,6 +11,11 @@ assertEquals({}, y = { ...undefined });
assertEquals({}, y = { ...null });
assertEquals({}, y = { ...1 });
+assertEquals({}, y = { ...1n });
+assertEquals({}, y = { ...NaN });
+assertEquals({}, y = { ...false });
+assertEquals({}, y = { ...true });
+assertEquals({}, y = { ...Symbol() });
assertEquals({0: 'f', 1: 'o', 2: 'o'}, y = { ...'foo' });
assertEquals({0: 0, 1: 1}, y = { ...[0, 1] });
assertEquals({}, { ...new Proxy({}, {}) });
diff --git a/deps/v8/test/mjsunit/expose-cputracemark.js b/deps/v8/test/mjsunit/expose-cputracemark.js
new file mode 100644
index 0000000000..78cc8dc084
--- /dev/null
+++ b/deps/v8/test/mjsunit/expose-cputracemark.js
@@ -0,0 +1,37 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-cputracemark-as=cputracemark
+
+// Test --expose-cputracemark-as option.
+
+cputracemark(100);
+cputracemark(100, 'a');
+
+assertThrows(() => cputracemark(-1));
+assertThrows(() => cputracemark(null));
+assertThrows(() => cputracemark(""));
diff --git a/deps/v8/test/mjsunit/external-array.js b/deps/v8/test/mjsunit/external-array.js
index 78f27809b7..3f0614032c 100644
--- a/deps/v8/test/mjsunit/external-array.js
+++ b/deps/v8/test/mjsunit/external-array.js
@@ -38,6 +38,7 @@ function f(a) {
a[0] = 0;
a[1] = 0;
}
+%PrepareFunctionForOptimization(f);
var a = new Int32Array(2);
for (var i = 0; i < 5; i++) {
@@ -167,10 +168,12 @@ assertEquals(2, a.BYTES_PER_ELEMENT);
// Test Float64Arrays.
function get(a, index) {
return a[index];
-}
+};
+%PrepareFunctionForOptimization(get);
function set(a, index, value) {
a[index] = value;
-}
+};
+%PrepareFunctionForOptimization(set);
function temp() {
var array = new Float64Array(2);
for (var i = 0; i < 5; i++) {
@@ -293,6 +296,7 @@ function test_store_nan(array, sum) {
const kRuns = 10;
function run_test(test_func, array, expected_result) {
+ %PrepareFunctionForOptimization(test_func);
for (var i = 0; i < 5; i++) test_func(array, 0);
%OptimizeFunctionOnNextCall(test_func);
var sum = 0;
@@ -344,6 +348,7 @@ for (var t = 0; t < types.length; t++) {
assertTrue(delete a.length);
// Make sure bounds checks are handled correctly for external arrays.
+ %PrepareFunctionForOptimization(run_bounds_test);
run_bounds_test(a);
run_bounds_test(a);
run_bounds_test(a);
@@ -364,6 +369,7 @@ for (var t = 0; t < types.length; t++) {
return a[0] = a[0] = 1;
}
+ %PrepareFunctionForOptimization(array_load_set_smi_check2);
array_load_set_smi_check2(a);
%OptimizeFunctionOnNextCall(array_load_set_smi_check2);
array_load_set_smi_check2(a);
@@ -378,6 +384,7 @@ function store_float32_undefined(ext_array) {
ext_array[0] = undefined;
}
+%PrepareFunctionForOptimization(store_float32_undefined);
var float32_array = new Float32Array(1);
// Make sure runtime does it right
store_float32_undefined(float32_array);
@@ -394,6 +401,7 @@ function store_float64_undefined(ext_array) {
ext_array[0] = undefined;
}
+%PrepareFunctionForOptimization(store_float64_undefined);
var float64_array = new Float64Array(1);
// Make sure runtime does it right
store_float64_undefined(float64_array);
@@ -639,6 +647,8 @@ function boo(a, i, v) {
function do_tagged_index_external_array_test(constructor) {
var t_array = new constructor([1, 2, 3, 4, 5, 6]);
+ %PrepareFunctionForOptimization(goo);
+ %PrepareFunctionForOptimization(boo);
assertEquals(1, goo(t_array, 0));
assertEquals(1, goo(t_array, 0));
boo(t_array, 0, 13);
@@ -661,6 +671,8 @@ do_tagged_index_external_array_test(Float32Array);
do_tagged_index_external_array_test(Float64Array);
var built_in_array = new Array(1, 2, 3, 4, 5, 6);
+%PrepareFunctionForOptimization(goo);
+%PrepareFunctionForOptimization(boo);
assertEquals(1, goo(built_in_array, 0));
assertEquals(1, goo(built_in_array, 0));
%OptimizeFunctionOnNextCall(goo);
@@ -671,6 +683,8 @@ assertEquals(11, goo(built_in_array, 0));
%ClearFunctionFeedback(boo);
built_in_array = new Array(1.5, 2, 3, 4, 5, 6);
+%PrepareFunctionForOptimization(goo);
+%PrepareFunctionForOptimization(boo);
assertEquals(1.5, goo(built_in_array, 0));
assertEquals(1.5, goo(built_in_array, 0));
%OptimizeFunctionOnNextCall(goo);
diff --git a/deps/v8/test/mjsunit/fast-element-smi-check.js b/deps/v8/test/mjsunit/fast-element-smi-check.js
index 09b2d6ac9e..fe5b291ea3 100644
--- a/deps/v8/test/mjsunit/fast-element-smi-check.js
+++ b/deps/v8/test/mjsunit/fast-element-smi-check.js
@@ -39,8 +39,8 @@ test_load_set_smi(123);
function test_load_set_smi_2(a) {
return a[0] = a[0] = 1;
-}
-
+};
+%PrepareFunctionForOptimization(test_load_set_smi_2);
test_load_set_smi_2(a);
%OptimizeFunctionOnNextCall(test_load_set_smi_2);
test_load_set_smi_2(a);
@@ -60,8 +60,8 @@ test_load_set_smi_3(123);
function test_load_set_smi_4(b) {
return b[0] = b[0] = 1;
-}
-
+};
+%PrepareFunctionForOptimization(test_load_set_smi_4);
test_load_set_smi_4(b);
%OptimizeFunctionOnNextCall(test_load_set_smi_4);
test_load_set_smi_4(b);
diff --git a/deps/v8/test/mjsunit/fast-literal.js b/deps/v8/test/mjsunit/fast-literal.js
index 6ff8c9c6f2..9e9c797c47 100644
--- a/deps/v8/test/mjsunit/fast-literal.js
+++ b/deps/v8/test/mjsunit/fast-literal.js
@@ -30,10 +30,12 @@
%SetAllocationTimeout(20, 0);
function f() {
return [[1, 2, 3], [1.1, 1.2, 1.3], [[], [], []]];
-}
-
-f(); f(); f();
+};
+%PrepareFunctionForOptimization(f);
+f();
+f();
+f();
%OptimizeFunctionOnNextCall(f);
-for (var i=0; i<50; i++) {
+for (var i = 0; i < 50; i++) {
f();
}
diff --git a/deps/v8/test/mjsunit/frozen-array-reduce.js b/deps/v8/test/mjsunit/frozen-array-reduce.js
new file mode 100644
index 0000000000..6f121ae5fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/frozen-array-reduce.js
@@ -0,0 +1,1420 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+/**
+ * @fileoverview Test reduce and reduceRight
+ */
+
+function clone(v) {
+ // Shallow-copies arrays, returns everything else verbatim.
+ if (v instanceof Array) {
+ // Shallow-copy an array.
+ var newArray = new Array(v.length);
+ for (var i in v) {
+ newArray[i] = v[i];
+ }
+ return newArray;
+ }
+ return v;
+}
+
+
+// Creates a callback function for reduce/reduceRight that tests the number
+// of arguments and otherwise behaves as "func", but which also
+// records all calls in an array on the function (as arrays of arguments
+// followed by result).
+function makeRecorder(func, testName) {
+ var record = [];
+ var f = function recorder(a, b, i, s) {
+ assertEquals(4, arguments.length,
+ testName + "(number of arguments: " + arguments.length + ")");
+ assertEquals("number", typeof(i), testName + "(index must be number)");
+ assertEquals(s[i], b, testName + "(current argument is at index)");
+ if (record.length > 0) {
+ var prevRecord = record[record.length - 1];
+ var prevResult = prevRecord[prevRecord.length - 1];
+ assertEquals(prevResult, a,
+ testName + "(prev result -> current input)");
+ }
+ var args = [clone(a), clone(b), i, clone(s)];
+ var result = func.apply(this, arguments);
+ args.push(clone(result));
+ record.push(args);
+ return result;
+ };
+ f.record = record;
+ return f;
+}
+
+
+function testReduce(type,
+ testName,
+ expectedResult,
+ expectedCalls,
+ array,
+ combine,
+ init) {
+ var rec = makeRecorder(combine);
+ var result;
+ if (arguments.length > 6) {
+ result = array[type](rec, init);
+ } else {
+ result = array[type](rec);
+ }
+ var calls = rec.record;
+ assertEquals(expectedCalls.length, calls.length,
+ testName + " (number of calls)");
+ for (var i = 0; i < expectedCalls.length; i++) {
+ assertEquals(expectedCalls[i], calls[i],
+ testName + " (call " + (i + 1) + ")");
+ }
+ assertEquals(expectedResult, result, testName + " (result)");
+}
+
+
+function sum(a, b) { return Number(a) + Number(b); }
+function prod(a, b) { return Number(a) * Number(b); }
+function dec(a, b, i, arr) { return Number(a) + Number(b) * Math.pow(10, arr.length - i - 1); }
+function accumulate(acc, elem, i) { acc[i] = elem; return acc; }
+
+// ---- Test Reduce[Left]
+
+var simpleArray = ['2',4,6];
+Object.freeze(simpleArray);
+
+testReduce("reduce", "SimpleReduceSum", 12,
+ [[0, '2', 0, simpleArray, 2],
+ [2, 4, 1, simpleArray, 6],
+ [6, 6, 2, simpleArray, 12]],
+ simpleArray, sum, 0);
+
+testReduce("reduce", "SimpleReduceProd", 48,
+ [[1, '2', 0, simpleArray, 2],
+ [2, 4, 1, simpleArray, 8],
+ [8, 6, 2, simpleArray, 48]],
+ simpleArray, prod, 1);
+
+testReduce("reduce", "SimpleReduceDec", 246,
+ [[0, '2', 0, simpleArray, 200],
+ [200, 4, 1, simpleArray, 240],
+ [240, 6, 2, simpleArray, 246]],
+ simpleArray, dec, 0);
+
+testReduce("reduce", "SimpleReduceAccumulate", simpleArray,
+ [[[], '2', 0, simpleArray, ['2']],
+ [['2'], 4, 1, simpleArray, ['2', 4]],
+ [['2', 4], 6, 2, simpleArray, simpleArray]],
+ simpleArray, accumulate, []);
+
+var emptyArray = [];
+Object.freeze(emptyArray);
+
+testReduce("reduce", "EmptyReduceSum", 0, [], emptyArray, sum, 0);
+testReduce("reduce", "EmptyReduceProd", 1, [], emptyArray, prod, 1);
+testReduce("reduce", "EmptyReduceDec", 0, [], emptyArray, dec, 0);
+testReduce("reduce", "EmptyReduceAccumulate", [], [], emptyArray, accumulate, []);
+
+testReduce("reduce", "EmptyReduceSumNoInit", 0, emptyArray, [0], sum);
+testReduce("reduce", "EmptyReduceProdNoInit", 1, emptyArray, [1], prod);
+testReduce("reduce", "EmptyReduceDecNoInit", 0, emptyArray, [0], dec);
+testReduce("reduce", "EmptyReduceAccumulateNoInit", [], emptyArray, [[]], accumulate);
+
+
+var simpleSparseArray = [,,,'2',,4,,6,,];
+Object.freeze(simpleSparseArray);
+
+testReduce("reduce", "SimpleSparseReduceSum", 12,
+ [[0, '2', 3, simpleSparseArray, 2],
+ [2, 4, 5, simpleSparseArray, 6],
+ [6, 6, 7, simpleSparseArray, 12]],
+ simpleSparseArray, sum, 0);
+
+testReduce("reduce", "SimpleSparseReduceProd", 48,
+ [[1, '2', 3, simpleSparseArray, 2],
+ [2, 4, 5, simpleSparseArray, 8],
+ [8, 6, 7, simpleSparseArray, 48]],
+ simpleSparseArray, prod, 1);
+
+testReduce("reduce", "SimpleSparseReduceDec", 204060,
+ [[0, '2', 3, simpleSparseArray, 200000],
+ [200000, 4, 5, simpleSparseArray, 204000],
+ [204000, 6, 7, simpleSparseArray, 204060]],
+ simpleSparseArray, dec, 0);
+
+testReduce("reduce", "SimpleSparseReduceAccumulate", [,,,'2',,4,,6],
+ [[[], '2', 3, simpleSparseArray, [,,,'2']],
+ [[,,,'2'], 4, 5, simpleSparseArray, [,,,'2',,4]],
+ [[,,,'2',,4], 6, 7, simpleSparseArray, [,,,'2',,4,,6]]],
+ simpleSparseArray, accumulate, []);
+
+
+testReduce("reduce", "EmptySparseReduceSumNoInit", 0, [], [,,0,,], sum);
+testReduce("reduce", "EmptySparseReduceProdNoInit", 1, [], [,,1,,], prod);
+testReduce("reduce", "EmptySparseReduceDecNoInit", 0, [], [,,0,,], dec);
+testReduce("reduce", "EmptySparseReduceAccumulateNoInit",
+ [], [], [,,[],,], accumulate);
+
+
+var verySparseArray = [];
+verySparseArray.length = 10000;
+verySparseArray[2000] = '2';
+verySparseArray[5000] = 4;
+verySparseArray[9000] = 6;
+var verySparseSlice2 = verySparseArray.slice(0, 2001);
+var verySparseSlice4 = verySparseArray.slice(0, 5001);
+var verySparseSlice6 = verySparseArray.slice(0, 9001);
+Object.freeze(verySparseArray);
+
+testReduce("reduce", "VerySparseReduceSum", 12,
+ [[0, '2', 2000, verySparseArray, 2],
+ [2, 4, 5000, verySparseArray, 6],
+ [6, 6, 9000, verySparseArray, 12]],
+ verySparseArray, sum, 0);
+
+testReduce("reduce", "VerySparseReduceProd", 48,
+ [[1, '2', 2000, verySparseArray, 2],
+ [2, 4, 5000, verySparseArray, 8],
+ [8, 6, 9000, verySparseArray, 48]],
+ verySparseArray, prod, 1);
+
+testReduce("reduce", "VerySparseReduceDec", Infinity,
+ [[0, '2', 2000, verySparseArray, Infinity],
+ [Infinity, 4, 5000, verySparseArray, Infinity],
+ [Infinity, 6, 9000, verySparseArray, Infinity]],
+ verySparseArray, dec, 0);
+
+testReduce("reduce", "VerySparseReduceAccumulate",
+ verySparseSlice6,
+ [[[], '2', 2000, verySparseArray, verySparseSlice2],
+ [verySparseSlice2, 4, 5000, verySparseArray, verySparseSlice4],
+ [verySparseSlice4, 6, 9000, verySparseArray, verySparseSlice6]],
+ verySparseArray, accumulate, []);
+
+
+testReduce("reduce", "VerySparseReduceSumNoInit", 12,
+ [['2', 4, 5000, verySparseArray, 6],
+ [6, 6, 9000, verySparseArray, 12]],
+ verySparseArray, sum);
+
+testReduce("reduce", "VerySparseReduceProdNoInit", 48,
+ [['2', 4, 5000, verySparseArray, 8],
+ [8, 6, 9000, verySparseArray, 48]],
+ verySparseArray, prod);
+
+testReduce("reduce", "VerySparseReduceDecNoInit", Infinity,
+ [['2', 4, 5000, verySparseArray, Infinity],
+ [Infinity, 6, 9000, verySparseArray, Infinity]],
+ verySparseArray, dec);
+
+testReduce("reduce", "SimpleSparseReduceAccumulateNoInit",
+ '2',
+ [['2', 4, 5000, verySparseArray, '2'],
+ ['2', 6, 9000, verySparseArray, '2']],
+ verySparseArray, accumulate);
+
+
+// ---- Test ReduceRight
+
+testReduce("reduceRight", "SimpleReduceRightSum", 12,
+ [[0, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 10],
+ [10, '2', 0, simpleArray, 12]],
+ simpleArray, sum, 0);
+
+testReduce("reduceRight", "SimpleReduceRightProd", 48,
+ [[1, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 24],
+ [24, '2', 0, simpleArray, 48]],
+ simpleArray, prod, 1);
+
+testReduce("reduceRight", "SimpleReduceRightDec", 246,
+ [[0, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 46],
+ [46, '2', 0, simpleArray, 246]],
+ simpleArray, dec, 0);
+
+testReduce("reduceRight", "SimpleReduceRightAccumulate", simpleArray,
+ [[[], 6, 2, simpleArray, [,,6]],
+ [[,,6], 4, 1, simpleArray, [,4,6]],
+ [[,4,6], '2', 0, simpleArray, simpleArray]],
+ simpleArray, accumulate, []);
+
+
+testReduce("reduceRight", "EmptyReduceRightSum", 0, [], [], sum, 0);
+testReduce("reduceRight", "EmptyReduceRightProd", 1, [], [], prod, 1);
+testReduce("reduceRight", "EmptyReduceRightDec", 0, [], [], dec, 0);
+testReduce("reduceRight", "EmptyReduceRightAccumulate", [],
+ [], [], accumulate, []);
+
+testReduce("reduceRight", "EmptyReduceRightSumNoInit", 0, [], [0], sum);
+testReduce("reduceRight", "EmptyReduceRightProdNoInit", 1, [], [1], prod);
+testReduce("reduceRight", "EmptyReduceRightDecNoInit", 0, [], [0], dec);
+testReduce("reduceRight", "EmptyReduceRightAccumulateNoInit",
+ [], [], [[]], accumulate);
+
+
+testReduce("reduceRight", "SimpleSparseReduceRightSum", 12,
+ [[0, 6, 7, simpleSparseArray, 6],
+ [6, 4, 5, simpleSparseArray, 10],
+ [10, '2', 3, simpleSparseArray, 12]],
+ simpleSparseArray, sum, 0);
+
+testReduce("reduceRight", "SimpleSparseReduceRightProd", 48,
+ [[1, 6, 7, simpleSparseArray, 6],
+ [6, 4, 5, simpleSparseArray, 24],
+ [24, '2', 3, simpleSparseArray, 48]],
+ simpleSparseArray, prod, 1);
+
+testReduce("reduceRight", "SimpleSparseReduceRightDec", 204060,
+ [[0, 6, 7, simpleSparseArray, 60],
+ [60, 4, 5, simpleSparseArray, 4060],
+ [4060, '2', 3, simpleSparseArray, 204060]],
+ simpleSparseArray, dec, 0);
+
+testReduce("reduceRight", "SimpleSparseReduceRightAccumulate", [,,,'2',,4,,6],
+ [[[], 6, 7, simpleSparseArray, [,,,,,,,6]],
+ [[,,,,,,,6], 4, 5, simpleSparseArray, [,,,,,4,,6]],
+ [[,,,,,4,,6], '2', 3, simpleSparseArray, [,,,'2',,4,,6]]],
+ simpleSparseArray, accumulate, []);
+
+
+testReduce("reduceRight", "EmptySparseReduceRightSumNoInit",
+ 0, [], [,,0,,], sum);
+testReduce("reduceRight", "EmptySparseReduceRightProdNoInit",
+ 1, [], [,,1,,], prod);
+testReduce("reduceRight", "EmptySparseReduceRightDecNoInit",
+ 0, [], [,,0,,], dec);
+testReduce("reduceRight", "EmptySparseReduceRightAccumulateNoInit",
+ [], [], [,,[],,], accumulate);
+
+
+var verySparseSuffix6 = [];
+verySparseSuffix6[9000] = 6;
+var verySparseSuffix4 = [];
+verySparseSuffix4[5000] = 4;
+verySparseSuffix4[9000] = 6;
+var verySparseSuffix2 = verySparseSlice6;
+
+
+testReduce("reduceRight", "VerySparseReduceRightSum", 12,
+ [[0, 6, 9000, verySparseArray, 6],
+ [6, 4, 5000, verySparseArray, 10],
+ [10, '2', 2000, verySparseArray, 12]],
+ verySparseArray, sum, 0);
+
+testReduce("reduceRight", "VerySparseReduceRightProd", 48,
+ [[1, 6, 9000, verySparseArray, 6],
+ [6, 4, 5000, verySparseArray, 24],
+ [24, '2', 2000, verySparseArray, 48]],
+ verySparseArray, prod, 1);
+
+testReduce("reduceRight", "VerySparseReduceRightDec", Infinity,
+ [[0, 6, 9000, verySparseArray, Infinity],
+ [Infinity, 4, 5000, verySparseArray, Infinity],
+ [Infinity, '2', 2000, verySparseArray, Infinity]],
+ verySparseArray, dec, 0);
+
+testReduce("reduceRight", "VerySparseReduceRightAccumulate",
+ verySparseSuffix2,
+ [[[], 6, 9000, verySparseArray, verySparseSuffix6],
+ [verySparseSuffix6, 4, 5000, verySparseArray, verySparseSuffix4],
+ [verySparseSuffix4, '2', 2000, verySparseArray, verySparseSuffix2]],
+ verySparseArray, accumulate, []);
+
+
+testReduce("reduceRight", "VerySparseReduceRightSumNoInit", 12,
+ [[6, 4, 5000, verySparseArray, 10],
+ [10, '2', 2000, verySparseArray, 12]],
+ verySparseArray, sum);
+
+testReduce("reduceRight", "VerySparseReduceRightProdNoInit", 48,
+ [[6, 4, 5000, verySparseArray, 24],
+ [24, '2', 2000, verySparseArray, 48]],
+ verySparseArray, prod);
+
+testReduce("reduceRight", "VerySparseReduceRightDecNoInit", Infinity,
+ [[6, 4, 5000, verySparseArray, Infinity],
+ [Infinity, '2', 2000, verySparseArray, Infinity]],
+ verySparseArray, dec);
+
+testReduce("reduceRight", "SimpleSparseReduceRightAccumulateNoInit",
+ 6,
+ [[6, 4, 5000, verySparseArray, 6],
+ [6, '2', 2000, verySparseArray, 6]],
+ verySparseArray, accumulate);
+
+
+// undefined is an element
+var undefArray = [,,undefined,,undefined,,];
+Object.freeze(undefArray);
+
+testReduce("reduce", "SparseUndefinedReduceAdd", NaN,
+ [[0, undefined, 2, undefArray, NaN],
+ [NaN, undefined, 4, undefArray, NaN],
+ ],
+ undefArray, sum, 0);
+
+testReduce("reduceRight", "SparseUndefinedReduceRightAdd", NaN,
+ [[0, undefined, 4, undefArray, NaN],
+ [NaN, undefined, 2, undefArray, NaN],
+ ], undefArray, sum, 0);
+
+testReduce("reduce", "SparseUndefinedReduceAddNoInit", NaN,
+ [[undefined, undefined, 4, undefArray, NaN],
+ ], undefArray, sum);
+
+testReduce("reduceRight", "SparseUndefinedReduceRightAddNoInit", NaN,
+ [[undefined, undefined, 2, undefArray, NaN],
+ ], undefArray, sum);
+
+
+// Ignore non-array properties:
+
+var arrayPlus = [1,'2',,3];
+arrayPlus[-1] = NaN;
+arrayPlus[Math.pow(2,32)] = NaN;
+arrayPlus[NaN] = NaN;
+arrayPlus["00"] = NaN;
+arrayPlus["02"] = NaN;
+arrayPlus["-0"] = NaN;
+Object.freeze(arrayPlus);
+
+testReduce("reduce", "ArrayWithNonElementPropertiesReduce", 6,
+ [[0, 1, 0, arrayPlus, 1],
+ [1, '2', 1, arrayPlus, 3],
+ [3, 3, 3, arrayPlus, 6],
+ ], arrayPlus, sum, 0);
+
+testReduce("reduceRight", "ArrayWithNonElementPropertiesReduceRight", 6,
+ [[0, 3, 3, arrayPlus, 3],
+ [3, '2', 1, arrayPlus, 5],
+ [5, 1, 0, arrayPlus, 6],
+ ], arrayPlus, sum, 0);
+
+// Test passing undefined as initial value (to test missing parameter
+// detection).
+Object.freeze(['1']).reduce((a, b) => { assertEquals(a, undefined); assertEquals(b, '1') },
+ undefined);
+Object.freeze(['1', 2]).reduce((a, b) => { assertEquals(a, '1'); assertEquals(b, 2); });
+Object.freeze(['1']).reduce((a, b) => { assertTrue(false); });
+
+// Test error conditions:
+
+var exception = false;
+try {
+ Object.freeze(['1']).reduce("not a function");
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce callback not a function not throwing TypeError");
+ assertTrue(e.message.indexOf(" is not a function") >= 0,
+ "reduce non function TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.freeze(['1']).reduceRight("not a function");
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight callback not a function not throwing TypeError");
+ assertTrue(e.message.indexOf(" is not a function") >= 0,
+ "reduceRight non function TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.freeze([]).reduce(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduce no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.freeze([]).reduceRight(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduceRight no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.freeze([,,,]).reduce(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce sparse no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduce no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.freeze([,,,]).reduceRight(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight sparse no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduceRight no initial TypeError type");
+}
+assertTrue(exception);
+
+
+// Array changing length
+
+function extender(a, b, i, s) {
+ s[s.length] = s.length;
+ return Number(a) + Number(b);
+}
+
+var arr = [1, '2', 3, 4];
+Object.freeze(arr);
+testReduce("reduce", "ArrayManipulationExtender", 10,
+ [[0, 1, 0, [1, '2', 3, 4], 1],
+ [1, '2', 1, [1, '2', 3, 4], 3],
+ [3, 3, 2, [1, '2', 3, 4], 6],
+ [6, 4, 3, [1, '2', 3, 4], 10],
+ ], arr, extender, 0);
+
+var arr = [];
+Object.defineProperty(arr, "0", { get: function() { delete this[0] },
+ configurable: true });
+assertEquals(undefined, Object.freeze(arr).reduce(function(val) { return val }));
+
+var arr = [];
+Object.defineProperty(arr, "0", { get: function() { delete this[0] },
+ configurable: true});
+assertEquals(undefined, Object.freeze(arr).reduceRight(function(val) { return val }));
+
+
+(function ReduceRightMaxIndex() {
+ const kMaxIndex = 0xffffffff-1;
+ let array = [];
+ array[kMaxIndex-2] = 'value-2';
+ array[kMaxIndex-1] = 'value-1';
+ // Use the maximum array index possible.
+ array[kMaxIndex] = 'value';
+ // Add the next index which is a normal property and thus will not show up.
+ array[kMaxIndex+1] = 'normal property';
+ assertThrowsEquals( () => {
+ Object.freeze(array).reduceRight((sum, value) => {
+ assertEquals('initial', sum);
+ assertEquals('value', value);
+ // Throw at this point as we would very slowly loop down from kMaxIndex.
+ throw 'do not continue';
+ }, 'initial')
+ }, 'do not continue');
+})();
+
+(function OptimizedReduce() {
+ let f = (a,current) => a + Number(current);
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.freeze(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEmpty() {
+ let f = (a,current) => a + Number(current);
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.freeze(a);
+ g(a); g(a); g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ assertThrows(() => g([]));
+ assertUnoptimized(g);
+})();
+
+(function OptimizedReduceLazyDeopt() {
+ let deopt = false;
+ let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + Number(current); };
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.freeze(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceLazyDeoptMiddleOfIteration() {
+ let deopt = false;
+ let f = (a,current) => {
+ if (current == 6 && deopt) %DeoptimizeNow();
+ return a + Number(current);
+ };
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [11,'22',33,45,56,6,77,84,93,101];
+ Object.freeze(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIteration() {
+ let deopt = false;
+ let array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.freeze(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[0] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ %PrepareFunctionForOptimization(g);
+ assertOptimized(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.freeze(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIterationHoley() {
+ let deopt = false;
+ let array = [, ,11,'22',,33,45,56,,6,77,84,93,101,];
+ Object.freeze(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[0] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ %PrepareFunctionForOptimization(g);
+ assertOptimized(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.freeze(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertUnoptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertUnoptimized(g);
+})();
+
+(function TriggerReduceRightPreLoopDeopt() {
+ function f(a) {
+ a.reduceRight((x) => { return Number(x) + 1 });
+ };
+ %PrepareFunctionForOptimization(f);
+ var arr = Object.freeze([1, '2', ]);
+ f(arr);
+ f(arr);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(() => f([]), TypeError);
+ assertUnoptimized(f);
+})();
+
+(function OptimizedReduceRightEagerDeoptMiddleOfIterationHoley() {
+ let deopt = false;
+ let array = [, ,11,'22',,33,45,56,,6,77,84,93,101,];
+ Object.freeze(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[array.length-1] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ %PrepareFunctionForOptimization(g);
+ assertOptimized(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.freeze(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertUnoptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertUnoptimized(g);
+})();
+
+(function ReduceCatch() {
+ let f = (a,current) => {
+ return a + current;
+ };
+ let g = function() {
+ try {
+ return Object.freeze(array).reduce(f);
+ } catch (e) {
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinally() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyNoInline() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1, '2', 3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceNonCallableOpt() {
+ let done = false;
+ let f = (a, current) => {
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g(); g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ f = null;
+ assertThrows(() => g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatchInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,2,3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceRight() {
+ let count = 0;
+ let f = (a,current,i) => a + Number(current) * ++count;
+ let g = function(a) {
+ count = 0;
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.freeze(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEmpty() {
+ let count = 0;
+ let f = (a,current,i) => a + Number(current) * ++count;
+ let g = function(a) {
+ count = 0;
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.freeze(a);
+ g(a); g(a); g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ assertThrows(() => g([]));
+ assertUnoptimized(g);
+})();
+
+(function OptimizedReduceLazyDeopt() {
+ let deopt = false;
+ let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + Number(current); };
+ let g = function(a) {
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.freeze(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceLazyDeoptMiddleOfIteration() {
+ let deopt = false;
+ let f = (a,current) => {
+ if (current == 6 && deopt) %DeoptimizeNow();
+ return a + Number(current);
+ };
+ let g = function(a) {
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [11,'22',33,45,56,6,77,84,93,101];
+ Object.freeze(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIteration() {
+ let deopt = false;
+ let array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.freeze(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[9] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ %PrepareFunctionForOptimization(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.freeze(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatch() {
+ let f = (a,current) => {
+ return a + Number(current);
+ };
+ let g = function() {
+ try {
+ return Object.freeze(array).reduceRight(f);
+ } catch (e) {
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinally() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1, '2', 3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyNoInline() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceNonCallableOpt() {
+ let done = false;
+ let f = (a, current) => {
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g(); g();
+ assertEquals(6, g());
+ f = null;
+ assertThrows(() => g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatchInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.freeze(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceHoleyArrayWithDefaultAccumulator() {
+ var holey = new Array(10);
+ Object.freeze(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return currentValue;
+ };
+ return a.reduce(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(13, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceRightHoleyArrayWithDefaultAccumulator() {
+ var holey = new Array(10);
+ Object.freeze(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return currentValue;
+ };
+ return a.reduceRight(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(13, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceHoleyArrayOneElementWithDefaultAccumulator() {
+ var holey = new Array(10);
+ holey[1] = '5';
+ Object.freeze(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return Number(currentValue) + accumulator;
+ };
+ return a.reduce(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(18, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceRightHoleyArrayOneElementWithDefaultAccumulator() {
+ var holey = new Array(10);
+ holey[1] = '5';
+ Object.freeze(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return Number(currentValue) + accumulator;
+ };
+ return a.reduceRight(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(18, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceMixedHoleyArrays() {
+ function r(a) {
+ return a.reduce((acc, i) => {acc[0]});
+ };
+ %PrepareFunctionForOptimization(r);
+ assertEquals(r(Object.freeze([[0]])), [0]);
+ assertEquals(r(Object.freeze([[0]])), [0]);
+ assertEquals(r(Object.freeze([0,,])), 0);
+ %OptimizeFunctionOnNextCall(r);
+ assertEquals(r(Object.freeze([,0,0])), undefined);
+ assertOptimized(r);
+})();
diff --git a/deps/v8/test/mjsunit/function-named-self-reference.js b/deps/v8/test/mjsunit/function-named-self-reference.js
index 5b03b094b7..4148d53841 100644
--- a/deps/v8/test/mjsunit/function-named-self-reference.js
+++ b/deps/v8/test/mjsunit/function-named-self-reference.js
@@ -33,13 +33,14 @@ var fn = function fn(val) {
%OptimizeFunctionOnNextCall(fn);
function run(val) {
- var res = fn((val + 1) << 1);
+ var res = fn(val + 1 << 1);
return res;
}
return run(0);
-}
-
+};
+;
+%PrepareFunctionForOptimization(fn);
var res = fn();
assertEquals(res, 2);
diff --git a/deps/v8/test/mjsunit/global-infinity.js b/deps/v8/test/mjsunit/global-infinity.js
index 33080579ae..d52f9490e8 100644
--- a/deps/v8/test/mjsunit/global-infinity.js
+++ b/deps/v8/test/mjsunit/global-infinity.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function test(expected, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f());
assertEquals(expected, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/rematerialize-on-deopt.js b/deps/v8/test/mjsunit/harmony/bigint/rematerialize-on-deopt.js
new file mode 100644
index 0000000000..cee915ccaf
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/rematerialize-on-deopt.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+{
+ function test(a, b, c) {
+ let x = BigInt.asUintN(64, a + b);
+ console.log(x);
+ try {
+ return BigInt.asUintN(64, x + c);
+ } catch(_) {
+ return x;
+ }
+ }
+
+ %PrepareFunctionForOptimization(test);
+ test(3n, 4n, 5n);
+ test(6n, 7n, 8n);
+ test(9n, 2n, 1n);
+ %OptimizeFunctionOnNextCall(test);
+ test(1n, 2n, 3n);
+ test(3n, 2n, 1n);
+
+ assertEquals(6n, test(1n, 3n, 2n));
+ assertEquals(5n, test(2n, 3n, 2));
+}
diff --git a/deps/v8/test/mjsunit/harmony/global-configurable.js b/deps/v8/test/mjsunit/harmony/global-configurable.js
index dd823f1531..d7a80579aa 100644
--- a/deps/v8/test/mjsunit/harmony/global-configurable.js
+++ b/deps/v8/test/mjsunit/harmony/global-configurable.js
@@ -2,7 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-global
-
assertTrue(delete globalThis);
assertEquals(this.globalThis, undefined);
diff --git a/deps/v8/test/mjsunit/harmony/global-writable.js b/deps/v8/test/mjsunit/harmony/global-writable.js
index f0c2d754bb..e4bce2b245 100644
--- a/deps/v8/test/mjsunit/harmony/global-writable.js
+++ b/deps/v8/test/mjsunit/harmony/global-writable.js
@@ -2,7 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-global
-
globalThis = '[[Writable]]';
assertEquals(globalThis, '[[Writable]]');
diff --git a/deps/v8/test/mjsunit/harmony/global.js b/deps/v8/test/mjsunit/harmony/global.js
index 3d43864c47..60a12fe8f5 100644
--- a/deps/v8/test/mjsunit/harmony/global.js
+++ b/deps/v8/test/mjsunit/harmony/global.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-global
-
assertEquals(globalThis, this);
assertEquals(this.globalThis, this);
assertEquals(globalThis.globalThis, this);
diff --git a/deps/v8/test/mjsunit/harmony/hashbang-eval.js b/deps/v8/test/mjsunit/harmony/hashbang-eval.js
index f30ebd0c45..645ecdaea6 100644
--- a/deps/v8/test/mjsunit/harmony/hashbang-eval.js
+++ b/deps/v8/test/mjsunit/harmony/hashbang-eval.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-hashbang
-
// Hashbang syntax is allowed in eval.
assertEquals(eval("#!"), undefined);
assertEquals(eval("#!\n"), undefined);
diff --git a/deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js
index 49570b51de..8caf588606 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-compilation-errored.js
@@ -5,8 +5,8 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
var error1, error2;
-import('modules-skip-12.js').catch(e => error1 = e);
-import('modules-skip-12.js').catch(e => error2 = e);
+import('modules-skip-12.mjs').catch(e => error1 = e);
+import('modules-skip-12.mjs').catch(e => error2 = e);
%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
diff --git a/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
index 87dbc0a6aa..e39ce7ca4f 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-evaluation-errored.js
@@ -5,8 +5,8 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
var error1, error2;
-import('modules-skip-11.js').catch(e => error1 = e);
-import('modules-skip-11.js').catch(e => error2 = e);
+import('modules-skip-11.mjs').catch(e => error1 = e);
+import('modules-skip-11.mjs').catch(e => error2 = e);
%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
diff --git a/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js b/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
index 2cdbaaea32..958ba55e5e 100644
--- a/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
+++ b/deps/v8/test/mjsunit/harmony/import-from-instantiation-errored.js
@@ -5,8 +5,8 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
var error1, error2;
-import('modules-skip-10.js').catch(e => error1 = e);
-import('modules-skip-10.js').catch(e => error2 = e);
+import('modules-skip-10.mjs').catch(e => error1 = e);
+import('modules-skip-10.mjs').catch(e => error2 = e);
%PerformMicrotaskCheckpoint();
assertEquals(error1, error2);
diff --git a/deps/v8/test/mjsunit/harmony/module-parsing-eval.js b/deps/v8/test/mjsunit/harmony/module-parsing-eval.mjs
index 6c080dbf44..6c080dbf44 100644
--- a/deps/v8/test/mjsunit/harmony/module-parsing-eval.js
+++ b/deps/v8/test/mjsunit/harmony/module-parsing-eval.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-1.js b/deps/v8/test/mjsunit/harmony/modules-import-1.mjs
index f62d4d7b32..954ed9e63e 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-1.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-1.mjs
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
var life;
-import('modules-skip-1.js').then(namespace => life = namespace.life());
+import('modules-skip-1.mjs').then(namespace => life = namespace.life());
%PerformMicrotaskCheckpoint();
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-10.js b/deps/v8/test/mjsunit/harmony/modules-import-10.mjs
index eda4aaf7f9..539a63dc6a 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-10.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-10.mjs
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
var life;
-import('modules-skip-6.js').then(namespace => life = namespace.life);
+import('modules-skip-6.mjs').then(namespace => life = namespace.life);
assertEquals(undefined, Object.life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-11.js b/deps/v8/test/mjsunit/harmony/modules-import-11.mjs
index ffba6a0722..bcbc419503 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-11.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-11.mjs
@@ -8,7 +8,7 @@ var ran = false;
async function test() {
try {
- let namespace = await import('modules-skip-7.js');
+ let namespace = await import('modules-skip-7.mjs');
let life = await namespace.getLife();
assertEquals(42, life);
ran = true;
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-12.js b/deps/v8/test/mjsunit/harmony/modules-import-12.mjs
index d898c984ad..9f6dba6cc4 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-12.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-12.mjs
@@ -4,10 +4,10 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
-ran = false;
+let ran = false;
async function test() {
try {
- let namespace = await import('modules-skip-empty.js');
+ let namespace = await import('modules-skip-empty.mjs');
ran = true;
} catch(e) {
%AbortJS('failure: '+ e);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-13.js b/deps/v8/test/mjsunit/harmony/modules-import-13.mjs
index 52518350ba..dcbc04223f 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-13.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-13.mjs
@@ -3,12 +3,12 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --harmony-dynamic-import
-// Resources: test/mjsunit/harmony/modules-skip-1.js
+// Resources: test/mjsunit/harmony/modules-skip-1.mjs
-ran = false;
+let ran = false;
async function test1() {
try {
- let x = { toString() { return 'modules-skip-1.js' } };
+ let x = { toString() { return 'modules-skip-1.mjs' } };
let namespace = await import(x);
let life = namespace.life();
assertEquals(42, life);
@@ -25,7 +25,7 @@ assertTrue(ran);
ran = false;
async function test2() {
try {
- let x = { get toString() { return () => 'modules-skip-1.js' } };
+ let x = { get toString() { return () => 'modules-skip-1.mjs' } };
let namespace = await import(x);
let life = namespace.life();
assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-14.js b/deps/v8/test/mjsunit/harmony/modules-import-14.mjs
index 32b307eb3b..6c176eaef1 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-14.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-14.mjs
@@ -8,12 +8,12 @@ var ran = false;
async function test() {
try {
- let x = await import('modules-skip-1.js');
- // modules-skip-5.js statically imports modules-skip-1.js
- let y = await import('modules-skip-5.js');
+ let x = await import('modules-skip-1.mjs');
+ // modules-skip-5.mjs statically imports modules-skip-1.mjs
+ let y = await import('modules-skip-5.mjs');
assertSame(x, y.static_life);
- let z = await import('modules-skip-1.js');
+ let z = await import('modules-skip-1.mjs');
assertSame(x, z);
ran = true;
} catch(e) {
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-15.js b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
index d041add3db..b4febd5787 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-15.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-15.mjs
@@ -8,7 +8,7 @@ var ran = false;
async function test1() {
try {
- let x = await import('modules-skip-8.js');
+ let x = await import('modules-skip-8.mjs');
%AbortJS('failure: should be unreachable');
} catch(e) {
assertEquals('Unexpected reserved word', e.message);
@@ -24,12 +24,12 @@ ran = false;
async function test2() {
try {
- let x = await import('modules-skip-9.js');
+ let x = await import('modules-skip-9.mjs');
%AbortJS('failure: should be unreachable');
} catch(e) {
assertInstanceof(e, SyntaxError);
assertEquals(
- "The requested module 'modules-skip-empty.js' does not provide an " +
+ "The requested module 'modules-skip-empty.mjs' does not provide an " +
"export named 'default'",
e.message);
ran = true;
@@ -44,7 +44,7 @@ ran = false;
async function test3() {
try {
- let x = await import('nonexistent-file.js');
+ let x = await import('nonexistent-file.mjs');
%AbortJS('failure: should be unreachable');
} catch(e) {
assertTrue(e.startsWith('Error reading'));
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-16.js b/deps/v8/test/mjsunit/harmony/modules-import-16.js
deleted file mode 100644
index 18ad445a84..0000000000
--- a/deps/v8/test/mjsunit/harmony/modules-import-16.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-dynamic-import
-
-var ran = false;
-var x;
-
-var body = "import('modules-skip-1.js').then(ns => { x = ns.life();" +
- " ran = true;} ).catch(err => %AbortJS(err))"
-var func = new Function(body);
-func();
-
-%PerformMicrotaskCheckpoint();
-assertEquals(42, x);
-assertTrue(ran);
-
-var ran = false;
-var body = "import('modules-skip-1.js').then(ns => { x = ns.life();" +
- " ran = true;} ).catch(err => %AbortJS(err))"
-eval("var func = new Function(body); func();");
-
-%PerformMicrotaskCheckpoint();
-assertEquals(42, x);
-assertTrue(ran);
-
-var ran = false;
-var body = "eval(import('modules-skip-1.js').then(ns => { x = ns.life();" +
- " ran = true;} ).catch(err => %AbortJS(err)))"
-var func = new Function(body);
-func();
-
-%PerformMicrotaskCheckpoint();
-assertEquals(42, x);
-assertTrue(ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-16.mjs b/deps/v8/test/mjsunit/harmony/modules-import-16.mjs
new file mode 100644
index 0000000000..411bb1b5e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-import-16.mjs
@@ -0,0 +1,36 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-dynamic-import
+
+globalThis.ran = false;
+globalThis.x = undefined;
+
+let body = "import('modules-skip-1.mjs').then(ns => { x = ns.life();" +
+ " ran = true;} ).catch(err => %AbortJS(err))"
+let func = new Function(body);
+func();
+
+%PerformMicrotaskCheckpoint();
+assertEquals(42, globalThis.x);
+assertTrue(globalThis.ran);
+
+globalThis.ran = false;
+body = "import('modules-skip-1.mjs').then(ns => { x = ns.life();" +
+ " ran = true;} ).catch(err => %AbortJS(err))"
+eval("var func = new Function(body); func();");
+
+%PerformMicrotaskCheckpoint();
+assertEquals(42, globalThis.x);
+assertTrue(globalThis.ran);
+
+globalThis.ran = false;
+body = "eval(import('modules-skip-1.mjs').then(ns => { x = ns.life();" +
+ " ran = true;} ).catch(err => %AbortJS(err)))"
+func = new Function(body);
+func();
+
+%PerformMicrotaskCheckpoint();
+assertEquals(42, globalThis.x);
+assertTrue(globalThis.ran);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-17.js b/deps/v8/test/mjsunit/harmony/modules-import-17.mjs
index 606ebcd385..7c7eb2e55f 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-17.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-17.mjs
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax --harmony-namespace-exports
var ns;
-import('modules-skip-13.js').then(x => ns = x);
+import('modules-skip-13.mjs').then(x => ns = x);
%PerformMicrotaskCheckpoint();
assertEquals(42, ns.default);
assertEquals(ns, ns.self);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-2.js b/deps/v8/test/mjsunit/harmony/modules-import-2.mjs
index a3fe0bc601..2b0ee5b420 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-2.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-2.mjs
@@ -6,8 +6,8 @@
var life;
var msg;
-import('modules-skip-1.js').then(namespace => life = namespace.life());
-import('modules-skip-2.js').catch(err => msg = err.message);
+import('modules-skip-1.mjs').then(namespace => life = namespace.life());
+import('modules-skip-2.mjs').catch(err => msg = err.message);
assertEquals(undefined, life);
assertEquals(undefined, msg);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-3.js b/deps/v8/test/mjsunit/harmony/modules-import-3.mjs
index d8cbe2a228..3cfb442e37 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-3.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-3.mjs
@@ -7,7 +7,7 @@
var ran = false;
async function foo () {
try {
- let life = await import('modules-skip-2.js');
+ let life = await import('modules-skip-2.mjs');
assertUnreachable();
} catch(e) {
assertEquals('42 is not the answer', e.message);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-4.js b/deps/v8/test/mjsunit/harmony/modules-import-4.mjs
index 38a80b30e8..38a80b30e8 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-4.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-4.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-5.js b/deps/v8/test/mjsunit/harmony/modules-import-5.mjs
index c868a0c63f..33991efdc5 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-5.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-5.mjs
@@ -5,9 +5,9 @@
// Flags: --allow-natives-syntax --harmony-dynamic-import
var life;
-let x = 'modules-skip-1.js';
+let x = 'modules-skip-1.mjs';
import(x).then(namespace => life = namespace.life());
-x = 'modules-skip-2.js';
+x = 'modules-skip-2.mjs';
%PerformMicrotaskCheckpoint();
assertEquals(42, life);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-6.js b/deps/v8/test/mjsunit/harmony/modules-import-6.mjs
index 02fdf1b5fa..ae389301e6 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-6.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-6.mjs
@@ -9,8 +9,8 @@ var ran = false;
async function test() {
try {
let [namespace1, namespace2] = await Promise.all([
- import('modules-skip-1.js'),
- import('modules-skip-3.js')
+ import('modules-skip-1.mjs'),
+ import('modules-skip-3.mjs')
]);
let life = namespace1.life();
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-7.js b/deps/v8/test/mjsunit/harmony/modules-import-7.mjs
index 8df8ddcdb2..df16460f72 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-7.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-7.mjs
@@ -8,7 +8,7 @@ var ran = false;
async function test() {
try {
- let namespace = await import('modules-skip-4.js');
+ let namespace = await import('modules-skip-4.mjs');
assertEquals(42, namespace.life());
assertEquals("42", namespace.stringlife);
ran = true;
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-8.js b/deps/v8/test/mjsunit/harmony/modules-import-8.mjs
index ac21a8c9e9..ac21a8c9e9 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-8.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-8.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-9.js b/deps/v8/test/mjsunit/harmony/modules-import-9.mjs
index 664416f0eb..c3a19af2c5 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-9.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-9.mjs
@@ -8,7 +8,7 @@ var ran = false;
async function test() {
try {
- let namespace = await import('modules-skip-5.js').then(x => x);
+ let namespace = await import('modules-skip-5.mjs').then(x => x);
assertSame(namespace.static_life, namespace.dynamic_life);
assertSame(namespace.relative_static_life, namespace.dynamic_life);
ran = true;
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-large.js b/deps/v8/test/mjsunit/harmony/modules-import-large.mjs
index 250a41bd82..761f598b43 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-large.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-large.mjs
@@ -1,13 +1,11 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
// Flags: --allow-natives-syntax
-import * as m1 from "modules-skip-large1.js";
-import * as m2 from "modules-skip-large2.js";
+import * as m1 from "modules-skip-large1.mjs";
+import * as m2 from "modules-skip-large2.mjs";
assertFalse(%HasFastProperties(m1));
assertFalse(%HasFastProperties(m2));
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-meta.js b/deps/v8/test/mjsunit/harmony/modules-import-meta.mjs
index 5ea8a686f2..ab2ac04a57 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-meta.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-meta.mjs
@@ -1,11 +1,9 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --harmony-import-meta
-import foreign, { url as otherUrl } from './modules-skip-export-import-meta.js';
+import foreign, { url as otherUrl } from './modules-skip-export-import-meta.mjs';
assertEquals("object", typeof import.meta);
assertEquals(null, Object.getPrototypeOf(import.meta));
@@ -24,7 +22,7 @@ loadImportMetaFn();
assertSame(loadImportMetaFn(), import.meta);
// This property isn't part of the spec itself but is mentioned as an example
-assertMatches(/\/modules-import-meta\.js$/, import.meta.url);
+assertMatches(/\/modules-import-meta\.mjs$/, import.meta.url);
import.meta.x = 42;
assertEquals(42, import.meta.x);
@@ -40,5 +38,5 @@ assertThrows(() => eval('import.meta'), SyntaxError);
assertThrows(() => new Function('return import.meta;'), SyntaxError);
assertNotEquals(foreign, import.meta);
-assertMatches(/\/modules-skip-export-import-meta\.js$/, foreign.url);
+assertMatches(/\/modules-skip-export-import-meta\.mjs$/, foreign.url);
assertEquals(foreign.url, otherUrl);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-namespace.js b/deps/v8/test/mjsunit/harmony/modules-import-namespace.mjs
index fd0ad05fa4..5dbe84ccd0 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-namespace.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-namespace.mjs
@@ -1,12 +1,10 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
// Flags: --allow-natives-syntax
-import * as m from "modules-skip-3.js";
+import * as m from "modules-skip-3.mjs";
function get() {
return m.stringlife;
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-1.js b/deps/v8/test/mjsunit/harmony/modules-skip-1.mjs
index ee854b69d2..ee854b69d2 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-1.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-1.mjs
diff --git a/deps/v8/test/message/fail/modules-skip-cycle6.js b/deps/v8/test/mjsunit/harmony/modules-skip-10.mjs
index c27c302cc8..692dc384c4 100644
--- a/deps/v8/test/message/fail/modules-skip-cycle6.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-10.mjs
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export {foo} from "modules-cycle6.js";
+import {x} from "./modules-skip-10.mjs"
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-11.js b/deps/v8/test/mjsunit/harmony/modules-skip-11.mjs
index 8981f50ad4..8981f50ad4 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-11.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-11.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-12.js b/deps/v8/test/mjsunit/harmony/modules-skip-12.mjs
index 05fedfa8d1..05fedfa8d1 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-12.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-12.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-13.js b/deps/v8/test/mjsunit/harmony/modules-skip-13.mjs
index d823a283f8..1575a54edd 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-13.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-13.mjs
@@ -2,5 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export * as self from "./modules-skip-13.js";
+export * as self from "./modules-skip-13.mjs";
export default 42;
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-2.js b/deps/v8/test/mjsunit/harmony/modules-skip-2.mjs
index 6ff97a9893..6ff97a9893 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-2.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-2.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-3.js b/deps/v8/test/mjsunit/harmony/modules-skip-3.mjs
index 6f9984ed51..6f9984ed51 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-3.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-3.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-4.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-4.mjs
new file mode 100644
index 0000000000..d962bfe291
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-4.mjs
@@ -0,0 +1,6 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from 'modules-skip-1.mjs';
+export * from 'modules-skip-3.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-5.js b/deps/v8/test/mjsunit/harmony/modules-skip-5.mjs
index 879445977f..fc1eebb82a 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-5.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-5.mjs
@@ -6,9 +6,9 @@
var dynamic_life;
-import * as static_life from 'modules-skip-1.js';
-import * as relative_static_life from './modules-skip-1.js';
-import('modules-skip-1.js').then(namespace => dynamic_life = namespace);
+import * as static_life from 'modules-skip-1.mjs';
+import * as relative_static_life from './modules-skip-1.mjs';
+import('modules-skip-1.mjs').then(namespace => dynamic_life = namespace);
export { static_life };
export { relative_static_life };
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-6.js b/deps/v8/test/mjsunit/harmony/modules-skip-6.mjs
index 6acc2b8499..6acc2b8499 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-6.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-6.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-7.js b/deps/v8/test/mjsunit/harmony/modules-skip-7.mjs
index 4d3a952e92..4733c46e24 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-7.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-7.mjs
@@ -6,7 +6,7 @@
export async function getLife() {
try {
- let namespace = await import('modules-skip-1.js');
+ let namespace = await import('modules-skip-1.mjs');
return namespace.life();
} catch (e) {
%AbortJS('failure: ' + e);
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-8.js b/deps/v8/test/mjsunit/harmony/modules-skip-8.mjs
index 816cf2cdb7..816cf2cdb7 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-8.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-8.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-9.mjs b/deps/v8/test/mjsunit/harmony/modules-skip-9.mjs
new file mode 100644
index 0000000000..cca36f988e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-9.mjs
@@ -0,0 +1,5 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import x from 'modules-skip-empty.mjs';
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-empty.js b/deps/v8/test/mjsunit/harmony/modules-skip-empty.mjs
index 4af874cfa7..4af874cfa7 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-empty.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-empty.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.js b/deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.mjs
index e58e5018af..e58e5018af 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-export-import-meta.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-large1.js b/deps/v8/test/mjsunit/harmony/modules-skip-large1.mjs
index e643df32a9..e643df32a9 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-large1.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-large1.mjs
diff --git a/deps/v8/test/mjsunit/harmony/modules-skip-large2.js b/deps/v8/test/mjsunit/harmony/modules-skip-large2.mjs
index 2c03557197..2c03557197 100644
--- a/deps/v8/test/mjsunit/harmony/modules-skip-large2.js
+++ b/deps/v8/test/mjsunit/harmony/modules-skip-large2.mjs
diff --git a/deps/v8/test/mjsunit/harmony/object-fromentries.js b/deps/v8/test/mjsunit/harmony/object-fromentries.js
index 8bbd6317c6..86733b150c 100644
--- a/deps/v8/test/mjsunit/harmony/object-fromentries.js
+++ b/deps/v8/test/mjsunit/harmony/object-fromentries.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-from-entries
-
const fromEntries = Object.fromEntries;
const ObjectPrototype = Object.prototype;
const ObjectPrototypeHasOwnProperty = ObjectPrototype.hasOwnProperty;
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-special-object.js b/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
index a85019dd45..4e6801e469 100644
--- a/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
+++ b/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
@@ -8,7 +8,7 @@ load('test/mjsunit/test-async.js');
async function f(assert) {
try {
- module_namespace_obj = await import('modules-skip-1.js');
+ module_namespace_obj = await import('modules-skip-1.mjs');
} catch(e) {
%AbortJS(e);
}
diff --git a/deps/v8/test/mjsunit/harmony/private-methods.js b/deps/v8/test/mjsunit/harmony/private-methods.js
index e7784a29f5..360b065f17 100644
--- a/deps/v8/test/mjsunit/harmony/private-methods.js
+++ b/deps/v8/test/mjsunit/harmony/private-methods.js
@@ -6,57 +6,211 @@
"use strict";
+// Basic private method test
{
+ let calledWith;
class C {
- #a() {}
+ #a(arg) { calledWith = arg; }
+ callA(arg) { this.#a(arg); }
+ }
+
+ const c = new C;
+ assertEquals(undefined, c.a);
+ assertEquals(undefined, calledWith);
+ c.callA(1);
+ assertEquals(1, calledWith);
+}
+
+// Call private method in another instance
+{
+ class C {
+ #a(arg) { this.calledWith = arg; }
+ callAIn(obj, arg) { obj.#a(arg); }
}
- new C;
+
+ const c = new C;
+ const c2 = new C;
+
+ assertEquals(undefined, c.a);
+ assertEquals(undefined, c.calledWith);
+ assertEquals(undefined, c2.calledWith);
+
+ c2.callAIn(c, 'fromC2');
+ assertEquals('fromC2', c.calledWith);
+ assertEquals(undefined, c2.calledWith);
+
+ c2.callAIn(c2, 'c2');
+ assertEquals('fromC2', c.calledWith);
+ assertEquals('c2', c2.calledWith);
+
+ assertThrows(() => { c2.callAIn({}); }, TypeError);
}
+// Private methods and private fields
{
class C {
- #a() {
+ #a;
+ constructor(a) {
+ this.#a = a;
+ }
+ #getAPlus1() {
+ return this.#a + 1;
+ }
+ equals(obj) {
+ return this.#getAPlus1() === obj.#getAPlus1();
+ }
+ }
+ const c = new C(0);
+ const c2 = new C(2);
+ const c3 = new C(2);
+ assertEquals(true, c2.equals(c3));
+ assertEquals(false, c2.equals(c));
+ assertEquals(false, c3.equals(c));
+}
+
+// Class inheritance
+{
+ class A {
+ #val;
+ constructor(a) {
+ this.#val = a;
+ }
+ #a() { return this.#val; }
+ getA() { return this.#a(); }
+ }
+ class B extends A {
+ constructor(b) {
+ super(b);
+ }
+ b() { return this.getA() }
+ }
+ const b = new B(1);
+ assertEquals(1, b.b());
+}
+
+// Private members should be accessed according to the class the
+// invoked method is in.
+{
+ class A {
+ #val;
+ constructor(a) {
+ this.#val = a;
+ }
+ #getVal() { return this.#val; }
+ getA() { return this.#getVal(); }
+ getVal() { return this.#getVal(); }
+ }
+
+ class B extends A {
+ #val;
+ constructor(a, b) {
+ super(a);
+ this.#val = b;
+ }
+ #getVal() { return this.#val; }
+ getB() { return this.#getVal(); }
+ getVal() { return this.#getVal(); }
+ }
+
+ const b = new B(1, 2);
+ assertEquals(1, b.getA());
+ assertEquals(2, b.getB());
+ assertEquals(1, A.prototype.getVal.call(b));
+ assertEquals(2, B.prototype.getVal.call(b));
+ const a = new A(1);
+ assertEquals(1, a.getA());
+ assertThrows(() => B.prototype.getB.call(a), TypeError);
+}
+
+// Private methods in nested classes.
+{
+ class C {
+ #b() {
class B {
- #a() { }
+ #foo(arg) { return arg; }
+ callFoo(arg) { return this.#foo(arg); }
}
- new B;
+ return new B();
}
+ createB() { return this.#b(); }
}
- new C;
+ const c = new C;
+ const b = c.createB();
+ assertEquals(1, b.callFoo(1));
}
+// Private methods in nested classes with inheritance.
{
- class A {
- #a() {
- class C extends A {
- #c() { }
+ class C {
+ #b() {
+ class B extends C {
+ #foo(arg) { return arg; }
+ callFoo(arg) { return this.#foo(arg); }
}
- new C;
+ return new B();
}
+ createB() { return this.#b(); }
}
- new A;
+ const c = new C;
+ const b = c.createB();
+ assertEquals(1, b.callFoo(1));
+ const b2 = b.createB();
+ assertEquals(1, b2.callFoo(1));
}
+// Class expressions.
{
const C = class {
- #a() { }
- }
- new C;
+ #a() { return 1; }
+ callA(obj) { return obj.#a() }
+ };
+ const c = new C;
+ const c2 = new C;
+ assertEquals(1, c.callA(c));
+ assertEquals(1, c.callA(c2));
}
+// Nested class expressions.
{
const C = class {
- #a() {
+ #b() {
const B = class {
- #a() { }
+ #foo(arg) { return arg; }
+ callFoo(arg) { return this.#foo(arg); }
+ };
+ return new B();
+ }
+ createB() { return this.#b(); }
+ };
+
+ const c = new C;
+ const b = c.createB();
+ assertEquals(1, b.callFoo(1));
+}
+
+
+// Nested class expressions with hierarchy.
+{
+ const C = class {
+ #b() {
+ const B = class extends C {
+ #foo(arg) { return arg; }
+ callFoo(arg) { return this.#foo(arg); }
}
- new B;
+ return new B();
}
+ createB() { return this.#b(); }
}
- new C;
+
+ const c = new C;
+ const b = c.createB();
+ assertEquals(1, b.callFoo(1));
+ const b2 = b.createB();
+ assertEquals(1, b2.callFoo(1));
}
+// Adding the brand twice on the same object should throw.
{
class A {
constructor(arg) {
@@ -72,11 +226,45 @@
}
}
- // Add the brand twice on the same object.
let c1 = new C({});
assertThrows(() => new C(c1), TypeError);
}
+// Private methods should be not visible to proxies.
+{
+ class X {
+ #x() {}
+ x() { this.#x(); };
+ callX(obj) { obj.#x(); }
+ }
+ let handlerCalled = false;
+ const x = new X();
+ let p = new Proxy(new X, {
+ apply(target, thisArg, argumentsList) {
+ handlerCalled = true;
+ Reflect.apply(target, thisArg, argumentsList);
+ }
+ });
+ assertThrows(() => p.x(), TypeError);
+ assertThrows(() => x.callX(p), TypeError);
+ assertThrows(() => X.prototype.x.call(p), TypeError);
+ assertThrows(() => X.prototype.callX(p), TypeError);
+ assertEquals(false, handlerCalled);
+}
+
+// Reference outside of class.
+{
+ class C {
+ #a() {}
+ }
+ assertThrows('new C().#a()');
+}
+
+// Duplicate private names.
+{
+ assertThrows('class C { #a = 1; #a() {} }');
+}
+
{
// TODO(v8:9177): test extending a class expression that does not have
// a private method.
diff --git a/deps/v8/test/mjsunit/harmony/regexp-overriden-exec.js b/deps/v8/test/mjsunit/harmony/regexp-overriden-exec.js
new file mode 100644
index 0000000000..7aba0aebe7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-overriden-exec.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-opt --opt
+
+const r = RegExp('bla');
+
+function foo() {
+ r.test('string');
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+assertOptimized(foo);
+
+r.__proto__.exec = function() {
+ return null;
+}
+Object.freeze(r.__proto__);
+
+foo();
+assertUnoptimized(foo);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-912504.js b/deps/v8/test/mjsunit/harmony/regress/regress-912504.js
index 78b1992b14..42428baa34 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-912504.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-912504.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --always-opt --harmony-object-from-entries --allow-natives-syntax
+// Flags: --always-opt --allow-natives-syntax
function test() {
Object.fromEntries([[]]);
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js
new file mode 100644
index 0000000000..24724eea14
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer-stress.js
@@ -0,0 +1,36 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Alloc(size) {
+ let b = new SharedArrayBuffer(size);
+ let v = new Int32Array(b);
+ return {buffer : b, view : v};
+}
+
+function RunSomeAllocs(total, retained, size) {
+ print(`-------iterations = ${total}, retained = $ { retained } -------`);
+ var array = new Array(retained);
+ for (var i = 0; i < total; i++) {
+ if ((i % 25) == 0)
+ print(`iteration $ { i }`);
+ let pair = Alloc(size);
+ // For some iterations, retain the memory, view, or both.
+ switch (i % 3) {
+ case 0:
+ pair.memory = null;
+ break;
+ case 1:
+ pair.view = null;
+ break;
+ case 2:
+ break;
+ }
+ array[i % retained] = pair;
+ }
+}
+
+RunSomeAllocs(10, 1, 1024);
+RunSomeAllocs(100, 3, 2048);
+RunSomeAllocs(1000, 10, 16384);
+RunSomeAllocs(10000, 20, 32768);
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer-worker-gc-stress.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer-worker-gc-stress.js
new file mode 100644
index 0000000000..7c86db7cfa
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer-worker-gc-stress.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/worker-ping-test.js");
+
+let config = {
+ numThings: 4, // size of circular buffer
+ numWorkers: 4, // number of workers
+ numMessages: 10000, // number of messages sent to each worker
+ allocInterval: 11, // interval for allocating new things per worker
+ traceScript: false, // print the script
+ traceAlloc: false, // print each allocation attempt
+ traceIteration: 10, // print diagnostics every so many iterations
+ abortOnFail: true, // kill worker if allocation fails
+
+ AllocThing: function AllocThing(id) {
+ return new SharedArrayBuffer(10000);
+ },
+}
+
+RunWorkerPingTest(config);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
index c1ec4070f4..df599ebd40 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/basics.js
@@ -85,7 +85,25 @@
(function TestUnregisterWithNonExistentKey() {
let fg = new FinalizationGroup(() => {});
- fg.unregister({"k": "whatever"});
+ let success = fg.unregister({"k": "whatever"});
+ assertFalse(success);
+})();
+
+(function TestUnregisterWithNonFinalizationGroup() {
+ assertThrows(() => FinalizationGroup.prototype.unregister.call({}, {}),
+ TypeError);
+})();
+
+(function TestUnregisterWithNonObjectUnregisterToken() {
+ let fg = new FinalizationGroup(() => {});
+ assertThrows(() => fg.unregister(1), TypeError);
+ assertThrows(() => fg.unregister(1n), TypeError);
+ assertThrows(() => fg.unregister('one'), TypeError);
+ assertThrows(() => fg.unregister(Symbol()), TypeError);
+ assertThrows(() => fg.unregister(true), TypeError);
+ assertThrows(() => fg.unregister(false), TypeError);
+ assertThrows(() => fg.unregister(undefined), TypeError);
+ assertThrows(() => fg.unregister(null), TypeError);
})();
(function TestWeakRefConstructor() {
@@ -138,3 +156,15 @@
let rv = FinalizationGroup.prototype.cleanupSome.call(fg);
assertEquals(undefined, rv);
})();
+
+(function TestCleanupSomeWithNonCallableCallback() {
+ let fg = new FinalizationGroup(() => {});
+ assertThrows(() => fg.cleanupSome(1), TypeError);
+ assertThrows(() => fg.cleanupSome(1n), TypeError);
+ assertThrows(() => fg.cleanupSome(Symbol()), TypeError);
+ assertThrows(() => fg.cleanupSome({}), TypeError);
+ assertThrows(() => fg.cleanupSome('foo'), TypeError);
+ assertThrows(() => fg.cleanupSome(true), TypeError);
+ assertThrows(() => fg.cleanupSome(false), TypeError);
+ assertThrows(() => fg.cleanupSome(null), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
new file mode 100644
index 0000000000..67ed64e85a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-2.js
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
+
+let cleanup_count = 0;
+let cleanup_holdings = [];
+let cleanup = function(iter) {
+ for (holdings of iter) {
+ cleanup_holdings.push(holdings);
+ }
+ ++cleanup_count;
+}
+
+let fg = new FinalizationGroup(cleanup);
+(function() {
+ let o = {};
+ fg.register(o, "holdings");
+
+ assertEquals(0, cleanup_count);
+})();
+
+// GC will detect o as dead.
+gc();
+
+// passing no callback, should trigger cleanup function
+fg.cleanupSome();
+assertEquals(1, cleanup_count);
+assertEquals(1, cleanup_holdings.length);
+assertEquals("holdings", cleanup_holdings[0]);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
index 1d3ceda3f2..06eb292dac 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome.js
@@ -2,11 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking --allow-natives-syntax
let cleanup_count = 0;
let cleanup_holdings = [];
let cleanup = function(iter) {
+ %AbortJS("shouldn't be called");
+}
+
+let cleanup2 = function(iter) {
for (holdings of iter) {
cleanup_holdings.push(holdings);
}
@@ -19,14 +23,14 @@ let fg = new FinalizationGroup(cleanup);
fg.register(o, "holdings");
// cleanupSome won't do anything since there are no reclaimed targets.
- fg.cleanupSome();
+ fg.cleanupSome(cleanup2);
assertEquals(0, cleanup_count);
})();
// GC will detect o as dead.
gc();
-fg.cleanupSome();
+fg.cleanupSome(cleanup2);
assertEquals(1, cleanup_count);
assertEquals(1, cleanup_holdings.length);
assertEquals("holdings", cleanup_holdings[0]);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
index f6480f86b6..3db18e016e 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-after-cleanup.js
@@ -36,7 +36,8 @@ let timeout_func = function() {
assertEquals(1, cleanup_holdings_count);
// Unregister an already iterated over weak reference.
- fg.unregister(key);
+ let success = fg.unregister(key);
+ assertFalse(success);
// Assert that it didn't do anything.
setTimeout(() => { assertEquals(1, cleanup_call_count); }, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
index 10b8bc67ff..c54dbb25de 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-before-cleanup.js
@@ -19,8 +19,8 @@ let key = {"k": "this is the key"};
fg.register(object, "my holdings", key);
// Clear the WeakCell before the GC has a chance to discover it.
- let return_value = fg.unregister(key);
- assertEquals(undefined, return_value);
+ let success = fg.unregister(key);
+ assertTrue(success);
// object goes out of scope.
})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
index e6ea150027..d1dd8f8b75 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-called-twice.js
@@ -19,10 +19,12 @@ let key = {"k": "this is the key"};
fg.register(object, "holdings", key);
// Unregister before the GC has a chance to discover the object.
- fg.unregister(key);
+ let success = fg.unregister(key);
+ assertTrue(success);
// Call unregister again (just to assert we handle this gracefully).
- fg.unregister(key);
+ success = fg.unregister(key);
+ assertFalse(success);
// object goes out of scope.
})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
index aa9eab20ff..80ca85f619 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup1.js
@@ -8,7 +8,8 @@ let cleanup_call_count = 0;
let cleanup_holdings_count = 0;
let cleanup = function(iter) {
// Unregister before we've iterated through the holdings.
- fg.unregister(key);
+ let success = fg.unregister(key);
+ assertTrue(success);
for (wc of iter) {
++cleanup_holdings_count;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
index 84ec3aaef8..038f5093e1 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup2.js
@@ -9,7 +9,9 @@ let cleanup_holdings_count = 0;
let cleanup = function(iter) {
for (holdings of iter) {
assertEquals(holdings, "holdings");
- fg.unregister(key);
+ let success = fg.unregister(key);
+ assertFalse(success);
+
++cleanup_holdings_count;
}
++cleanup_call_count;
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
index 39706a7b9b..af6b5c13ed 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup3.js
@@ -12,7 +12,8 @@ let cleanup = function(iter) {
++cleanup_holdings_count;
}
// Unregister an already iterated over weak reference.
- fg.unregister(key);
+ let success = fg.unregister(key);
+ assertFalse(success);
++cleanup_call_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
index 67ed227502..8a0a5d5707 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup4.js
@@ -10,10 +10,12 @@ let cleanup = function(iter) {
for (holdings of iter) {
// See which target we're iterating over and unregister the other one.
if (holdings == 1) {
- fg.unregister(key2);
+ let success = fg.unregister(key2);
+ assertTrue(success);
} else {
assertSame(holdings, 2);
- fg.unregister(key1);
+ let success = fg.unregister(key1);
+ assertTrue(success);
}
++cleanup_holdings_count;
}
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js
new file mode 100644
index 0000000000..974485e9cb
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-inside-cleanup5.js
@@ -0,0 +1,48 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let cleanup_call_count = 0;
+let cleanup_holdings_count = 0;
+let cleanup = function(iter) {
+ for (holdings of iter) {
+ assertEquals(holdings, "holdings");
+
+ // There's one more object with the same key that we haven't
+ // iterated over yet so we should be able to unregister the
+ // callback for that one.
+ let success = fg.unregister(key);
+ assertTrue(success);
+
+ ++cleanup_holdings_count;
+ }
+ ++cleanup_call_count;
+}
+
+let fg = new FinalizationGroup(cleanup);
+// Create an object and register it in the FinalizationGroup. The object needs to be inside
+// a closure so that we can reliably kill them!
+let key = {"k": "this is the key"};
+
+(function() {
+ let object = {};
+ let object2 = {};
+ fg.register(object, "holdings", key);
+ fg.register(object2, "holdings", key);
+
+ // object goes out of scope.
+})();
+
+// This GC will discover dirty WeakCells and schedule cleanup.
+gc();
+assertEquals(0, cleanup_call_count);
+
+// Assert that the cleanup function was called and iterated the WeakCell.
+let timeout_func = function() {
+ assertEquals(1, cleanup_call_count);
+ assertEquals(1, cleanup_holdings_count);
+}
+
+setTimeout(timeout_func, 0);
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
index 748b7065c6..8be0db1444 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-many.js
@@ -31,7 +31,8 @@ let key2 = {"k": "key2"};
fg.register(object2, "holdings2", key2);
// Unregister before the GC has a chance to discover the objects.
- fg.unregister(key1);
+ let success = fg.unregister(key1);
+ assertTrue(success);
// objects go out of scope.
})();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
index 2466568397..4ad4425a4c 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/unregister-when-cleanup-already-scheduled.js
@@ -26,7 +26,8 @@ gc();
assertEquals(0, cleanup_call_count);
// Unregister the object from the FinalizationGroup before cleanup has ran.
-fg.unregister(key);
+let success = fg.unregister(key);
+assertTrue(success);
// Assert that the cleanup function won't be called.
let timeout_func = function() {
diff --git a/deps/v8/test/mjsunit/has-own-property.js b/deps/v8/test/mjsunit/has-own-property.js
index 5ff8db5f7b..b009b7d75a 100644
--- a/deps/v8/test/mjsunit/has-own-property.js
+++ b/deps/v8/test/mjsunit/has-own-property.js
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Normal objects.
// Check for objects.
assertTrue({x:12}.hasOwnProperty('x'));
assertFalse({x:12}.hasOwnProperty('y'));
@@ -36,3 +37,135 @@ assertTrue(Object.prototype.hasOwnProperty.call('', 'length'));
// Check for numbers.
assertFalse((123).hasOwnProperty('length'));
assertFalse(Object.prototype.hasOwnProperty.call(123, 'length'));
+
+// Frozen object.
+// Check for objects.
+assertTrue(Object.freeze({x:12}).hasOwnProperty('x'));
+assertFalse(Object.freeze({x:12}).hasOwnProperty('y'));
+
+// Check for strings.
+assertTrue(Object.freeze('').hasOwnProperty('length'));
+assertTrue(Object.prototype.hasOwnProperty.call(Object.freeze(''), 'length'));
+
+// Check for numbers.
+assertFalse(Object.freeze(123).hasOwnProperty('length'));
+assertFalse(Object.prototype.hasOwnProperty.call(Object.freeze(123), 'length'));
+
+// Direct vs. inherited properties.
+var o = ['exist'];
+Object.freeze(o);
+assertTrue(o.hasOwnProperty('0'));
+assertFalse(o.hasOwnProperty('toString'));
+assertFalse(o.hasOwnProperty('hasOwnProperty'));
+
+// Using hasOwnProperty as a property name.
+var foo = ['a'];
+foo.hasOwnProperty = function() { return false; };
+Object.freeze(foo);
+assertFalse(foo.hasOwnProperty('0')); // always returns false.
+
+// Use another Object's hasOwnProperty
+// and call it with 'this' set to foo.
+assertTrue(({}).hasOwnProperty.call(foo, '0'));
+
+// It's also possible to use the hasOwnProperty property
+// from the Object prototype for this purpose.
+assertTrue(Object.prototype.hasOwnProperty.call(foo, '0'));
+
+// Check for null or undefined
+var o = Object.freeze([, null, undefined, 'a', 1, Symbol('2')]);
+assertFalse(o.hasOwnProperty('0')); // hole
+assertTrue(o.hasOwnProperty('1'));
+assertTrue(o.hasOwnProperty('2'));
+assertTrue(o.hasOwnProperty('3'));
+assertTrue(o.hasOwnProperty('4'));
+assertTrue(o.hasOwnProperty('5'));
+assertFalse(o.hasOwnProperty('6')); // out of bounds
+
+// Sealed object.
+// Check for objects.
+assertTrue(Object.seal({x:12}).hasOwnProperty('x'));
+assertFalse(Object.seal({x:12}).hasOwnProperty('y'));
+
+// Check for strings.
+assertTrue(Object.seal('').hasOwnProperty('length'));
+assertTrue(Object.prototype.hasOwnProperty.call(Object.seal(''), 'length'));
+
+// Check for numbers.
+assertFalse(Object.seal(123).hasOwnProperty('length'));
+assertFalse(Object.prototype.hasOwnProperty.call(Object.seal(123), 'length'));
+
+// Direct vs. inherited properties.
+var o = ['exist'];
+Object.seal(o);
+assertTrue(o.hasOwnProperty('0'));
+assertFalse(o.hasOwnProperty('toString'));
+assertFalse(o.hasOwnProperty('hasOwnProperty'));
+
+// Using hasOwnProperty as a property name.
+var foo = ['a'];
+foo.hasOwnProperty = function() { return false; };
+Object.seal(foo);
+assertFalse(foo.hasOwnProperty('0')); // always returns false.
+
+// Use another Object's hasOwnProperty
+// and call it with 'this' set to foo.
+assertTrue(({}).hasOwnProperty.call(foo, '0'));
+
+// It's also possible to use the hasOwnProperty property
+// from the Object prototype for this purpose.
+assertTrue(Object.prototype.hasOwnProperty.call(foo, '0'));
+
+// Check for null or undefined
+var o = Object.seal([, null, undefined, 'a', 1, Symbol('2')]);
+assertFalse(o.hasOwnProperty('0')); // hole.
+assertTrue(o.hasOwnProperty('1'));
+assertTrue(o.hasOwnProperty('2'));
+assertTrue(o.hasOwnProperty('3'));
+assertTrue(o.hasOwnProperty('4'));
+assertTrue(o.hasOwnProperty('5'));
+assertFalse(o.hasOwnProperty('6')); // out of bounds.
+
+// Non-extensible object.
+// Check for objects.
+assertTrue(Object.preventExtensions({x:12}).hasOwnProperty('x'));
+assertFalse(Object.preventExtensions({x:12}).hasOwnProperty('y'));
+
+// Check for strings.
+assertTrue(Object.preventExtensions('').hasOwnProperty('length'));
+assertTrue(Object.prototype.hasOwnProperty.call(Object.preventExtensions(''), 'length'));
+
+// Check for numbers.
+assertFalse(Object.preventExtensions(123).hasOwnProperty('length'));
+assertFalse(Object.prototype.hasOwnProperty.call(Object.preventExtensions(123), 'length'));
+
+// Direct vs. inherited properties.
+var o = ['exist'];
+Object.preventExtensions(o);
+assertTrue(o.hasOwnProperty('0'));
+assertFalse(o.hasOwnProperty('toString'));
+assertFalse(o.hasOwnProperty('hasOwnProperty'));
+
+// Using hasOwnProperty as a property name.
+var foo = ['a'];
+foo.hasOwnProperty = function() { return false; };
+Object.preventExtensions(foo);
+assertFalse(foo.hasOwnProperty('0')); // always returns false.
+
+// Use another Object's hasOwnProperty
+// and call it with 'this' set to foo.
+assertTrue(({}).hasOwnProperty.call(foo, '0'));
+
+// It's also possible to use the hasOwnProperty property
+// from the Object prototype for this purpose.
+assertTrue(Object.prototype.hasOwnProperty.call(foo, '0'));
+
+// Check for null or undefined.
+var o = Object.preventExtensions([, null, undefined, 'a', 1, Symbol('2')]);
+assertFalse(o.hasOwnProperty('0')); // hole.
+assertTrue(o.hasOwnProperty('1'));
+assertTrue(o.hasOwnProperty('2'));
+assertTrue(o.hasOwnProperty('3'));
+assertTrue(o.hasOwnProperty('4'));
+assertTrue(o.hasOwnProperty('5'));
+assertFalse(o.hasOwnProperty('6')); // out of bounds.
diff --git a/deps/v8/test/mjsunit/hash-code.js b/deps/v8/test/mjsunit/hash-code.js
index 1a0057ff38..7937d7c5e3 100644
--- a/deps/v8/test/mjsunit/hash-code.js
+++ b/deps/v8/test/mjsunit/hash-code.js
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax
function f() {
- var x = { a: 1 }
+ var x = {a: 1};
var set = new Set();
set.add(x);
@@ -17,8 +17,8 @@ function f() {
x.g = 6;
assertTrue(set.has(x));
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/ignition/optimized-stack-trace.js b/deps/v8/test/mjsunit/ignition/optimized-stack-trace.js
index 77f9acf3c8..4f5cd787b5 100644
--- a/deps/v8/test/mjsunit/ignition/optimized-stack-trace.js
+++ b/deps/v8/test/mjsunit/ignition/optimized-stack-trace.js
@@ -13,8 +13,8 @@ function f() {
function g(x) {
return f();
-}
-
+};
+%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js b/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
index ae9286b100..c1575e625f 100644
--- a/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
+++ b/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
@@ -9,5 +9,5 @@ function f() {
if (i == 5) %OptimizeOsr();
}
}
-%EnsureFeedbackVectorForFunction(f);
+%PrepareFunctionForOptimization(f);
f();
diff --git a/deps/v8/test/mjsunit/ignition/osr-from-generator.js b/deps/v8/test/mjsunit/ignition/osr-from-generator.js
index e2d628819f..5fe1012e94 100644
--- a/deps/v8/test/mjsunit/ignition/osr-from-generator.js
+++ b/deps/v8/test/mjsunit/ignition/osr-from-generator.js
@@ -55,6 +55,7 @@
for (var k = 0; k < 10; ++k) {
if (k == 5) %OptimizeOsr();
}
+ %PrepareFunctionForOptimization(gen4);
}
yield i;
}
diff --git a/deps/v8/test/mjsunit/invalid-lhs.js b/deps/v8/test/mjsunit/invalid-lhs.js
index 92929b68e5..aec841006a 100644
--- a/deps/v8/test/mjsunit/invalid-lhs.js
+++ b/deps/v8/test/mjsunit/invalid-lhs.js
@@ -29,23 +29,23 @@
// exceptions are delayed until runtime.
// Normal assignments:
-assertThrows("12 = 12", ReferenceError);
-assertThrows("x++ = 12", ReferenceError);
+assertThrows("12 = 12", SyntaxError);
+assertThrows("x++ = 12", SyntaxError);
assertThrows("eval('var x') = 12", ReferenceError);
-assertThrows("if (false) 12 = 12", ReferenceError);
+assertThrows("if (false) 12 = 12", SyntaxError);
assertDoesNotThrow("if (false) eval('var x') = 12");
// Pre- and post-fix operations:
-assertThrows("12++", ReferenceError);
-assertThrows("12--", ReferenceError);
-assertThrows("++12", ReferenceError);
-assertThrows("--12", ReferenceError);
+assertThrows("12++", SyntaxError);
+assertThrows("12--", SyntaxError);
+assertThrows("++12", SyntaxError);
+assertThrows("--12", SyntaxError);
assertThrows("++(eval('12'))", ReferenceError);
assertThrows("(eval('12'))++", ReferenceError);
-assertThrows("if (false) 12++", ReferenceError);
-assertThrows("if (false) 12--", ReferenceError);
-assertThrows("if (false) ++12", ReferenceError);
-assertThrows("if (false) --12", ReferenceError);
+assertThrows("if (false) 12++", SyntaxError);
+assertThrows("if (false) 12--", SyntaxError);
+assertThrows("if (false) ++12", SyntaxError);
+assertThrows("if (false) --12", SyntaxError);
assertDoesNotThrow("if (false) ++(eval('12'))");
assertDoesNotThrow("if (false) (eval('12'))++");
@@ -56,19 +56,19 @@ assertThrows("if (false) for (12 in [1]) print(12);", SyntaxError);
assertDoesNotThrow("if (false) for (eval('0') in [1]) print(12);");
// For:
-assertThrows("for (12 = 1;;) print(12);", ReferenceError);
+assertThrows("for (12 = 1;;) print(12);", SyntaxError);
assertThrows("for (eval('var x') = 1;;) print(12);", ReferenceError);
-assertThrows("if (false) for (12 = 1;;) print(12);", ReferenceError);
+assertThrows("if (false) for (12 = 1;;) print(12);", SyntaxError);
assertDoesNotThrow("if (false) for (eval('var x') = 1;;) print(12);");
// Assignments to 'this'.
-assertThrows("this = 42", ReferenceError);
-assertThrows("function f() { this = 12; }", ReferenceError);
+assertThrows("this = 42", SyntaxError);
+assertThrows("function f() { this = 12; }", SyntaxError);
assertThrows("for (this in {x:3, y:4, z:5}) ;", SyntaxError);
-assertThrows("for (this = 0;;) ;", ReferenceError);
-assertThrows("this++", ReferenceError);
-assertThrows("++this", ReferenceError);
-assertThrows("this--", ReferenceError);
-assertThrows("--this", ReferenceError);
-assertThrows("if (false) this = 42", ReferenceError);
-assertThrows("if (false) this++", ReferenceError);
+assertThrows("for (this = 0;;) ;", SyntaxError);
+assertThrows("this++", SyntaxError);
+assertThrows("++this", SyntaxError);
+assertThrows("this--", SyntaxError);
+assertThrows("--this", SyntaxError);
+assertThrows("if (false) this = 42", SyntaxError);
+assertThrows("if (false) this++", SyntaxError);
diff --git a/deps/v8/test/mjsunit/json2.js b/deps/v8/test/mjsunit/json2.js
index 917e8c88e6..f56fd43f58 100644
--- a/deps/v8/test/mjsunit/json2.js
+++ b/deps/v8/test/mjsunit/json2.js
@@ -54,7 +54,7 @@ expected_2 = '[' + nulls + 'null]';
TestStringify(expected_1, array_1);
TestStringify(expected_2, array_2);
-// Test JSValue with custom prototype.
+// Test JSPrimitiveWrapper with custom prototype.
var num_wrapper = Object(42);
num_wrapper.__proto__ = { __proto__: null,
toString: function() { return true; } };
diff --git a/deps/v8/test/mjsunit/keyed-has-ic-module-export.js b/deps/v8/test/mjsunit/keyed-has-ic-module-export.mjs
index 5183157ac4..b36d053f9f 100644
--- a/deps/v8/test/mjsunit/keyed-has-ic-module-export.js
+++ b/deps/v8/test/mjsunit/keyed-has-ic-module-export.mjs
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
export var a = "A";
export var b = "B";
export var c = "C";
diff --git a/deps/v8/test/mjsunit/keyed-has-ic-module-import.js b/deps/v8/test/mjsunit/keyed-has-ic-module-import.mjs
index 77a42925f4..c8a5f51ccb 100644
--- a/deps/v8/test/mjsunit/keyed-has-ic-module-import.js
+++ b/deps/v8/test/mjsunit/keyed-has-ic-module-import.mjs
@@ -3,9 +3,8 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-// MODULE
-import * as mod from "keyed-has-ic-module-export.js";
+import * as mod from "keyed-has-ic-module-export.mjs";
function testIn(obj, key) {
return key in obj;
@@ -64,6 +63,7 @@ var tests = {
for (let test in tests) {
%DeoptimizeFunction(testIn);
%ClearFunctionFeedback(testIn);
+ %PrepareFunctionForOptimization(testIn);
tests[test]();
%OptimizeFunctionOnNextCall(testIn);
tests[test]();
diff --git a/deps/v8/test/mjsunit/keyed-has-ic.js b/deps/v8/test/mjsunit/keyed-has-ic.js
index ee15ea4859..05c46e03e0 100644
--- a/deps/v8/test/mjsunit/keyed-has-ic.js
+++ b/deps/v8/test/mjsunit/keyed-has-ic.js
@@ -255,6 +255,7 @@ var tests = {
for (test in tests) {
%DeoptimizeFunction(testIn);
%ClearFunctionFeedback(testIn);
+ %PrepareFunctionForOptimization(testIn);
tests[test]();
%OptimizeFunctionOnNextCall(testIn);
tests[test]();
@@ -267,8 +268,9 @@ for (test in tests) {
var proto = function() {
assertTrue("prototype" in o);
o.prototype;
- }
+ };
+ %PrepareFunctionForOptimization(proto);
proto();
proto();
%OptimizeFunctionOnNextCall(proto);
@@ -281,6 +283,7 @@ for (test in tests) {
0 in "string"
};
+ %PrepareFunctionForOptimization(test);
assertThrows(test, TypeError);
assertThrows(test, TypeError);
%OptimizeFunctionOnNextCall(test);
@@ -293,6 +296,7 @@ for (test in tests) {
assertTrue("length" in this);
};
+ %PrepareFunctionForOptimization(test);
test.call("");
test.call("");
%OptimizeFunctionOnNextCall(test);
@@ -305,6 +309,7 @@ for (test in tests) {
return index in arguments;
};
+ %PrepareFunctionForOptimization(test);
assertFalse(test())
assertFalse(test())
assertTrue(test(0));
@@ -327,6 +332,7 @@ for (test in tests) {
return 2 in arguments;
};
+ %PrepareFunctionForOptimization(test);
assertFalse(test(1));
assertFalse(test(1));
%OptimizeFunctionOnNextCall(test);
@@ -342,6 +348,7 @@ for (test in tests) {
}
return true;
}
+ %PrepareFunctionForOptimization(test);
var str = "string";
// this will place slow_stub in the IC for strings.
@@ -370,6 +377,7 @@ for (test in tests) {
}
return true;
}
+ %PrepareFunctionForOptimization(test);
var str = "string";
assertFalse(test(str, "length"));
@@ -387,6 +395,7 @@ for (test in tests) {
}
return true;
}
+ %PrepareFunctionForOptimization(test);
var str = "string";
assertFalse(test(str, 0));
@@ -404,6 +413,7 @@ for (test in tests) {
}
return true;
}
+ %PrepareFunctionForOptimization(test);
var ary = [0, 1, 2, '3'];
function testArray(ary) {
@@ -452,6 +462,7 @@ function testHeapConstantArray(heap_constant_ary) {
function test() {
return 1 in heap_constant_ary;
}
+ %PrepareFunctionForOptimization(test);
assertTrue(test());
assertTrue(test());
@@ -460,6 +471,7 @@ function testHeapConstantArray(heap_constant_ary) {
heap_constant_ary[1] = 2;
assertTrue(test());
+ %PrepareFunctionForOptimization(test);
%OptimizeFunctionOnNextCall(test);
assertTrue(test());
}
diff --git a/deps/v8/test/mjsunit/keyed-named-access.js b/deps/v8/test/mjsunit/keyed-named-access.js
index 11f8fb50d8..aec57dda8b 100644
--- a/deps/v8/test/mjsunit/keyed-named-access.js
+++ b/deps/v8/test/mjsunit/keyed-named-access.js
@@ -17,18 +17,21 @@ function f(o) {
return result;
}
+%PrepareFunctionForOptimization(f);
f(o1);
f(o1);
f(o1);
%OptimizeFunctionOnNextCall(f);
assertEquals(1000, f(o1));
+%PrepareFunctionForOptimization(f);
f(o2);
f(o2);
f(o2);
%OptimizeFunctionOnNextCall(f);
assertEquals(1100, f(o2));
+%PrepareFunctionForOptimization(f);
f(o3);
f(o3);
f(o3);
@@ -53,6 +56,7 @@ assertEquals(1200, f(o3));
return g(1, o[h()]--, 10);
}
+ %PrepareFunctionForOptimization(test);
test(global);
test(global);
%OptimizeFunctionOnNextCall(test);
@@ -65,6 +69,7 @@ assertEquals(1200, f(o3));
this[0, ""]--;
}
+ %PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/keyed-store-array-literal.js b/deps/v8/test/mjsunit/keyed-store-array-literal.js
index 6892a8fd18..000385b9d3 100644
--- a/deps/v8/test/mjsunit/keyed-store-array-literal.js
+++ b/deps/v8/test/mjsunit/keyed-store-array-literal.js
@@ -11,6 +11,7 @@ function f1() {
assertEquals([undefined, 42], x);
}
+%PrepareFunctionForOptimization(f1);
f1();
f1();
%OptimizeFunctionOnNextCall(f1);
@@ -26,6 +27,7 @@ function f2() {
assertEquals([0, 1, 2, 3, 4], x);
}
+%PrepareFunctionForOptimization(f2);
f2();
f2();
%OptimizeFunctionOnNextCall(f2);
@@ -41,6 +43,7 @@ function f3() {
assertEquals([0, 1.1, {}], x);
}
+%PrepareFunctionForOptimization(f3);
f3();
f3();
%OptimizeFunctionOnNextCall(f3);
@@ -52,6 +55,7 @@ function f4(x) {
x[x.length] = x.length;
}
+%PrepareFunctionForOptimization(f4);
let x1 = [];
f4(x1);
assertEquals([0], x1);
@@ -63,6 +67,7 @@ assertEquals([0, 1, 2], x1);
f4(x1);
assertEquals([0, 1, 2, 3], x1);
+%PrepareFunctionForOptimization(f4);
let x2 = {length: 42};
f4(x2);
assertEquals(42, x2[42]);
diff --git a/deps/v8/test/mjsunit/large-object-literal.js b/deps/v8/test/mjsunit/large-object-literal.js
index 8bf4debbb7..1c539c8b67 100644
--- a/deps/v8/test/mjsunit/large-object-literal.js
+++ b/deps/v8/test/mjsunit/large-object-literal.js
@@ -36,7 +36,7 @@ function testLiteral(size) {
for (var i = 0; i < size; i++) {
if (i > 0) literal += ",";
- literal += ("a" + i + ":" + i);
+ literal += 'a' + i + ':' + i;
}
literal += "}";
@@ -45,7 +45,7 @@ function testLiteral(size) {
// Check that the properties have the expected values.
for (var i = 0; i < size; i++) {
- assertEquals(i, o["a"+i]);
+ assertEquals(i, o['a' + i]);
}
}
function testElementLiteral(size) {
@@ -55,7 +55,7 @@ function testElementLiteral(size) {
for (var i = 0; i < size; i++) {
if (i > 0) literal += ",";
- literal += (i + ":'" + i+"'");
+ literal += i + ':\'' + i + '\'';
}
literal += "}";
@@ -64,7 +64,7 @@ function testElementLiteral(size) {
// Check that the properties have the expected values.
for (var i = 0; i < size; i++) {
- assertEquals(i+"", o[i]);
+ assertEquals(i + '', o[i]);
}
}
@@ -83,8 +83,9 @@ for (var i = 0; i < sizes.length; i++) {
function TestSlowLiteralOptimized() {
function f() {
- return {__proto__:null, bar:"barValue"};
- }
+ return {__proto__: null, bar: 'barValue'};
+ };
+ %PrepareFunctionForOptimization(f);
let obj = f();
assertFalse(%HasFastProperties(obj));
assertEquals(Object.getPrototypeOf(obj), null);
diff --git a/deps/v8/test/mjsunit/math-ceil.js b/deps/v8/test/mjsunit/math-ceil.js
index 314fd0a08b..d0999b07f3 100644
--- a/deps/v8/test/mjsunit/math-ceil.js
+++ b/deps/v8/test/mjsunit/math-ceil.js
@@ -9,6 +9,7 @@ var test_id = 0;
function testCeil(expect, input) {
var test = new Function('n',
'"' + (test_id++) + '";return Math.ceil(n)');
+ %PrepareFunctionForOptimization(test);
assertEquals(expect, test(input));
assertEquals(expect, test(input));
assertEquals(expect, test(input));
diff --git a/deps/v8/test/mjsunit/math-floor-negative.js b/deps/v8/test/mjsunit/math-floor-negative.js
index e39d5ade4b..f04f527085 100644
--- a/deps/v8/test/mjsunit/math-floor-negative.js
+++ b/deps/v8/test/mjsunit/math-floor-negative.js
@@ -31,19 +31,19 @@ function test1() {
// Trigger overflow when converting/truncating double to integer.
// Divide by 10 to avoid overflow when smi-tagging at the end.
return Math.floor(-100000000000.5) / 10;
-}
-
+};
+%PrepareFunctionForOptimization(test1);
function test2() {
// Trigger no overflow.
return Math.floor(-100.2);
-}
-
+};
+%PrepareFunctionForOptimization(test2);
function test3() {
// Trigger overflow when compensating by subtracting after compare.
// Divide by 10 to avoid overflow when smi-tagging at the end.
return Math.floor(-2147483648.1) / 10;
-}
-
+};
+%PrepareFunctionForOptimization(test3);
test1();
test1();
%OptimizeFunctionOnNextCall(test1);
diff --git a/deps/v8/test/mjsunit/math-floor-of-div.js b/deps/v8/test/mjsunit/math-floor-of-div.js
index d0026e963a..1c695a47f8 100644
--- a/deps/v8/test/mjsunit/math-floor-of-div.js
+++ b/deps/v8/test/mjsunit/math-floor-of-div.js
@@ -35,72 +35,74 @@ function div(a, b) {
var limit = 0x1000000;
var exhaustive_limit = 100;
var step = 10;
-var values = [0x10000001,
- 0x12345678,
- -0x789abcdf, // 0x87654321
- 0x01234567,
- 0x76543210,
- -0x80000000, // 0x80000000
- 0x7fffffff,
- -0x0fffffff, // 0xf0000001
- 0x00000010,
- -0x01000000 // 0xff000000
- ];
+var values = [
+ 0x10000001, 0x12345678,
+ -0x789abcdf, // 0x87654321
+ 0x01234567, 0x76543210,
+ -0x80000000, // 0x80000000
+ 0x7fffffff,
+ -0x0fffffff, // 0xf0000001
+ 0x00000010,
+ -0x01000000 // 0xff000000
+];
function test_div() {
var c = 0;
for (var k = 0; k <= limit; k++) {
- if (k > exhaustive_limit) { c += step; k += c; }
- assertEquals(Math.floor(div(k, 1)), Math.floor(k / 1));
- assertEquals(Math.floor(div(k, -1)), Math.floor(k / -1));
- assertEquals(Math.floor(div(k, 2)), Math.floor(k / 2));
- assertEquals(Math.floor(div(k, -2)), Math.floor(k / -2));
- assertEquals(Math.floor(div(k, 3)), Math.floor(k / 3));
- assertEquals(Math.floor(div(k, -3)), Math.floor(k / -3));
- assertEquals(Math.floor(div(k, 4)), Math.floor(k / 4));
- assertEquals(Math.floor(div(k, -4)), Math.floor(k / -4));
- assertEquals(Math.floor(div(k, 5)), Math.floor(k / 5));
- assertEquals(Math.floor(div(k, -5)), Math.floor(k / -5));
- assertEquals(Math.floor(div(k, 6)), Math.floor(k / 6));
- assertEquals(Math.floor(div(k, -6)), Math.floor(k / -6));
- assertEquals(Math.floor(div(k, 7)), Math.floor(k / 7));
- assertEquals(Math.floor(div(k, -7)), Math.floor(k / -7));
- assertEquals(Math.floor(div(k, 8)), Math.floor(k / 8));
- assertEquals(Math.floor(div(k, -8)), Math.floor(k / -8));
- assertEquals(Math.floor(div(k, 9)), Math.floor(k / 9));
- assertEquals(Math.floor(div(k, -9)), Math.floor(k / -9));
- assertEquals(Math.floor(div(k, 10)), Math.floor(k / 10));
- assertEquals(Math.floor(div(k, -10)), Math.floor(k / -10));
- assertEquals(Math.floor(div(k, 11)), Math.floor(k / 11));
- assertEquals(Math.floor(div(k, -11)), Math.floor(k / -11));
- assertEquals(Math.floor(div(k, 12)), Math.floor(k / 12));
- assertEquals(Math.floor(div(k, -12)), Math.floor(k / -12));
- assertEquals(Math.floor(div(k, 13)), Math.floor(k / 13));
- assertEquals(Math.floor(div(k, -13)), Math.floor(k / -13));
- assertEquals(Math.floor(div(k, 14)), Math.floor(k / 14));
- assertEquals(Math.floor(div(k, -14)), Math.floor(k / -14));
- assertEquals(Math.floor(div(k, 15)), Math.floor(k / 15));
- assertEquals(Math.floor(div(k, -15)), Math.floor(k / -15));
- assertEquals(Math.floor(div(k, 16)), Math.floor(k / 16));
- assertEquals(Math.floor(div(k, -16)), Math.floor(k / -16));
- assertEquals(Math.floor(div(k, 17)), Math.floor(k / 17));
- assertEquals(Math.floor(div(k, -17)), Math.floor(k / -17));
- assertEquals(Math.floor(div(k, 18)), Math.floor(k / 18));
- assertEquals(Math.floor(div(k, -18)), Math.floor(k / -18));
- assertEquals(Math.floor(div(k, 19)), Math.floor(k / 19));
- assertEquals(Math.floor(div(k, -19)), Math.floor(k / -19));
- assertEquals(Math.floor(div(k, 20)), Math.floor(k / 20));
- assertEquals(Math.floor(div(k, -20)), Math.floor(k / -20));
- assertEquals(Math.floor(div(k, 21)), Math.floor(k / 21));
- assertEquals(Math.floor(div(k, -21)), Math.floor(k / -21));
- assertEquals(Math.floor(div(k, 22)), Math.floor(k / 22));
- assertEquals(Math.floor(div(k, -22)), Math.floor(k / -22));
- assertEquals(Math.floor(div(k, 23)), Math.floor(k / 23));
- assertEquals(Math.floor(div(k, -23)), Math.floor(k / -23));
- assertEquals(Math.floor(div(k, 24)), Math.floor(k / 24));
- assertEquals(Math.floor(div(k, -24)), Math.floor(k / -24));
- assertEquals(Math.floor(div(k, 25)), Math.floor(k / 25));
- assertEquals(Math.floor(div(k, -25)), Math.floor(k / -25));
+ if (k > exhaustive_limit) {
+ c += step;
+ k += c;
+ }
+ assertEquals(Math.floor(div(k, 1)), Math.floor(k / 1));
+ assertEquals(Math.floor(div(k, -1)), Math.floor(k / -1));
+ assertEquals(Math.floor(div(k, 2)), Math.floor(k / 2));
+ assertEquals(Math.floor(div(k, -2)), Math.floor(k / -2));
+ assertEquals(Math.floor(div(k, 3)), Math.floor(k / 3));
+ assertEquals(Math.floor(div(k, -3)), Math.floor(k / -3));
+ assertEquals(Math.floor(div(k, 4)), Math.floor(k / 4));
+ assertEquals(Math.floor(div(k, -4)), Math.floor(k / -4));
+ assertEquals(Math.floor(div(k, 5)), Math.floor(k / 5));
+ assertEquals(Math.floor(div(k, -5)), Math.floor(k / -5));
+ assertEquals(Math.floor(div(k, 6)), Math.floor(k / 6));
+ assertEquals(Math.floor(div(k, -6)), Math.floor(k / -6));
+ assertEquals(Math.floor(div(k, 7)), Math.floor(k / 7));
+ assertEquals(Math.floor(div(k, -7)), Math.floor(k / -7));
+ assertEquals(Math.floor(div(k, 8)), Math.floor(k / 8));
+ assertEquals(Math.floor(div(k, -8)), Math.floor(k / -8));
+ assertEquals(Math.floor(div(k, 9)), Math.floor(k / 9));
+ assertEquals(Math.floor(div(k, -9)), Math.floor(k / -9));
+ assertEquals(Math.floor(div(k, 10)), Math.floor(k / 10));
+ assertEquals(Math.floor(div(k, -10)), Math.floor(k / -10));
+ assertEquals(Math.floor(div(k, 11)), Math.floor(k / 11));
+ assertEquals(Math.floor(div(k, -11)), Math.floor(k / -11));
+ assertEquals(Math.floor(div(k, 12)), Math.floor(k / 12));
+ assertEquals(Math.floor(div(k, -12)), Math.floor(k / -12));
+ assertEquals(Math.floor(div(k, 13)), Math.floor(k / 13));
+ assertEquals(Math.floor(div(k, -13)), Math.floor(k / -13));
+ assertEquals(Math.floor(div(k, 14)), Math.floor(k / 14));
+ assertEquals(Math.floor(div(k, -14)), Math.floor(k / -14));
+ assertEquals(Math.floor(div(k, 15)), Math.floor(k / 15));
+ assertEquals(Math.floor(div(k, -15)), Math.floor(k / -15));
+ assertEquals(Math.floor(div(k, 16)), Math.floor(k / 16));
+ assertEquals(Math.floor(div(k, -16)), Math.floor(k / -16));
+ assertEquals(Math.floor(div(k, 17)), Math.floor(k / 17));
+ assertEquals(Math.floor(div(k, -17)), Math.floor(k / -17));
+ assertEquals(Math.floor(div(k, 18)), Math.floor(k / 18));
+ assertEquals(Math.floor(div(k, -18)), Math.floor(k / -18));
+ assertEquals(Math.floor(div(k, 19)), Math.floor(k / 19));
+ assertEquals(Math.floor(div(k, -19)), Math.floor(k / -19));
+ assertEquals(Math.floor(div(k, 20)), Math.floor(k / 20));
+ assertEquals(Math.floor(div(k, -20)), Math.floor(k / -20));
+ assertEquals(Math.floor(div(k, 21)), Math.floor(k / 21));
+ assertEquals(Math.floor(div(k, -21)), Math.floor(k / -21));
+ assertEquals(Math.floor(div(k, 22)), Math.floor(k / 22));
+ assertEquals(Math.floor(div(k, -22)), Math.floor(k / -22));
+ assertEquals(Math.floor(div(k, 23)), Math.floor(k / 23));
+ assertEquals(Math.floor(div(k, -23)), Math.floor(k / -23));
+ assertEquals(Math.floor(div(k, 24)), Math.floor(k / 24));
+ assertEquals(Math.floor(div(k, -24)), Math.floor(k / -24));
+ assertEquals(Math.floor(div(k, 25)), Math.floor(k / 25));
+ assertEquals(Math.floor(div(k, -25)), Math.floor(k / -25));
assertEquals(Math.floor(div(k, 125)), Math.floor(k / 125));
assertEquals(Math.floor(div(k, -125)), Math.floor(k / -125));
assertEquals(Math.floor(div(k, 625)), Math.floor(k / 625));
@@ -108,57 +110,60 @@ function test_div() {
}
c = 0;
for (var k = 0; k <= limit; k++) {
- if (k > exhaustive_limit) { c += step; k += c; }
- assertEquals(Math.floor(div(-k, 1)), Math.floor(-k / 1));
- assertEquals(Math.floor(div(-k, -1)), Math.floor(-k / -1));
- assertEquals(Math.floor(div(-k, 2)), Math.floor(-k / 2));
- assertEquals(Math.floor(div(-k, -2)), Math.floor(-k / -2));
- assertEquals(Math.floor(div(-k, 3)), Math.floor(-k / 3));
- assertEquals(Math.floor(div(-k, -3)), Math.floor(-k / -3));
- assertEquals(Math.floor(div(-k, 4)), Math.floor(-k / 4));
- assertEquals(Math.floor(div(-k, -4)), Math.floor(-k / -4));
- assertEquals(Math.floor(div(-k, 5)), Math.floor(-k / 5));
- assertEquals(Math.floor(div(-k, -5)), Math.floor(-k / -5));
- assertEquals(Math.floor(div(-k, 6)), Math.floor(-k / 6));
- assertEquals(Math.floor(div(-k, -6)), Math.floor(-k / -6));
- assertEquals(Math.floor(div(-k, 7)), Math.floor(-k / 7));
- assertEquals(Math.floor(div(-k, -7)), Math.floor(-k / -7));
- assertEquals(Math.floor(div(-k, 8)), Math.floor(-k / 8));
- assertEquals(Math.floor(div(-k, -8)), Math.floor(-k / -8));
- assertEquals(Math.floor(div(-k, 9)), Math.floor(-k / 9));
- assertEquals(Math.floor(div(-k, -9)), Math.floor(-k / -9));
- assertEquals(Math.floor(div(-k, 10)), Math.floor(-k / 10));
- assertEquals(Math.floor(div(-k, -10)), Math.floor(-k / -10));
- assertEquals(Math.floor(div(-k, 11)), Math.floor(-k / 11));
- assertEquals(Math.floor(div(-k, -11)), Math.floor(-k / -11));
- assertEquals(Math.floor(div(-k, 12)), Math.floor(-k / 12));
- assertEquals(Math.floor(div(-k, -12)), Math.floor(-k / -12));
- assertEquals(Math.floor(div(-k, 13)), Math.floor(-k / 13));
- assertEquals(Math.floor(div(-k, -13)), Math.floor(-k / -13));
- assertEquals(Math.floor(div(-k, 14)), Math.floor(-k / 14));
- assertEquals(Math.floor(div(-k, -14)), Math.floor(-k / -14));
- assertEquals(Math.floor(div(-k, 15)), Math.floor(-k / 15));
- assertEquals(Math.floor(div(-k, -15)), Math.floor(-k / -15));
- assertEquals(Math.floor(div(-k, 16)), Math.floor(-k / 16));
- assertEquals(Math.floor(div(-k, -16)), Math.floor(-k / -16));
- assertEquals(Math.floor(div(-k, 17)), Math.floor(-k / 17));
- assertEquals(Math.floor(div(-k, -17)), Math.floor(-k / -17));
- assertEquals(Math.floor(div(-k, 18)), Math.floor(-k / 18));
- assertEquals(Math.floor(div(-k, -18)), Math.floor(-k / -18));
- assertEquals(Math.floor(div(-k, 19)), Math.floor(-k / 19));
- assertEquals(Math.floor(div(-k, -19)), Math.floor(-k / -19));
- assertEquals(Math.floor(div(-k, 20)), Math.floor(-k / 20));
- assertEquals(Math.floor(div(-k, -20)), Math.floor(-k / -20));
- assertEquals(Math.floor(div(-k, 21)), Math.floor(-k / 21));
- assertEquals(Math.floor(div(-k, -21)), Math.floor(-k / -21));
- assertEquals(Math.floor(div(-k, 22)), Math.floor(-k / 22));
- assertEquals(Math.floor(div(-k, -22)), Math.floor(-k / -22));
- assertEquals(Math.floor(div(-k, 23)), Math.floor(-k / 23));
- assertEquals(Math.floor(div(-k, -23)), Math.floor(-k / -23));
- assertEquals(Math.floor(div(-k, 24)), Math.floor(-k / 24));
- assertEquals(Math.floor(div(-k, -24)), Math.floor(-k / -24));
- assertEquals(Math.floor(div(-k, 25)), Math.floor(-k / 25));
- assertEquals(Math.floor(div(-k, -25)), Math.floor(-k / -25));
+ if (k > exhaustive_limit) {
+ c += step;
+ k += c;
+ }
+ assertEquals(Math.floor(div(-k, 1)), Math.floor(-k / 1));
+ assertEquals(Math.floor(div(-k, -1)), Math.floor(-k / -1));
+ assertEquals(Math.floor(div(-k, 2)), Math.floor(-k / 2));
+ assertEquals(Math.floor(div(-k, -2)), Math.floor(-k / -2));
+ assertEquals(Math.floor(div(-k, 3)), Math.floor(-k / 3));
+ assertEquals(Math.floor(div(-k, -3)), Math.floor(-k / -3));
+ assertEquals(Math.floor(div(-k, 4)), Math.floor(-k / 4));
+ assertEquals(Math.floor(div(-k, -4)), Math.floor(-k / -4));
+ assertEquals(Math.floor(div(-k, 5)), Math.floor(-k / 5));
+ assertEquals(Math.floor(div(-k, -5)), Math.floor(-k / -5));
+ assertEquals(Math.floor(div(-k, 6)), Math.floor(-k / 6));
+ assertEquals(Math.floor(div(-k, -6)), Math.floor(-k / -6));
+ assertEquals(Math.floor(div(-k, 7)), Math.floor(-k / 7));
+ assertEquals(Math.floor(div(-k, -7)), Math.floor(-k / -7));
+ assertEquals(Math.floor(div(-k, 8)), Math.floor(-k / 8));
+ assertEquals(Math.floor(div(-k, -8)), Math.floor(-k / -8));
+ assertEquals(Math.floor(div(-k, 9)), Math.floor(-k / 9));
+ assertEquals(Math.floor(div(-k, -9)), Math.floor(-k / -9));
+ assertEquals(Math.floor(div(-k, 10)), Math.floor(-k / 10));
+ assertEquals(Math.floor(div(-k, -10)), Math.floor(-k / -10));
+ assertEquals(Math.floor(div(-k, 11)), Math.floor(-k / 11));
+ assertEquals(Math.floor(div(-k, -11)), Math.floor(-k / -11));
+ assertEquals(Math.floor(div(-k, 12)), Math.floor(-k / 12));
+ assertEquals(Math.floor(div(-k, -12)), Math.floor(-k / -12));
+ assertEquals(Math.floor(div(-k, 13)), Math.floor(-k / 13));
+ assertEquals(Math.floor(div(-k, -13)), Math.floor(-k / -13));
+ assertEquals(Math.floor(div(-k, 14)), Math.floor(-k / 14));
+ assertEquals(Math.floor(div(-k, -14)), Math.floor(-k / -14));
+ assertEquals(Math.floor(div(-k, 15)), Math.floor(-k / 15));
+ assertEquals(Math.floor(div(-k, -15)), Math.floor(-k / -15));
+ assertEquals(Math.floor(div(-k, 16)), Math.floor(-k / 16));
+ assertEquals(Math.floor(div(-k, -16)), Math.floor(-k / -16));
+ assertEquals(Math.floor(div(-k, 17)), Math.floor(-k / 17));
+ assertEquals(Math.floor(div(-k, -17)), Math.floor(-k / -17));
+ assertEquals(Math.floor(div(-k, 18)), Math.floor(-k / 18));
+ assertEquals(Math.floor(div(-k, -18)), Math.floor(-k / -18));
+ assertEquals(Math.floor(div(-k, 19)), Math.floor(-k / 19));
+ assertEquals(Math.floor(div(-k, -19)), Math.floor(-k / -19));
+ assertEquals(Math.floor(div(-k, 20)), Math.floor(-k / 20));
+ assertEquals(Math.floor(div(-k, -20)), Math.floor(-k / -20));
+ assertEquals(Math.floor(div(-k, 21)), Math.floor(-k / 21));
+ assertEquals(Math.floor(div(-k, -21)), Math.floor(-k / -21));
+ assertEquals(Math.floor(div(-k, 22)), Math.floor(-k / 22));
+ assertEquals(Math.floor(div(-k, -22)), Math.floor(-k / -22));
+ assertEquals(Math.floor(div(-k, 23)), Math.floor(-k / 23));
+ assertEquals(Math.floor(div(-k, -23)), Math.floor(-k / -23));
+ assertEquals(Math.floor(div(-k, 24)), Math.floor(-k / 24));
+ assertEquals(Math.floor(div(-k, -24)), Math.floor(-k / -24));
+ assertEquals(Math.floor(div(-k, 25)), Math.floor(-k / 25));
+ assertEquals(Math.floor(div(-k, -25)), Math.floor(-k / -25));
assertEquals(Math.floor(div(-k, 125)), Math.floor(-k / 125));
assertEquals(Math.floor(div(-k, -125)), Math.floor(-k / -125));
assertEquals(Math.floor(div(-k, 625)), Math.floor(-k / 625));
@@ -168,18 +173,22 @@ function test_div() {
// Use (values[key] | 0) to force the integer type.
for (var i = 0; i < values.length; i++) {
for (var j = 0; j < values.length; j++) {
- assertEquals(Math.floor(div((values[i] | 0), (values[j] | 0))),
- Math.floor((values[i] | 0) / (values[j] | 0)));
- assertEquals(Math.floor(div(-(values[i] | 0), (values[j] | 0))),
- Math.floor(-(values[i] | 0) / (values[j] | 0)));
- assertEquals(Math.floor(div((values[i] | 0), -(values[j] | 0))),
- Math.floor((values[i] | 0) / -(values[j] | 0)));
- assertEquals(Math.floor(div(-(values[i] | 0), -(values[j] | 0))),
- Math.floor(-(values[i] | 0) / -(values[j] | 0)));
+ assertEquals(
+ Math.floor(div(values[i] | 0, values[j] | 0)),
+ Math.floor((values[i] | 0) / (values[j] | 0)));
+ assertEquals(
+ Math.floor(div(-(values[i] | 0), values[j] | 0)),
+ Math.floor(-(values[i] | 0) / (values[j] | 0)));
+ assertEquals(
+ Math.floor(div(values[i] | 0, -(values[j] | 0))),
+ Math.floor((values[i] | 0) / -(values[j] | 0)));
+ assertEquals(
+ Math.floor(div(-(values[i] | 0), -(values[j] | 0))),
+ Math.floor(-(values[i] | 0) / -(values[j] | 0)));
}
}
-}
-
+};
+%PrepareFunctionForOptimization(test_div);
test_div();
%OptimizeFunctionOnNextCall(test_div);
test_div();
@@ -189,18 +198,22 @@ var values2 = [1, 3, 10, 99, 100, 101, 0x7fffffff];
function test_div2() {
for (var i = 0; i < values2.length; i++) {
for (var j = 0; j < values2.length; j++) {
- assertEquals(Math.floor(div((values2[i] | 0), (values2[j] | 0))),
- Math.floor((values2[i] | 0) / (values2[j] | 0)));
- assertEquals(Math.floor(div(-(values2[i] | 0), (values2[j] | 0))),
- Math.floor(-(values2[i] | 0) / (values2[j] | 0)));
- assertEquals(Math.floor(div((values2[i] | 0), -(values2[j] | 0))),
- Math.floor((values2[i] | 0) / -(values2[j] | 0)));
- assertEquals(Math.floor(div(-(values2[i] | 0), -(values2[j] | 0))),
- Math.floor(-(values2[i] | 0) / -(values2[j] | 0)));
+ assertEquals(
+ Math.floor(div(values2[i] | 0, values2[j] | 0)),
+ Math.floor((values2[i] | 0) / (values2[j] | 0)));
+ assertEquals(
+ Math.floor(div(-(values2[i] | 0), values2[j] | 0)),
+ Math.floor(-(values2[i] | 0) / (values2[j] | 0)));
+ assertEquals(
+ Math.floor(div(values2[i] | 0, -(values2[j] | 0))),
+ Math.floor((values2[i] | 0) / -(values2[j] | 0)));
+ assertEquals(
+ Math.floor(div(-(values2[i] | 0), -(values2[j] | 0))),
+ Math.floor(-(values2[i] | 0) / -(values2[j] | 0)));
}
}
-}
-
+};
+%PrepareFunctionForOptimization(test_div2);
test_div2();
%OptimizeFunctionOnNextCall(test_div2);
test_div2();
@@ -227,23 +240,22 @@ function test_div_deopt_minus_zero() {
for (var i = 0; i < 2; ++i) {
assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
}
-}
-
+};
+%PrepareFunctionForOptimization(test_div_deopt_minus_zero);
function test_div_deopt_overflow() {
for (var i = 0; i < 2; ++i) {
// We use '| 0' to force the representation to int32.
- assertEquals(-min_int_in_array[0],
- Math.floor((min_int_in_array[0] | 0) / -1));
+ assertEquals(
+ -min_int_in_array[0], Math.floor((min_int_in_array[0] | 0) / -1));
}
-}
-
+};
+%PrepareFunctionForOptimization(test_div_deopt_overflow);
function test_div_deopt_div_by_zero() {
for (var i = 0; i < 2; ++i) {
- assertEquals(div(i, 0),
- Math.floor(i / 0));
+ assertEquals(div(i, 0), Math.floor(i / 0));
}
-}
-
+};
+%PrepareFunctionForOptimization(test_div_deopt_div_by_zero);
test_div_deopt_minus_zero();
test_div_deopt_overflow();
test_div_deopt_div_by_zero();
@@ -257,26 +269,26 @@ test_div_deopt_div_by_zero();
// Test for dividing by variable.
function test_div_deopt_minus_zero_v() {
for (var i = 0; i < 2; ++i) {
- assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) /
- neg_one_in_array[0])));
+ assertTrue(IsNegativeZero(
+ Math.floor((zero_in_array[0] | 0) / neg_one_in_array[0])));
}
-}
-
+};
+%PrepareFunctionForOptimization(test_div_deopt_minus_zero_v);
function test_div_deopt_overflow_v() {
for (var i = 0; i < 2; ++i) {
// We use '| 0' to force the representation to int32.
- assertEquals(-min_int_in_array[0],
- Math.floor((min_int_in_array[0] | 0) / neg_one_in_array[0]));
+ assertEquals(
+ -min_int_in_array[0],
+ Math.floor((min_int_in_array[0] | 0) / neg_one_in_array[0]));
}
-}
-
+};
+%PrepareFunctionForOptimization(test_div_deopt_overflow_v);
function test_div_deopt_div_by_zero_v() {
for (var i = 0; i < 2; ++i) {
- assertEquals(div(i, 0),
- Math.floor(i / zero_in_array[0]));
+ assertEquals(div(i, 0), Math.floor(i / zero_in_array[0]));
}
-}
-
+};
+%PrepareFunctionForOptimization(test_div_deopt_div_by_zero_v);
test_div_deopt_minus_zero_v();
test_div_deopt_overflow_v();
test_div_deopt_div_by_zero_v();
@@ -291,8 +303,8 @@ test_div_deopt_div_by_zero_v();
// Test for flooring division with negative dividend.
function flooring_div_by_3(y) {
return Math.floor(y / 3);
-}
-
+};
+%PrepareFunctionForOptimization(flooring_div_by_3);
assertEquals(-1, flooring_div_by_3(-2));
assertEquals(-1, flooring_div_by_3(-2));
%OptimizeFunctionOnNextCall(flooring_div_by_3);
diff --git a/deps/v8/test/mjsunit/math-floor-part2.js b/deps/v8/test/mjsunit/math-floor-part2.js
index 658979841a..c404f6eab6 100644
--- a/deps/v8/test/mjsunit/math-floor-part2.js
+++ b/deps/v8/test/mjsunit/math-floor-part2.js
@@ -32,6 +32,7 @@ var test_id = 0;
function testFloor(expect, input) {
var test = new Function('n',
'"' + (test_id++) + '";return Math.floor(n)');
+ %PrepareFunctionForOptimization(test);
assertEquals(expect, test(input));
assertEquals(expect, test(input));
assertEquals(expect, test(input));
@@ -41,6 +42,7 @@ function testFloor(expect, input) {
var test_double_input = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(+n)');
+ %PrepareFunctionForOptimization(test_double_input);
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
@@ -50,6 +52,7 @@ function testFloor(expect, input) {
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(n) + -0.0');
+ %PrepareFunctionForOptimization(test_double_output);
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
@@ -59,6 +62,7 @@ function testFloor(expect, input) {
var test_via_ceil = new Function(
'n',
'"' + (test_id++) + '";return -Math.ceil(-n)');
+ %PrepareFunctionForOptimization(test_via_ceil);
assertEquals(expect, test_via_ceil(input));
assertEquals(expect, test_via_ceil(input));
assertEquals(expect, test_via_ceil(input));
@@ -69,6 +73,7 @@ function testFloor(expect, input) {
var test_via_trunc = new Function(
'n',
'"' + (test_id++) + '";return Math.trunc(n)');
+ %PrepareFunctionForOptimization(test_via_trunc);
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
diff --git a/deps/v8/test/mjsunit/math-floor-part3.js b/deps/v8/test/mjsunit/math-floor-part3.js
index 4ce645ea0d..c8a37270c0 100644
--- a/deps/v8/test/mjsunit/math-floor-part3.js
+++ b/deps/v8/test/mjsunit/math-floor-part3.js
@@ -32,6 +32,7 @@ var test_id = 0;
function testFloor(expect, input) {
var test = new Function('n',
'"' + (test_id++) + '";return Math.floor(n)');
+ %PrepareFunctionForOptimization(test);
assertEquals(expect, test(input));
assertEquals(expect, test(input));
assertEquals(expect, test(input));
@@ -41,6 +42,7 @@ function testFloor(expect, input) {
var test_double_input = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(+n)');
+ %PrepareFunctionForOptimization(test_double_input);
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
@@ -50,6 +52,7 @@ function testFloor(expect, input) {
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(n) + -0.0');
+ %PrepareFunctionForOptimization(test_double_output);
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
@@ -59,6 +62,7 @@ function testFloor(expect, input) {
var test_via_ceil = new Function(
'n',
'"' + (test_id++) + '";return -Math.ceil(-n)');
+ %PrepareFunctionForOptimization(test_via_ceil);
assertEquals(expect, test_via_ceil(input));
assertEquals(expect, test_via_ceil(input));
assertEquals(expect, test_via_ceil(input));
@@ -69,6 +73,7 @@ function testFloor(expect, input) {
var test_via_trunc = new Function(
'n',
'"' + (test_id++) + '";return Math.trunc(n)');
+ %PrepareFunctionForOptimization(test_via_trunc);
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
diff --git a/deps/v8/test/mjsunit/math-imul.js b/deps/v8/test/mjsunit/math-imul.js
index e05c000c64..db9baae236 100644
--- a/deps/v8/test/mjsunit/math-imul.js
+++ b/deps/v8/test/mjsunit/math-imul.js
@@ -29,20 +29,26 @@
var imul_func = Math.imul;
function imul_polyfill(a, b) {
- var ah = (a >>> 16) & 0xffff;
+ var ah = a >>> 16 & 0xffff;
var al = a & 0xffff;
- var bh = (b >>> 16) & 0xffff;
+ var bh = b >>> 16 & 0xffff;
var bl = b & 0xffff;
- return ((al * bl) + (((ah * bl + al * bh) << 16) >>> 0) | 0);
+ return al * bl + (ah * bl + al * bh << 16 >>> 0) | 0;
}
function TestMathImul(expected, a, b) {
- function imul_meth_closure(a, b) { return Math.imul(a, b); }
- function imul_func_closure(a, b) { return imul_func(a, b); }
+ function imul_meth_closure(a, b) {
+ return Math.imul(a, b);
+ };
+ %PrepareFunctionForOptimization(imul_meth_closure);
+ function imul_func_closure(a, b) {
+ return imul_func(a, b);
+ }
// Test reference implementation.
+ ;
+ %PrepareFunctionForOptimization(imul_func_closure);
assertEquals(expected, imul_polyfill(a, b));
-
// Test direct method call.
assertEquals(expected, Math.imul(a, b));
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 4fb95b7aa7..8582b38036 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -390,25 +390,13 @@ var prettyPrinted;
}
assertSame = function assertSame(expected, found, name_opt) {
- // TODO(mstarzinger): We should think about using Harmony's egal operator
- // or the function equivalent Object.is() here.
- if (found === expected) {
- if (expected !== 0 || (1 / expected) === (1 / found)) return;
- } else if ((expected !== expected) && (found !== found)) {
- return;
- }
+ if (Object.is(expected, found)) return;
fail(prettyPrinted(expected), found, name_opt);
};
assertNotSame = function assertNotSame(expected, found, name_opt) {
- // TODO(mstarzinger): We should think about using Harmony's egal operator
- // or the function equivalent Object.is() here.
- if (found !== expected) {
- if (expected === 0 || (1 / expected) !== (1 / found)) return;
- } else if (!((expected !== expected) && (found !== found))) {
- return;
- }
- fail(prettyPrinted(expected), found, name_opt);
+ if (!Object.is(expected, found)) return;
+ fail("not same as " + prettyPrinted(expected), found, name_opt);
}
assertEquals = function assertEquals(expected, found, name_opt) {
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index b55e8d790c..bdcf3cf18d 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -57,6 +57,9 @@
# Issue 8505: Math.pow is incorrect for asm.js
'regress/wasm/regress-8505': [SKIP],
+ # Issue 9380: Memory leaks of shared WebAssembly.Memory objects
+ 'wasm/shared-memory-worker-gc': [SKIP],
+
##############################################################################
# Too slow in debug mode with --stress-opt mode.
'regress/regress-create-exception': [PASS, ['mode == debug', SKIP]],
@@ -77,6 +80,10 @@
# Too slow in debug mode, due to large allocations.
'regress/regress-crbug-941743': [PASS, ['mode == debug', SKIP], ['(arch == arm or arch == arm64 or arch == mipsel or arch == mips64el) and simulator_run == True', SKIP]],
+ # Too slow in debug mode BUG(v8:9506): times out.
+ 'wasm/shared-memory-worker-explicit-gc-stress': [PASS, ['mode == debug', SKIP], ['tsan', SKIP]],
+ 'wasm/shared-memory-worker-gc-stress': [PASS, ['mode == debug', SKIP]],
+
##############################################################################
# Only RegExp stuff tested, no need for extensive optimizing compiler tests.
'regexp-global': [PASS, NO_VARIANTS],
@@ -241,14 +248,10 @@
# BUG(v8:9260)
'tools/profviz': [SKIP],
+
# Test doesn't work on 32-bit architectures (it would require a
# regexp pattern with too many captures).
'regress/regress-976627': [FAIL, ['arch == x64 or arch == arm64 or arch == mips64el or arch == ppc64 or arch == s390x', PASS]],
-
- # To be re-enabled once https://crbug.com/v8/9534 is fixed.
- 'es6/regress/regress-crbug-465671': [SKIP],
- 'es6/regress/regress-crbug-465671-null': [SKIP],
- 'regress/regress-543994': [SKIP],
}], # ALWAYS
['novfp3 == True', {
@@ -350,6 +353,7 @@
'regress/regress-crbug-746835': [SKIP],
'regress/regress-crbug-772056': [SKIP],
'regress/regress-crbug-816961': [SKIP],
+ 'regress/regress-crbug-969498': [SKIP],
'regress/wasm/*': [SKIP],
'regress/regress-8947': [SKIP],
'regress/regress-9165': [SKIP],
@@ -364,6 +368,7 @@
'asm/global-imports': [SKIP],
'asm/regress-913822': [SKIP],
'asm/regress-937650': [SKIP],
+ 'asm/regress-9531': [SKIP],
'asm/return-types': [SKIP],
'regress/regress-599719': [SKIP],
'regress/regress-6196': [SKIP],
@@ -372,6 +377,7 @@
'regress/regress-6838-3': [SKIP],
'regress/regress-9022': [SKIP],
'regress/regress-crbug-934138': [SKIP],
+ 'regress/regress-crbug-976934': [SKIP],
# Timeouts in lite / jitless mode.
'asm/embenchen/*': [SKIP],
@@ -448,6 +454,9 @@
# OOMing tests
'regress/regress-500980': [SKIP],
+ # BUG(v8:9337).
+ 'compiler/regress-9017': [SKIP],
+
# Slow tests.
'array-concat': [PASS, SLOW],
'array-indexing': [PASS, SLOW],
@@ -573,13 +582,16 @@
'regress/regress-599414-array-concat-fast-path': [PASS, SLOW],
# BUG(v8:9026). Flaky timeouts.
- 'es6/classes': [SKIP],
+ 'es6/large-classes-properties': [SKIP],
# Slow tests.
'compiler/regress-9017': [PASS, SLOW],
'es6/block-conflicts-sloppy': [PASS, SLOW],
'math-floor-part1': [PASS, SLOW],
'regress/regress-500980': [PASS, SLOW],
+
+ # BUG(v8:9506): times out.
+ 'wasm/shared-memory-worker-explicit-gc-stress': [SKIP],
}], # 'tsan == True'
##############################################################################
@@ -613,6 +625,9 @@
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
+ # BUG(v8:9337).
+ 'compiler/regress-9017': [SKIP],
+
# Slow tests.
'array-sort': [PASS, SLOW],
'compiler/osr-with-args': [PASS, SLOW],
@@ -770,6 +785,7 @@
'regress/regress-748069': [FAIL],
'regress/regress-752764': [FAIL],
# Flaky OOM:
+ 'regress/regress-779407': [SKIP],
'regress/regress-852258': [SKIP],
}], # 'system == android'
@@ -784,6 +800,12 @@
# Slow tests.
'es6/typedarray-of': [PASS, SLOW],
'regress/regress-crbug-854299': [PASS, SLOW],
+
+ # Runs flakily OOM because multiple isolates are involved which create many
+ # wasm memories each. Before running OOM on a wasm memory allocation we
+ # trigger a GC, but only in the isolate allocating the new memory.
+ 'wasm/module-memory': [SKIP],
+ 'wasm/shared-memory-gc-stress': [SKIP],
}], # 'isolates'
##############################################################################
@@ -827,17 +849,20 @@
'code-coverage-ad-hoc': [SKIP],
'code-coverage-precise': [SKIP],
+ # Flaky under GC stress (sometimes precludes the tested optimization)
+ 'compiler/load-elimination-const-field': [SKIP],
+
# Passes incompatible arguments.
'd8/d8-arguments': [SKIP],
# Fails allocation on tsan.
- 'es6/classes': [PASS, ['tsan', SKIP]],
'regress/regress-779407': [PASS, ['tsan', SKIP]],
# Tests that fail some assertions due to checking internal state sensitive
# to GC.
'compiler/native-context-specialization-hole-check': [SKIP],
'regress/regress-trap-allocation-memento': [SKIP],
+ 'regress/regress-v8-9267-*': [SKIP],
'shared-function-tier-up-turbo': [SKIP],
}], # 'gc_fuzzer'
@@ -904,6 +929,12 @@
}], # 'arch == ppc64'
##############################################################################
+['system == aix', {
+ # stack overflow
+ 'regress/regress-crbug-178790': [PASS, ['mode == debug', SKIP]],
+}], # 'system == aix'
+
+##############################################################################
['arch == s390x', {
# stack overflow
'regress/regress-crbug-178790': [PASS, ['mode == debug', SKIP]],
@@ -921,10 +952,9 @@
# Flaky crash on Odroid devices: https://crbug.com/v8/7678
'regress/regress-336820': [PASS, ['arch == arm and not simulator_run', SKIP]],
- # Too slow for TSAN in stress mode.
# Goes OOM on ODROID devices: https://crbug.com/v8/9026
# Too slow on PPC: https://crbug.com/v8/9246
- 'es6/classes': [PASS, SLOW, ['tsan or (arch == arm and not simulator_run) or arch in [ppc, ppc64]', SKIP]],
+ 'es6/large-classes-properties': [PASS, SLOW, ['(arch == arm and not simulator_run) or arch in [ppc, ppc64]', SKIP]],
'regress/regress-1122': [PASS, ['tsan', SKIP]],
# Too slow with gc_stress on arm64.
@@ -944,7 +974,7 @@
##############################################################################
['variant == stress and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
- 'es6/classes': [SKIP],
+ 'es6/large-classes-properties': [SKIP],
'generated-transition-stub': [SKIP],
'regress/regress-336820': [SKIP],
'wasm/grow-memory': [SKIP],
@@ -1053,10 +1083,4 @@
'*': [SKIP],
}], # variant == jitless and not embedded_builtins
-##############################################################################
-['variant == future', {
- # crbug.com/v8/9094
- 'compiler/constant-fold-cow-array': [SKIP],
-}], # variant == future
-
]
diff --git a/deps/v8/test/mjsunit/modules-circular-valid.js b/deps/v8/test/mjsunit/modules-circular-valid.js
deleted file mode 100644
index e381eefdbc..0000000000
--- a/deps/v8/test/mjsunit/modules-circular-valid.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-export {a as b} from "modules-skip-circular-valid.js";
diff --git a/deps/v8/test/message/fail/modules-cycle4.js b/deps/v8/test/mjsunit/modules-circular-valid.mjs
index 2e22757e54..8d3977f148 100644
--- a/deps/v8/test/message/fail/modules-cycle4.js
+++ b/deps/v8/test/mjsunit/modules-circular-valid.mjs
@@ -1,7 +1,5 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {x} from "modules-cycle3.js";
+export {a as b} from "modules-skip-circular-valid.mjs";
diff --git a/deps/v8/test/mjsunit/modules-cycle.js b/deps/v8/test/mjsunit/modules-cycle.js
deleted file mode 100644
index 6b775a568e..0000000000
--- a/deps/v8/test/mjsunit/modules-cycle.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import "modules-skip-cycle.js";
-export {a as foo} from "modules-skip-1.js"
diff --git a/deps/v8/test/mjsunit/modules-cycle.mjs b/deps/v8/test/mjsunit/modules-cycle.mjs
new file mode 100644
index 0000000000..730ac87178
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-cycle.mjs
@@ -0,0 +1,6 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "modules-skip-cycle.mjs";
+export {a as foo} from "modules-skip-1.mjs"
diff --git a/deps/v8/test/mjsunit/modules-default-name1.js b/deps/v8/test/mjsunit/modules-default-name1.mjs
index 54c3afeec5..0943853690 100644
--- a/deps/v8/test/mjsunit/modules-default-name1.js
+++ b/deps/v8/test/mjsunit/modules-default-name1.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name1.js";
+import {default as goo} from "modules-skip-default-name1.mjs";
assertEquals(
{value: "gaga", configurable: true, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name2.js b/deps/v8/test/mjsunit/modules-default-name2.mjs
index 51e64139ca..c89536e579 100644
--- a/deps/v8/test/mjsunit/modules-default-name2.js
+++ b/deps/v8/test/mjsunit/modules-default-name2.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name2.js";
+import {default as goo} from "modules-skip-default-name2.mjs";
assertEquals(
{value: "gaga", configurable: true, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name6.js b/deps/v8/test/mjsunit/modules-default-name3.mjs
index 1ac1bcb0c9..cad829c24e 100644
--- a/deps/v8/test/mjsunit/modules-default-name6.js
+++ b/deps/v8/test/mjsunit/modules-default-name3.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name6.js";
+import {default as goo} from "modules-skip-default-name3.mjs";
assertEquals(
{value: "default", configurable: true, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name5.js b/deps/v8/test/mjsunit/modules-default-name4.mjs
index d6e0e5c049..c3b8ea0e25 100644
--- a/deps/v8/test/mjsunit/modules-default-name5.js
+++ b/deps/v8/test/mjsunit/modules-default-name4.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name5.js";
+import {default as goo} from "modules-skip-default-name4.mjs";
assertEquals(
{value: "Gaga", configurable: true, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name4.js b/deps/v8/test/mjsunit/modules-default-name5.mjs
index c69da9d02b..ac55b0f794 100644
--- a/deps/v8/test/mjsunit/modules-default-name4.js
+++ b/deps/v8/test/mjsunit/modules-default-name5.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name4.js";
+import {default as goo} from "modules-skip-default-name5.mjs";
assertEquals(
{value: "Gaga", configurable: true, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name8.js b/deps/v8/test/mjsunit/modules-default-name6.mjs
index b192a2544a..eec249315a 100644
--- a/deps/v8/test/mjsunit/modules-default-name8.js
+++ b/deps/v8/test/mjsunit/modules-default-name6.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name8.js";
+import {default as goo} from "modules-skip-default-name6.mjs";
assertEquals(
{value: "default", configurable: true, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name7.js b/deps/v8/test/mjsunit/modules-default-name7.mjs
index 82904d4212..f8799bacd7 100644
--- a/deps/v8/test/mjsunit/modules-default-name7.js
+++ b/deps/v8/test/mjsunit/modules-default-name7.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name7.js";
+import {default as goo} from "modules-skip-default-name7.mjs";
let descr = Reflect.getOwnPropertyDescriptor(goo, 'name');
assertEquals(descr,
{value: descr.value, configurable: true, writable: true, enumerable: false});
diff --git a/deps/v8/test/mjsunit/modules-default-name3.js b/deps/v8/test/mjsunit/modules-default-name8.mjs
index caab3eb32a..1c3db2dffa 100644
--- a/deps/v8/test/mjsunit/modules-default-name3.js
+++ b/deps/v8/test/mjsunit/modules-default-name8.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name3.js";
+import {default as goo} from "modules-skip-default-name8.mjs";
assertEquals(
{value: "default", configurable: true, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default-name9.js b/deps/v8/test/mjsunit/modules-default-name9.mjs
index 3ba711f47e..27083f9463 100644
--- a/deps/v8/test/mjsunit/modules-default-name9.js
+++ b/deps/v8/test/mjsunit/modules-default-name9.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {default as goo} from "modules-skip-default-name9.js";
+import {default as goo} from "modules-skip-default-name9.mjs";
assertEquals(
{value: "default", configurable: true, writable: false, enumerable: false},
Reflect.getOwnPropertyDescriptor(goo, 'name'));
diff --git a/deps/v8/test/mjsunit/modules-default.js b/deps/v8/test/mjsunit/modules-default.mjs
index 304703b246..facbbd07aa 100644
--- a/deps/v8/test/mjsunit/modules-default.js
+++ b/deps/v8/test/mjsunit/modules-default.mjs
@@ -1,11 +1,9 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import foo from "modules-skip-1.js";
+import foo from "modules-skip-1.mjs";
assertEquals(42, foo);
-import {default as gaga} from "modules-skip-1.js";
+import {default as gaga} from "modules-skip-1.mjs";
assertEquals(42, gaga);
diff --git a/deps/v8/test/mjsunit/modules-empty-import1.js b/deps/v8/test/mjsunit/modules-empty-import1.mjs
index 60498f187a..11bb94df20 100644
--- a/deps/v8/test/mjsunit/modules-empty-import1.js
+++ b/deps/v8/test/mjsunit/modules-empty-import1.mjs
@@ -1,9 +1,7 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import "modules-skip-empty-import.js";
-import {counter} from "modules-skip-empty-import-aux.js";
+import "modules-skip-empty-import.mjs";
+import {counter} from "modules-skip-empty-import-aux.mjs";
assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-empty-import2.js b/deps/v8/test/mjsunit/modules-empty-import2.mjs
index 8862c94c92..aab6c2a286 100644
--- a/deps/v8/test/mjsunit/modules-empty-import2.js
+++ b/deps/v8/test/mjsunit/modules-empty-import2.mjs
@@ -1,9 +1,7 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {} from "modules-skip-empty-import.js";
-import {counter} from "modules-skip-empty-import-aux.js";
+import {} from "modules-skip-empty-import.mjs";
+import {counter} from "modules-skip-empty-import-aux.mjs";
assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-empty-import3.js b/deps/v8/test/mjsunit/modules-empty-import3.mjs
index 0503891fce..be7fa7e357 100644
--- a/deps/v8/test/mjsunit/modules-empty-import3.js
+++ b/deps/v8/test/mjsunit/modules-empty-import3.mjs
@@ -1,9 +1,7 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-export {} from "modules-skip-empty-import.js";
-import {counter} from "modules-skip-empty-import-aux.js";
+export {} from "modules-skip-empty-import.mjs";
+import {counter} from "modules-skip-empty-import-aux.mjs";
assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-empty-import4.js b/deps/v8/test/mjsunit/modules-empty-import4.js
deleted file mode 100644
index 0cea643414..0000000000
--- a/deps/v8/test/mjsunit/modules-empty-import4.js
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import "modules-skip-empty-import.js";
-import {} from "modules-skip-empty-import.js";
-export {} from "modules-skip-empty-import.js";
-import {counter} from "modules-skip-empty-import-aux.js";
-assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-empty-import4.mjs b/deps/v8/test/mjsunit/modules-empty-import4.mjs
new file mode 100644
index 0000000000..55576c03d6
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-empty-import4.mjs
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "modules-skip-empty-import.mjs";
+import {} from "modules-skip-empty-import.mjs";
+export {} from "modules-skip-empty-import.mjs";
+import {counter} from "modules-skip-empty-import-aux.mjs";
+assertEquals(1, counter);
diff --git a/deps/v8/test/mjsunit/modules-error-trace.js b/deps/v8/test/mjsunit/modules-error-trace.mjs
index bbf83c510d..12a6f332bc 100644
--- a/deps/v8/test/mjsunit/modules-error-trace.js
+++ b/deps/v8/test/mjsunit/modules-error-trace.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
// Make sure the generator resume function doesn't show up in the stack trace.
const stack = (new Error).stack;
diff --git a/deps/v8/test/mjsunit/modules-export-star-as1.js b/deps/v8/test/mjsunit/modules-export-star-as1.mjs
index 1696c1c84d..41aac3d659 100644
--- a/deps/v8/test/mjsunit/modules-export-star-as1.js
+++ b/deps/v8/test/mjsunit/modules-export-star-as1.mjs
@@ -1,10 +1,8 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --harmony-namespace-exports
-import {foo} from "./modules-skip-8.js";
+import {foo} from "./modules-skip-8.mjs";
assertEquals(42, foo.default);
assertEquals(1, foo.get_a());
diff --git a/deps/v8/test/mjsunit/modules-export-star-as2.js b/deps/v8/test/mjsunit/modules-export-star-as2.mjs
index 57828ebd67..86be1c2b7c 100644
--- a/deps/v8/test/mjsunit/modules-export-star-as2.js
+++ b/deps/v8/test/mjsunit/modules-export-star-as2.mjs
@@ -1,14 +1,12 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --harmony-namespace-exports
-export * as self from "./modules-export-star-as2.js";
-export * as self_again from "./modules-export-star-as2.js";
-import {self as myself} from "./modules-export-star-as2.js";
-import {self_again as myself_again} from "./modules-export-star-as2.js";
+export * as self from "./modules-export-star-as2.mjs";
+export * as self_again from "./modules-export-star-as2.mjs";
+import {self as myself} from "./modules-export-star-as2.mjs";
+import {self_again as myself_again} from "./modules-export-star-as2.mjs";
assertEquals(["self", "self_again"], Object.keys(myself));
assertEquals(myself, myself.self);
diff --git a/deps/v8/test/mjsunit/modules-export-star-as3.js b/deps/v8/test/mjsunit/modules-export-star-as3.mjs
index 4077cbd9c6..4e00aefe5c 100644
--- a/deps/v8/test/mjsunit/modules-export-star-as3.js
+++ b/deps/v8/test/mjsunit/modules-export-star-as3.mjs
@@ -1,13 +1,11 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --harmony-namespace-exports
let self = 42;
-export * as self from "./modules-export-star-as3.js";
-import {self as myself} from "./modules-export-star-as3.js";
+export * as self from "./modules-export-star-as3.mjs";
+import {self as myself} from "./modules-export-star-as3.mjs";
assertEquals(["self"], Object.keys(myself));
assertEquals(myself, myself.self);
assertEquals(42, self);
diff --git a/deps/v8/test/mjsunit/modules-exports1.js b/deps/v8/test/mjsunit/modules-exports1.mjs
index 260f545225..4afd52e08e 100644
--- a/deps/v8/test/mjsunit/modules-exports1.js
+++ b/deps/v8/test/mjsunit/modules-exports1.mjs
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
export var myvar = "VAR";
assertEquals("VAR", myvar);
assertEquals("VAR", eval("myvar"));
diff --git a/deps/v8/test/mjsunit/modules-exports2.js b/deps/v8/test/mjsunit/modules-exports2.mjs
index 77f6bb6ccc..0c858cb1ae 100644
--- a/deps/v8/test/mjsunit/modules-exports2.js
+++ b/deps/v8/test/mjsunit/modules-exports2.mjs
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
export function foo() { return 42 }
assertEquals(42, foo());
foo = 1;
diff --git a/deps/v8/test/mjsunit/modules-exports3.js b/deps/v8/test/mjsunit/modules-exports3.mjs
index a792852ad9..cf36f70ef9 100644
--- a/deps/v8/test/mjsunit/modules-exports3.js
+++ b/deps/v8/test/mjsunit/modules-exports3.mjs
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
export { myvar, mylet, myconst };
var myvar = "VAR";
diff --git a/deps/v8/test/mjsunit/modules-imports1.js b/deps/v8/test/mjsunit/modules-imports1.mjs
index f2e33f0f6a..3927b0d5ab 100644
--- a/deps/v8/test/mjsunit/modules-imports1.js
+++ b/deps/v8/test/mjsunit/modules-imports1.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {a as x, set_a as set_x} from "modules-skip-1.js"
+import {a as x, set_a as set_x} from "modules-skip-1.mjs"
let get_x = () => x;
diff --git a/deps/v8/test/mjsunit/modules-imports2.js b/deps/v8/test/mjsunit/modules-imports2.mjs
index 35a7f76691..de76045697 100644
--- a/deps/v8/test/mjsunit/modules-imports2.js
+++ b/deps/v8/test/mjsunit/modules-imports2.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
let get_x = () => x;
@@ -23,4 +21,4 @@ assertEquals("foo", (() => x)());
assertEquals("foo", eval('x'));
assertEquals("foo", get_x());
-import {a as x, set_a as set_x} from "modules-skip-1.js"
+import {a as x, set_a as set_x} from "modules-skip-1.mjs"
diff --git a/deps/v8/test/mjsunit/modules-imports3.js b/deps/v8/test/mjsunit/modules-imports3.mjs
index 42fcdcecb7..b656e8b6bb 100644
--- a/deps/v8/test/mjsunit/modules-imports3.js
+++ b/deps/v8/test/mjsunit/modules-imports3.mjs
@@ -1,11 +1,9 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {a as x, a as y} from "modules-skip-1.js";
-import {b as z, get_a, set_a} from "modules-skip-1.js";
+import {a as x, a as y} from "modules-skip-1.mjs";
+import {b as z, get_a, set_a} from "modules-skip-1.mjs";
assertEquals(1, get_a());
assertEquals(1, x);
diff --git a/deps/v8/test/mjsunit/modules-imports4.js b/deps/v8/test/mjsunit/modules-imports4.mjs
index 4d734878aa..d777b05f61 100644
--- a/deps/v8/test/mjsunit/modules-imports4.js
+++ b/deps/v8/test/mjsunit/modules-imports4.mjs
@@ -1,12 +1,10 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {b, c} from "modules-skip-2.js";
-import {a, set_a} from "modules-skip-1.js";
-import x from "modules-skip-2.js";
+import {b, c} from "modules-skip-2.mjs";
+import {a, set_a} from "modules-skip-1.mjs";
+import x from "modules-skip-2.mjs";
assertEquals(42, x);
diff --git a/deps/v8/test/mjsunit/modules-imports5.js b/deps/v8/test/mjsunit/modules-imports5.mjs
index b494c7e1a7..fe53f8a6c5 100644
--- a/deps/v8/test/mjsunit/modules-imports5.js
+++ b/deps/v8/test/mjsunit/modules-imports5.mjs
@@ -1,9 +1,7 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {a} from "modules-skip-3.js";
+import {a} from "modules-skip-3.mjs";
export var b = 20;
assertEquals(42, a+b);
diff --git a/deps/v8/test/mjsunit/modules-imports6.js b/deps/v8/test/mjsunit/modules-imports6.mjs
index 4cb117a98d..5dec159425 100644
--- a/deps/v8/test/mjsunit/modules-imports6.js
+++ b/deps/v8/test/mjsunit/modules-imports6.mjs
@@ -1,11 +1,9 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {b, c} from "modules-skip-4.js";
-import {a, set_a} from "modules-skip-4.js";
+import {b, c} from "modules-skip-4.mjs";
+import {a, set_a} from "modules-skip-4.mjs";
assertEquals(1, a);
assertEquals(1, b);
diff --git a/deps/v8/test/mjsunit/modules-imports7.js b/deps/v8/test/mjsunit/modules-imports7.mjs
index 2501481797..5d6f6b9f27 100644
--- a/deps/v8/test/mjsunit/modules-imports7.js
+++ b/deps/v8/test/mjsunit/modules-imports7.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {a} from "modules-skip-6.js";
+import {a} from "modules-skip-6.mjs";
assertEquals(10, a);
diff --git a/deps/v8/test/mjsunit/modules-imports8.js b/deps/v8/test/mjsunit/modules-imports8.mjs
index 56ea60f4c3..781674f5fe 100644
--- a/deps/v8/test/mjsunit/modules-imports8.js
+++ b/deps/v8/test/mjsunit/modules-imports8.mjs
@@ -1,11 +1,9 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --harmony-namespace-exports
-import {a, b} from "./modules-skip-9.js";
+import {a, b} from "./modules-skip-9.mjs";
assertSame(a, b);
assertEquals(42, a.default);
assertEquals(1, a.a);
diff --git a/deps/v8/test/mjsunit/modules-init1.js b/deps/v8/test/mjsunit/modules-init1.mjs
index fbc8df2cd1..553db8fa5b 100644
--- a/deps/v8/test/mjsunit/modules-init1.js
+++ b/deps/v8/test/mjsunit/modules-init1.mjs
@@ -1,9 +1,7 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import "modules-skip-init1.js";
+import "modules-skip-init1.mjs";
export function bar() { return 42 };
bar = 5;
diff --git a/deps/v8/test/mjsunit/modules-init2.js b/deps/v8/test/mjsunit/modules-init2.mjs
index 866cb2742a..f2e26835e9 100644
--- a/deps/v8/test/mjsunit/modules-init2.js
+++ b/deps/v8/test/mjsunit/modules-init2.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {bar} from "modules-init1.js";
+import {bar} from "modules-init1.mjs";
assertEquals(5, bar);
diff --git a/deps/v8/test/mjsunit/modules-init3.js b/deps/v8/test/mjsunit/modules-init3.mjs
index e8b46106f7..c11c2053b1 100644
--- a/deps/v8/test/mjsunit/modules-init3.js
+++ b/deps/v8/test/mjsunit/modules-init3.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {check} from "modules-skip-init3.js";
+import {check} from "modules-skip-init3.mjs";
assertSame(undefined, w);
assertThrows(() => x, ReferenceError);
diff --git a/deps/v8/test/mjsunit/modules-init4.js b/deps/v8/test/mjsunit/modules-init4.js
deleted file mode 100644
index 6b9e47726c..0000000000
--- a/deps/v8/test/mjsunit/modules-init4.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import "./modules-skip-init4a.js"
-export {foo, ns} from "./modules-skip-init4b.js"
diff --git a/deps/v8/test/message/fail/modules-cycle5.js b/deps/v8/test/mjsunit/modules-init4.mjs
index 53382daac4..681edd51e5 100644
--- a/deps/v8/test/message/fail/modules-cycle5.js
+++ b/deps/v8/test/mjsunit/modules-init4.mjs
@@ -1,8 +1,6 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import "modules-skip-cycle5.js";
-export {foo} from "modules-cycle5.js";
+import "./modules-skip-init4a.mjs"
+export {foo, ns} from "./modules-skip-init4b.mjs"
diff --git a/deps/v8/test/mjsunit/modules-namespace-getownproperty1.js b/deps/v8/test/mjsunit/modules-namespace-getownproperty1.mjs
index 55b4db03f5..00ad50cbee 100644
--- a/deps/v8/test/mjsunit/modules-namespace-getownproperty1.js
+++ b/deps/v8/test/mjsunit/modules-namespace-getownproperty1.mjs
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
-import * as ns from "./modules-namespace-getownproperty1.js";
+import * as ns from "./modules-namespace-getownproperty1.mjs";
////////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/test/mjsunit/modules-namespace-getownproperty2.js b/deps/v8/test/mjsunit/modules-namespace-getownproperty2.mjs
index c276a09210..e3f3130749 100644
--- a/deps/v8/test/mjsunit/modules-namespace-getownproperty2.js
+++ b/deps/v8/test/mjsunit/modules-namespace-getownproperty2.mjs
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
+import * as ns from "./modules-namespace-getownproperty2.mjs";
-import * as ns from "./modules-namespace-getownproperty2.js";
-
-// This tests the same as modules-namespace-getownproperty1.js except that here
+// This tests the same as modules-namespace-getownproperty1.mjs except that here
// variable a doesn't exist. This means that the late-declared variable b is the
// (alphabetically) first property of the namespace object, which makes a
// difference for some operations.
diff --git a/deps/v8/test/mjsunit/modules-namespace1.js b/deps/v8/test/mjsunit/modules-namespace1.mjs
index 82b1e528ad..8087e8315a 100644
--- a/deps/v8/test/mjsunit/modules-namespace1.js
+++ b/deps/v8/test/mjsunit/modules-namespace1.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
let ja = 42;
export {ja as yo};
@@ -66,8 +64,8 @@ for (let key of nonexistent) {
// The actual star import that we are testing. Namespace imports are
// initialized before evaluation.
-import * as foo from "modules-namespace1.js";
+import * as foo from "modules-namespace1.mjs";
// There can be only one namespace object.
-import * as bar from "modules-namespace1.js";
+import * as bar from "modules-namespace1.mjs";
assertSame(foo, bar);
diff --git a/deps/v8/test/mjsunit/modules-namespace2.js b/deps/v8/test/mjsunit/modules-namespace2.mjs
index 03c90a0099..6197a9054a 100644
--- a/deps/v8/test/mjsunit/modules-namespace2.js
+++ b/deps/v8/test/mjsunit/modules-namespace2.mjs
@@ -1,14 +1,12 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
assertEquals(
["b", "c", "get_a", "ns2", "set_a", "zzz", Symbol.toStringTag],
Reflect.ownKeys(ns));
-import * as foo from "modules-skip-1.js";
+import * as foo from "modules-skip-1.mjs";
assertSame(foo.a, ns.b);
assertSame(foo.a, ns.c);
assertSame(foo.get_a, ns.get_a);
@@ -16,5 +14,5 @@ assertSame(foo.set_a, ns.set_a);
assertEquals(123, ns.zzz);
assertSame(ns, ns.ns2.ns);
-import * as ns from "modules-skip-namespace.js";
+import * as ns from "modules-skip-namespace.mjs";
export {ns};
diff --git a/deps/v8/test/mjsunit/modules-namespace3.js b/deps/v8/test/mjsunit/modules-namespace3.mjs
index df9ef7806b..52340090ea 100644
--- a/deps/v8/test/mjsunit/modules-namespace3.js
+++ b/deps/v8/test/mjsunit/modules-namespace3.mjs
@@ -1,11 +1,9 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import * as foo from "modules-namespace3.js";
-export * from "modules-namespace3.js";
+import * as foo from "modules-namespace3.mjs";
+export * from "modules-namespace3.mjs";
export var bar;
assertEquals(["bar", "default"], Object.getOwnPropertyNames(foo));
export default function() {};
diff --git a/deps/v8/test/mjsunit/modules-namespace4.js b/deps/v8/test/mjsunit/modules-namespace4.mjs
index ef508d5fb8..93c577c253 100644
--- a/deps/v8/test/mjsunit/modules-namespace4.js
+++ b/deps/v8/test/mjsunit/modules-namespace4.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import * as foo from "modules-namespace4.js";
+import * as foo from "modules-namespace4.mjs";
assertSame(undefined, a);
assertThrows(() => b, ReferenceError);
diff --git a/deps/v8/test/mjsunit/modules-preparse.js b/deps/v8/test/mjsunit/modules-preparse.mjs
index d5922bb1aa..3eb0b17bea 100644
--- a/deps/v8/test/mjsunit/modules-preparse.js
+++ b/deps/v8/test/mjsunit/modules-preparse.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
let foo = 42;
function testFoo(x) { assertEquals(x, foo); }
diff --git a/deps/v8/test/mjsunit/modules-relative-path.js b/deps/v8/test/mjsunit/modules-relative-path.mjs
index 7e6a37ac1c..f400bc049d 100644
--- a/deps/v8/test/mjsunit/modules-relative-path.js
+++ b/deps/v8/test/mjsunit/modules-relative-path.mjs
@@ -1,10 +1,8 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import {x as y} from "./modules-relative-path.js";
+import {x as y} from "./modules-relative-path.mjs";
export let x = 0;
assertEquals(0, x);
diff --git a/deps/v8/test/mjsunit/modules-skip-1.js b/deps/v8/test/mjsunit/modules-skip-1.mjs
index 35731806bb..35731806bb 100644
--- a/deps/v8/test/mjsunit/modules-skip-1.js
+++ b/deps/v8/test/mjsunit/modules-skip-1.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-2.js b/deps/v8/test/mjsunit/modules-skip-2.mjs
index d5ff578b49..352690a2af 100644
--- a/deps/v8/test/mjsunit/modules-skip-2.js
+++ b/deps/v8/test/mjsunit/modules-skip-2.mjs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export {a as b, default} from "modules-skip-1.js";
-import {a as tmp} from "modules-skip-1.js";
+export {a as b, default} from "modules-skip-1.mjs";
+import {a as tmp} from "modules-skip-1.mjs";
export {tmp as c};
export const zzz = 999;
diff --git a/deps/v8/test/mjsunit/modules-skip-3.js b/deps/v8/test/mjsunit/modules-skip-3.mjs
index 38ead4923a..7668299ae3 100644
--- a/deps/v8/test/mjsunit/modules-skip-3.js
+++ b/deps/v8/test/mjsunit/modules-skip-3.mjs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {b} from "modules-imports5.js";
+import {b} from "modules-imports5.mjs";
export let a = 22;
assertSame(undefined, b);
assertEquals(22, a);
diff --git a/deps/v8/test/mjsunit/modules-skip-4.js b/deps/v8/test/mjsunit/modules-skip-4.js
deleted file mode 100644
index 092e27b5fd..0000000000
--- a/deps/v8/test/mjsunit/modules-skip-4.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-export * from "modules-skip-1.js";
-export * from "modules-skip-2.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-4.mjs b/deps/v8/test/mjsunit/modules-skip-4.mjs
new file mode 100644
index 0000000000..3549fd1598
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-4.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-1.mjs";
+export * from "modules-skip-2.mjs";
diff --git a/deps/v8/test/mjsunit/modules-skip-5.js b/deps/v8/test/mjsunit/modules-skip-5.mjs
index 6a1ef495e6..6a1ef495e6 100644
--- a/deps/v8/test/mjsunit/modules-skip-5.js
+++ b/deps/v8/test/mjsunit/modules-skip-5.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-6.js b/deps/v8/test/mjsunit/modules-skip-6.mjs
index 4a0ef8da64..c6d400c231 100644
--- a/deps/v8/test/mjsunit/modules-skip-6.js
+++ b/deps/v8/test/mjsunit/modules-skip-6.mjs
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export * from "modules-skip-1.js";
-export * from "modules-skip-5.js";
+export * from "modules-skip-1.mjs";
+export * from "modules-skip-5.mjs";
export const a = 10;
diff --git a/deps/v8/test/mjsunit/modules-skip-7.js b/deps/v8/test/mjsunit/modules-skip-7.js
deleted file mode 100644
index 9f46990373..0000000000
--- a/deps/v8/test/mjsunit/modules-skip-7.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-export * from "modules-skip-1.js"
-export * from "modules-skip-5.js"
diff --git a/deps/v8/test/mjsunit/modules-skip-7.mjs b/deps/v8/test/mjsunit/modules-skip-7.mjs
new file mode 100644
index 0000000000..4515128761
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-7.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-1.mjs"
+export * from "modules-skip-5.mjs"
diff --git a/deps/v8/test/mjsunit/modules-skip-8.js b/deps/v8/test/mjsunit/modules-skip-8.mjs
index 376788e283..8b1d981394 100644
--- a/deps/v8/test/mjsunit/modules-skip-8.js
+++ b/deps/v8/test/mjsunit/modules-skip-8.mjs
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export * as foo from "./modules-skip-1.js";
+export * as foo from "./modules-skip-1.mjs";
diff --git a/deps/v8/test/mjsunit/modules-skip-9.js b/deps/v8/test/mjsunit/modules-skip-9.mjs
index c0afcdf99e..3eb53acdf6 100644
--- a/deps/v8/test/mjsunit/modules-skip-9.js
+++ b/deps/v8/test/mjsunit/modules-skip-9.mjs
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import * as b from "./modules-skip-1.js";
+import * as b from "./modules-skip-1.mjs";
export {b};
-export * as a from "./modules-skip-1.js";
+export * as a from "./modules-skip-1.mjs";
diff --git a/deps/v8/test/mjsunit/modules-skip-circular-valid.js b/deps/v8/test/mjsunit/modules-skip-circular-valid.mjs
index c22544aed0..a408dd6aaa 100644
--- a/deps/v8/test/mjsunit/modules-skip-circular-valid.js
+++ b/deps/v8/test/mjsunit/modules-skip-circular-valid.mjs
@@ -3,6 +3,6 @@
// found in the LICENSE file.
export let a = {key: 'value'};
-import {b} from "modules-circular-valid.js";
+import {b} from "modules-circular-valid.mjs";
assertSame(a, b);
assertEquals('value', a.key);
diff --git a/deps/v8/test/message/fail/modules-skip-cycle5.js b/deps/v8/test/mjsunit/modules-skip-cycle.mjs
index 6eee47423e..0d0f41a5dd 100644
--- a/deps/v8/test/message/fail/modules-skip-cycle5.js
+++ b/deps/v8/test/mjsunit/modules-skip-cycle.mjs
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export {foo} from "modules-cycle5.js";
+export {foo} from "modules-cycle.mjs";
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name1.js b/deps/v8/test/mjsunit/modules-skip-default-name1.mjs
index 30a95cd691..30a95cd691 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name1.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name1.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name2.js b/deps/v8/test/mjsunit/modules-skip-default-name2.mjs
index a064b0526d..a064b0526d 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name2.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name2.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name3.js b/deps/v8/test/mjsunit/modules-skip-default-name3.mjs
index ed26e463bb..ed26e463bb 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name3.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name3.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name4.js b/deps/v8/test/mjsunit/modules-skip-default-name4.mjs
index 1c569bed56..1c569bed56 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name4.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name4.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name5.js b/deps/v8/test/mjsunit/modules-skip-default-name5.mjs
index df72336718..df72336718 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name5.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name5.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name6.js b/deps/v8/test/mjsunit/modules-skip-default-name6.mjs
index 02f562998d..02f562998d 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name6.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name6.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name7.js b/deps/v8/test/mjsunit/modules-skip-default-name7.mjs
index e4400ca409..e4400ca409 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name7.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name7.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name8.js b/deps/v8/test/mjsunit/modules-skip-default-name8.mjs
index 83e1ae43ff..83e1ae43ff 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name8.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name8.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-default-name9.js b/deps/v8/test/mjsunit/modules-skip-default-name9.mjs
index ac166f39f9..ac166f39f9 100644
--- a/deps/v8/test/mjsunit/modules-skip-default-name9.js
+++ b/deps/v8/test/mjsunit/modules-skip-default-name9.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-empty-import-aux.js b/deps/v8/test/mjsunit/modules-skip-empty-import-aux.mjs
index 1eb5b15159..1eb5b15159 100644
--- a/deps/v8/test/mjsunit/modules-skip-empty-import-aux.js
+++ b/deps/v8/test/mjsunit/modules-skip-empty-import-aux.mjs
diff --git a/deps/v8/test/mjsunit/modules-skip-empty-import.js b/deps/v8/test/mjsunit/modules-skip-empty-import.mjs
index 74d4d3ab48..b45c7c74fb 100644
--- a/deps/v8/test/mjsunit/modules-skip-empty-import.js
+++ b/deps/v8/test/mjsunit/modules-skip-empty-import.mjs
@@ -2,5 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {incr} from "modules-skip-empty-import-aux.js";
+import {incr} from "modules-skip-empty-import-aux.mjs";
incr();
diff --git a/deps/v8/test/mjsunit/modules-skip-init1.js b/deps/v8/test/mjsunit/modules-skip-init1.mjs
index 4eb4a0a865..c8b3dd1230 100644
--- a/deps/v8/test/mjsunit/modules-skip-init1.js
+++ b/deps/v8/test/mjsunit/modules-skip-init1.mjs
@@ -2,5 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {bar} from "modules-init1.js";
+import {bar} from "modules-init1.mjs";
assertEquals(42, bar());
diff --git a/deps/v8/test/mjsunit/modules-skip-init3.js b/deps/v8/test/mjsunit/modules-skip-init3.mjs
index 589b2cfb16..2362e00dff 100644
--- a/deps/v8/test/mjsunit/modules-skip-init3.js
+++ b/deps/v8/test/mjsunit/modules-skip-init3.mjs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {v, w, x, y, z} from "modules-init3.js";
+import {v, w, x, y, z} from "modules-init3.mjs";
assertEquals({value: 40, done: true}, v().next());
assertSame(undefined, w);
diff --git a/deps/v8/test/mjsunit/modules-skip-init4a.js b/deps/v8/test/mjsunit/modules-skip-init4a.mjs
index 66f4e8276b..3d1f068368 100644
--- a/deps/v8/test/mjsunit/modules-skip-init4a.js
+++ b/deps/v8/test/mjsunit/modules-skip-init4a.mjs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import {foo, ns} from "./modules-init4.js"
+import {foo, ns} from "./modules-init4.mjs"
assertEquals(foo(), 42)
assertEquals(ns.foo(), 42)
assertSame(foo, ns.foo)
diff --git a/deps/v8/test/mjsunit/modules-skip-init4b.js b/deps/v8/test/mjsunit/modules-skip-init4b.mjs
index 502c93b8f4..36225f00e1 100644
--- a/deps/v8/test/mjsunit/modules-skip-init4b.js
+++ b/deps/v8/test/mjsunit/modules-skip-init4b.mjs
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-import * as ns from "./modules-skip-init4b.js"
+import * as ns from "./modules-skip-init4b.mjs"
export function foo() { return 42 }
export {ns}
diff --git a/deps/v8/test/mjsunit/modules-skip-namespace.js b/deps/v8/test/mjsunit/modules-skip-namespace.mjs
index ff6a7b81d3..7cd90ec168 100644
--- a/deps/v8/test/mjsunit/modules-skip-namespace.js
+++ b/deps/v8/test/mjsunit/modules-skip-namespace.mjs
@@ -6,8 +6,8 @@
// ["ns", Symbol.toStringTag, Symbol.iterator], Reflect.ownKeys(ns2));
//assertEquals(["ns"], [...ns2]);
-export * from "modules-skip-4.js";
-export * from "modules-skip-5.js";
+export * from "modules-skip-4.mjs";
+export * from "modules-skip-5.mjs";
export var zzz = 123;
export {ns2};
-import * as ns2 from "modules-namespace2.js";
+import * as ns2 from "modules-namespace2.mjs";
diff --git a/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js b/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js
deleted file mode 100644
index 5cbcd85a33..0000000000
--- a/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-export * from "modules-skip-1.js";
-export * from "modules-skip-5.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.mjs b/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.mjs
new file mode 100644
index 0000000000..f68ac1fe0b
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-star-exports-conflict.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-1.mjs";
+export * from "modules-skip-5.mjs";
diff --git a/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js b/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js
deleted file mode 100644
index ab67ca803d..0000000000
--- a/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-export * from "modules-skip-star-exports-cycle.js";
-export * from "modules-star-exports-cycle.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.mjs b/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.mjs
new file mode 100644
index 0000000000..5ad76ce73e
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-star-exports-cycle.mjs
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export * from "modules-skip-star-exports-cycle.mjs";
+export * from "modules-star-exports-cycle.mjs";
diff --git a/deps/v8/test/mjsunit/modules-star-exports-cycle.js b/deps/v8/test/mjsunit/modules-star-exports-cycle.mjs
index 6af3139af0..72a037e085 100644
--- a/deps/v8/test/mjsunit/modules-star-exports-cycle.js
+++ b/deps/v8/test/mjsunit/modules-star-exports-cycle.mjs
@@ -1,11 +1,9 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
const bar = 42;
export {bar as foo};
-import {foo} from "modules-skip-star-exports-cycle.js";
+import {foo} from "modules-skip-star-exports-cycle.mjs";
assertEquals(42, foo);
diff --git a/deps/v8/test/mjsunit/modules-this.js b/deps/v8/test/mjsunit/modules-this.mjs
index 2c8fc74fe7..9c736c2248 100644
--- a/deps/v8/test/mjsunit/modules-this.js
+++ b/deps/v8/test/mjsunit/modules-this.mjs
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
assertEquals(undefined, this);
diff --git a/deps/v8/test/mjsunit/modules-turbo1.js b/deps/v8/test/mjsunit/modules-turbo1.mjs
index ce7e0b8f34..b814b4811f 100644
--- a/deps/v8/test/mjsunit/modules-turbo1.js
+++ b/deps/v8/test/mjsunit/modules-turbo1.mjs
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --allow-natives-syntax --opt --turbo-filter=*
export let x = 0;
diff --git a/deps/v8/test/mjsunit/modules-turbo2.js b/deps/v8/test/mjsunit/modules-turbo2.mjs
index 18d84bad3d..172f33b78b 100644
--- a/deps/v8/test/mjsunit/modules-turbo2.js
+++ b/deps/v8/test/mjsunit/modules-turbo2.mjs
@@ -1,8 +1,6 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// MODULE
// Flags: --allow-natives-syntax
export let x = 0;
diff --git a/deps/v8/test/mjsunit/nans.js b/deps/v8/test/mjsunit/nans.js
index 5630e5b061..ddd6e4ca55 100644
--- a/deps/v8/test/mjsunit/nans.js
+++ b/deps/v8/test/mjsunit/nans.js
@@ -36,6 +36,7 @@ function isLittleEndian() {
// Test that both kinds of NaNs (signaling or quiet) do not signal
function TestAllModes(f) {
+ %PrepareFunctionForOptimization(f);
f(); // Runtime
f(); // IC
f(); // IC second time
diff --git a/deps/v8/test/mjsunit/non-extensible-array-reduce.js b/deps/v8/test/mjsunit/non-extensible-array-reduce.js
new file mode 100644
index 0000000000..1bc87b7add
--- /dev/null
+++ b/deps/v8/test/mjsunit/non-extensible-array-reduce.js
@@ -0,0 +1,1420 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+/**
+ * @fileoverview Test reduce and reduceRight
+ */
+
+function clone(v) {
+ // Shallow-copies arrays, returns everything else verbatim.
+ if (v instanceof Array) {
+ // Shallow-copy an array.
+ var newArray = new Array(v.length);
+ for (var i in v) {
+ newArray[i] = v[i];
+ }
+ return newArray;
+ }
+ return v;
+}
+
+
+// Creates a callback function for reduce/reduceRight that tests the number
+// of arguments and otherwise behaves as "func", but which also
+// records all calls in an array on the function (as arrays of arguments
+// followed by result).
+function makeRecorder(func, testName) {
+ var record = [];
+ var f = function recorder(a, b, i, s) {
+ assertEquals(4, arguments.length,
+ testName + "(number of arguments: " + arguments.length + ")");
+ assertEquals("number", typeof(i), testName + "(index must be number)");
+ assertEquals(s[i], b, testName + "(current argument is at index)");
+ if (record.length > 0) {
+ var prevRecord = record[record.length - 1];
+ var prevResult = prevRecord[prevRecord.length - 1];
+ assertEquals(prevResult, a,
+ testName + "(prev result -> current input)");
+ }
+ var args = [clone(a), clone(b), i, clone(s)];
+ var result = func.apply(this, arguments);
+ args.push(clone(result));
+ record.push(args);
+ return result;
+ };
+ f.record = record;
+ return f;
+}
+
+
+function testReduce(type,
+ testName,
+ expectedResult,
+ expectedCalls,
+ array,
+ combine,
+ init) {
+ var rec = makeRecorder(combine);
+ var result;
+ if (arguments.length > 6) {
+ result = array[type](rec, init);
+ } else {
+ result = array[type](rec);
+ }
+ var calls = rec.record;
+ assertEquals(expectedCalls.length, calls.length,
+ testName + " (number of calls)");
+ for (var i = 0; i < expectedCalls.length; i++) {
+ assertEquals(expectedCalls[i], calls[i],
+ testName + " (call " + (i + 1) + ")");
+ }
+ assertEquals(expectedResult, result, testName + " (result)");
+}
+
+
+function sum(a, b) { return Number(a) + Number(b); }
+function prod(a, b) { return Number(a) * Number(b); }
+function dec(a, b, i, arr) { return Number(a) + Number(b) * Math.pow(10, arr.length - i - 1); }
+function accumulate(acc, elem, i) { acc[i] = elem; return acc; }
+
+// ---- Test Reduce[Left]
+
+var simpleArray = ['2',4,6];
+Object.preventExtensions(simpleArray);
+
+testReduce("reduce", "SimpleReduceSum", 12,
+ [[0, '2', 0, simpleArray, 2],
+ [2, 4, 1, simpleArray, 6],
+ [6, 6, 2, simpleArray, 12]],
+ simpleArray, sum, 0);
+
+testReduce("reduce", "SimpleReduceProd", 48,
+ [[1, '2', 0, simpleArray, 2],
+ [2, 4, 1, simpleArray, 8],
+ [8, 6, 2, simpleArray, 48]],
+ simpleArray, prod, 1);
+
+testReduce("reduce", "SimpleReduceDec", 246,
+ [[0, '2', 0, simpleArray, 200],
+ [200, 4, 1, simpleArray, 240],
+ [240, 6, 2, simpleArray, 246]],
+ simpleArray, dec, 0);
+
+testReduce("reduce", "SimpleReduceAccumulate", simpleArray,
+ [[[], '2', 0, simpleArray, ['2']],
+ [['2'], 4, 1, simpleArray, ['2', 4]],
+ [['2', 4], 6, 2, simpleArray, simpleArray]],
+ simpleArray, accumulate, []);
+
+var emptyArray = [];
+Object.preventExtensions(emptyArray);
+
+testReduce("reduce", "EmptyReduceSum", 0, [], emptyArray, sum, 0);
+testReduce("reduce", "EmptyReduceProd", 1, [], emptyArray, prod, 1);
+testReduce("reduce", "EmptyReduceDec", 0, [], emptyArray, dec, 0);
+testReduce("reduce", "EmptyReduceAccumulate", [], [], emptyArray, accumulate, []);
+
+testReduce("reduce", "EmptyReduceSumNoInit", 0, emptyArray, [0], sum);
+testReduce("reduce", "EmptyReduceProdNoInit", 1, emptyArray, [1], prod);
+testReduce("reduce", "EmptyReduceDecNoInit", 0, emptyArray, [0], dec);
+testReduce("reduce", "EmptyReduceAccumulateNoInit", [], emptyArray, [[]], accumulate);
+
+
+var simpleSparseArray = [,,,'2',,4,,6,,];
+Object.preventExtensions(simpleSparseArray);
+
+testReduce("reduce", "SimpleSparseReduceSum", 12,
+ [[0, '2', 3, simpleSparseArray, 2],
+ [2, 4, 5, simpleSparseArray, 6],
+ [6, 6, 7, simpleSparseArray, 12]],
+ simpleSparseArray, sum, 0);
+
+testReduce("reduce", "SimpleSparseReduceProd", 48,
+ [[1, '2', 3, simpleSparseArray, 2],
+ [2, 4, 5, simpleSparseArray, 8],
+ [8, 6, 7, simpleSparseArray, 48]],
+ simpleSparseArray, prod, 1);
+
+testReduce("reduce", "SimpleSparseReduceDec", 204060,
+ [[0, '2', 3, simpleSparseArray, 200000],
+ [200000, 4, 5, simpleSparseArray, 204000],
+ [204000, 6, 7, simpleSparseArray, 204060]],
+ simpleSparseArray, dec, 0);
+
+testReduce("reduce", "SimpleSparseReduceAccumulate", [,,,'2',,4,,6],
+ [[[], '2', 3, simpleSparseArray, [,,,'2']],
+ [[,,,'2'], 4, 5, simpleSparseArray, [,,,'2',,4]],
+ [[,,,'2',,4], 6, 7, simpleSparseArray, [,,,'2',,4,,6]]],
+ simpleSparseArray, accumulate, []);
+
+
+testReduce("reduce", "EmptySparseReduceSumNoInit", 0, [], [,,0,,], sum);
+testReduce("reduce", "EmptySparseReduceProdNoInit", 1, [], [,,1,,], prod);
+testReduce("reduce", "EmptySparseReduceDecNoInit", 0, [], [,,0,,], dec);
+testReduce("reduce", "EmptySparseReduceAccumulateNoInit",
+ [], [], [,,[],,], accumulate);
+
+
+var verySparseArray = [];
+verySparseArray.length = 10000;
+verySparseArray[2000] = '2';
+verySparseArray[5000] = 4;
+verySparseArray[9000] = 6;
+var verySparseSlice2 = verySparseArray.slice(0, 2001);
+var verySparseSlice4 = verySparseArray.slice(0, 5001);
+var verySparseSlice6 = verySparseArray.slice(0, 9001);
+Object.preventExtensions(verySparseArray);
+
+testReduce("reduce", "VerySparseReduceSum", 12,
+ [[0, '2', 2000, verySparseArray, 2],
+ [2, 4, 5000, verySparseArray, 6],
+ [6, 6, 9000, verySparseArray, 12]],
+ verySparseArray, sum, 0);
+
+testReduce("reduce", "VerySparseReduceProd", 48,
+ [[1, '2', 2000, verySparseArray, 2],
+ [2, 4, 5000, verySparseArray, 8],
+ [8, 6, 9000, verySparseArray, 48]],
+ verySparseArray, prod, 1);
+
+testReduce("reduce", "VerySparseReduceDec", Infinity,
+ [[0, '2', 2000, verySparseArray, Infinity],
+ [Infinity, 4, 5000, verySparseArray, Infinity],
+ [Infinity, 6, 9000, verySparseArray, Infinity]],
+ verySparseArray, dec, 0);
+
+testReduce("reduce", "VerySparseReduceAccumulate",
+ verySparseSlice6,
+ [[[], '2', 2000, verySparseArray, verySparseSlice2],
+ [verySparseSlice2, 4, 5000, verySparseArray, verySparseSlice4],
+ [verySparseSlice4, 6, 9000, verySparseArray, verySparseSlice6]],
+ verySparseArray, accumulate, []);
+
+
+testReduce("reduce", "VerySparseReduceSumNoInit", 12,
+ [['2', 4, 5000, verySparseArray, 6],
+ [6, 6, 9000, verySparseArray, 12]],
+ verySparseArray, sum);
+
+testReduce("reduce", "VerySparseReduceProdNoInit", 48,
+ [['2', 4, 5000, verySparseArray, 8],
+ [8, 6, 9000, verySparseArray, 48]],
+ verySparseArray, prod);
+
+testReduce("reduce", "VerySparseReduceDecNoInit", Infinity,
+ [['2', 4, 5000, verySparseArray, Infinity],
+ [Infinity, 6, 9000, verySparseArray, Infinity]],
+ verySparseArray, dec);
+
+testReduce("reduce", "SimpleSparseReduceAccumulateNoInit",
+ '2',
+ [['2', 4, 5000, verySparseArray, '2'],
+ ['2', 6, 9000, verySparseArray, '2']],
+ verySparseArray, accumulate);
+
+
+// ---- Test ReduceRight
+
+testReduce("reduceRight", "SimpleReduceRightSum", 12,
+ [[0, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 10],
+ [10, '2', 0, simpleArray, 12]],
+ simpleArray, sum, 0);
+
+testReduce("reduceRight", "SimpleReduceRightProd", 48,
+ [[1, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 24],
+ [24, '2', 0, simpleArray, 48]],
+ simpleArray, prod, 1);
+
+testReduce("reduceRight", "SimpleReduceRightDec", 246,
+ [[0, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 46],
+ [46, '2', 0, simpleArray, 246]],
+ simpleArray, dec, 0);
+
+testReduce("reduceRight", "SimpleReduceRightAccumulate", simpleArray,
+ [[[], 6, 2, simpleArray, [,,6]],
+ [[,,6], 4, 1, simpleArray, [,4,6]],
+ [[,4,6], '2', 0, simpleArray, simpleArray]],
+ simpleArray, accumulate, []);
+
+
+testReduce("reduceRight", "EmptyReduceRightSum", 0, [], [], sum, 0);
+testReduce("reduceRight", "EmptyReduceRightProd", 1, [], [], prod, 1);
+testReduce("reduceRight", "EmptyReduceRightDec", 0, [], [], dec, 0);
+testReduce("reduceRight", "EmptyReduceRightAccumulate", [],
+ [], [], accumulate, []);
+
+testReduce("reduceRight", "EmptyReduceRightSumNoInit", 0, [], [0], sum);
+testReduce("reduceRight", "EmptyReduceRightProdNoInit", 1, [], [1], prod);
+testReduce("reduceRight", "EmptyReduceRightDecNoInit", 0, [], [0], dec);
+testReduce("reduceRight", "EmptyReduceRightAccumulateNoInit",
+ [], [], [[]], accumulate);
+
+
+testReduce("reduceRight", "SimpleSparseReduceRightSum", 12,
+ [[0, 6, 7, simpleSparseArray, 6],
+ [6, 4, 5, simpleSparseArray, 10],
+ [10, '2', 3, simpleSparseArray, 12]],
+ simpleSparseArray, sum, 0);
+
+testReduce("reduceRight", "SimpleSparseReduceRightProd", 48,
+ [[1, 6, 7, simpleSparseArray, 6],
+ [6, 4, 5, simpleSparseArray, 24],
+ [24, '2', 3, simpleSparseArray, 48]],
+ simpleSparseArray, prod, 1);
+
+testReduce("reduceRight", "SimpleSparseReduceRightDec", 204060,
+ [[0, 6, 7, simpleSparseArray, 60],
+ [60, 4, 5, simpleSparseArray, 4060],
+ [4060, '2', 3, simpleSparseArray, 204060]],
+ simpleSparseArray, dec, 0);
+
+testReduce("reduceRight", "SimpleSparseReduceRightAccumulate", [,,,'2',,4,,6],
+ [[[], 6, 7, simpleSparseArray, [,,,,,,,6]],
+ [[,,,,,,,6], 4, 5, simpleSparseArray, [,,,,,4,,6]],
+ [[,,,,,4,,6], '2', 3, simpleSparseArray, [,,,'2',,4,,6]]],
+ simpleSparseArray, accumulate, []);
+
+
+testReduce("reduceRight", "EmptySparseReduceRightSumNoInit",
+ 0, [], [,,0,,], sum);
+testReduce("reduceRight", "EmptySparseReduceRightProdNoInit",
+ 1, [], [,,1,,], prod);
+testReduce("reduceRight", "EmptySparseReduceRightDecNoInit",
+ 0, [], [,,0,,], dec);
+testReduce("reduceRight", "EmptySparseReduceRightAccumulateNoInit",
+ [], [], [,,[],,], accumulate);
+
+
+var verySparseSuffix6 = [];
+verySparseSuffix6[9000] = 6;
+var verySparseSuffix4 = [];
+verySparseSuffix4[5000] = 4;
+verySparseSuffix4[9000] = 6;
+var verySparseSuffix2 = verySparseSlice6;
+
+
+testReduce("reduceRight", "VerySparseReduceRightSum", 12,
+ [[0, 6, 9000, verySparseArray, 6],
+ [6, 4, 5000, verySparseArray, 10],
+ [10, '2', 2000, verySparseArray, 12]],
+ verySparseArray, sum, 0);
+
+testReduce("reduceRight", "VerySparseReduceRightProd", 48,
+ [[1, 6, 9000, verySparseArray, 6],
+ [6, 4, 5000, verySparseArray, 24],
+ [24, '2', 2000, verySparseArray, 48]],
+ verySparseArray, prod, 1);
+
+testReduce("reduceRight", "VerySparseReduceRightDec", Infinity,
+ [[0, 6, 9000, verySparseArray, Infinity],
+ [Infinity, 4, 5000, verySparseArray, Infinity],
+ [Infinity, '2', 2000, verySparseArray, Infinity]],
+ verySparseArray, dec, 0);
+
+testReduce("reduceRight", "VerySparseReduceRightAccumulate",
+ verySparseSuffix2,
+ [[[], 6, 9000, verySparseArray, verySparseSuffix6],
+ [verySparseSuffix6, 4, 5000, verySparseArray, verySparseSuffix4],
+ [verySparseSuffix4, '2', 2000, verySparseArray, verySparseSuffix2]],
+ verySparseArray, accumulate, []);
+
+
+testReduce("reduceRight", "VerySparseReduceRightSumNoInit", 12,
+ [[6, 4, 5000, verySparseArray, 10],
+ [10, '2', 2000, verySparseArray, 12]],
+ verySparseArray, sum);
+
+testReduce("reduceRight", "VerySparseReduceRightProdNoInit", 48,
+ [[6, 4, 5000, verySparseArray, 24],
+ [24, '2', 2000, verySparseArray, 48]],
+ verySparseArray, prod);
+
+testReduce("reduceRight", "VerySparseReduceRightDecNoInit", Infinity,
+ [[6, 4, 5000, verySparseArray, Infinity],
+ [Infinity, '2', 2000, verySparseArray, Infinity]],
+ verySparseArray, dec);
+
+testReduce("reduceRight", "SimpleSparseReduceRightAccumulateNoInit",
+ 6,
+ [[6, 4, 5000, verySparseArray, 6],
+ [6, '2', 2000, verySparseArray, 6]],
+ verySparseArray, accumulate);
+
+
+// undefined is an element
+var undefArray = [,,undefined,,undefined,,];
+Object.preventExtensions(undefArray);
+
+testReduce("reduce", "SparseUndefinedReduceAdd", NaN,
+ [[0, undefined, 2, undefArray, NaN],
+ [NaN, undefined, 4, undefArray, NaN],
+ ],
+ undefArray, sum, 0);
+
+testReduce("reduceRight", "SparseUndefinedReduceRightAdd", NaN,
+ [[0, undefined, 4, undefArray, NaN],
+ [NaN, undefined, 2, undefArray, NaN],
+ ], undefArray, sum, 0);
+
+testReduce("reduce", "SparseUndefinedReduceAddNoInit", NaN,
+ [[undefined, undefined, 4, undefArray, NaN],
+ ], undefArray, sum);
+
+testReduce("reduceRight", "SparseUndefinedReduceRightAddNoInit", NaN,
+ [[undefined, undefined, 2, undefArray, NaN],
+ ], undefArray, sum);
+
+
+// Ignore non-array properties:
+
+var arrayPlus = [1,'2',,3];
+arrayPlus[-1] = NaN;
+arrayPlus[Math.pow(2,32)] = NaN;
+arrayPlus[NaN] = NaN;
+arrayPlus["00"] = NaN;
+arrayPlus["02"] = NaN;
+arrayPlus["-0"] = NaN;
+Object.preventExtensions(arrayPlus);
+
+testReduce("reduce", "ArrayWithNonElementPropertiesReduce", 6,
+ [[0, 1, 0, arrayPlus, 1],
+ [1, '2', 1, arrayPlus, 3],
+ [3, 3, 3, arrayPlus, 6],
+ ], arrayPlus, sum, 0);
+
+testReduce("reduceRight", "ArrayWithNonElementPropertiesReduceRight", 6,
+ [[0, 3, 3, arrayPlus, 3],
+ [3, '2', 1, arrayPlus, 5],
+ [5, 1, 0, arrayPlus, 6],
+ ], arrayPlus, sum, 0);
+
+// Test passing undefined as initial value (to test missing parameter
+// detection).
+Object.preventExtensions(['1']).reduce((a, b) => { assertEquals(a, undefined); assertEquals(b, '1') },
+ undefined);
+Object.preventExtensions(['1', 2]).reduce((a, b) => { assertEquals(a, '1'); assertEquals(b, 2); });
+Object.preventExtensions(['1']).reduce((a, b) => { assertTrue(false); });
+
+// Test error conditions:
+
+var exception = false;
+try {
+ Object.preventExtensions(['1']).reduce("not a function");
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce callback not a function not throwing TypeError");
+ assertTrue(e.message.indexOf(" is not a function") >= 0,
+ "reduce non function TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.preventExtensions(['1']).reduceRight("not a function");
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight callback not a function not throwing TypeError");
+ assertTrue(e.message.indexOf(" is not a function") >= 0,
+ "reduceRight non function TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.preventExtensions([]).reduce(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduce no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.preventExtensions([]).reduceRight(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduceRight no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.preventExtensions([,,,]).reduce(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce sparse no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduce no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.preventExtensions([,,,]).reduceRight(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight sparse no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduceRight no initial TypeError type");
+}
+assertTrue(exception);
+
+
+// Array changing length
+
+function extender(a, b, i, s) {
+ s[s.length] = s.length;
+ return Number(a) + Number(b);
+}
+
+var arr = [1, '2', 3, 4];
+Object.preventExtensions(arr);
+testReduce("reduce", "ArrayManipulationExtender", 10,
+ [[0, 1, 0, [1, '2', 3, 4], 1],
+ [1, '2', 1, [1, '2', 3, 4], 3],
+ [3, 3, 2, [1, '2', 3, 4], 6],
+ [6, 4, 3, [1, '2', 3, 4], 10],
+ ], arr, extender, 0);
+
+var arr = [];
+Object.defineProperty(arr, "0", { get: function() { delete this[0] },
+ configurable: true });
+assertEquals(undefined, Object.preventExtensions(arr).reduce(function(val) { return val }));
+
+var arr = [];
+Object.defineProperty(arr, "0", { get: function() { delete this[0] },
+ configurable: true});
+assertEquals(undefined, Object.preventExtensions(arr).reduceRight(function(val) { return val }));
+
+
+(function ReduceRightMaxIndex() {
+ const kMaxIndex = 0xffffffff-1;
+ let array = [];
+ array[kMaxIndex-2] = 'value-2';
+ array[kMaxIndex-1] = 'value-1';
+ // Use the maximum array index possible.
+ array[kMaxIndex] = 'value';
+ // Add the next index which is a normal property and thus will not show up.
+ array[kMaxIndex+1] = 'normal property';
+ assertThrowsEquals( () => {
+ Object.preventExtensions(array).reduceRight((sum, value) => {
+ assertEquals('initial', sum);
+ assertEquals('value', value);
+ // Throw at this point as we would very slowly loop down from kMaxIndex.
+ throw 'do not continue';
+ }, 'initial')
+ }, 'do not continue');
+})();
+
+(function OptimizedReduce() {
+ let f = (a,current) => a + Number(current);
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.preventExtensions(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEmpty() {
+ let f = (a,current) => a + Number(current);
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.preventExtensions(a);
+ g(a); g(a); g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ assertThrows(() => g([]));
+ assertUnoptimized(g);
+})();
+
+(function OptimizedReduceLazyDeopt() {
+ let deopt = false;
+ let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + Number(current); };
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.preventExtensions(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceLazyDeoptMiddleOfIteration() {
+ let deopt = false;
+ let f = (a,current) => {
+ if (current == 6 && deopt) %DeoptimizeNow();
+ return a + Number(current);
+ };
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [11,'22',33,45,56,6,77,84,93,101];
+ Object.preventExtensions(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIteration() {
+ let deopt = false;
+ let array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.preventExtensions(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[0] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ %PrepareFunctionForOptimization(g);
+ assertOptimized(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.preventExtensions(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIterationHoley() {
+ let deopt = false;
+ let array = [, ,11,'22',,33,45,56,,6,77,84,93,101,];
+ Object.preventExtensions(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[0] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ assertOptimized(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.preventExtensions(array);
+ %PrepareFunctionForOptimization(g);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertUnoptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertUnoptimized(g);
+})();
+
+(function TriggerReduceRightPreLoopDeopt() {
+ function f(a) {
+ a.reduceRight((x) => { return Number(x) + 1 });
+ };
+ %PrepareFunctionForOptimization(f);
+ var arr = Object.preventExtensions([1, '2', ]);
+ f(arr);
+ f(arr);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(() => f([]), TypeError);
+ assertUnoptimized(f);
+})();
+
+(function OptimizedReduceRightEagerDeoptMiddleOfIterationHoley() {
+ let deopt = false;
+ let array = [, ,11,'22',,33,45,56,,6,77,84,93,101,];
+ Object.preventExtensions(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[array.length-1] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ assertOptimized(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.preventExtensions(array);
+ %PrepareFunctionForOptimization(g);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertUnoptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertUnoptimized(g);
+})();
+
+(function ReduceCatch() {
+ let f = (a,current) => {
+ return a + current;
+ };
+ let g = function() {
+ try {
+ return Object.preventExtensions(array).reduce(f);
+ } catch (e) {
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinally() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyNoInline() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1, '2', 3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceNonCallableOpt() {
+ let done = false;
+ let f = (a, current) => {
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g(); g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ f = null;
+ assertThrows(() => g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatchInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,2,3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceRight() {
+ let count = 0;
+ let f = (a,current,i) => a + Number(current) * ++count;
+ let g = function(a) {
+ count = 0;
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.preventExtensions(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEmpty() {
+ let count = 0;
+ let f = (a,current,i) => a + Number(current) * ++count;
+ let g = function(a) {
+ count = 0;
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.preventExtensions(a);
+ g(a); g(a); g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ assertThrows(() => g([]));
+ assertUnoptimized(g);
+})();
+
+(function OptimizedReduceLazyDeopt() {
+ let deopt = false;
+ let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + Number(current); };
+ let g = function(a) {
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.preventExtensions(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceLazyDeoptMiddleOfIteration() {
+ let deopt = false;
+ let f = (a,current) => {
+ if (current == 6 && deopt) %DeoptimizeNow();
+ return a + Number(current);
+ };
+ let g = function(a) {
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [11,'22',33,45,56,6,77,84,93,101];
+ Object.preventExtensions(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIteration() {
+ let deopt = false;
+ let array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.preventExtensions(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[9] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ %PrepareFunctionForOptimization(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.preventExtensions(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatch() {
+ let f = (a,current) => {
+ return a + Number(current);
+ };
+ let g = function() {
+ try {
+ return Object.preventExtensions(array).reduceRight(f);
+ } catch (e) {
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinally() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1, '2', 3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyNoInline() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceNonCallableOpt() {
+ let done = false;
+ let f = (a, current) => {
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g(); g();
+ assertEquals(6, g());
+ f = null;
+ assertThrows(() => g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatchInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.preventExtensions(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceHoleyArrayWithDefaultAccumulator() {
+ var holey = new Array(10);
+ Object.preventExtensions(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return currentValue;
+ };
+ return a.reduce(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(13, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceRightHoleyArrayWithDefaultAccumulator() {
+ var holey = new Array(10);
+ Object.preventExtensions(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return currentValue;
+ };
+ return a.reduceRight(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(13, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceHoleyArrayOneElementWithDefaultAccumulator() {
+ var holey = new Array(10);
+ holey[1] = '5';
+ Object.preventExtensions(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return Number(currentValue) + accumulator;
+ };
+ return a.reduce(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(18, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceRightHoleyArrayOneElementWithDefaultAccumulator() {
+ var holey = new Array(10);
+ holey[1] = '5';
+ Object.preventExtensions(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return Number(currentValue) + accumulator;
+ };
+ return a.reduceRight(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(18, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceMixedHoleyArrays() {
+ function r(a) {
+ return a.reduce((acc, i) => {acc[0]});
+ };
+ %PrepareFunctionForOptimization(r);
+ assertEquals(r(Object.preventExtensions([[0]])), [0]);
+ assertEquals(r(Object.preventExtensions([[0]])), [0]);
+ assertEquals(r(Object.preventExtensions([0,,])), 0);
+ %OptimizeFunctionOnNextCall(r);
+ assertEquals(r(Object.preventExtensions([,0,0])), undefined);
+ assertOptimized(r);
+})();
diff --git a/deps/v8/test/mjsunit/noopt.js b/deps/v8/test/mjsunit/noopt.js
index 70aed326d4..9ee73ae3e0 100644
--- a/deps/v8/test/mjsunit/noopt.js
+++ b/deps/v8/test/mjsunit/noopt.js
@@ -7,8 +7,10 @@
// Check that --noopt actually works.
-function f() { return 42; }
-
+function f() {
+ return 42;
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/number-tostring-subnormal.js b/deps/v8/test/mjsunit/number-tostring-subnormal.js
new file mode 100644
index 0000000000..80305f64cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/number-tostring-subnormal.js
@@ -0,0 +1,92 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests Number.prototype.toString for numbers within or near the subnormal
+// range, when the radix argument is 2.
+
+// A JavaScript number is a IEEE 754 binary64 (double-precision floating-point)
+// value, so we should be able to provide a binary string value that is an
+// exact representation.
+
+const zeros = count => '0'.repeat(count);
+
+const test = (binaryStringValue, double) => {
+ assertEquals(binaryStringValue, double.toString(2));
+};
+
+// 2**-1074
+test(`0.${zeros(1073)}1`, Number.MIN_VALUE);
+
+// Bug v8:9294
+test(`0.${zeros(1022)}1101100011110111011100000100011001111101110001010111`,
+ 1.8858070859709815e-308);
+test(`0.${zeros(1021)}11110111001111000110110111011101110001000000000000001`,
+ 4.297800585227606e-308);
+
+// Normal doubles smaller than 2**-1021 (4.450147717014403e-308). These values
+// are not in the subnormal range, but like the subnormals they have a gap of
+// Number.MIN_VALUE between themselves and the next double.
+test(`0.${zeros(1021)}11100001110000011100111110011010010100100010001001001`,
+ 3.924423154449847e-308);
+test(`-0.${zeros(1021)}11001101101111001101100010110110011000000011010111001`,
+ -3.57641826104544e-308);
+test(`-0.${zeros(1021)}11101100100000100110110001000010010001001000101110001`,
+ -4.1113361447183043e-308);
+test(`0.${zeros(1021)}11111001101000111100111001001101101001001011010001101`,
+ 4.339587042263274e-308);
+test(`0.${zeros(1021)}10111011101001010010101011100001110000000001011110001`,
+ 3.261909352323954e-308);
+test(`0.${zeros(1021)}10001001101101110110000110001011100001100111111111001`,
+ 2.3939766453008923e-308);
+test(`-0.${zeros(1021)}11101001000110010001100111110111001001000001100010011`,
+ -4.052034242003901e-308);
+test(`-0.${zeros(1021)}10001111010010000100000110100010101101001000110010101`,
+ -2.4907311894031355e-308);
+test(`-0.${zeros(1021)}10101100001001010011101010001110011010000001111000001`,
+ -2.9924709724070097e-308);
+test(`-0.${zeros(1021)}11111101001111010110001011001001000100110101001010111`,
+ -4.402165887028534e-308);
+
+// Subnormal doubles: numbers smaller than 2**-1022 (2.2250738585072014e-308).
+test(`0.${zeros(1022)}1100111101100011000101110111111000011111110001001011`,
+ 1.802545172319673e-308);
+test(`0.${zeros(1022)}1111001000101011101110111111011111111100111101101011`,
+ 2.104874994274149e-308);
+test(`-0.${zeros(1022)}1001110011110110110010010010010001111100001111101011`,
+ -1.3642832344349763e-308);
+test(`0.${zeros(1023)}111101010100011111101110111101011000001110011101101`,
+ 1.0659537476238824e-308);
+test(`-0.${zeros(1023)}100101011110101100111101101001110011101011110111101`,
+ -6.51524700064251e-309);
+test(`-0.${zeros(1024)}10011100110100110010111101001001111100000101000111`,
+ -3.407686279964664e-309);
+test(`-0.${zeros(1024)}11101001001010000001101111010101011111111011010111`,
+ -5.06631661953108e-309);
+test(`-0.${zeros(1024)}10111100111100001100010100110011001011000011011101`,
+ -4.105533080656306e-309);
+test(`0.${zeros(1025)}1101111100101101111110101111001010101100001100111`,
+ 2.42476131288505e-309);
+test(`-0.${zeros(1025)}1001000100011011100101010010101010101011101000111`,
+ -1.576540281929606e-309);
+test(`0.${zeros(1023)}111101100001000000001011101011101010001110011111101`,
+ 1.0693508455891784e-308);
+test(`0.${zeros(1024)}11100010101101001101011010110001110101110100010001`,
+ 4.926157093545696e-309);
+test(`-0.${zeros(1027)}10010111100001111110000101011001010111100011011`,
+ -4.1158103328176e-310);
+test(`0.${zeros(1030)}11111010010111010110101101000111010011100001`,
+ 8.500372841691e-311);
+test(`0.${zeros(1033)}101001010011111001100100001010001101`,
+ 7.01292938871e-312);
+test(`0.${zeros(1037)}11101010101101100111000110100111001`, 6.22574126804e-313);
+test(`-0.${zeros(1040)}10100001001101011011111001111111`, -5.3451064043e-314);
+test(`-0.${zeros(1043)}1001100101100100000001000011111`, -6.35731246e-315);
+test(`0.${zeros(1046)}10101110110100011010110001`, 9.05676893e-316);
+test(`-0.${zeros(1050)}11001010000110011100011`, -6.5438353e-317);
+test(`0.${zeros(1053)}111001010000010001`, 9.269185e-318);
+test(`-0.${zeros(1057)}1100001000010101`, -4.90953e-319);
+test(`-0.${zeros(1059)}10011001001111`, -9.6906e-320);
+test(`0.${zeros(1063)}111101111`, 9.782e-321);
+test(`0.${zeros(1067)}10011`, 3.75e-322);
+test(`-0.${zeros(1070)}1`, -4e-323);
diff --git a/deps/v8/test/mjsunit/object-define-property.js b/deps/v8/test/mjsunit/object-define-property.js
index cc9c0b79a6..dba774cf21 100644
--- a/deps/v8/test/mjsunit/object-define-property.js
+++ b/deps/v8/test/mjsunit/object-define-property.js
@@ -1053,12 +1053,14 @@ function anotherGetter() { return 222; }
function testGetterOnProto(expected, o) {
assertEquals(expected, o.quebec);
}
+%PrepareFunctionForOptimization(testGetterOnProto);
obj1 = {};
Object.defineProperty(obj1, "quebec", { get: getter, configurable: true });
obj2 = Object.create(obj1);
obj3 = Object.create(obj2);
+%PrepareFunctionForOptimization(testGetterOnProto);
testGetterOnProto(111, obj3);
testGetterOnProto(111, obj3);
%OptimizeFunctionOnNextCall(testGetterOnProto);
@@ -1069,6 +1071,7 @@ Object.defineProperty(obj1, "quebec", { get: anotherGetter });
testGetterOnProto(222, obj3);
testGetterOnProto(222, obj3);
+%PrepareFunctionForOptimization(testGetterOnProto);
%OptimizeFunctionOnNextCall(testGetterOnProto);
testGetterOnProto(222, obj3);
testGetterOnProto(222, obj3);
@@ -1083,6 +1086,7 @@ function testSetterOnProto(expected, o) {
o.romeo = 444;
assertEquals(expected, modifyMe);
}
+%PrepareFunctionForOptimization(testSetterOnProto);
obj1 = {};
Object.defineProperty(obj1, "romeo", { set: setter, configurable: true });
@@ -1099,6 +1103,7 @@ Object.defineProperty(obj1, "romeo", { set: anotherSetter });
testSetterOnProto(446, obj3);
testSetterOnProto(446, obj3);
+%PrepareFunctionForOptimization(testSetterOnProto);
%OptimizeFunctionOnNextCall(testSetterOnProto);
testSetterOnProto(446, obj3);
testSetterOnProto(446, obj3);
@@ -1108,6 +1113,7 @@ function testSetterOnProtoStrict(o) {
"use strict";
o.sierra = 12345;
}
+%PrepareFunctionForOptimization(testSetterOnProtoStrict);
obj1 = {};
Object.defineProperty(obj1, "sierra",
@@ -1140,6 +1146,7 @@ assertTrue(exception);
function Assign(o) {
o.blubb = 123;
}
+%PrepareFunctionForOptimization(Assign);
function C() {}
diff --git a/deps/v8/test/mjsunit/object-freeze.js b/deps/v8/test/mjsunit/object-freeze.js
index 98ad163895..244f952d8c 100644
--- a/deps/v8/test/mjsunit/object-freeze.js
+++ b/deps/v8/test/mjsunit/object-freeze.js
@@ -793,3 +793,561 @@ function checkUndefined() {
assertTrue(checkUndefined(...arr));
assertTrue(checkUndefined(...[...arr]));
assertTrue(checkUndefined.apply(this, [...arr]));
+
+//
+// Array.prototype.map
+//
+(function() {
+ var a = Object.freeze(['0','1','2','3','4']);
+
+ // Simple use.
+ var result = [1,2,3,4,5];
+ assertArrayEquals(result, a.map(function(n) { return Number(n) + 1; }));
+
+ // Use specified object as this object when calling the function.
+ var o = { delta: 42 }
+ result = [42,43,44,45,46];
+ assertArrayEquals(result, a.map(function(n) { return this.delta + Number(n); }, o));
+
+ // Modify original array.
+ b = Object.freeze(['0','1','2','3','4']);
+ result = [1,2,3,4,5];
+ assertArrayEquals(result,
+ b.map(function(n, index, array) {
+ array[index] = Number(n) + 1; return Number(n) + 1;
+ }));
+ assertArrayEquals(b, a);
+
+ // Only loop through initial part of array and elements are not
+ // added.
+ a = Object.freeze(['0','1','2','3','4']);
+ result = [1,2,3,4,5];
+ assertArrayEquals(result,
+ a.map(function(n, index, array) { assertThrows(() => { array.push(n) }); return Number(n) + 1; }));
+ assertArrayEquals(['0','1','2','3','4'], a);
+
+ // Respect holes.
+ a = new Array(20);
+ a[1] = '2';
+ Object.freeze(a);
+ a = Object.freeze(a).map(function(n) { return 2*Number(n); });
+
+ for (var i in a) {
+ assertEquals(4, a[i]);
+ assertEquals('1', i);
+ }
+
+ // Skip over missing properties.
+ a = {
+ "0": 1,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertArrayEquals([2, , 4],
+ Array.prototype.map.call(Object.freeze(a), function(n) {
+ received.push(n);
+ return n * 2;
+ }));
+ assertArrayEquals([1, 2], received);
+
+ // Modify array prototype
+ a = ['1', , 2];
+ received = [];
+ assertThrows(() => {
+ Array.prototype.map.call(Object.freeze(a), function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n * 2;
+ });
+ }, TypeError);
+ assertArrayEquals([], received);
+
+ // Create a new object in each function call when receiver is a
+ // primitive value. See ECMA-262, Annex C.
+ a = [];
+ Object.freeze(['1', '2']).map(function() { a.push(this) }, "");
+ assertTrue(a[0] !== a[1]);
+
+ // Do not create a new object otherwise.
+ a = [];
+ Object.freeze(['1', '2']).map(function() { a.push(this) }, {});
+ assertSame(a[0], a[1]);
+
+ // In strict mode primitive values should not be coerced to an object.
+ a = [];
+ Object.freeze(['1', '2']).map(function() { 'use strict'; a.push(this); }, "");
+ assertEquals("", a[0]);
+ assertEquals(a[0], a[1]);
+
+})();
+
+// Test with double elements
+// Test packed element array built-in functions with freeze.
+function testDoubleFrozenArray1(obj) {
+ assertTrue(Object.isSealed(obj));
+ // Verify that the value can't be written
+ obj1 = new Array(...obj);
+ var length = obj.length;
+ for (var i = 0; i < length-1; i++) {
+ obj[i] = 'new';
+ assertEquals(obj1[i], obj[i]);
+ }
+ // Verify that the length can't be written by builtins.
+ assertTrue(Array.isArray(obj));
+ assertThrows(function() { obj.pop(); }, TypeError);
+ assertThrows(function() { obj.push(); }, TypeError);
+ assertThrows(function() { obj.shift(); }, TypeError);
+ assertThrows(function() { obj.unshift(); }, TypeError);
+ assertThrows(function() { obj.copyWithin(0,0); }, TypeError);
+ assertThrows(function() { obj.fill(0); }, TypeError);
+ assertThrows(function() { obj.reverse(); }, TypeError);
+ assertThrows(function() { obj.sort(); }, TypeError);
+ assertThrows(function() { obj.splice(0); }, TypeError);
+ assertThrows(function() { obj.splice(0, 0); }, TypeError);
+ assertTrue(Object.isFrozen(obj));
+
+ // Verify search, filter, iterator
+ assertEquals(obj.lastIndexOf(1), 2);
+ assertEquals(obj.indexOf(undefined), -1);
+ assertFalse(obj.includes(Symbol("test")));
+ assertTrue(obj.includes(1));
+ assertTrue(obj.includes(-1.1));
+ assertFalse(obj.includes());
+ assertEquals(obj.find(x => x==0), undefined);
+ assertEquals(obj.findIndex(x => x==2), 4);
+ assertFalse(obj.some(x => typeof x == 'symbol'));
+ assertFalse(obj.every(x => x == -1));
+ var filteredArray = obj.filter(e => typeof e == "symbol");
+ assertEquals(filteredArray.length, 0);
+ assertEquals(obj.map(x => x), obj);
+ var countPositiveNumber = 0;
+ obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+ });
+ assertEquals(countPositiveNumber, 1);
+ assertEquals(obj.length, obj.concat([]).length);
+ var iterator = obj.values();
+ assertEquals(iterator.next().value, 1.1);
+ assertEquals(iterator.next().value, -1.1);
+ var iterator = obj.keys();
+ assertEquals(iterator.next().value, 0);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.entries();
+ assertEquals(iterator.next().value, [0, 1.1]);
+ assertEquals(iterator.next().value, [1, -1.1]);
+}
+
+obj = new Array(1.1, -1.1, 1, -1, 2);
+assertTrue(%HasDoubleElements(obj));
+Object.freeze(obj);
+testDoubleFrozenArray1(obj);
+
+// Verify change from sealed to frozen
+obj = new Array(1.1, -1.1, 1, -1, 2);
+assertTrue(%HasDoubleElements(obj));
+Object.seal(obj);
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+testDoubleFrozenArray1(obj);
+
+// Verify change from non-extensible to frozen
+obj = new Array(1.1, -1.1, 1, -1, 2);
+assertTrue(%HasDoubleElements(obj));
+Object.preventExtensions(obj);
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+testDoubleFrozenArray1(obj);
+
+// Verify flat, map, slice, flatMap, join, reduce, reduceRight for frozen packed array
+function testDoubleFrozenArray2(arr) {
+ assertTrue(Object.isFrozen(arr));
+ assertTrue(Array.isArray(arr));
+ assertEquals(arr.map(x => [x]), [[1], [1.1], [0]]);
+ assertEquals(arr.flatMap(x => [x]), arr);
+ assertEquals(arr.flat(), arr);
+ assertEquals(arr.join('-'), "1-1.1-0");
+ const reducer = (accumulator, currentValue) => accumulator + currentValue;
+ assertEquals(arr.reduce(reducer), 2.1);
+ assertEquals(arr.reduceRight(reducer), 2.1);
+ assertEquals(arr.slice(0, 1), [1]);
+}
+var arr1 = new Array(1, 1.1, 0);
+assertTrue(%HasDoubleElements(arr1));
+Object.freeze(arr1);
+testDoubleFrozenArray2(arr1);
+
+// Verify change from sealed to frozen
+var arr1 = new Array(1, 1.1, 0);
+assertTrue(%HasDoubleElements(arr1));
+Object.seal(arr1);
+Object.freeze(arr1);
+testDoubleFrozenArray2(arr1);
+
+
+// Verify change from non-extensible to frozen
+var arr1 = new Array(1, 1.1, 0);
+assertTrue(%HasDoubleElements(arr1));
+Object.preventExtensions(arr1);
+Object.freeze(arr1);
+testDoubleFrozenArray2(arr1);
+
+// Test regression with Object.defineProperty
+var obj = [];
+obj.propertyA = 42;
+obj[0] = 1.1;
+Object.freeze(obj);
+assertThrows(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ });
+}, TypeError);
+assertEquals(42, obj.propertyA);
+assertThrows(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ writable: false,
+ });
+}, TypeError);
+assertDoesNotThrow(function() {obj.propertyA = 2;});
+assertEquals(obj.propertyA, 42);
+assertThrows(function() {
+ Object.defineProperty(obj, 'abc', {
+ value: obj,
+ });
+}, TypeError);
+
+// Regression test with simple array
+var arr = [1.1];
+Object.freeze(arr);
+arr[0] = 1;
+assertEquals(arr[0], 1.1);
+
+// Test regression Array.concat with double
+var arr = [1.1];
+Object.freeze(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, [1.1, 0.5]);
+Object.freeze(arr);
+arr = arr.concat([1.5, 'b']);
+assertEquals(arr, [1.1, 0.5, 1.5, 'b']);
+
+// Regression test with change length
+var arr = [1.1, 0];
+Object.freeze(arr);
+assertEquals(arr.length, 2);
+arr.length = 3;
+assertEquals(arr.length, 2);
+arr[2] = 'c';
+assertEquals(arr[2], undefined);
+arr.length = 1;
+assertEquals(arr.length, 2);
+
+// Start testing with holey array
+// Test holey element array built-in functions with freeze.
+function testHoleyDoubleFrozenArray1(obj) {
+ assertTrue(Object.isSealed(obj));
+ // Verify that the value can't be written
+ obj1 = new Array(...obj);
+ var length = obj.length;
+ for (var i = 0; i < length-1; i++) {
+ obj[i] = 'new';
+ assertEquals(obj1[i], obj[i]);
+ }
+
+ // Verify that the length can't be written by builtins.
+ assertTrue(Array.isArray(obj));
+ assertThrows(function() { obj.pop(); }, TypeError);
+ assertThrows(function() { obj.push(); }, TypeError);
+ assertThrows(function() { obj.shift(); }, TypeError);
+ assertThrows(function() { obj.unshift(); }, TypeError);
+ assertThrows(function() { obj.copyWithin(0,0); }, TypeError);
+ assertThrows(function() { obj.fill(0); }, TypeError);
+ assertThrows(function() { obj.reverse(); }, TypeError);
+ assertThrows(function() { obj.sort(); }, TypeError);
+ assertThrows(function() { obj.splice(0); }, TypeError);
+ assertThrows(function() { obj.splice(0, 0); }, TypeError);
+ assertTrue(Object.isFrozen(obj));
+
+ // Verify search, filter, iterator
+ assertEquals(obj.lastIndexOf(1), 2);
+ assertEquals(obj.indexOf(1.1), 5);
+ assertEquals(obj.indexOf(undefined), -1);
+ assertFalse(obj.includes(Symbol("test")));
+ assertTrue(obj.includes(undefined));
+ assertFalse(obj.includes(NaN));
+ assertTrue(obj.includes());
+ assertEquals(obj.find(x => x==2), undefined);
+ assertEquals(obj.findIndex(x => x==1.1), 5);
+ assertFalse(obj.some(x => typeof x == 'symbol'));
+ assertFalse(obj.every(x => x == -1));
+ var filteredArray = obj.filter(e => typeof e == "symbol");
+ assertEquals(filteredArray.length, 0);
+ assertEquals(obj.map(x => x), obj);
+ var countPositiveNumber = 0;
+ obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+ });
+ assertEquals(countPositiveNumber, 1);
+ assertEquals(obj.length, obj.concat([]).length);
+ var iterator = obj.values();
+ assertEquals(iterator.next().value, -1.1);
+ assertEquals(iterator.next().value, 0);
+ var iterator = obj.keys();
+ assertEquals(iterator.next().value, 0);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.entries();
+ assertEquals(iterator.next().value, [0, -1.1]);
+ assertEquals(iterator.next().value, [1, 0]);
+}
+
+obj = [-1.1, 0, 1, , -1, 1.1];
+assertTrue(%HasHoleyElements(obj));
+Object.freeze(obj);
+testHoleyDoubleFrozenArray1(obj);
+
+// Verify change from sealed to frozen
+obj = [-1.1, 0, 1, , -1, 1.1];
+assertTrue(%HasHoleyElements(obj));
+Object.seal(obj);
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+testHoleyDoubleFrozenArray1(obj);
+
+// Verify change from non-extensible to frozen
+obj = [-1.1, 0, 1, , -1, 1.1];
+assertTrue(%HasHoleyElements(obj));
+Object.preventExtensions(obj);
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+testHoleyDoubleFrozenArray1(obj);
+
+// Verify flat, map, slice, flatMap, join, reduce, reduceRight for frozen packed array
+function testHoleyDoubleFrozenArray2(arr) {
+ assertTrue(Object.isFrozen(arr));
+ assertTrue(Array.isArray(arr));
+ assertEquals(arr.map(x => [x]), [, [1.1], [1], [0]]);
+ assertEquals(arr.flatMap(x => [x]), [1.1, 1, 0]);
+ assertEquals(arr.flat(), [1.1, 1, 0]);
+ assertEquals(arr.join('-'), "-1.1-1-0");
+ const reducer = (accumulator, currentValue) => accumulator + currentValue;
+ assertEquals(arr.reduce(reducer), 2.1);
+ assertEquals(arr.reduceRight(reducer), 2.1);
+ assertEquals(arr.slice(0, 1), [,]);
+ assertEquals(arr.slice(1, 2), [1.1]);
+}
+var arr1 = [, 1.1, 1, 0];
+assertTrue(%HasHoleyElements(arr1));
+Object.preventExtensions(arr1);
+Object.freeze(arr1);
+testHoleyDoubleFrozenArray2(arr1);
+
+// Verify change from sealed to frozen
+var arr1 = [, 1.1, 1, 0];
+assertTrue(%HasHoleyElements(arr1));
+Object.seal(arr1);
+Object.freeze(arr1);
+testHoleyDoubleFrozenArray2(arr1);
+
+// Verify change from non-extensible to frozen
+var arr1 = [, 1.1, 1, 0];
+assertTrue(%HasHoleyElements(arr1));
+Object.preventExtensions(arr1);
+Object.freeze(arr1);
+testHoleyDoubleFrozenArray2(arr1);
+
+// Test regression with Object.defineProperty
+var obj = [1.1, , 0];
+obj.propertyA = 42;
+obj[0] = true;
+Object.freeze(obj);
+assertThrows(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ });
+}, TypeError);
+assertEquals(42, obj.propertyA);
+assertThrows(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ writable: false,
+ });
+}, TypeError);
+assertDoesNotThrow(function() {obj.propertyA = 2;});
+assertEquals(obj.propertyA, 42);
+assertThrows(function() {
+ Object.defineProperty(obj, 'abc', {
+ value: obj,
+ });
+}, TypeError);
+
+// Regression test with simple holey array
+var arr = [, 1.1];
+Object.freeze(arr);
+arr[1] = 'b';
+assertEquals(arr[1], 1.1);
+arr[0] = 1;
+assertEquals(arr[0], undefined);
+
+// Test regression Array.concat with double
+var arr = [1.1, , 0];
+Object.freeze(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, [1.1, , 0, 0.5]);
+Object.freeze(arr);
+arr = arr.concat([1.5, 'c']);
+assertEquals(arr, [1.1, ,0, 0.5, 1.5, 'c']);
+
+// Regression test with change length
+var arr = [1.1, ,0];
+Object.freeze(arr);
+assertEquals(arr.length, 3);
+arr.length = 4;
+assertEquals(arr.length, 3);
+arr[3] = 'c';
+assertEquals(arr[2], 0);
+assertEquals(arr[3], undefined);
+arr.length = 2;
+assertEquals(arr.length, 3);
+
+// Change length with holey entries at the end
+var arr = [1.1, ,];
+Object.freeze(arr);
+assertEquals(arr.length, 2);
+arr.length = 0;
+assertEquals(arr.length, 2);
+arr.length = 3;
+assertEquals(arr.length, 2);
+arr.length = 0;
+assertEquals(arr.length, 2);
+
+// Spread with array
+var arr = [1.1, 0, -1];
+Object.freeze(arr);
+var arrSpread = [...arr];
+assertEquals(arrSpread.length, arr.length);
+assertEquals(arrSpread[0], 1.1);
+assertEquals(arrSpread[1], 0);
+assertEquals(arrSpread[2], -1);
+
+// Spread with array-like
+function returnArgs() {
+ return Object.freeze(arguments);
+}
+var arrLike = returnArgs(1.1, 0, -1);
+assertTrue(Object.isFrozen(arrLike));
+var arrSpread = [...arrLike];
+assertEquals(arrSpread.length, arrLike.length);
+assertEquals(arrSpread[0], 1.1);
+assertEquals(arrSpread[1], 0);
+assertEquals(arrSpread[2], -1);
+
+// Spread with holey
+function countArgs() {
+ return arguments.length;
+}
+var arr = [, 1.1, 0];
+Object.freeze(arr);
+assertEquals(countArgs(...arr), 3);
+assertEquals(countArgs(...[...arr]), 3);
+assertEquals(countArgs.apply(this, [...arr]), 3);
+function checkUndefined() {
+ return arguments[0] === undefined;
+}
+assertTrue(checkUndefined(...arr));
+assertTrue(checkUndefined(...[...arr]));
+assertTrue(checkUndefined.apply(this, [...arr]));
+
+//
+// Array.prototype.map
+//
+(function() {
+ var a = Object.freeze([0.1,1,2,3,4]);
+
+ // Simple use.
+ var result = [1.1,2,3,4,5];
+ assertArrayEquals(result, a.map(function(n) { return Number(n) + 1; }));
+
+ // Use specified object as this object when calling the function.
+ var o = { delta: 42 }
+ result = [42.1,43,44,45,46];
+ assertArrayEquals(result, a.map(function(n) { return this.delta + Number(n); }, o));
+
+ // Modify original array.
+ b = Object.freeze([0.1,1,2,3,4]);
+ result = [1.1,2,3,4,5];
+ assertArrayEquals(result,
+ b.map(function(n, index, array) {
+ array[index] = Number(n) + 1; return Number(n) + 1;
+ }));
+ assertArrayEquals(b, a);
+
+ // Only loop through initial part of array and elements are not
+ // added.
+ a = Object.freeze([0.1,1,2,3,4]);
+ result = [1.1,2,3,4,5];
+ assertArrayEquals(result,
+ a.map(function(n, index, array) { assertThrows(() => { array.push(n) }); return Number(n) + 1; }));
+ assertArrayEquals([0.1,1,2,3,4], a);
+
+ // Respect holes.
+ a = new Array(20);
+ a[1] = 1.1;
+ Object.freeze(a);
+ a = Object.freeze(a).map(function(n) { return 2*Number(n); });
+
+ for (var i in a) {
+ assertEquals(2.2, a[i]);
+ assertEquals('1', i);
+ }
+
+ // Skip over missing properties.
+ a = {
+ "0": 1.1,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertArrayEquals([2.2, , 4],
+ Array.prototype.map.call(Object.freeze(a), function(n) {
+ received.push(n);
+ return n * 2;
+ }));
+ assertArrayEquals([1.1, 2], received);
+
+ // Modify array prototype
+ a = [1.1, , 2];
+ received = [];
+ assertThrows(() => {
+ Array.prototype.map.call(Object.freeze(a), function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n * 2;
+ });
+ }, TypeError);
+ assertArrayEquals([], received);
+
+ // Create a new object in each function call when receiver is a
+ // primitive value. See ECMA-262, Annex C.
+ a = [];
+ Object.freeze([1.1, 2]).map(function() { a.push(this) }, "");
+ assertTrue(a[0] !== a[1]);
+
+ // Do not create a new object otherwise.
+ a = [];
+ Object.freeze([1.1, 2]).map(function() { a.push(this) }, {});
+ assertSame(a[0], a[1]);
+
+ // In strict mode primitive values should not be coerced to an object.
+ a = [];
+ Object.freeze([1.1, 1.2]).map(function() { 'use strict'; a.push(this); }, "");
+ assertEquals("", a[0]);
+ assertEquals(a[0], a[1]);
+
+})();
diff --git a/deps/v8/test/mjsunit/object-literal-overwrite.js b/deps/v8/test/mjsunit/object-literal-overwrite.js
index 4d19d35d12..955c5a21f1 100644
--- a/deps/v8/test/mjsunit/object-literal-overwrite.js
+++ b/deps/v8/test/mjsunit/object-literal-overwrite.js
@@ -33,51 +33,33 @@
// Tests for the full code generator (if active).
-var foo1 = {
- bar: 6,
- bar: 7
-};
+var foo1 = {bar: 6, bar: 7};
-var foo2 = {
- bar: function(a){},
- bar: 7
-};
+var foo2 = {bar: function(a) {}, bar: 7};
-var foo3 = {
- bar: function(a){},
- bar: function(b){},
- bar: 7
-};
+var foo3 = {bar: function(a) {}, bar: function(b) {}, bar: 7};
var foo4 = {
- bar: function(b){},
+ bar: function(b) {},
bar: 4,
- bar: function(){return 7},
+ bar: function() {
+ return 7;
+ }
};
-var foo5 = {
- 13: function(a){},
- 13: 7
-}
+var foo5 = {13: function(a) {}, 13: 7};
-var foo6 = {
- 14.31: function(a){},
- 14.31: 7
-}
+var foo6 = {14.31: function(a) {}, 14.31: 7};
-var foo7 = {
- 15: 6,
- 15: 7
-}
+var foo7 = {15: 6, 15: 7};
function foo8(i) {
- var obj = {
- x: {a: i},
- x: 7
- };
+ var obj = {x: {a: i}, x: 7};
+
return obj.x;
};
-
+%PrepareFunctionForOptimization(foo8);
+;
assertEquals(7, foo1.bar);
assertEquals(7, foo2.bar);
assertEquals(7, foo3.bar);
@@ -95,18 +77,23 @@ assertEquals(7, foo8(1));
// Test for the classic code generator.
function fun(x) {
- var inner = { j: function(x) { return x; }, j: 7 };
+ var inner = {
+ j: function(x) {
+ return x;
+ },
+ j: 7
+ };
return inner.j;
}
-assertEquals(7, fun(7) );
+assertEquals(7, fun(7));
// Check that the initializers of computed properties are executed, even if
// no store instructions are generated for the literals.
var glob1 = 0;
-var bar1 = { x: glob1++, x: glob1++, x: glob1++, x: 7};
+var bar1 = {x: glob1++, x: glob1++, x: glob1++, x: 7};
assertEquals(3, glob1);
@@ -114,7 +101,7 @@ assertEquals(3, glob1);
var glob2 = 0;
function fun2() {
- var r = { y: glob2++, y: glob2++, y: glob2++, y: 7};
+ var r = {y: glob2++, y: glob2++, y: glob2++, y: 7};
return r.y;
}
@@ -125,7 +112,7 @@ assertEquals(3, glob2);
var glob3 = 0;
function fun3() {
- var r = { 113: glob3++, 113: glob3++, 113: glob3++, 113: 7};
+ var r = {113: glob3++, 113: glob3++, 113: glob3++, 113: 7};
return r[113];
}
diff --git a/deps/v8/test/mjsunit/object-literal.js b/deps/v8/test/mjsunit/object-literal.js
index cbc4f5de93..0050792b67 100644
--- a/deps/v8/test/mjsunit/object-literal.js
+++ b/deps/v8/test/mjsunit/object-literal.js
@@ -28,6 +28,7 @@
// Flags: --allow-natives-syntax
function runLiteralsTest(fn) {
+ %PrepareFunctionForOptimization(fn);
// The first run creates an copy directly from the boilerplate decsription.
fn();
// The second run will create the boilerplate.
@@ -531,6 +532,7 @@ runLiteralsTest(function TestSlowLiteralOptimized() {
function f() {
return {__proto__:null, bar:"barValue"};
}
+ %PrepareFunctionForOptimization(f);
let obj = f();
assertFalse(%HasFastProperties(obj));
assertEquals(Object.getPrototypeOf(obj), null);
diff --git a/deps/v8/test/mjsunit/object-prevent-extensions.js b/deps/v8/test/mjsunit/object-prevent-extensions.js
index 419a4351af..4bda84e2dd 100644
--- a/deps/v8/test/mjsunit/object-prevent-extensions.js
+++ b/deps/v8/test/mjsunit/object-prevent-extensions.js
@@ -30,6 +30,8 @@
// Flags: --allow-natives-syntax
+assertFalse(Object.isExtensible());
+
var obj1 = {};
// Extensible defaults to true.
assertTrue(Object.isExtensible(obj1));
@@ -457,3 +459,476 @@ function checkUndefined() {
assertTrue(checkUndefined(...arr));
assertTrue(checkUndefined(...[...arr]));
assertTrue(checkUndefined.apply(this, [...arr]));
+
+//
+// Array.prototype.map
+//
+(function() {
+ var a = Object.preventExtensions(['0','1','2','3','4']);
+
+ // Simple use.
+ var result = [1,2,3,4,5];
+ assertArrayEquals(result, a.map(function(n) { return Number(n) + 1; }));
+
+ // Use specified object as this object when calling the function.
+ var o = { delta: 42 }
+ result = [42,43,44,45,46];
+ assertArrayEquals(result, a.map(function(n) { return this.delta + Number(n); }, o));
+
+ // Modify original array.
+ b = Object.preventExtensions(['0','1','2','3','4']);
+ result = [1,2,3,4,5];
+ assertArrayEquals(result,
+ b.map(function(n, index, array) {
+ array[index] = Number(n) + 1; return Number(n) + 1;
+ }));
+ assertArrayEquals(b, result);
+
+ // Only loop through initial part of array and elements are not
+ // added.
+ a = Object.preventExtensions(['0','1','2','3','4']);
+ result = [1,2,3,4,5];
+ assertArrayEquals(result,
+ a.map(function(n, index, array) { assertThrows(() => { array.push(n) }); return Number(n) + 1; }));
+ assertArrayEquals(['0','1','2','3','4'], a);
+
+ // Respect holes.
+ a = new Array(20);
+ a[1] = '2';
+ Object.preventExtensions(a);
+ a = Object.preventExtensions(a).map(function(n) { return 2*Number(n); });
+
+ for (var i in a) {
+ assertEquals(4, a[i]);
+ assertEquals('1', i);
+ }
+
+ // Skip over missing properties.
+ a = {
+ "0": 1,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertArrayEquals([2, , 4],
+ Array.prototype.map.call(Object.preventExtensions(a), function(n) {
+ received.push(n);
+ return n * 2;
+ }));
+ assertArrayEquals([1, 2], received);
+
+ // Modify array prototype
+ a = ['1', , 2];
+ received = [];
+ assertThrows(() => {
+ Array.prototype.map.call(Object.preventExtensions(a), function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n * 2;
+ });
+ }, TypeError);
+ assertArrayEquals([], received);
+
+ // Create a new object in each function call when receiver is a
+ // primitive value. See ECMA-262, Annex C.
+ a = [];
+ Object.preventExtensions(['1', '2']).map(function() { a.push(this) }, "");
+ assertTrue(a[0] !== a[1]);
+
+ // Do not create a new object otherwise.
+ a = [];
+ Object.preventExtensions(['1', '2']).map(function() { a.push(this) }, {});
+ assertSame(a[0], a[1]);
+
+ // In strict mode primitive values should not be coerced to an object.
+ a = [];
+ Object.preventExtensions(['1', '2']).map(function() { 'use strict'; a.push(this); }, "");
+ assertEquals("", a[0]);
+ assertEquals(a[0], a[1]);
+
+})();
+
+
+// Test for double element
+// Test packed element array built-in functions with preventExtensions.
+obj = new Array(-1.1, 0, 1, -1, 1.1);
+assertTrue(%HasDoubleElements(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+
+// Verify that the length can't be written by builtins.
+assertThrows(function() { obj.push(1); }, TypeError);
+assertDoesNotThrow(function() { obj.shift(); });
+assertThrows(function() { obj.unshift(1); }, TypeError);
+assertThrows(function() { obj.splice(0, 0, 1); }, TypeError);
+assertDoesNotThrow(function() {obj.splice(0, 0)});
+
+// Verify search, filter, iterator
+obj = new Array(-1.1, 0, 1, -1, 1.1);
+assertTrue(%HasDoubleElements(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+assertEquals(obj.lastIndexOf(1), 2);
+assertEquals(obj.indexOf(1.1), 4);
+assertEquals(obj.indexOf(undefined), -1);
+assertFalse(obj.includes(Symbol("test")));
+assertFalse(obj.includes(undefined));
+assertFalse(obj.includes(NaN));
+assertFalse(obj.includes());
+assertEquals(obj.find(x => x==0), 0);
+assertEquals(obj.findIndex(x => x==1.1), 4);
+assertFalse(obj.some(x => typeof x == 'symbol'));
+assertFalse(obj.every(x => x == -1));
+var filteredArray = obj.filter(e => typeof e == "symbol");
+assertEquals(filteredArray.length, 0);
+assertEquals(obj.map(x => x), obj);
+var countPositiveNumber = 0;
+obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+});
+assertEquals(countPositiveNumber, 1);
+assertEquals(obj.length, obj.concat([]).length);
+var iterator = obj.values();
+assertEquals(iterator.next().value, -1.1);
+assertEquals(iterator.next().value, 0);
+var iterator = obj.keys();
+assertEquals(iterator.next().value, 0);
+assertEquals(iterator.next().value, 1);
+var iterator = obj.entries();
+assertEquals(iterator.next().value, [0, -1.1]);
+assertEquals(iterator.next().value, [1, 0]);
+
+// Verify that the value can be written
+var length = obj.length;
+for (var i = 0; i < length-1; i++) {
+ obj[i] = 'new';
+ assertEquals(obj[i], 'new');
+}
+
+// Verify flat, map, flatMap, join, reduce, reduceRight for non-extensible packed array
+var arr = [1.1, 0, 1];
+assertTrue(%HasDoubleElements(arr));
+Object.preventExtensions(arr);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+assertEquals(arr.map(x => [x]), [[1.1], [0], [1]]);
+assertEquals(arr.flatMap(x => [x]), arr);
+assertEquals(arr.flat(), arr);
+assertEquals(arr.join('-'), "1.1-0-1");
+assertEquals(arr.reduce(reducer), 2.1);
+assertEquals(arr.reduceRight(reducer), 2.1);
+assertEquals(arr.slice(0, 1), [1.1]);
+
+// Verify change content of non-extensible packed array
+arr.sort();
+assertEquals(arr.join(''), "011.1");
+arr.reverse();
+assertEquals(arr.join(''), "1.110");
+arr.copyWithin(0, 1, 2);
+assertEquals(arr.join(''),"110");
+arr.fill('d');
+assertEquals(arr.join(''), "ddd");
+arr.pop();
+assertEquals(arr.join(''), "dd");
+
+// Regression test with simple array
+var arr = [1.1];
+Object.preventExtensions(arr);
+arr[0] = 'b';
+assertEquals(arr[0], 'b');
+
+// Test regression Array.concat with double
+var arr = [1.1];
+Object.preventExtensions(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, [1.1, 0.5]);
+Object.preventExtensions(arr);
+arr = arr.concat([1.5, 'b']);
+assertEquals(arr, [1.1, 0.5, 1.5, 'b']);
+
+// Regression test with change length
+var arr = [1.1, 0];
+Object.preventExtensions(arr);
+assertEquals(arr.length, 2);
+arr.length = 3;
+assertEquals(arr.length, 3);
+arr[2] = 'c';
+assertEquals(arr[2], undefined);
+arr.length = 1;
+assertEquals(arr.length, 1);
+assertEquals(arr[1], undefined);
+
+// Test for holey array
+// Test holey element array built-in functions with preventExtensions.
+obj = [-1.1, 0, 1, , -1, 1.1];
+assertTrue(%HasDoubleElements(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+
+// Verify that the length can't be written by builtins.
+assertThrows(function() { obj.push(1); }, TypeError);
+assertThrows(function() { obj.shift(); }, TypeError);
+assertThrows(function() { obj.unshift(1); }, TypeError);
+assertThrows(function() { obj.splice(0, 0, 1); }, TypeError);
+assertDoesNotThrow(function() {obj.splice(0, 0)});
+
+// Verify search, filter, iterator
+obj = [-1.1, 0, 1, ,-1, 1.1];
+assertTrue(%HasHoleyElements(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+assertEquals(obj.lastIndexOf(1), 2);
+assertEquals(obj.indexOf(1.1), 5);
+assertEquals(obj.indexOf(undefined), -1);
+assertFalse(obj.includes(Symbol("test")));
+assertTrue(obj.includes(undefined));
+assertFalse(obj.includes(NaN));
+assertTrue(obj.includes());
+assertEquals(obj.find(x => x==0), 0);
+assertEquals(obj.findIndex(x => x==1.1), 5);
+assertFalse(obj.some(x => typeof x == 'symbol'));
+assertFalse(obj.every(x => x == -1));
+var filteredArray = obj.filter(e => typeof e == "symbol");
+assertEquals(filteredArray.length, 0);
+assertEquals(obj.map(x => x), obj);
+var countPositiveNumber = 0;
+obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+});
+assertEquals(countPositiveNumber, 1);
+assertEquals(obj.length, obj.concat([]).length);
+var iterator = obj.values();
+assertEquals(iterator.next().value, -1.1);
+assertEquals(iterator.next().value, 0);
+var iterator = obj.keys();
+assertEquals(iterator.next().value, 0);
+assertEquals(iterator.next().value, 1);
+var iterator = obj.entries();
+assertEquals(iterator.next().value, [0, -1.1]);
+assertEquals(iterator.next().value, [1, 0]);
+
+// Verify that the value can be written
+var length = obj.length;
+for (var i = 0; i < length-1; i++) {
+ if (i==3) continue;
+ obj[i] = 'new';
+ assertEquals(obj[i], 'new');
+}
+
+// Verify flat, map, flatMap, join, reduce, reduceRight for non-extensible holey array
+var arr = [, 1.1, 0, 1];
+assertTrue(%HasDoubleElements(arr));
+Object.preventExtensions(arr);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+assertEquals(arr.map(x => [x]), [, [1.1], [0], [1]]);
+assertEquals(arr.flatMap(x => [x]), [1.1, 0, 1]);
+assertEquals(arr.flat(), [1.1, 0, 1]);
+assertEquals(arr.join('-'), "-1.1-0-1");
+assertEquals(arr.reduce(reducer1), 2.1);
+assertEquals(arr.reduceRight(reducer1), 2.1);
+assertEquals(arr.slice(0, 1), [,]);
+assertEquals(arr.slice(1, 2), [1.1]);
+
+// Verify change content of non-extensible holey array
+assertThrows(function(){arr.sort();}, TypeError);
+assertEquals(arr.join(''), "1.101");
+assertThrows(function(){arr.reverse();}, TypeError);
+assertEquals(arr.join(''), "1.101");
+assertThrows(function(){arr.copyWithin(0, 1, 2);}, TypeError);
+arr.copyWithin(1, 2, 3);
+assertEquals(arr.join(''),"001");
+assertThrows(function(){arr.fill('d');}, TypeError);
+assertEquals(arr.join(''), "001");
+arr.pop();
+assertEquals(arr.join(''), "00");
+
+// Regression test with simple holey array
+var arr = [, 1.1];
+Object.preventExtensions(arr);
+arr[1] = 'b';
+assertEquals(arr[1], 'b');
+arr[0] = 1;
+assertEquals(arr[0], undefined);
+
+// Test regression Array.concat with double
+var arr = [1.1, , 0];
+Object.preventExtensions(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, [1.1, , 0, 0.5]);
+Object.preventExtensions(arr);
+arr = arr.concat([1.5, 'c']);
+assertEquals(arr, [1.1, , 0, 0.5, 1.5, 'c']);
+
+// Regression test with change length
+var arr = [1.1, , 0];
+Object.preventExtensions(arr);
+assertEquals(arr.length, 3);
+arr.length = 4;
+assertEquals(arr.length, 4);
+arr[3] = 'c';
+assertEquals(arr[3], undefined);
+arr.length = 2;
+assertEquals(arr.length, 2);
+assertEquals(arr[2], undefined);
+assertEquals(arr.pop(), undefined);
+assertEquals(arr.length, 1);
+assertEquals(arr[1], undefined);
+
+// Change length with holey entries at the end
+var arr = [1.1, ,];
+Object.preventExtensions(arr);
+assertEquals(arr.length, 2);
+arr.length = 0;
+assertEquals(arr.length, 0);
+arr.length = 3;
+assertEquals(arr.length, 3);
+arr.length = 0;
+assertEquals(arr.length, 0);
+
+// Spread with array
+var arr = [1.1, 0, -1];
+Object.preventExtensions(arr);
+var arrSpread = [...arr];
+assertEquals(arrSpread.length, arr.length);
+assertEquals(arrSpread[0], 1.1);
+assertEquals(arrSpread[1], 0);
+assertEquals(arrSpread[2], -1);
+
+// Spread with array-like
+function returnArgs() {
+ return Object.preventExtensions(arguments);
+}
+var arrLike = returnArgs(1.1, 0, -1);
+assertFalse(Object.isExtensible(arrLike));
+var arrSpread = [...arrLike];
+assertEquals(arrSpread.length, arrLike.length);
+assertEquals(arrSpread[0], 1.1);
+assertEquals(arrSpread[1], 0);
+assertEquals(arrSpread[2], -1);
+
+// Spread with holey
+function countArgs() {
+ return arguments.length;
+}
+var arr = [, 1.1, 0];
+Object.preventExtensions(arr);
+assertEquals(countArgs(...arr), 3);
+assertEquals(countArgs(...[...arr]), 3);
+assertEquals(countArgs.apply(this, [...arr]), 3);
+function checkUndefined() {
+ return arguments[0] === undefined;
+}
+assertTrue(checkUndefined(...arr));
+assertTrue(checkUndefined(...[...arr]));
+assertTrue(checkUndefined.apply(this, [...arr]));
+
+//
+// Array.prototype.map
+//
+(function() {
+ var a = Object.preventExtensions([0.1,1,2,3,4]);
+
+ // Simple use.
+ var result = [1.1,2,3,4,5];
+ assertArrayEquals(result, a.map(function(n) { return Number(n) + 1; }));
+
+ // Use specified object as this object when calling the function.
+ var o = { delta: 42 }
+ result = [42.1,43,44,45,46];
+ assertArrayEquals(result, a.map(function(n) { return this.delta + Number(n); }, o));
+
+ // Modify original array.
+ b = Object.preventExtensions([0.1,1,2,3,4]);
+ result = [1.1,2,3,4,5];
+ assertArrayEquals(result,
+ b.map(function(n, index, array) {
+ array[index] = Number(n) + 1; return Number(n) + 1;
+ }));
+ assertArrayEquals(b, result);
+
+ // Only loop through initial part of array and elements are not
+ // added.
+ a = Object.preventExtensions([0.1,1,2,3,4]);
+ result = [1.1,2,3,4,5];
+ assertArrayEquals(result,
+ a.map(function(n, index, array) { assertThrows(() => { array.push(n) }); return Number(n) + 1; }));
+ assertArrayEquals([0.1,1,2,3,4], a);
+
+ // Respect holes.
+ a = new Array(20);
+ a[1] = 1.1;
+ Object.preventExtensions(a);
+ a = Object.preventExtensions(a).map(function(n) { return 2*Number(n); });
+
+ for (var i in a) {
+ assertEquals(2.2, a[i]);
+ assertEquals('1', i);
+ }
+
+ // Skip over missing properties.
+ a = {
+ "0": 1.1,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertArrayEquals([2.2, , 4],
+ Array.prototype.map.call(Object.preventExtensions(a), function(n) {
+ received.push(n);
+ return n * 2;
+ }));
+ assertArrayEquals([1.1, 2], received);
+
+ // Modify array prototype
+ a = [1.1 , 2];
+ received = [];
+ assertThrows(() => {
+ Array.prototype.map.call(Object.preventExtensions(a), function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n * 2;
+ });
+ }, TypeError);
+ assertArrayEquals([], received);
+
+ // Create a new object in each function call when receiver is a
+ // primitive value. See ECMA-262, Annex C.
+ a = [];
+ Object.preventExtensions([1.1, 2]).map(function() { a.push(this) }, "");
+ assertTrue(a[0] !== a[1]);
+
+ // Do not create a new object otherwise.
+ a = [];
+ Object.preventExtensions([1.1, 2]).map(function() { a.push(this) }, {});
+ assertSame(a[0], a[1]);
+
+ // In strict mode primitive values should not be coerced to an object.
+ a = [];
+ Object.preventExtensions([1.1, 2]).map(function() { 'use strict'; a.push(this); }, "");
+ assertEquals("", a[0]);
+ assertEquals(a[0], a[1]);
+
+})();
diff --git a/deps/v8/test/mjsunit/object-seal.js b/deps/v8/test/mjsunit/object-seal.js
index a82e3a82ae..684d94a6b8 100644
--- a/deps/v8/test/mjsunit/object-seal.js
+++ b/deps/v8/test/mjsunit/object-seal.js
@@ -36,7 +36,7 @@ for (var key in non_objects) {
assertSame(non_objects[key], Object.seal(non_objects[key]));
}
-// Test that isFrozen always returns true for non-objects
+// Test that isSealed always returns true for non-objects
for (var key in non_objects) {
assertTrue(Object.isSealed(non_objects[key]));
}
@@ -765,3 +765,530 @@ function checkUndefined() {
assertTrue(checkUndefined(...arr));
assertTrue(checkUndefined(...[...arr]));
assertTrue(checkUndefined.apply(this, [...arr]));
+
+//
+// Array.prototype.map
+//
+(function() {
+ var a = Object.seal(['0','1','2','3','4']);
+
+ // Simple use.
+ var result = [1,2,3,4,5];
+ assertArrayEquals(result, a.map(function(n) { return Number(n) + 1; }));
+
+ // Use specified object as this object when calling the function.
+ var o = { delta: 42 }
+ result = [42,43,44,45,46];
+ assertArrayEquals(result, a.map(function(n) { return this.delta + Number(n); }, o));
+
+ // Modify original array.
+ b = Object.seal(['0','1','2','3','4']);
+ result = [1,2,3,4,5];
+ assertArrayEquals(result,
+ b.map(function(n, index, array) {
+ array[index] = Number(n) + 1; return Number(n) + 1;
+ }));
+ assertArrayEquals(b, result);
+
+ // Only loop through initial part of array and elements are not
+ // added.
+ a = Object.seal(['0','1','2','3','4']);
+ result = [1,2,3,4,5];
+ assertArrayEquals(result,
+ a.map(function(n, index, array) { assertThrows(() => { array.push(n) }); return Number(n) + 1; }));
+ assertArrayEquals(['0','1','2','3','4'], a);
+
+ // Respect holes.
+ a = new Array(20);
+ a[1] = '2';
+ Object.seal(a);
+ a = Object.seal(a).map(function(n) { return 2*Number(n); });
+
+ for (var i in a) {
+ assertEquals(4, a[i]);
+ assertEquals('1', i);
+ }
+
+ // Skip over missing properties.
+ a = {
+ "0": 1,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertArrayEquals([2, , 4],
+ Array.prototype.map.call(Object.seal(a), function(n) {
+ received.push(n);
+ return n * 2;
+ }));
+ assertArrayEquals([1, 2], received);
+
+ // Modify array prototype
+ a = ['1', , 2];
+ received = [];
+ assertThrows(() => {
+ Array.prototype.map.call(Object.seal(a), function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n * 2;
+ });
+ }, TypeError);
+ assertArrayEquals([], received);
+
+ // Create a new object in each function call when receiver is a
+ // primitive value. See ECMA-262, Annex C.
+ a = [];
+ Object.seal(['1', '2']).map(function() { a.push(this) }, "");
+ assertTrue(a[0] !== a[1]);
+
+ // Do not create a new object otherwise.
+ a = [];
+ Object.seal(['1', '2']).map(function() { a.push(this) }, {});
+ assertSame(a[0], a[1]);
+
+ // In strict mode primitive values should not be coerced to an object.
+ a = [];
+ Object.seal(['1', '2']).map(function() { 'use strict'; a.push(this); }, "");
+ assertEquals("", a[0]);
+ assertEquals(a[0], a[1]);
+
+})();
+
+
+// Test with double elements
+// Test packed element array built-in functions with seal.
+function testDoubleSealedArray1(obj) {
+ assertTrue(Object.isSealed(obj));
+ assertFalse(Object.isFrozen(obj));
+ assertTrue(Array.isArray(obj));
+
+ // Verify that the length can't be written by builtins.
+ assertThrows(function() { obj.pop(); }, TypeError);
+ assertThrows(function() { obj.push(1); }, TypeError);
+ assertThrows(function() { obj.shift(); }, TypeError);
+ assertThrows(function() { obj.unshift(1); }, TypeError);
+ assertThrows(function() { obj.splice(0); }, TypeError);
+ assertDoesNotThrow(function() { obj.splice(0, 0); });
+
+ // Verify search, filter, iterator
+ assertEquals(obj.lastIndexOf(1), 1);
+ assertEquals(obj.indexOf(undefined), -1);
+ assertFalse(obj.includes(Symbol("test")));
+ assertTrue(obj.includes(1));
+ assertTrue(obj.includes(-1.1));
+ assertFalse(obj.includes());
+ assertEquals(obj.find(x => x==0), undefined);
+ assertEquals(obj.findIndex(x => x==2), 3);
+ assertFalse(obj.some(x => typeof x == 'symbol'));
+ assertFalse(obj.every(x => x == -1));
+ var filteredArray = obj.filter(e => typeof e == "symbol");
+ assertEquals(filteredArray.length, 0);
+ assertEquals(obj.map(x => x), obj);
+ var countPositiveNumber = 0;
+ obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 1);
+ }
+ });
+ assertEquals(countPositiveNumber, 1);
+ assertEquals(obj.length, obj.concat([]).length);
+ var iterator = obj.values();
+ assertEquals(iterator.next().value, -1.1);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.keys();
+ assertEquals(iterator.next().value, 0);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.entries();
+ assertEquals(iterator.next().value, [0, -1.1]);
+ assertEquals(iterator.next().value, [1, 1]);
+
+ // Verify that the value can't be written
+ var length = obj.length;
+ for (var i = 0; i < length; i++) {
+ obj[i] = 'new';
+ assertEquals('new', obj[i]);
+ }
+}
+
+obj = new Array(1.1, -1.1, 1, -1, 2);
+assertTrue(%HasDoubleElements(obj));
+Object.seal(obj);
+testDoubleSealedArray1(obj);
+
+// Verify change from non-extensible to sealed
+obj = new Array(1.1, -1.1, 1, -1, 2);
+assertTrue(%HasDoubleElements(obj));
+Object.preventExtensions(obj);
+Object.seal(obj);
+assertTrue(Object.isSealed(obj));
+testDoubleSealedArray1(obj);
+
+// Verify flat, map, slice, flatMap, join, reduce, reduceRight for sealed packed array
+function testDoubleSealedArray2(arr) {
+ assertTrue(Object.isSealed(arr));
+ assertTrue(Array.isArray(arr));
+ assertEquals(arr.map(x => [x]), [[1], [1.1], [0]]);
+ assertEquals(arr.flatMap(x => [x]), arr);
+ assertEquals(arr.flat(), arr);
+ assertEquals(arr.join('-'), "1-1.1-0");
+ const reducer = (accumulator, currentValue) => accumulator + currentValue;
+ assertEquals(arr.reduce(reducer), 2.1);
+ assertEquals(arr.reduceRight(reducer), 2.1);
+ assertEquals(arr.slice(0, 1), [1]);
+}
+var arr1 = new Array(1, 1.1, 0);
+assertTrue(%HasDoubleElements(arr1));
+Object.seal(arr1);
+testDoubleSealedArray2(arr1);
+
+// Verify change from non-extensible to sealed
+var arr1 = new Array(1, 1.1, 0);
+assertTrue(%HasDoubleElements(arr1));
+Object.preventExtensions(arr1);
+Object.seal(arr1);
+testDoubleSealedArray2(arr1);
+
+// Test regression with Object.defineProperty
+var obj = [];
+obj.propertyA = 42;
+obj[0] = 1.1;
+Object.seal(obj);
+assertDoesNotThrow(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ });
+});
+assertEquals(obj, obj.propertyA);
+assertDoesNotThrow(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ writable: false,
+ });
+});
+obj.propertyA = 42;
+assertEquals(obj.propertyA, 42);
+assertThrows(function() {
+ Object.defineProperty(obj, 'abc', {
+ value: obj,
+ });
+}, TypeError);
+
+// Regression test with simple array
+var arr = [1.1];
+Object.seal(arr);
+arr[0] = 1;
+assertEquals(arr[0], 1);
+
+// Test regression Array.concat with double
+var arr = [1.1];
+Object.seal(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, [1.1, 0.5]);
+Object.seal(arr);
+arr = arr.concat([1.5, 'b']);
+assertEquals(arr, [1.1, 0.5, 1.5, 'b']);
+
+// Regression test with change length
+var arr = [1.1, 0];
+Object.seal(arr);
+assertEquals(arr.length, 2);
+arr.length = 3;
+assertEquals(arr.length, 3);
+arr[2] = 'c';
+assertEquals(arr[2], undefined);
+arr.length = 1;
+assertEquals(arr.length, 2);
+
+// Start testing for holey double element array
+// Test holey double element array built-in functions with seal.
+function testHoleyDoubleSealedArray1() {
+ assertTrue(Object.isSealed(obj));
+ assertFalse(Object.isFrozen(obj));
+ assertTrue(Array.isArray(obj));
+
+ // Verify that the length can't be written by builtins.
+ assertThrows(function() { obj.pop(); }, TypeError);
+ assertThrows(function() { obj.push(1); }, TypeError);
+ assertThrows(function() { obj.shift(); }, TypeError);
+ assertThrows(function() { obj.unshift(1); }, TypeError);
+ assertThrows(function() { obj.splice(0); }, TypeError);
+ assertDoesNotThrow(function() { obj.splice(0, 0); });
+
+ // Verify search, filter, iterator
+ obj = [-1.1, 0, 1, , -1, 1.1];
+ assertTrue(%HasHoleyElements(obj));
+ Object.seal(obj);
+ assertTrue(Object.isSealed(obj));
+ assertFalse(Object.isFrozen(obj));
+ assertTrue(Array.isArray(obj));
+ assertEquals(obj.lastIndexOf(1), 2);
+ assertEquals(obj.indexOf(1.1), 5);
+ assertEquals(obj.indexOf(undefined), -1);
+ assertFalse(obj.includes(Symbol("test")));
+ assertTrue(obj.includes(undefined));
+ assertFalse(obj.includes(NaN));
+ assertTrue(obj.includes());
+ assertEquals(obj.find(x => x==0), 0);
+ assertEquals(obj.findIndex(x => x==1.1), 5);
+ assertFalse(obj.some(x => typeof x == 'symbol'));
+ assertFalse(obj.every(x => x == -1));
+ var filteredArray = obj.filter(e => typeof e == "symbol");
+ assertEquals(filteredArray.length, 0);
+ assertEquals(obj.map(x => x), obj);
+ var countPositiveNumber = 0;
+ obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+ });
+ assertEquals(countPositiveNumber, 1);
+ assertEquals(obj.length, obj.concat([]).length);
+ var iterator = obj.values();
+ assertEquals(iterator.next().value, -1.1);
+ assertEquals(iterator.next().value, 0);
+ var iterator = obj.keys();
+ assertEquals(iterator.next().value, 0);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.entries();
+ assertEquals(iterator.next().value, [0, -1.1]);
+ assertEquals(iterator.next().value, [1, 0]);
+
+ // Verify that the value can be written
+ var length = obj.length;
+ for (var i = 0; i < length; i++) {
+ if (i==3) continue;
+ obj[i] = 'new';
+ assertEquals(obj[i], 'new');
+ }
+};
+
+obj = [-1.1, 0, 1, , -1, 1.1];
+assertTrue(%HasHoleyElements(obj));
+Object.seal(obj);
+testHoleyDoubleSealedArray1(obj);
+
+// Verify change from non-extensible to sealed
+obj = [-1.1, 0, 1, , -1, 1.1];
+assertTrue(%HasHoleyElements(obj));
+Object.preventExtensions(obj);
+Object.seal(obj);
+assertTrue(Object.isSealed(obj));
+testHoleyDoubleSealedArray1(obj);
+
+// Verify flat, map, slice, flatMap, join, reduce, reduceRight for sealed packed array
+function testHoleyDoubleSealedArray2(arr) {
+ assertTrue(Object.isSealed(arr));
+ assertTrue(Array.isArray(arr));
+ assertEquals(arr.map(x => [x]), [, [1.1], [1], [0]]);
+ assertEquals(arr.flatMap(x => [x]), [1.1, 1, 0]);
+ assertEquals(arr.flat(), [1.1, 1, 0]);
+ assertEquals(arr.join('-'), "-1.1-1-0");
+ const reducer = (accumulator, currentValue) => accumulator + currentValue;
+ assertEquals(arr.reduce(reducer), 2.1);
+ assertEquals(arr.reduceRight(reducer), 2.1);
+ assertEquals(arr.slice(0, 1), [,]);
+ assertEquals(arr.slice(1, 2), [1.1]);
+}
+var arr1 = [, 1.1, 1, 0];
+assertTrue(%HasHoleyElements(arr1));
+Object.seal(arr1);
+testHoleyDoubleSealedArray2(arr1);
+
+// Verify change from non-extensible to sealed
+var arr1 = [, 1.1, 1, 0];
+assertTrue(%HasHoleyElements(arr1));
+Object.preventExtensions(arr1);
+Object.seal(arr1);
+testHoleyDoubleSealedArray2(arr1);
+
+// Test regression with Object.defineProperty
+var obj = [1.1, , 0];
+obj.propertyA = 42;
+obj[0] = 1.2;
+Object.seal(obj);
+assertDoesNotThrow(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ });
+});
+assertEquals(obj, obj.propertyA);
+assertDoesNotThrow(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ writable: false,
+ });
+});
+obj.propertyA = 42;
+assertEquals(obj.propertyA, 42);
+assertThrows(function() {
+ Object.defineProperty(obj, 'abc', {
+ value: obj,
+ });
+}, TypeError);
+
+// Regression test with simple holey array
+var arr = [, 1.1];
+Object.seal(arr);
+arr[1] = 'b';
+assertEquals(arr[1], 'b');
+arr[0] = 1;
+assertEquals(arr[0], undefined);
+
+// Test regression Array.concat with double
+var arr = [1.1, , 0];
+Object.seal(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, [1.1, , 0, 0.5]);
+Object.seal(arr);
+arr = arr.concat([1.5, 'c']);
+assertEquals(arr, [1.1, ,0, 0.5, 1.5, 'c']);
+
+// Regression test with change length
+var arr = [1.1, ,0];
+Object.seal(arr);
+assertEquals(arr.length, 3);
+arr.length = 4;
+assertEquals(arr.length, 4);
+arr[3] = 'c';
+assertEquals(arr[2], 0);
+assertEquals(arr[3], undefined);
+arr.length = 2;
+assertEquals(arr.length, 3);
+
+// Change length with holey entries at the end
+var arr = [1.1, ,];
+Object.seal(arr);
+assertEquals(arr.length, 2);
+arr.length = 0;
+assertEquals(arr.length, 1);
+arr.length = 3;
+assertEquals(arr.length, 3);
+arr.length = 0;
+assertEquals(arr.length, 1);
+
+// Spread with array
+var arr = [1.1, 0, -1];
+Object.seal(arr);
+var arrSpread = [...arr];
+assertEquals(arrSpread.length, arr.length);
+assertEquals(arrSpread[0], 1.1);
+assertEquals(arrSpread[1], 0);
+assertEquals(arrSpread[2], -1);
+
+// Spread with array-like
+function returnArgs() {
+ return Object.seal(arguments);
+}
+var arrLike = returnArgs(1.1, 0, -1);
+assertTrue(Object.isSealed(arrLike));
+var arrSpread = [...arrLike];
+assertEquals(arrSpread.length, arrLike.length);
+assertEquals(arrSpread[0], 1.1);
+assertEquals(arrSpread[1], 0);
+assertEquals(arrSpread[2], -1);
+
+// Spread with holey
+function countArgs() {
+ return arguments.length;
+}
+var arr = [, 1.1, 0];
+Object.seal(arr);
+assertEquals(countArgs(...arr), 3);
+assertEquals(countArgs(...[...arr]), 3);
+assertEquals(countArgs.apply(this, [...arr]), 3);
+function checkUndefined() {
+ return arguments[0] === undefined;
+}
+assertTrue(checkUndefined(...arr));
+assertTrue(checkUndefined(...[...arr]));
+assertTrue(checkUndefined.apply(this, [...arr]));
+
+//
+// Array.prototype.map
+//
+(function() {
+ var a = Object.seal([0.1,1,2,3,4]);
+
+ // Simple use.
+ var result = [1.1,2,3,4,5];
+ assertArrayEquals(result, a.map(function(n) { return Number(n) + 1; }));
+
+ // Use specified object as this object when calling the function.
+ var o = { delta: 42 }
+ result = [42.1,43,44,45,46];
+ assertArrayEquals(result, a.map(function(n) { return this.delta + Number(n); }, o));
+
+ // Modify original array.
+ b = Object.seal([0.1,1,2,3,4]);
+ result = [1.1,2,3,4,5];
+ assertArrayEquals(result,
+ b.map(function(n, index, array) {
+ array[index] = Number(n) + 1; return Number(n) + 1;
+ }));
+ assertArrayEquals(b, result);
+
+ // Only loop through initial part of array and elements are not
+ // added.
+ a = Object.seal([0.1,1,2,3,4]);
+ result = [1.1,2,3,4,5];
+ assertArrayEquals(result,
+ a.map(function(n, index, array) { assertThrows(() => { array.push(n) }); return Number(n) + 1; }));
+ assertArrayEquals([0.1,1,2,3,4], a);
+
+ // Respect holes.
+ a = new Array(20);
+ a[1] = 1.1;
+ Object.seal(a);
+ a = Object.seal(a).map(function(n) { return 2*Number(n); });
+
+ for (var i in a) {
+ assertEquals(2.2, a[i]);
+ assertEquals('1', i);
+ }
+
+ // Skip over missing properties.
+ a = {
+ "0": 1.1,
+ "2": 2,
+ length: 3
+ };
+ var received = [];
+ assertArrayEquals([2.2, , 4],
+ Array.prototype.map.call(Object.seal(a), function(n) {
+ received.push(n);
+ return n * 2;
+ }));
+ assertArrayEquals([1.1, 2], received);
+
+ // Modify array prototype
+ a = [1.1, , 2];
+ received = [];
+ assertThrows(() => {
+ Array.prototype.map.call(Object.seal(a), function(n) {
+ a.__proto__ = null;
+ received.push(n);
+ return n * 2;
+ });
+ }, TypeError);
+ assertArrayEquals([], received);
+
+ // Create a new object in each function call when receiver is a
+ // primitive value. See ECMA-262, Annex C.
+ a = [];
+ Object.seal([1.1, 2]).map(function() { a.push(this) }, "");
+ assertTrue(a[0] !== a[1]);
+
+ // Do not create a new object otherwise.
+ a = [];
+ Object.seal([1.1, 2]).map(function() { a.push(this) }, {});
+ assertSame(a[0], a[1]);
+
+ // In strict mode primitive values should not be coerced to an object.
+ a = [];
+ Object.seal([1.1, 1.2]).map(function() { 'use strict'; a.push(this); }, "");
+ assertEquals("", a[0]);
+ assertEquals(a[0], a[1]);
+
+})();
diff --git a/deps/v8/test/mjsunit/omit-constant-mapcheck.js b/deps/v8/test/mjsunit/omit-constant-mapcheck.js
index ae6308f215..672595f7c0 100644
--- a/deps/v8/test/mjsunit/omit-constant-mapcheck.js
+++ b/deps/v8/test/mjsunit/omit-constant-mapcheck.js
@@ -27,12 +27,12 @@
// Flags: --allow-natives-syntax
-var g1 = { a:1 }
+var g1 = {a: 1};
function load() {
return g1.a;
-}
-
+};
+%PrepareFunctionForOptimization(load);
assertEquals(1, load());
assertEquals(1, load());
%OptimizeFunctionOnNextCall(load);
@@ -40,12 +40,12 @@ assertEquals(1, load());
delete g1.a;
assertEquals(undefined, load());
-var g2 = { a:2 }
+var g2 = {a: 2};
function load2() {
return g2.a;
-}
-
+};
+%PrepareFunctionForOptimization(load2);
assertEquals(2, load2());
assertEquals(2, load2());
%OptimizeFunctionOnNextCall(load2);
@@ -54,17 +54,17 @@ g2.b = 10;
g2.a = 5;
assertEquals(5, load2());
-var g3 = { a:2, b:9, c:1 }
+var g3 = {a: 2, b: 9, c: 1};
function store(v) {
g3.a = v;
return g3.a;
-}
-
+};
+%PrepareFunctionForOptimization(store);
assertEquals(5, store(5));
assertEquals(8, store(8));
%OptimizeFunctionOnNextCall(store);
assertEquals(10, store(10));
delete g3.c;
store(7);
-assertEquals({a:7, b:9}, g3);
+assertEquals({a: 7, b: 9}, g3);
diff --git a/deps/v8/test/mjsunit/optimized-array-every.js b/deps/v8/test/mjsunit/optimized-array-every.js
index 0cbab7df67..30578c55f0 100644
--- a/deps/v8/test/mjsunit/optimized-array-every.js
+++ b/deps/v8/test/mjsunit/optimized-array-every.js
@@ -15,6 +15,7 @@
return v < 2;
});
}
+ %PrepareFunctionForOptimization(earlyExit);
assertFalse(earlyExit());
earlyExit();
%OptimizeFunctionOnNextCall(earlyExit);
@@ -35,6 +36,7 @@
return v < 8;
});
}
+ %PrepareFunctionForOptimization(softyPlusEarlyExit);
assertFalse(softyPlusEarlyExit(false));
softyPlusEarlyExit(false);
%OptimizeFunctionOnNextCall(softyPlusEarlyExit);
@@ -58,6 +60,7 @@
return v < 8;
});
}
+ %PrepareFunctionForOptimization(softyPlusEarlyExit);
assertFalse(softyPlusEarlyExit(false));
assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8], called_values);
softyPlusEarlyExit(false);
@@ -81,6 +84,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -102,6 +106,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
assertTrue(eagerDeoptInCalled());
assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], called_values);
eagerDeoptInCalled();
@@ -125,6 +130,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertTrue(lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -144,6 +150,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertTrue(lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -164,6 +171,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertTrue(lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -184,6 +192,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -207,6 +216,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -232,6 +242,7 @@
%NeverOptimizeFunction(callback);
a.every(callback);
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -259,6 +270,7 @@
caught = true;
}
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -287,6 +299,7 @@
caught = true;
}
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -318,6 +331,7 @@
}
return result;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
assertEquals(true, lazyDeopt(false));
assertEquals(true, lazyDeopt(false));
assertEquals("nope", lazyDeopt(true));
@@ -343,6 +357,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -370,6 +385,7 @@
b.every(callback);
return did_assert_error;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -397,6 +413,7 @@
});
return did_assert_error;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -420,6 +437,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
assertThrows(() => lazyDeopt());
assertThrows(() => lazyDeopt());
try {
@@ -446,6 +464,7 @@
});
return callback_values;
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -462,6 +481,7 @@
});
return callback_values;
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -476,6 +496,7 @@
function unreliable(a, b) {
return a.every(x => true, side_effect(a, b));
}
+ %PrepareFunctionForOptimization(unreliable);
let a = [1, 2, 3];
unreliable(a, false);
@@ -492,6 +513,7 @@
function notCallable() {
return a.every(undefined);
}
+ %PrepareFunctionForOptimization(notCallable);
assertThrows(notCallable, TypeError);
try { notCallable(); } catch(e) { }
@@ -509,6 +531,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(prototypeChanged);
prototypeChanged();
prototypeChanged();
%OptimizeFunctionOnNextCall(prototypeChanged);
diff --git a/deps/v8/test/mjsunit/optimized-array-find.js b/deps/v8/test/mjsunit/optimized-array-find.js
index abcd2cf704..b8f3baa28c 100644
--- a/deps/v8/test/mjsunit/optimized-array-find.js
+++ b/deps/v8/test/mjsunit/optimized-array-find.js
@@ -20,6 +20,7 @@
return v === 20;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -41,6 +42,7 @@
return v === 9;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
assertEquals(9, eagerDeoptInCalled());
assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], called_values);
eagerDeoptInCalled();
@@ -65,6 +67,7 @@
return v > 3;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertEquals(4, lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -84,6 +87,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertEquals(undefined, lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -104,6 +108,7 @@
return v > 3;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertEquals(4, lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -124,6 +129,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -147,6 +153,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -172,6 +179,7 @@
%NeverOptimizeFunction(callback);
a.find(callback);
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -199,6 +207,7 @@
caught = true;
}
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -227,6 +236,7 @@
caught = true;
}
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -258,6 +268,7 @@
}
return result;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
assertEquals(3, lazyDeopt(false));
assertEquals(3, lazyDeopt(false));
assertEquals("nope", lazyDeopt(true));
@@ -283,6 +294,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -310,6 +322,7 @@
b.find(callback);
return did_assert_error;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -337,6 +350,7 @@
});
return did_assert_error;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -360,6 +374,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
assertThrows(() => lazyDeopt());
assertThrows(() => lazyDeopt());
try {
@@ -385,6 +400,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(prototypeChanged);
prototypeChanged();
prototypeChanged();
%OptimizeFunctionOnNextCall(prototypeChanged);
@@ -406,6 +422,7 @@
});
return callback_values;
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -422,6 +439,7 @@
});
return callback_values;
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -436,6 +454,7 @@
function unreliable(a, b) {
return a.find(x => false, side_effect(a, b));
}
+ %PrepareFunctionForOptimization(unreliable);
let a = [1, 2, 3];
unreliable(a, false);
@@ -452,6 +471,7 @@
function notCallable() {
return a.find(undefined);
}
+ %PrepareFunctionForOptimization(notCallable);
assertThrows(notCallable, TypeError);
try { notCallable(); } catch(e) { }
diff --git a/deps/v8/test/mjsunit/optimized-array-findindex.js b/deps/v8/test/mjsunit/optimized-array-findindex.js
index 91f4a6cc60..299ae0a2bf 100644
--- a/deps/v8/test/mjsunit/optimized-array-findindex.js
+++ b/deps/v8/test/mjsunit/optimized-array-findindex.js
@@ -20,6 +20,7 @@
return v === 20;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -41,6 +42,7 @@
return v === 9;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
assertEquals(8, eagerDeoptInCalled());
assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], called_values);
eagerDeoptInCalled();
@@ -65,6 +67,7 @@
return v > 3;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertEquals(3, lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -84,6 +87,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertEquals(-1, lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -104,6 +108,7 @@
return v > 3;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertEquals(3, lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -124,6 +129,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -147,6 +153,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -172,6 +179,7 @@
%NeverOptimizeFunction(callback);
a.findIndex(callback);
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -199,6 +207,7 @@
caught = true;
}
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -227,6 +236,7 @@
caught = true;
}
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -258,6 +268,7 @@
}
return result;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
assertEquals(2, lazyDeopt(false));
assertEquals(2, lazyDeopt(false));
assertEquals("nope", lazyDeopt(true));
@@ -283,6 +294,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -310,6 +322,7 @@
b.findIndex(callback);
return did_assert_error;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -337,6 +350,7 @@
});
return did_assert_error;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -360,6 +374,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
assertThrows(() => lazyDeopt());
assertThrows(() => lazyDeopt());
try {
@@ -385,6 +400,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(prototypeChanged);
prototypeChanged();
prototypeChanged();
%OptimizeFunctionOnNextCall(prototypeChanged);
@@ -406,6 +422,7 @@
});
return callback_values;
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -422,6 +439,7 @@
});
return callback_values;
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -436,6 +454,7 @@
function unreliable(a, b) {
return a.findIndex(x => false, side_effect(a, b));
}
+ %PrepareFunctionForOptimization(unreliable);
let a = [1, 2, 3];
unreliable(a, false);
@@ -452,6 +471,7 @@
function notCallable() {
return a.findIndex(undefined);
}
+ %PrepareFunctionForOptimization(notCallable);
assertThrows(notCallable, TypeError);
try { notCallable(); } catch(e) { }
diff --git a/deps/v8/test/mjsunit/optimized-array-some.js b/deps/v8/test/mjsunit/optimized-array-some.js
index 8d0114aa64..73862702a2 100644
--- a/deps/v8/test/mjsunit/optimized-array-some.js
+++ b/deps/v8/test/mjsunit/optimized-array-some.js
@@ -15,6 +15,7 @@
return v > 2;
});
}
+ %PrepareFunctionForOptimization(earlyExit);
assertTrue(earlyExit());
earlyExit();
%OptimizeFunctionOnNextCall(earlyExit);
@@ -35,6 +36,7 @@
return v > 7;
});
}
+ %PrepareFunctionForOptimization(softyPlusEarlyExit);
assertTrue(softyPlusEarlyExit(false));
softyPlusEarlyExit(false);
%OptimizeFunctionOnNextCall(softyPlusEarlyExit);
@@ -58,6 +60,7 @@
return v > 7;
});
}
+ %PrepareFunctionForOptimization(softyPlusEarlyExit);
assertTrue(softyPlusEarlyExit(false));
assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8], called_values);
softyPlusEarlyExit(false);
@@ -81,6 +84,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -102,6 +106,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
assertFalse(eagerDeoptInCalled());
assertArrayEquals([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], called_values);
eagerDeoptInCalled();
@@ -125,6 +130,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertFalse(lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -144,6 +150,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertFalse(lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -164,6 +171,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyChanger);
assertFalse(lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -184,6 +192,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -207,6 +216,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -232,6 +242,7 @@
%NeverOptimizeFunction(callback);
a.some(callback);
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -259,6 +270,7 @@
caught = true;
}
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -287,6 +299,7 @@
caught = true;
}
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -318,6 +331,7 @@
}
return result;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
assertEquals(false, lazyDeopt(false));
assertEquals(false, lazyDeopt(false));
assertEquals("nope", lazyDeopt(true));
@@ -343,6 +357,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -370,6 +385,7 @@
b.some(callback);
return did_assert_error;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -397,6 +413,7 @@
});
return did_assert_error;
}
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -420,6 +437,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(lazyDeopt);
assertThrows(() => lazyDeopt());
assertThrows(() => lazyDeopt());
try {
@@ -445,6 +463,7 @@
return false;
});
}
+ %PrepareFunctionForOptimization(prototypeChanged);
prototypeChanged();
prototypeChanged();
%OptimizeFunctionOnNextCall(prototypeChanged);
@@ -466,6 +485,7 @@
});
return callback_values;
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -482,6 +502,7 @@
});
return callback_values;
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -494,6 +515,7 @@
function notCallable() {
return a.some(undefined);
}
+ %PrepareFunctionForOptimization(notCallable);
assertThrows(notCallable, TypeError);
try { notCallable(); } catch(e) { }
diff --git a/deps/v8/test/mjsunit/optimized-foreach-holey-3.js b/deps/v8/test/mjsunit/optimized-foreach-holey-3.js
index a397a0ab08..5767bf4f6e 100644
--- a/deps/v8/test/mjsunit/optimized-foreach-holey-3.js
+++ b/deps/v8/test/mjsunit/optimized-foreach-holey-3.js
@@ -5,28 +5,33 @@
// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
(function() {
- var result = 0;
- var proto_set_func = function(p, s) {
- %NeverOptimizeFunction(proto_set_func);
- if (s) {
- p[0] = 1;
- }
+var result = 0;
+var proto_set_func = function(p, s) {
+ %NeverOptimizeFunction(proto_set_func);
+ if (s) {
+ p[0] = 1;
}
- var f = function(s) {
- var b = [,,];
- b[1] = 0;
- b[2] = 2;
- var sum = function(v,i,o) {
- if (i==1) proto_set_func(b.__proto__, s);
- result += v;
- };
- b.forEach(sum);
- }
- f();
- f();
- %OptimizeFunctionOnNextCall(f);
- f();
- f(true);
- f();
- assertEquals(11, result);
+};
+var f = function(s) {
+ var b = [
+ ,
+ ,
+ ];
+ b[1] = 0;
+ b[2] = 2;
+ var sum = function(v, i, o) {
+ if (i == 1) proto_set_func(b.__proto__, s);
+ result += v;
+ };
+ b.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(f);
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+f(true);
+f();
+assertEquals(11, result);
})();
diff --git a/deps/v8/test/mjsunit/optimized-foreach.js b/deps/v8/test/mjsunit/optimized-foreach.js
index 1fe54b5e9f..c47bd03741 100644
--- a/deps/v8/test/mjsunit/optimized-foreach.js
+++ b/deps/v8/test/mjsunit/optimized-foreach.js
@@ -4,225 +4,253 @@
// Flags: --allow-natives-syntax --expose-gc --turbo-inline-array-builtins
-var a = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,0,0];
-var b = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
-var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
+var a = [
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 0, 0
+];
+var b = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+];
+var c = [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
+];
// Unknown field access leads to soft-deopt unrelated to forEach, should still
// lead to correct result.
(function() {
- var result = 0;
- var eagerDeoptInCalled = function(deopt) {
- var sum = function(v,i,o) {
- result += v;
- if (i == 13 && deopt) {
- a.abc = 25;
- }
+var result = 0;
+var eagerDeoptInCalled = function(deopt) {
+ var sum = function(v, i, o) {
+ result += v;
+ if (i == 13 && deopt) {
+ a.abc = 25;
}
- a.forEach(sum);
- }
- eagerDeoptInCalled();
- eagerDeoptInCalled();
- %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
- eagerDeoptInCalled();
- eagerDeoptInCalled(true);
- eagerDeoptInCalled();
- assertEquals(1500, result);
+ };
+ a.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(eagerDeoptInCalled);
+eagerDeoptInCalled();
+eagerDeoptInCalled();
+%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+eagerDeoptInCalled();
+eagerDeoptInCalled(true);
+eagerDeoptInCalled();
+assertEquals(1500, result);
})();
// Length change detected during loop, must cause properly handled eager deopt.
(function() {
- var result = 0;
- var eagerDeoptInCalled = function(deopt) {
- var sum = function(v,i,o) {
- result += v;
- a.length = (i == 13 && deopt) ? 25 : 27;
- }
- a.forEach(sum);
- }
- eagerDeoptInCalled();
- eagerDeoptInCalled();
- %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
- eagerDeoptInCalled();
- eagerDeoptInCalled(true);
- eagerDeoptInCalled();
- assertEquals(1500, result);
+var result = 0;
+var eagerDeoptInCalled = function(deopt) {
+ var sum = function(v, i, o) {
+ result += v;
+ a.length = i == 13 && deopt ? 25 : 27;
+ };
+ a.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(eagerDeoptInCalled);
+eagerDeoptInCalled();
+eagerDeoptInCalled();
+%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+eagerDeoptInCalled();
+eagerDeoptInCalled(true);
+eagerDeoptInCalled();
+assertEquals(1500, result);
})();
// Escape analyzed array
(function() {
- var result = 0;
- var eagerDeoptInCalled = function(deopt) {
- var a_noescape = [0,1,2,3,4,5];
- var sum = function(v,i,o) {
- result += v;
- if (i == 13 && deopt) {
- a_noescape.length = 25;
- }
+var result = 0;
+var eagerDeoptInCalled = function(deopt) {
+ var a_noescape = [0, 1, 2, 3, 4, 5];
+ var sum = function(v, i, o) {
+ result += v;
+ if (i == 13 && deopt) {
+ a_noescape.length = 25;
}
- a_noescape.forEach(sum);
- }
- eagerDeoptInCalled();
- eagerDeoptInCalled();
- %OptimizeFunctionOnNextCall(eagerDeoptInCalled);
- eagerDeoptInCalled();
- eagerDeoptInCalled(true);
- eagerDeoptInCalled();
- assertEquals(75, result);
+ };
+ a_noescape.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(eagerDeoptInCalled);
+eagerDeoptInCalled();
+eagerDeoptInCalled();
+%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
+eagerDeoptInCalled();
+eagerDeoptInCalled(true);
+eagerDeoptInCalled();
+assertEquals(75, result);
})();
// Escape analyzed array where sum function isn't inlined, forcing a lazy deopt
// with GC that relies on the stashed-away return result fro the lazy deopt
// being properly stored in a place on the stack that gets GC'ed.
(function() {
- var result = 0;
- var lazyDeopt = function(deopt) {
- var b = [1,2,3];
- var sum = function(v,i,o) {
- result += i;
- if (i == 1 && deopt) {
- %DeoptimizeFunction(lazyDeopt);
- }
- gc(); gc();
- };
- %NeverOptimizeFunction(sum);
- b.forEach(sum);
- }
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
- lazyDeopt(true);
- lazyDeopt();
+var result = 0;
+var lazyDeopt = function(deopt) {
+ var b = [1, 2, 3];
+ var sum = function(v, i, o) {
+ result += i;
+ if (i == 1 && deopt) {
+ %DeoptimizeFunction(lazyDeopt);
+ }
+ gc();
+ gc();
+ };
+ %NeverOptimizeFunction(sum);
+ b.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
+lazyDeopt(true);
+lazyDeopt();
})();
// Lazy deopt from runtime call from inlined callback function.
(function() {
- var result = 0;
- var lazyDeopt = function(deopt) {
- var sum = function(v,i,o) {
- result += i;
- if (i == 13 && deopt) {
- %DeoptimizeNow();
- }
+var result = 0;
+var lazyDeopt = function(deopt) {
+ var sum = function(v, i, o) {
+ result += i;
+ if (i == 13 && deopt) {
+ %DeoptimizeNow();
}
- b.forEach(sum);
- }
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
- lazyDeopt(true);
- lazyDeopt();
- assertEquals(1500, result);
+ };
+ b.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
+lazyDeopt(true);
+lazyDeopt();
+assertEquals(1500, result);
})();
// Lazy deopt from runtime call from non-inline callback function.
(function() {
- var result = 0;
- var lazyDeopt = function(deopt) {
- var sum = function(v,i,o) {
- result += i;
- if (i == 13 && deopt) {
- %DeoptimizeNow();
- }
- };
- %NeverOptimizeFunction(sum);
- b.forEach(sum);
- }
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
- lazyDeopt(true);
- lazyDeopt();
- assertEquals(1500, result);
+var result = 0;
+var lazyDeopt = function(deopt) {
+ var sum = function(v, i, o) {
+ result += i;
+ if (i == 13 && deopt) {
+ %DeoptimizeNow();
+ }
+ };
+ %NeverOptimizeFunction(sum);
+ b.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
+lazyDeopt(true);
+lazyDeopt();
+assertEquals(1500, result);
})();
(function() {
- var result = 0;
- var lazyDeopt = function(deopt) {
- var sum = function(v,i,o) {
- result += i;
- if (i == 13 && deopt) {
- %DeoptimizeNow();
- gc();
- gc();
- gc();
- }
+var result = 0;
+var lazyDeopt = function(deopt) {
+ var sum = function(v, i, o) {
+ result += i;
+ if (i == 13 && deopt) {
+ %DeoptimizeNow();
+ gc();
+ gc();
+ gc();
}
- c.forEach(sum);
- }
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
- lazyDeopt(true);
- lazyDeopt();
- assertEquals(1500, result);
+ };
+ c.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
+lazyDeopt(true);
+lazyDeopt();
+assertEquals(1500, result);
})();
// Call to a.forEach is done inside a try-catch block and the callback function
// being called actually throws.
(function() {
- var caught = false;
- var result = 0;
- var lazyDeopt = function(deopt) {
- var sum = function(v,i,o) {
- result += i;
- if (i == 1 && deopt) {
- throw("a");
- }
- }
- try {
- c.forEach(sum);
- } catch (e) {
- caught = true;
+var caught = false;
+var result = 0;
+var lazyDeopt = function(deopt) {
+ var sum = function(v, i, o) {
+ result += i;
+ if (i == 1 && deopt) {
+ throw 'a';
}
+ };
+ try {
+ c.forEach(sum);
+ } catch (e) {
+ caught = true;
}
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
- assertDoesNotThrow(lazyDeopt.bind(this, true));
- assertTrue(caught);
- lazyDeopt();
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
+assertDoesNotThrow(lazyDeopt.bind(this, true));
+assertTrue(caught);
+lazyDeopt();
})();
// Call to a.forEach is done inside a try-catch block and the callback function
// being called actually throws, but the callback is not inlined.
(function() {
- var caught = false;
- var result = 0;
- var lazyDeopt = function(deopt) {
- var sum = function(v,i,o) {
- result += i;
- if (i == 1 && deopt) {
- throw("a");
- }
- };
- %NeverOptimizeFunction(sum);
- try {
- c.forEach(sum);
- } catch (e) {
- caught = true;
+var caught = false;
+var result = 0;
+var lazyDeopt = function(deopt) {
+ var sum = function(v, i, o) {
+ result += i;
+ if (i == 1 && deopt) {
+ throw 'a';
}
+ };
+ %NeverOptimizeFunction(sum);
+ try {
+ c.forEach(sum);
+ } catch (e) {
+ caught = true;
}
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
- assertDoesNotThrow(lazyDeopt.bind(this, true));
- assertTrue(caught);
- lazyDeopt();
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
+assertDoesNotThrow(lazyDeopt.bind(this, true));
+assertTrue(caught);
+lazyDeopt();
})();
// Call to a.forEach is done inside a try-catch block and the callback function
// being called throws into a deoptimized caller function.
(function TestThrowIntoDeoptimizedOuter() {
- var a = [1,2,3,4];
+ var a = [1, 2, 3, 4];
var lazyDeopt = function(deopt) {
- var sum = function(v,i,o) {
+ var sum = function(v, i, o) {
result += v;
if (i == 1 && deopt) {
%DeoptimizeFunction(lazyDeopt);
@@ -234,11 +262,13 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
try {
a.forEach(sum);
} catch (e) {
- assertEquals("some exception", e)
+ assertEquals('some exception', e);
result += 100;
}
return result;
- }
+ };
+ ;
+ %PrepareFunctionForOptimization(lazyDeopt);
assertEquals(10, lazyDeopt(false));
assertEquals(10, lazyDeopt(false));
assertEquals(103, lazyDeopt(true));
@@ -249,99 +279,107 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
})();
(function() {
- var re = /Array\.forEach/;
- var lazyDeopt = function foobar(deopt) {
- var b = [1,2,3];
- var result = 0;
- var sum = function(v,i,o) {
- result += v;
- if (i == 1) {
- var e = new Error();
- print(e.stack);
- assertTrue(re.exec(e.stack) !== null);
- }
- };
- var o = [1,2,3];
- b.forEach(sum);
- }
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
+var re = /Array\.forEach/;
+var lazyDeopt = function foobar(deopt) {
+ var b = [1, 2, 3];
+ var result = 0;
+ var sum = function(v, i, o) {
+ result += v;
+ if (i == 1) {
+ var e = new Error();
+ print(e.stack);
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ };
+ var o = [1, 2, 3];
+ b.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
})();
(function() {
- var re = /Array\.forEach/;
- var lazyDeopt = function(deopt) {
- var b = [1,2,3];
- var result = 0;
- var sum = function(v,i,o) {
- result += v;
- if (i == 1) {
- var e = new Error();
- assertTrue(re.exec(e.stack) !== null);
- }
- };
- %NeverOptimizeFunction(sum);
- var o = [1,2,3];
- b.forEach(sum);
- }
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
+var re = /Array\.forEach/;
+var lazyDeopt = function(deopt) {
+ var b = [1, 2, 3];
+ var result = 0;
+ var sum = function(v, i, o) {
+ result += v;
+ if (i == 1) {
+ var e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ };
+ %NeverOptimizeFunction(sum);
+ var o = [1, 2, 3];
+ b.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
})();
(function() {
- var re = /Array\.forEach/;
- var lazyDeopt = function(deopt) {
- var b = [1,2,3];
- var result = 0;
- var sum = function(v,i,o) {
- result += v;
- if (i == 1) {
- %DeoptimizeNow();
- } else if (i == 2) {
- var e = new Error();
- assertTrue(re.exec(e.stack) !== null);
- }
- };
- var o = [1,2,3];
- b.forEach(sum);
- }
- lazyDeopt();
- lazyDeopt();
- %OptimizeFunctionOnNextCall(lazyDeopt);
- lazyDeopt();
+var re = /Array\.forEach/;
+var lazyDeopt = function(deopt) {
+ var b = [1, 2, 3];
+ var result = 0;
+ var sum = function(v, i, o) {
+ result += v;
+ if (i == 1) {
+ %DeoptimizeNow();
+ } else if (i == 2) {
+ var e = new Error();
+ assertTrue(re.exec(e.stack) !== null);
+ }
+ };
+ var o = [1, 2, 3];
+ b.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+lazyDeopt();
+lazyDeopt();
+%OptimizeFunctionOnNextCall(lazyDeopt);
+lazyDeopt();
})();
(function() {
- var re = /Array\.forEach/;
- var a = [1,2,3];
- var result = 0;
- var lazyDeopt = function() {
- var sum = function(v,i,o) {
- result += i;
- if (i == 1) {
- %DeoptimizeFunction(lazyDeopt);
- throw new Error();
- }
- };
- a.forEach(sum);
- }
- assertThrows(() => lazyDeopt());
- assertThrows(() => lazyDeopt());
- try {
- lazyDeopt();
- } catch (e) {
- assertTrue(re.exec(e.stack) !== null);
- }
- %OptimizeFunctionOnNextCall(lazyDeopt);
- try {
- lazyDeopt();
- } catch (e) {
- assertTrue(re.exec(e.stack) !== null);
- }
+var re = /Array\.forEach/;
+var a = [1, 2, 3];
+var result = 0;
+var lazyDeopt = function() {
+ var sum = function(v, i, o) {
+ result += i;
+ if (i == 1) {
+ %DeoptimizeFunction(lazyDeopt);
+ throw new Error();
+ }
+ };
+ a.forEach(sum);
+};
+;
+%PrepareFunctionForOptimization(lazyDeopt);
+assertThrows(() => lazyDeopt());
+assertThrows(() => lazyDeopt());
+try {
+ lazyDeopt();
+} catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+}
+%OptimizeFunctionOnNextCall(lazyDeopt);
+try {
+ lazyDeopt();
+} catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+}
})();
// Verify holes are skipped.
@@ -353,7 +391,8 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
callback_values.push(v);
});
return callback_values;
- }
+ };
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -368,7 +407,8 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
callback_values.push(v);
});
return callback_values;
- }
+ };
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -377,14 +417,17 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
// Ensure that we handle side-effects between load and call.
(() => {
- function side_effect(a, b) { if (b) a.foo = 3; return a; }
+ function side_effect(a, b) {
+ if (b) a.foo = 3;
+ return a;
+ }
%NeverOptimizeFunction(side_effect);
function unreliable(a, b) {
let sum = 0;
return a.forEach(x => sum += x, side_effect(a, b));
- }
-
+ };
+ %PrepareFunctionForOptimization(unreliable);
let a = [1, 2, 3];
unreliable(a, false);
unreliable(a, false);
diff --git a/deps/v8/test/mjsunit/optimized-reduceright.js b/deps/v8/test/mjsunit/optimized-reduceright.js
index f0b20e09a6..4c7b21e275 100644
--- a/deps/v8/test/mjsunit/optimized-reduceright.js
+++ b/deps/v8/test/mjsunit/optimized-reduceright.js
@@ -12,7 +12,11 @@
// For this particular eager deopt point to work, we need to dodge
// TurboFan's soft-deopts through a non-inlined and non-optimized function
// call to foo().
- function foo(o, deopt) { if (deopt) { o.abc = 3; }}
+ function foo(o, deopt) {
+ if (deopt) {
+ o.abc = 3;
+ }
+ }
%NeverOptimizeFunction(foo);
function eagerDeoptInCalled(deopt) {
return a.reduceRight((r, v, i, o) => {
@@ -21,7 +25,8 @@
}
return r + "S";
}, "H");
- }
+ };
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -32,42 +37,60 @@
// Make sure we gracefully handle the case of an empty array in
// optimized code.
(function() {
- var nothingThere = function(only_holes) {
- var a = [1,2,,3]; // holey smi array.
- if (only_holes) {
- a = [,,,]; // also a holey smi array.
- }
- return a.reduceRight((r,v,i,o)=>r+v);
+var nothingThere = function(only_holes) {
+ var a = [1, 2, , 3]; // holey smi array.
+ if (only_holes) {
+ a = [
+ ,
+ ,
+ ,
+ ]; // also a holey smi array.
}
- nothingThere();
- nothingThere();
- %OptimizeFunctionOnNextCall(nothingThere);
- assertThrows(() => nothingThere(true));
+ return a.reduceRight((r, v, i, o) => r + v);
+};
+;
+%PrepareFunctionForOptimization(nothingThere);
+nothingThere();
+nothingThere();
+%OptimizeFunctionOnNextCall(nothingThere);
+assertThrows(() => nothingThere(true));
})();
// An error generated inside the callback includes reduce in it's
// stack trace.
(function() {
- var re = /Array\.reduceRight/;
- var alwaysThrows = function() {
- var b = [,,,];
- var result = 0;
- var callback = function(r,v,i,o) {
- return r + v;
- };
- b.reduceRight(callback);
- }
- try {
- alwaysThrows();
- } catch (e) {
- assertTrue(re.exec(e.stack) !== null);
- }
- try { alwaysThrows(); } catch (e) {}
- try { alwaysThrows(); } catch (e) {}
- %OptimizeFunctionOnNextCall(alwaysThrows);
- try {
- alwaysThrows();
- } catch (e) {
- assertTrue(re.exec(e.stack) !== null);
- }
+var re = /Array\.reduceRight/;
+var alwaysThrows = function() {
+ var b = [
+ ,
+ ,
+ ,
+ ];
+ var result = 0;
+ var callback = function(r, v, i, o) {
+ return r + v;
+ };
+ b.reduceRight(callback);
+};
+;
+%PrepareFunctionForOptimization(alwaysThrows);
+try {
+ alwaysThrows();
+} catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+}
+try {
+ alwaysThrows();
+} catch (e) {
+}
+try {
+ alwaysThrows();
+} catch (e) {
+}
+%OptimizeFunctionOnNextCall(alwaysThrows);
+try {
+ alwaysThrows();
+} catch (e) {
+ assertTrue(re.exec(e.stack) !== null);
+}
})();
diff --git a/deps/v8/test/mjsunit/optimized-typeof.js b/deps/v8/test/mjsunit/optimized-typeof.js
index b0c0725c51..bcd348c437 100644
--- a/deps/v8/test/mjsunit/optimized-typeof.js
+++ b/deps/v8/test/mjsunit/optimized-typeof.js
@@ -28,19 +28,19 @@
// Flags: --allow-natives-syntax
function typeofDirectly() {
- return typeof({}) === "undefined";
-}
-
+ return typeof{} === 'undefined';
+};
+%PrepareFunctionForOptimization(typeofDirectly);
typeofDirectly();
typeofDirectly();
%OptimizeFunctionOnNextCall(typeofDirectly);
typeofDirectly();
function typeofViaVariable() {
- var foo = typeof({})
+ var foo = typeof{};
return foo === "undefined";
-}
-
+};
+%PrepareFunctionForOptimization(typeofViaVariable);
typeofViaVariable();
typeofViaVariable();
%OptimizeFunctionOnNextCall(typeofViaVariable);
diff --git a/deps/v8/test/mjsunit/outobject-double-for-in.js b/deps/v8/test/mjsunit/outobject-double-for-in.js
index eb8ac940a7..59fcf83a9d 100644
--- a/deps/v8/test/mjsunit/outobject-double-for-in.js
+++ b/deps/v8/test/mjsunit/outobject-double-for-in.js
@@ -60,6 +60,7 @@ function test_props(a) {
}
}
+%PrepareFunctionForOptimization(test_props);
test_props(z);
test_props(z);
%OptimizeFunctionOnNextCall(test_props);
diff --git a/deps/v8/test/mjsunit/packed-elements.js b/deps/v8/test/mjsunit/packed-elements.js
index d421c51f0d..d0df553451 100644
--- a/deps/v8/test/mjsunit/packed-elements.js
+++ b/deps/v8/test/mjsunit/packed-elements.js
@@ -94,6 +94,7 @@ function test6() {
function test_with_optimization(f) {
// Run tests in a loop to make sure that inlined Array() constructor runs out
// of new space memory and must fall back on runtime impl.
+ %PrepareFunctionForOptimization(f);
for (i = 0; i < 25000; ++i) f();
%OptimizeFunctionOnNextCall(f);
for (i = 0; i < 25000; ++i) f(); // Make sure GC happens
diff --git a/deps/v8/test/mjsunit/polymorph-arrays.js b/deps/v8/test/mjsunit/polymorph-arrays.js
index 7d3221a20c..7c2c09aa06 100644
--- a/deps/v8/test/mjsunit/polymorph-arrays.js
+++ b/deps/v8/test/mjsunit/polymorph-arrays.js
@@ -45,6 +45,7 @@ function testPolymorphicLoads() {
function load(a, i) {
return a[i];
}
+ %PrepareFunctionForOptimization(load);
var object_array = new Object;
var sparse_object_array = new Object;
@@ -110,6 +111,7 @@ function testPolymorphicStores() {
function store(a, i, val) {
a[i] = val;
}
+ %PrepareFunctionForOptimization(store);
var object_array = new Object;
var sparse_object_array = new Object;
diff --git a/deps/v8/test/mjsunit/prototype-changes.js b/deps/v8/test/mjsunit/prototype-changes.js
index e7fcc7ee95..c34f760780 100644
--- a/deps/v8/test/mjsunit/prototype-changes.js
+++ b/deps/v8/test/mjsunit/prototype-changes.js
@@ -24,7 +24,8 @@ var c = new C();
function f(expected) {
var result = c.z;
assertEquals(expected, result);
-}
+};
+%PrepareFunctionForOptimization(f);
f(undefined);
f(undefined);
%OptimizeFunctionOnNextCall(f);
@@ -35,9 +36,12 @@ f("z");
// Test updating .__proto__ pointers.
var p1 = {foo: 1.5};
-var p2 = {}; p2.__proto__ = p1;
-var p3 = {}; p3.__proto__ = p2;
-var o = {}; o.__proto__ = p3;
+var p2 = {};
+p2.__proto__ = p1;
+var p3 = {};
+p3.__proto__ = p2;
+var o = {};
+o.__proto__ = p3;
for (var i = 0; i < 2; i++) o.foo; // Force registration.
@@ -52,5 +56,9 @@ function g(o, expected) {
g(o, 1.7);
g(o, 1.7);
g(o, 1.7);
-Object.defineProperty(p1a, "foo", {get: function() { return "foo"}});
+Object.defineProperty(p1a, 'foo', {
+ get: function() {
+ return 'foo';
+ }
+});
g(o, "foo");
diff --git a/deps/v8/test/mjsunit/readonly.js b/deps/v8/test/mjsunit/readonly.js
index 72a08a7c86..ec938d65c0 100644
--- a/deps/v8/test/mjsunit/readonly.js
+++ b/deps/v8/test/mjsunit/readonly.js
@@ -34,7 +34,7 @@ function CreateFromLiteral() {
}
function CreateFromObject() {
- return new Object;
+ return new Object();
}
function CreateDefault() {
@@ -43,19 +43,27 @@ function CreateDefault() {
function CreateFromConstructor(proto) {
function C() {}
- (new C).b = 9; // Make sure that we can have an in-object property.
+ new C().b = 9; // Make sure that we can have an in-object property.
C.prototype = proto;
- return function() { return new C; }
+ return function() {
+ return new C();
+ };
}
function CreateFromApi(proto) {
- return function() { return Object.create(proto); }
+ return function() {
+ return Object.create(proto);
+ };
}
function CreateWithProperty(proto) {
- function C() { this.a = -100; }
+ function C() {
+ this.a = -100;
+ }
C.prototype = proto;
- return function() { return new C; }
+ return function() {
+ return new C();
+ };
}
var bases = [CreateFromLiteral, CreateFromObject, CreateDefault];
@@ -87,7 +95,7 @@ function TestAllCreates(f) {
o.up = o;
for (var j = 0; j < up; ++j) o.up = Object.getPrototypeOf(o.up);
return o;
- })
+ });
}
}
}
@@ -102,11 +110,17 @@ function ReadonlyByNonwritableDataProperty(o, name) {
}
function ReadonlyByAccessorPropertyWithoutSetter(o, name) {
- Object.defineProperty(o, name, {get: function() { return -42; }});
+ Object.defineProperty(o, name, {
+ get: function() {
+ return -42;
+ }
+ });
}
function ReadonlyByGetter(o, name) {
- o.__defineGetter__("a", function() { return -43; });
+ o.__defineGetter__('a', function() {
+ return -43;
+ });
}
function ReadonlyByFreeze(o, name) {
@@ -131,18 +145,19 @@ function ReadonlyByProxy(o, name) {
return {value: -46, writable: false, configurable: true};
}
});
+
o.__proto__ = p;
}
var readonlys = [
ReadonlyByNonwritableDataProperty, ReadonlyByAccessorPropertyWithoutSetter,
- ReadonlyByGetter, ReadonlyByFreeze, ReadonlyByProto // ReadonlyByProxy
-]
+ ReadonlyByGetter, ReadonlyByFreeze, ReadonlyByProto // ReadonlyByProxy
+];
function TestAllReadonlys(f) {
// Provide various methods to making a property read-only.
for (var i = 0; i < readonlys.length; ++i) {
- print(" readonly =", i)
+ print(' readonly =', i);
f(readonlys[i]);
}
}
@@ -152,13 +167,13 @@ function TestAllReadonlys(f) {
function Assign(o, x) {
o.a = x;
-}
-
+};
+%PrepareFunctionForOptimization(Assign);
function AssignStrict(o, x) {
"use strict";
o.a = x;
-}
-
+};
+%PrepareFunctionForOptimization(AssignStrict);
function TestAllModes(f) {
for (var strict = 0; strict < 2; ++strict) {
print(" strict =", strict);
@@ -167,14 +182,16 @@ function TestAllModes(f) {
}
function TestAllScenarios(f) {
- for (var t = 0; t < 100; t = 2*t + 1) {
- print("t =", t)
+ for (var t = 0; t < 100; t = 2 * t + 1) {
+ print('t =', t);
f(function(strict, create, readonly) {
// Make sure that the assignments are monomorphic.
%DeoptimizeFunction(Assign);
%DeoptimizeFunction(AssignStrict);
%ClearFunctionFeedback(Assign);
%ClearFunctionFeedback(AssignStrict);
+ %PrepareFunctionForOptimization(Assign);
+ %PrepareFunctionForOptimization(AssignStrict);
for (var i = 0; i < t; ++i) {
var o = create();
assertFalse("a" in o && !("a" in o.__proto__));
@@ -193,7 +210,10 @@ function TestAllScenarios(f) {
if (strict === 0)
Assign(o, t + 1);
else
- assertThrows(function() { AssignStrict(o, t + 1) }, TypeError);
+
+ assertThrows(function() {
+ AssignStrict(o, t + 1);
+ }, TypeError);
assertTrue(o.a < 0);
});
}
@@ -212,22 +232,23 @@ TestAllScenarios(function(scenario) {
});
});
-
// Extra test forcing bailout.
-function Assign2(o, x) { o.a = x }
-
+function Assign2(o, x) {
+ o.a = x;
+};
+%PrepareFunctionForOptimization(Assign2);
(function() {
- var p = CreateFromConstructor(Object.prototype)();
- var c = CreateFromConstructor(p);
- for (var i = 0; i < 3; ++i) {
- var o = c();
- Assign2(o, i);
- assertEquals(i, o.a);
- }
- %OptimizeFunctionOnNextCall(Assign2);
- ReadonlyByNonwritableDataProperty(p, "a");
+var p = CreateFromConstructor(Object.prototype)();
+var c = CreateFromConstructor(p);
+for (var i = 0; i < 3; ++i) {
var o = c();
- Assign2(o, 0);
- assertTrue(o.a < 0);
+ Assign2(o, i);
+ assertEquals(i, o.a);
+}
+%OptimizeFunctionOnNextCall(Assign2);
+ReadonlyByNonwritableDataProperty(p, "a");
+var o = c();
+Assign2(o, 0);
+assertTrue(o.a < 0);
})();
diff --git a/deps/v8/test/mjsunit/recursive-store-opt.js b/deps/v8/test/mjsunit/recursive-store-opt.js
index fb2649248d..b29c258b9f 100644
--- a/deps/v8/test/mjsunit/recursive-store-opt.js
+++ b/deps/v8/test/mjsunit/recursive-store-opt.js
@@ -33,8 +33,8 @@ function g() {
function f() {
return new g();
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index aabac1ed9f..0cad7e7164 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -824,3 +824,14 @@ assertEquals("\\u2028", new RegExp("\\u2028").source);
assertEquals("\\u2029", /\u2029/.source);
assertEquals("\\u2029", new RegExp("\u2029").source);
assertEquals("\\u2029", new RegExp("\\u2029").source);
+assertEquals("[/]", /[/]/.source);
+assertEquals("[\\/]", /[\/]/.source);
+assertEquals("[\\\\/]", /[\\/]/.source);
+assertEquals("[/]", new RegExp("[/]").source);
+assertEquals("[/]", new RegExp("[\/]").source);
+assertEquals("[\\/]", new RegExp("[\\/]").source);
+assertEquals("[[/]", /[[/]/.source);
+assertEquals("[/]]", /[/]]/.source);
+assertEquals("[[/]]", /[[/]]/.source);
+assertEquals("[[\\/]", /[[\/]/.source);
+assertEquals("[[\\/]]", /[[\/]]/.source);
diff --git a/deps/v8/test/mjsunit/regress-958725.js b/deps/v8/test/mjsunit/regress-958725.js
index 37706e8adf..bb0bd11055 100644
--- a/deps/v8/test/mjsunit/regress-958725.js
+++ b/deps/v8/test/mjsunit/regress-958725.js
@@ -13,13 +13,12 @@ function f(v3) {
for (let v17 = 0; v17 < 100000; v17++) {
}
const v18 = Object();
- function v19(v20,v21,v22) {
- }
+ function v19(v20, v21, v22) {}
i++;;
} while (i < 1);
const v25 = Object.freeze(v8);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(Object);
%OptimizeFunctionOnNextCall(f);
f(Object);
diff --git a/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
index a935a49c10..a0af463564 100644
--- a/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
+++ b/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
@@ -31,11 +31,14 @@
function f(a, deopt, osr) {
var result = (a + 10, "result");
var dummy = deopt + 0;
- for (var i = 0; osr && i < 2; i++) %OptimizeOsr();
+ for (var i = 0; osr && i < 2; i++) {
+ %PrepareFunctionForOptimization(f);
+ %OptimizeOsr();
+ }
return result;
}
- %PrepareFunctionForOptimization(f);
+ %PrepareFunctionForOptimization(f);
assertEquals("result", f(true, 3, false));
assertEquals("result", f(true, 3, false));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
index b20645ce22..4e852d8d95 100644
--- a/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
+++ b/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
@@ -31,7 +31,10 @@ function f(deopt, osr) {
var result = "result";
%_Call(function() {}, 0, 0);
var dummy = deopt + 0;
- for (var i = 0; osr && i < 2; i++) %OptimizeOsr();
+ for (var i = 0; osr && i < 2; i++) {
+ %PrepareFunctionForOptimization(f);
+ %OptimizeOsr();
+ }
return result;
}
diff --git a/deps/v8/test/mjsunit/regress/compare-map-elim1.js b/deps/v8/test/mjsunit/regress/compare-map-elim1.js
index d98c089dec..42d7cbd7cc 100644
--- a/deps/v8/test/mjsunit/regress/compare-map-elim1.js
+++ b/deps/v8/test/mjsunit/regress/compare-map-elim1.js
@@ -30,8 +30,8 @@
function foo(o) {
return o.foo1;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
function getter() {
return this.x + this.z + foo2(this);
}
@@ -40,11 +40,11 @@ function foo2(o) {
return o.a;
}
-var o1 = {z:0, x:1};
-var o2 = {z:0, a:1.5, x:1};
-var o3 = {z:0, a:1.5};
-Object.defineProperty(o1, "foo1", {get:getter});
-Object.defineProperty(o2, "foo1", {get:getter});
+var o1 = {z: 0, x: 1};
+var o2 = {z: 0, a: 1.5, x: 1};
+var o3 = {z: 0, a: 1.5};
+Object.defineProperty(o1, 'foo1', {get: getter});
+Object.defineProperty(o2, 'foo1', {get: getter});
foo(o1);
foo(o1);
diff --git a/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js
index b28dff73a7..2142ce1a04 100644
--- a/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js
+++ b/deps/v8/test/mjsunit/regress/comparison-in-effect-context-deopt.js
@@ -32,16 +32,16 @@ function lazyDeopt() {
return "deopt";
}
-var x = { toString : lazyDeopt };
+var x = {toString: lazyDeopt};
function g(x) {
return "result";
}
function test(x) {
- return g(void(x == ""));
-}
-
+ return g(void (x == ''));
+};
+%PrepareFunctionForOptimization(test);
test(x);
%OptimizeFunctionOnNextCall(test);
assertEquals("result", test(x));
diff --git a/deps/v8/test/mjsunit/regress/consolidated-holey-load.js b/deps/v8/test/mjsunit/regress/consolidated-holey-load.js
index ef8f1ef140..375b742ea7 100644
--- a/deps/v8/test/mjsunit/regress/consolidated-holey-load.js
+++ b/deps/v8/test/mjsunit/regress/consolidated-holey-load.js
@@ -29,10 +29,10 @@
function foo(array) {
return array[0];
-}
-
+};
+%PrepareFunctionForOptimization(foo);
var a = [1, 2, , 4]; // Holey Smi elements.
-var b = ["abcd", 0]; // Fast elements.
+var b = ['abcd', 0]; // Fast elements.
foo(b); // Observe fast elements first, or the IC will transition without
foo(a); // going polymorphic.
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/cross-script-vars.js b/deps/v8/test/mjsunit/regress/cross-script-vars.js
index fd235f997b..4148cdb46e 100644
--- a/deps/v8/test/mjsunit/regress/cross-script-vars.js
+++ b/deps/v8/test/mjsunit/regress/cross-script-vars.js
@@ -71,13 +71,13 @@ function DefineLoadVar() {
'var x;' +
'function ' + name + '() {' +
' return x;' +
- '};';
+ '};'
return Realm.eval(test_realm, AddStrict(code, cfg));
}
function LoadVar() {
var name = 'LoadVar_' + test_realm;
- var code =
+ var code = (cfg.optimize ? '%PrepareFunctionForOptimization(' + name + ');' : '') +
(cfg.optimize ? '%OptimizeFunctionOnNextCall(' + name + ');' : '') +
name + '();';
return Realm.eval(test_realm, AddStrict(code, cfg));
@@ -88,7 +88,6 @@ function DefineStoreVar() {
var code = 'var g = (Function("return this"))();' +
'var x;' +
'function ' + name + '(v) {' +
-// ' %DebugPrint(g);' +
' return x = v;' +
'};';
return Realm.eval(test_realm, AddStrict(code, cfg));
@@ -96,7 +95,7 @@ function DefineStoreVar() {
function StoreVar(v) {
var name = 'StoreVar_' + test_realm;
- var code =
+ var code = (cfg.optimize ? '%PrepareFunctionForOptimization(' + name + ');' : '') +
(cfg.optimize ? '%OptimizeFunctionOnNextCall(' + name + ');' : '') +
name + '(' + v + ');';
return Realm.eval(test_realm, AddStrict(code, cfg));
diff --git a/deps/v8/test/mjsunit/regress/internalized-string-not-equal.js b/deps/v8/test/mjsunit/regress/internalized-string-not-equal.js
index 911279b43e..e5121eca89 100644
--- a/deps/v8/test/mjsunit/regress/internalized-string-not-equal.js
+++ b/deps/v8/test/mjsunit/regress/internalized-string-not-equal.js
@@ -30,9 +30,10 @@
// A bug in r15773, when masks for internalized string and string types
// were reorganized.
function equal(o1, o2) {
- return (o1 == o2);
-}
-var a = "abc";
+ return o1 == o2;
+};
+%PrepareFunctionForOptimization(equal);
+var a = 'abc';
var b = "abc";
equal(a, b);
equal(a, b);
diff --git a/deps/v8/test/mjsunit/regress/math-min.js b/deps/v8/test/mjsunit/regress/math-min.js
index 942e9d0b7d..703170f1a9 100644
--- a/deps/v8/test/mjsunit/regress/math-min.js
+++ b/deps/v8/test/mjsunit/regress/math-min.js
@@ -16,34 +16,34 @@ function f1() {
var z = a[0];
// Same register.
assertEquals(0, Math.min(z, z));
-}
-
+};
+%PrepareFunctionForOptimization(f1);
function f2() {
// Different registers.
assertEquals(0, Math.min(a[0], a[1]));
-}
-
+};
+%PrepareFunctionForOptimization(f2);
function f3() {
// Zero and minus zero.
assertEquals(-0, Math.min(a[1], a[2]));
-}
-
+};
+%PrepareFunctionForOptimization(f3);
function f4() {
// Zero and minus zero, reversed order.
assertEquals(-0, Math.min(a[2], a[1]));
-}
-
+};
+%PrepareFunctionForOptimization(f4);
function f5() {
// Minus zero, same register.
var m_z = a[2];
assertEquals(-0, Math.min(m_z, m_z));
-}
-
+};
+%PrepareFunctionForOptimization(f5);
function f6() {
// Minus zero, different registers.
assertEquals(-0, Math.min(a[2], a[3]));
-}
-
+};
+%PrepareFunctionForOptimization(f6);
for (var i = 0; i < 3; i++) {
f1();
f2();
diff --git a/deps/v8/test/mjsunit/regress/number-named-call-deopt.js b/deps/v8/test/mjsunit/regress/number-named-call-deopt.js
index 1598af12b4..e2ed0563f2 100644
--- a/deps/v8/test/mjsunit/regress/number-named-call-deopt.js
+++ b/deps/v8/test/mjsunit/regress/number-named-call-deopt.js
@@ -29,11 +29,13 @@
function f(x, deopt, osr) {
var res = "result";
- void(x.toString(10, deopt + 0));
- if (osr) for (var i = 0; i < 100000; i++) { }
+ void x.toString(10, deopt + 0);
+ if (osr)
+ for (var i = 0; i < 100000; i++) {
+ }
return res;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(4, 0, false);
f(4, 0, false);
f(4, 0, false);
diff --git a/deps/v8/test/mjsunit/regress/poly_count_operation.js b/deps/v8/test/mjsunit/regress/poly_count_operation.js
index a8a1ed2ebc..99e17475fd 100644
--- a/deps/v8/test/mjsunit/regress/poly_count_operation.js
+++ b/deps/v8/test/mjsunit/regress/poly_count_operation.js
@@ -35,6 +35,7 @@ var deopt_setter = false;
function f_mono(o) {
return 5 + o.x++;
}
+%PrepareFunctionForOptimization(f_mono);
var to_deopt = f_mono;
@@ -75,6 +76,7 @@ assertEquals(3, s);
function f_poly(o) {
return 5 + o.x++;
}
+%PrepareFunctionForOptimization(f_poly);
v = 1;
to_deopt = f_poly;
@@ -93,6 +95,7 @@ assertEquals(8, f_poly(o2));
assertEquals(6, g);
assertEquals(6, s);
+%PrepareFunctionForOptimization(f_poly);
%OptimizeFunctionOnNextCall(f_poly);
v = undefined;
assertEquals(NaN, f_poly(o2));
@@ -102,6 +105,7 @@ assertEquals(7, s);
function f_pre(o) {
return 5 + ++o.x;
}
+%PrepareFunctionForOptimization(f_pre);
v = 1;
to_deopt = f_pre;
@@ -120,6 +124,7 @@ assertEquals(9, f_pre(o2));
assertEquals(10, g);
assertEquals(10, s);
+%PrepareFunctionForOptimization(f_pre);
%OptimizeFunctionOnNextCall(f_pre);
v = undefined;
assertEquals(NaN, f_pre(o2));
@@ -130,6 +135,7 @@ assertEquals(11, s);
function f_get(o) {
return 5 + o.x++;
}
+%PrepareFunctionForOptimization(f_get);
v = 1;
to_deopt = f_get;
@@ -148,6 +154,7 @@ assertEquals(8, f_get(o2));
assertEquals(14, g);
assertEquals(14, s);
+%PrepareFunctionForOptimization(f_get);
%OptimizeFunctionOnNextCall(f_get);
v = undefined;
assertEquals(NaN, f_get(o2));
diff --git a/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js b/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js
index 6188279248..e19540d649 100644
--- a/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js
+++ b/deps/v8/test/mjsunit/regress/polymorphic-accessor-test-context.js
@@ -4,20 +4,32 @@
// Flags: --allow-natives-syntax
-function t1() { return this instanceof t1; }
-function t2() { return this instanceof t2; }
+function t1() {
+ return this instanceof t1;
+}
+function t2() {
+ return this instanceof t2;
+}
-var o1 = new (function() { })();
-Object.defineProperty(o1, "t", {get:function() { return this instanceof o1.constructor; }});
-var o2 = new (function() { })();
-Object.defineProperty(o2, "t", {get:function() { return this instanceof o1.constructor; }});
-var o3 = new (function() { })();
+var o1 = new function() {}();
+Object.defineProperty(o1, 't', {
+ get: function() {
+ return this instanceof o1.constructor;
+ }
+});
+var o2 = new function() {}();
+Object.defineProperty(o2, 't', {
+ get: function() {
+ return this instanceof o1.constructor;
+ }
+});
+var o3 = new function() {}();
o3.t = true;
function f(o) {
return 1 + (o.t ? 1 : 2);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(o1);
f(o1);
f(o2);
diff --git a/deps/v8/test/mjsunit/regress/post-increment-close-context.js b/deps/v8/test/mjsunit/regress/post-increment-close-context.js
index 08ade10f1d..29c3d18c34 100644
--- a/deps/v8/test/mjsunit/regress/post-increment-close-context.js
+++ b/deps/v8/test/mjsunit/regress/post-increment-close-context.js
@@ -29,8 +29,9 @@
var foo = {bar: -2};
function crash() {
- return !(foo.bar++);
-}
+ return !foo.bar++;
+};
+%PrepareFunctionForOptimization(crash);
assertFalse(crash());
assertEquals(-1, foo.bar);
%OptimizeFunctionOnNextCall(crash);
diff --git a/deps/v8/test/mjsunit/regress/regress-100409.js b/deps/v8/test/mjsunit/regress/regress-100409.js
index c29250f28d..ce3cbf4e92 100644
--- a/deps/v8/test/mjsunit/regress/regress-100409.js
+++ b/deps/v8/test/mjsunit/regress/regress-100409.js
@@ -27,10 +27,10 @@
// Flags: --allow-natives-syntax
-function outer () {
+function outer() {
var val = 0;
- function foo () {
+ function foo() {
val = 0;
val;
var z = false;
@@ -41,15 +41,14 @@ function outer () {
}
}
return val++;
- }
-
+ };
+ %PrepareFunctionForOptimization(foo);
return foo;
}
-
var foo = outer();
assertEquals(1, foo());
assertEquals(1, foo());
- %OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-1079.js b/deps/v8/test/mjsunit/regress/regress-1079.js
index 208dc5b41a..78401bf339 100644
--- a/deps/v8/test/mjsunit/regress/regress-1079.js
+++ b/deps/v8/test/mjsunit/regress/regress-1079.js
@@ -33,6 +33,7 @@
function optimized() {
return unoptimized.apply(null, arguments);
}
+%PrepareFunctionForOptimization(optimized);
// It's not crucial that this is unoptimized.
function unoptimized() {
diff --git a/deps/v8/test/mjsunit/regress/regress-108296.js b/deps/v8/test/mjsunit/regress/regress-108296.js
index 38ecda778c..a38d0e4396 100644
--- a/deps/v8/test/mjsunit/regress/regress-108296.js
+++ b/deps/v8/test/mjsunit/regress/regress-108296.js
@@ -30,18 +30,20 @@
// This test checks that young immediates embedded into code objects
// are referenced through a cell.
-function f (k, a, b) {
+function f(k, a, b) {
// Create control flow for a.foo. Control flow resolution will
// be generated as a part of a gap move. Gap move operate on immediates as
// a.foo is a CONSTANT_FUNCTION.
var x = k ? a.foo : a.foo;
return x.prototype;
-}
-
-var a = { };
+};
+%PrepareFunctionForOptimization(f);
+var a = {};
// Make sure that foo is a CONSTANT_FUNCTION but not be pretenured.
-a.foo = (function () { return function () {}; })();
+a.foo = function() {
+ return function() {};
+}();
// Ensure that both branches of ternary operator have monomorphic type feedback.
f(true, a, a);
diff --git a/deps/v8/test/mjsunit/regress/regress-1099.js b/deps/v8/test/mjsunit/regress/regress-1099.js
index 49e4a52794..235ff8991f 100644
--- a/deps/v8/test/mjsunit/regress/regress-1099.js
+++ b/deps/v8/test/mjsunit/regress/regress-1099.js
@@ -42,6 +42,7 @@ function Y(x) {
}
var y = Y(X());
+%PrepareFunctionForOptimization(y);
for (var i = 0; i < 5; i++) {
assertTrue(y("foo"));
diff --git a/deps/v8/test/mjsunit/regress/regress-110509.js b/deps/v8/test/mjsunit/regress/regress-110509.js
index 132bd233be..549bbd0e37 100644
--- a/deps/v8/test/mjsunit/regress/regress-110509.js
+++ b/deps/v8/test/mjsunit/regress/regress-110509.js
@@ -32,8 +32,8 @@
function foo() {
Math.random();
new Function("");
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-1106.js b/deps/v8/test/mjsunit/regress/regress-1106.js
index e462d5dae7..f70c348a6e 100644
--- a/deps/v8/test/mjsunit/regress/regress-1106.js
+++ b/deps/v8/test/mjsunit/regress/regress-1106.js
@@ -37,19 +37,25 @@ x.bar = 4;
delete x.foo;
x.foo = 5;
-function f() { return foo; }
-
-for (i=0 ; i < 5; ++i) {
+function f() {
+ return foo;
+};
+%PrepareFunctionForOptimization(f);
+for (i = 0; i < 5; ++i) {
assertEquals(5, f());
}
%OptimizeFunctionOnNextCall(f);
assertEquals(5, f());
// Test calls on functions defined in the prototype of the global object.
-x.gee = function() { return 42; }
-function g() { return gee(); }
-
-for (i=0 ; i < 5; ++i) {
+x.gee = function() {
+ return 42;
+};
+function g() {
+ return gee();
+};
+%PrepareFunctionForOptimization(g);
+for (i = 0; i < 5; ++i) {
assertEquals(42, g());
}
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-1117.js b/deps/v8/test/mjsunit/regress/regress-1117.js
index 664dadd067..808e596ad4 100644
--- a/deps/v8/test/mjsunit/regress/regress-1117.js
+++ b/deps/v8/test/mjsunit/regress/regress-1117.js
@@ -30,14 +30,20 @@
// Test that we actually return the right value (-0) when we multiply
// constant 0 with a negative integer.
-function foo(y) {return 0 * y; }
-assertEquals(1/foo(-42), -Infinity);
-assertEquals(1/foo(-42), -Infinity);
+function foo(y) {
+ return 0 * y;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(1 / foo(-42), -Infinity);
+assertEquals(1 / foo(-42), -Infinity);
%OptimizeFunctionOnNextCall(foo);
-assertEquals(1/foo(-42), -Infinity);
+assertEquals(1 / foo(-42), -Infinity);
-function bar(x) { return x * 0; }
-assertEquals(Infinity, 1/bar(5));
-assertEquals(Infinity, 1/bar(5));
+function bar(x) {
+ return x * 0;
+};
+%PrepareFunctionForOptimization(bar);
+assertEquals(Infinity, 1 / bar(5));
+assertEquals(Infinity, 1 / bar(5));
%OptimizeFunctionOnNextCall(bar);
-assertEquals(-Infinity, 1/bar(-5));
+assertEquals(-Infinity, 1 / bar(-5));
diff --git a/deps/v8/test/mjsunit/regress/regress-1118.js b/deps/v8/test/mjsunit/regress/regress-1118.js
index 08b7239e33..9143b6d735 100644
--- a/deps/v8/test/mjsunit/regress/regress-1118.js
+++ b/deps/v8/test/mjsunit/regress/regress-1118.js
@@ -46,7 +46,10 @@ function g() { try { return o.f(); } finally { }}
// This function should be optimized via OSR.
function h() {
- for (var i = 0; i < 10; i++) %OptimizeOsr();
+ for (var i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(h);
+ }
g();
}
%PrepareFunctionForOptimization(h);
diff --git a/deps/v8/test/mjsunit/regress/regress-115100.js b/deps/v8/test/mjsunit/regress/regress-115100.js
index c917446eff..6740a55954 100644
--- a/deps/v8/test/mjsunit/regress/regress-115100.js
+++ b/deps/v8/test/mjsunit/regress/regress-115100.js
@@ -27,8 +27,13 @@
// Flags: --allow-natives-syntax
-function foo(obj) { obj.prop = 0; }
-function mk() { return Object.create(null); }
+function foo(obj) {
+ obj.prop = 0;
+};
+%PrepareFunctionForOptimization(foo);
+function mk() {
+ return Object.create(null);
+}
foo(mk());
foo(mk());
diff --git a/deps/v8/test/mjsunit/regress/regress-1166.js b/deps/v8/test/mjsunit/regress/regress-1166.js
index 8278abae68..ca3f8308b1 100644
--- a/deps/v8/test/mjsunit/regress/regress-1166.js
+++ b/deps/v8/test/mjsunit/regress/regress-1166.js
@@ -29,10 +29,14 @@
// Deoptimization after a short-circuit logical operation in an effect
// context should not see the value of the expression.
-function observe(x, y) { return x; }
-
-function test(x) { return observe(1, ((false || false), x + 1)); }
+function observe(x, y) {
+ return x;
+}
+function test(x) {
+ return observe(1, (false || false, x + 1));
+};
+%PrepareFunctionForOptimization(test);
for (var i = 0; i < 5; ++i) test(0);
%OptimizeFunctionOnNextCall(test);
test(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-1167.js b/deps/v8/test/mjsunit/regress/regress-1167.js
index 2206f3d0d0..a79b9981cd 100644
--- a/deps/v8/test/mjsunit/regress/regress-1167.js
+++ b/deps/v8/test/mjsunit/regress/regress-1167.js
@@ -33,7 +33,7 @@ function test0(n) {
var a = new Array(n);
for (var i = 0; i < n; ++i) {
// ~ of a non-numeric value is used to trigger deoptimization.
- a[i] = void(!(delete 'object')) % ~(delete 4);
+ a[i] = void !delete 'object' % ~delete 4;
}
}
@@ -49,7 +49,7 @@ for (var i = 0; i < 5; ++i) {
function test1(n) {
var a = new Array(n);
for (var i = 0; i < n; ++i) {
- a[i] = void(!(- 'object')) % ~(delete 4);
+ a[i] = void !-'object' % ~delete 4;
}
}
@@ -62,14 +62,15 @@ for (i = 0; i < 5; ++i) {
// A similar issue, different subexpression of unary ! (e0 !== e1 is
// translated into !(e0 == e1)) and different effect context.
-function side_effect() { }
-function observe(x, y) { return x; }
-function test2(x) {
- return observe(this,
- (((side_effect.observe <= side_effect.side_effect) !== false),
- x + 1));
+function side_effect() {}
+function observe(x, y) {
+ return x;
}
-
+function test2(x) {
+ return observe(
+ this, (side_effect.observe <= side_effect.side_effect !== false, x + 1));
+};
+%PrepareFunctionForOptimization(test2);
for (var i = 0; i < 5; ++i) test2(0);
%OptimizeFunctionOnNextCall(test2);
test2(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-1210.js b/deps/v8/test/mjsunit/regress/regress-1210.js
index 43500e7ba1..ff5b5bc4aa 100644
--- a/deps/v8/test/mjsunit/regress/regress-1210.js
+++ b/deps/v8/test/mjsunit/regress/regress-1210.js
@@ -32,18 +32,24 @@
var a = 0;
-function observe(x, y) { return x; }
+function observe(x, y) {
+ return x;
+}
-function side_effect(x) { a = x; }
+function side_effect(x) {
+ a = x;
+}
function test() {
// We will trigger deoptimization of 'a + 0' which should bail out to
// immediately after the call to 'side_effect' (i.e., still in the key
// subexpression of the arguments access).
- return observe(a, arguments[side_effect(a), a + 0]);
+ return observe(a, arguments[(side_effect(a), a + 0)]);
}
// Run enough to optimize assuming global 'a' is a smi.
+;
+%PrepareFunctionForOptimization(test);
for (var i = 0; i < 10; ++i) test(0);
%OptimizeFunctionOnNextCall(test);
test(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-123512.js b/deps/v8/test/mjsunit/regress/regress-123512.js
index 8a747bc5f7..2ff3c5166e 100644
--- a/deps/v8/test/mjsunit/regress/regress-123512.js
+++ b/deps/v8/test/mjsunit/regress/regress-123512.js
@@ -33,7 +33,8 @@
function f(x) {
return [x][0];
-}
+};
+%PrepareFunctionForOptimization(f);
// Test data element on prototype.
Object.prototype[0] = 23;
@@ -47,6 +48,7 @@ assertSame(3, f(3));
Object.prototype.__defineGetter__(0, function() { throw Error(); });
assertSame(4, f(4));
assertSame(5, f(5));
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertSame(6, f(6));
%DeoptimizeFunction(f);
@@ -57,7 +59,8 @@ assertSame(6, f(6));
function g(x, y) {
var o = { foo:x, 0:y };
return o.foo + o[0];
-}
+};
+%PrepareFunctionForOptimization(g);
// Test data property and element on prototype.
Object.prototype[0] = 23;
@@ -73,6 +76,7 @@ Object.prototype.__defineGetter__(0, function() { throw Error(); });
Object.prototype.__defineGetter__('foo', function() { throw Error(); });
assertSame(3, g(1, 2));
assertSame(5, g(2, 3));
+%PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
assertSame(7, g(3, 4));
%DeoptimizeFunction(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-1237.js b/deps/v8/test/mjsunit/regress/regress-1237.js
index 111df803f4..a0865c52b2 100644
--- a/deps/v8/test/mjsunit/regress/regress-1237.js
+++ b/deps/v8/test/mjsunit/regress/regress-1237.js
@@ -29,11 +29,13 @@
// Deoptimization after a conditional expression in an effect context should
// not see the value of the expression.
-function observe(x, y) { return x; }
-function test(x) {
- return observe(1, ((x? observe(observe.prototype.x): 'c'), x + 1));
+function observe(x, y) {
+ return x;
}
-
+function test(x) {
+ return observe(1, (x ? observe(observe.prototype.x) : 'c', x + 1));
+};
+%PrepareFunctionForOptimization(test);
for (var i = 0; i < 5; ++i) test(0);
%OptimizeFunctionOnNextCall(test);
test(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-123919.js b/deps/v8/test/mjsunit/regress/regress-123919.js
index be3460815b..0eddf24556 100644
--- a/deps/v8/test/mjsunit/regress/regress-123919.js
+++ b/deps/v8/test/mjsunit/regress/regress-123919.js
@@ -27,20 +27,20 @@
// Flags: --allow-natives-syntax --gc-global
-function g(max,val) {
+function g(max, val) {
this.x = 0;
for (var i = 0; i < max; i++) {
- this.x = i/100;
+ this.x = i / 100;
}
this.val = val;
}
function f(max) {
var val = 0.5;
- var obj = new g(max,val);
+ var obj = new g(max, val);
assertSame(val, obj.val);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-124594.js b/deps/v8/test/mjsunit/regress/regress-124594.js
index d51e1f661c..c17dfde4f7 100644
--- a/deps/v8/test/mjsunit/regress/regress-124594.js
+++ b/deps/v8/test/mjsunit/regress/regress-124594.js
@@ -42,9 +42,9 @@ function f(deopt) {
function g(deopt) {
return new f(deopt);
-}
-
-assertEquals({x:1}, g(false));
-assertEquals({x:1}, g(false));
+};
+%PrepareFunctionForOptimization(g);
+assertEquals({x: 1}, g(false));
+assertEquals({x: 1}, g(false));
%OptimizeFunctionOnNextCall(g);
-assertEquals({x:"1foo"}, g(true));
+assertEquals({x: '1foo'}, g(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-1323.js b/deps/v8/test/mjsunit/regress/regress-1323.js
index 552a48db2d..d5d95545eb 100644
--- a/deps/v8/test/mjsunit/regress/regress-1323.js
+++ b/deps/v8/test/mjsunit/regress/regress-1323.js
@@ -30,8 +30,8 @@
// Regression test for load/store operating with wrong number of bits.
function get(a, index) {
return a[index];
-}
-
+};
+%PrepareFunctionForOptimization(get);
var a = new Float32Array(2);
a[0] = 2.5;
a[1] = 3.5;
@@ -42,7 +42,8 @@ assertEquals(3.5, get(a, 1));
function set(a, index, value) {
a[index] = value;
-}
+};
+%PrepareFunctionForOptimization(set);
for (var i = 0; i < 5; i++) set(a, 0, 4.5);
%OptimizeFunctionOnNextCall(set);
set(a, 0, 4.5);
diff --git a/deps/v8/test/mjsunit/regress/regress-1337.js b/deps/v8/test/mjsunit/regress/regress-1337.js
index ebcf84b99c..c284c3f820 100644
--- a/deps/v8/test/mjsunit/regress/regress-1337.js
+++ b/deps/v8/test/mjsunit/regress/regress-1337.js
@@ -29,12 +29,22 @@
// Test that the throw is not inlined if object literals cannot be
// inlined.
-function bar() { throw {}; }
-
-function foo() { bar(); }
+function bar() {
+ throw {};
+}
+function foo() {
+ bar();
+};
+%PrepareFunctionForOptimization(foo);
for (var i = 0; i < 5; ++i) {
- try { foo() } catch (e) { }
+ try {
+ foo();
+ } catch (e) {
+ }
+}
+%OptimizeFunctionOnNextCall(foo);
+try {
+ foo();
+} catch (e) {
}
-%OptimizeFunctionOnNextCall(foo)
-try { foo() } catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-1351.js b/deps/v8/test/mjsunit/regress/regress-1351.js
index 656b19f6bb..359c216a71 100644
--- a/deps/v8/test/mjsunit/regress/regress-1351.js
+++ b/deps/v8/test/mjsunit/regress/regress-1351.js
@@ -30,13 +30,13 @@
// Test that the arguments value is does not escape when it appears as
// an intermediate value in an expression.
-function h() { }
+function h() {}
function f() {
var a = null;
h(a = arguments);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-137768.js b/deps/v8/test/mjsunit/regress/regress-137768.js
index 9fbd7f30ae..894bc8258b 100644
--- a/deps/v8/test/mjsunit/regress/regress-137768.js
+++ b/deps/v8/test/mjsunit/regress/regress-137768.js
@@ -34,7 +34,7 @@ function TestConstructor() {
this[2] = 3;
}
-function bad_func(o,a) {
+function bad_func(o, a) {
var s = 0;
for (var i = 0; i < 1; ++i) {
o.newFileToChangeMap = undefined;
@@ -42,8 +42,8 @@ function bad_func(o,a) {
s += x;
}
return s;
-}
-
+};
+%PrepareFunctionForOptimization(bad_func);
o = new Object();
a = new TestConstructor();
bad_func(o, a);
diff --git a/deps/v8/test/mjsunit/regress/regress-1412.js b/deps/v8/test/mjsunit/regress/regress-1412.js
index b043f1900f..48bb1636f7 100644
--- a/deps/v8/test/mjsunit/regress/regress-1412.js
+++ b/deps/v8/test/mjsunit/regress/regress-1412.js
@@ -30,22 +30,25 @@
// Flags: --allow-natives-syntax
-function strict() { "use strict"; return this; }
+function strict() {
+ 'use strict';
+ return this;
+}
function test_strict() {
assertEquals(void 0, strict.apply(undefined, arguments));
assertEquals(42, strict.apply(42, arguments));
assertEquals("asdf", strict.apply("asdf", arguments));
-}
-
+};
+%PrepareFunctionForOptimization(test_strict);
for (var i = 0; i < 10; i++) test_strict();
%OptimizeFunctionOnNextCall(test_strict);
test_strict();
function test_builtin(receiver) {
Object.prototype.valueOf.apply(receiver, arguments);
-}
-
+};
+%PrepareFunctionForOptimization(test_builtin);
for (var i = 0; i < 10; i++) test_builtin(this);
%OptimizeFunctionOnNextCall(test_builtin);
test_builtin(this);
@@ -53,7 +56,7 @@ test_builtin(this);
var exception = false;
try {
test_builtin(undefined);
-} catch(e) {
+} catch (e) {
exception = true;
}
assertTrue(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-1423.js b/deps/v8/test/mjsunit/regress/regress-1423.js
index b0d0ca3775..45726a1246 100644
--- a/deps/v8/test/mjsunit/regress/regress-1423.js
+++ b/deps/v8/test/mjsunit/regress/regress-1423.js
@@ -32,8 +32,8 @@
function f0() {
return f1('literal', true);
-}
-
+};
+%PrepareFunctionForOptimization(f0);
function f1(x, y) {
return f2(x, y);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-1434.js b/deps/v8/test/mjsunit/regress/regress-1434.js
index 6f197afaf9..5766abb722 100644
--- a/deps/v8/test/mjsunit/regress/regress-1434.js
+++ b/deps/v8/test/mjsunit/regress/regress-1434.js
@@ -29,8 +29,8 @@
function compare(a, b) {
return a === b;
-}
-
+};
+%PrepareFunctionForOptimization(compare);
compare(1.5, 2.5);
%OptimizeFunctionOnNextCall(compare);
assertTrue(compare(undefined, undefined));
diff --git a/deps/v8/test/mjsunit/regress/regress-1476.js b/deps/v8/test/mjsunit/regress/regress-1476.js
index 1277e7f6c5..43e9a8d62b 100644
--- a/deps/v8/test/mjsunit/regress/regress-1476.js
+++ b/deps/v8/test/mjsunit/regress/regress-1476.js
@@ -27,9 +27,11 @@
// Flags: --allow-natives-syntax
-function foo (i) { return (i % 2) | 0; }
-
-assertEquals (-1, foo(-1));
-assertEquals (-1, foo(-1));
+function foo(i) {
+ return i % 2 | 0;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(-1, foo(-1));
+assertEquals(-1, foo(-1));
%OptimizeFunctionOnNextCall(foo);
-assertEquals (-1, foo(-1));
+assertEquals(-1, foo(-1));
diff --git a/deps/v8/test/mjsunit/regress/regress-1521.js b/deps/v8/test/mjsunit/regress/regress-1521.js
index 3149f05a5e..a2aed5e739 100644
--- a/deps/v8/test/mjsunit/regress/regress-1521.js
+++ b/deps/v8/test/mjsunit/regress/regress-1521.js
@@ -33,14 +33,16 @@ function test(x) {
throw new Error();
} catch (e) {
var y = {f: 1};
- var f = function () {
+ var f = function() {
var z = y;
- var g = function () {
+ var g = function() {
if (y.f === z.f) return x;
};
+ ;
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
return g;
- }
+ };
assertEquals(3, f()());
}
}
diff --git a/deps/v8/test/mjsunit/regress/regress-1560.js b/deps/v8/test/mjsunit/regress/regress-1560.js
index a0aa7e64d5..98bbef1f44 100644
--- a/deps/v8/test/mjsunit/regress/regress-1560.js
+++ b/deps/v8/test/mjsunit/regress/regress-1560.js
@@ -47,11 +47,13 @@ function mkNumberDictionary() {
}
function write(a, i) { a[i] = "bazinga!"; }
+%PrepareFunctionForOptimization(write);
function test(factories, w) {
+ %PrepareFunctionForOptimization(w);
factories.forEach(function(f) { w(f(), 0); });
factories.forEach(function(f) { w(f(), 0); });
- %OptimizeFunctionOnNextCall(w);
+ %OptimizeFunctionOnNextCall(w);
factories.forEach(function(f) { w(f(), 0); });
}
diff --git a/deps/v8/test/mjsunit/regress/regress-1563.js b/deps/v8/test/mjsunit/regress/regress-1563.js
index 884b12595a..de2c636bec 100644
--- a/deps/v8/test/mjsunit/regress/regress-1563.js
+++ b/deps/v8/test/mjsunit/regress/regress-1563.js
@@ -33,8 +33,8 @@ obj = new Uint8ClampedArray(10);
// assignments.
function set_pixel(obj, arg) {
obj[0] = arg;
-}
-
+};
+%PrepareFunctionForOptimization(set_pixel);
set_pixel(obj, 1.5);
set_pixel(obj, NaN);
%OptimizeFunctionOnNextCall(set_pixel);
diff --git a/deps/v8/test/mjsunit/regress/regress-1582.js b/deps/v8/test/mjsunit/regress/regress-1582.js
index 346d68ac34..ca3f7b4c47 100644
--- a/deps/v8/test/mjsunit/regress/regress-1582.js
+++ b/deps/v8/test/mjsunit/regress/regress-1582.js
@@ -39,7 +39,9 @@ function f(restIsArray, rest) {
var arrIsArguments = (arr[1] !== rest);
assertEquals(restIsArray, arrIsArguments);
}
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(f);
f(false, 'b', 'c');
f(false, 'b', 'c');
f(false, 'b', 'c');
diff --git a/deps/v8/test/mjsunit/regress/regress-1583.js b/deps/v8/test/mjsunit/regress/regress-1583.js
index c4a344ccf8..6e7afdc159 100644
--- a/deps/v8/test/mjsunit/regress/regress-1583.js
+++ b/deps/v8/test/mjsunit/regress/regress-1583.js
@@ -43,6 +43,7 @@ function f() {
}
var o = f();
+%PrepareFunctionForOptimization(o.m);
assertEquals('hest', o.m());
assertEquals('hest', o.m());
assertEquals('hest', o.m());
diff --git a/deps/v8/test/mjsunit/regress/regress-1592.js b/deps/v8/test/mjsunit/regress/regress-1592.js
index 8f6fba01a9..eff8c1b01c 100644
--- a/deps/v8/test/mjsunit/regress/regress-1592.js
+++ b/deps/v8/test/mjsunit/regress/regress-1592.js
@@ -27,15 +27,15 @@
// Flags: --allow-natives-syntax
-var f = {
- apply: function(a, b) {}
-};
+var f = {apply: function(a, b) {}};
function test(a) {
f.apply(this, arguments);
}
// Initialize ICs.
+;
+%PrepareFunctionForOptimization(test);
test(1);
test(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-164442.js b/deps/v8/test/mjsunit/regress/regress-164442.js
index 1160d874f5..e2acd2e669 100644
--- a/deps/v8/test/mjsunit/regress/regress-164442.js
+++ b/deps/v8/test/mjsunit/regress/regress-164442.js
@@ -32,8 +32,8 @@
function ensureNotNegative(x) {
return Math.max(0, x | 0);
-}
-
+};
+%PrepareFunctionForOptimization(ensureNotNegative);
ensureNotNegative(1);
ensureNotNegative(2);
diff --git a/deps/v8/test/mjsunit/regress/regress-1647.js b/deps/v8/test/mjsunit/regress/regress-1647.js
index ab6608c1ef..51623e7dd6 100644
--- a/deps/v8/test/mjsunit/regress/regress-1647.js
+++ b/deps/v8/test/mjsunit/regress/regress-1647.js
@@ -29,13 +29,14 @@
// Test for correct deoptimization in named function expressions.
-var t = { foo: function() {} };
-
-var f = (function bar() {
- t.foo();
- assertEquals("function", typeof bar);
-});
+var t = {foo: function() {}};
+var f = function bar() {
+ t.foo();
+ assertEquals('function', typeof bar);
+};
+;
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 10; i++) f();
%OptimizeFunctionOnNextCall(f);
t.number = 2;
diff --git a/deps/v8/test/mjsunit/regress/regress-1650.js b/deps/v8/test/mjsunit/regress/regress-1650.js
index fb6a17814d..e9ea57a6c6 100644
--- a/deps/v8/test/mjsunit/regress/regress-1650.js
+++ b/deps/v8/test/mjsunit/regress/regress-1650.js
@@ -27,11 +27,13 @@
// Flags: --allow-natives-syntax
-function g(f) { return f.call.apply(f.bind, arguments); }
-
-var x = new Object;
+function g(f) {
+ return f.call.apply(f.bind, arguments);
+};
+%PrepareFunctionForOptimization(g);
+var x = new Object();
-function t() { }
+function t() {}
g(t, x);
g(t, x);
@@ -53,7 +55,7 @@ Fake.prototype.call = function () {
Fake.prototype.bind = function () {
};
-var fake = new Fake;
+var fake = new Fake();
g(fake, x);
diff --git a/deps/v8/test/mjsunit/regress/regress-166379.js b/deps/v8/test/mjsunit/regress/regress-166379.js
index 2cda61182b..2c68f32b78 100644
--- a/deps/v8/test/mjsunit/regress/regress-166379.js
+++ b/deps/v8/test/mjsunit/regress/regress-166379.js
@@ -27,12 +27,16 @@
// Flags: --allow-natives-syntax
-function mod(a, b) { return a % b; }
+function mod(a, b) {
+ return a % b;
+}
// Feed integer type info and optimize.
+;
+%PrepareFunctionForOptimization(mod);
assertEquals(0, mod(4, 2));
assertEquals(1, mod(3, 2));
%OptimizeFunctionOnNextCall(mod);
// Surprise mod with overflow.
-assertEquals(-Infinity, 1/mod(-2147483648, -1));
+assertEquals(-Infinity, 1 / mod(-2147483648, -1));
diff --git a/deps/v8/test/mjsunit/regress/regress-171641.js b/deps/v8/test/mjsunit/regress/regress-171641.js
index 8db6781821..d8caec6f78 100644
--- a/deps/v8/test/mjsunit/regress/regress-171641.js
+++ b/deps/v8/test/mjsunit/regress/regress-171641.js
@@ -32,8 +32,8 @@ function foo(k, p) {
p = Math.min(p, i);
}
m = Math.floor((k | 0) / p);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(0, 1);
foo(0, 1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-1898.js b/deps/v8/test/mjsunit/regress/regress-1898.js
index 5440446fbf..ef47abf62e 100644
--- a/deps/v8/test/mjsunit/regress/regress-1898.js
+++ b/deps/v8/test/mjsunit/regress/regress-1898.js
@@ -29,8 +29,8 @@
function f(x) {
Math.log(Math.min(0.1, Math.abs(x)));
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(0.1);
f(0.1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-201590.js b/deps/v8/test/mjsunit/regress/regress-201590.js
index 0e7ba57233..18bc8c3efd 100644
--- a/deps/v8/test/mjsunit/regress/regress-201590.js
+++ b/deps/v8/test/mjsunit/regress/regress-201590.js
@@ -60,6 +60,7 @@ function baz() {
assertEquals(288, b.y);
}
+%PrepareFunctionForOptimization(Foo.prototype.bar);
baz();
baz();
%OptimizeFunctionOnNextCall(Foo.prototype.bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-2030.js b/deps/v8/test/mjsunit/regress/regress-2030.js
index fb5a3d0c46..adbfc74c5d 100644
--- a/deps/v8/test/mjsunit/regress/regress-2030.js
+++ b/deps/v8/test/mjsunit/regress/regress-2030.js
@@ -42,8 +42,8 @@ var bb = new b();
function f(o) {
return o.x;
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertSame(1, f(aa));
assertSame(1, f(aa));
assertSame(2, f(bb));
diff --git a/deps/v8/test/mjsunit/regress/regress-2045.js b/deps/v8/test/mjsunit/regress/regress-2045.js
index 822ee1fa46..9cc7a4c949 100644
--- a/deps/v8/test/mjsunit/regress/regress-2045.js
+++ b/deps/v8/test/mjsunit/regress/regress-2045.js
@@ -38,9 +38,11 @@ function bar() {
function baz() {
return bar(1, 2);
-}
-
-G = {x: 0};
+};
+%PrepareFunctionForOptimization(baz);
+G = {
+ x: 0
+};
baz();
baz();
%OptimizeFunctionOnNextCall(baz);
diff --git a/deps/v8/test/mjsunit/regress/regress-2056.js b/deps/v8/test/mjsunit/regress/regress-2056.js
index d34a7500aa..6ac350f8aa 100644
--- a/deps/v8/test/mjsunit/regress/regress-2056.js
+++ b/deps/v8/test/mjsunit/regress/regress-2056.js
@@ -28,39 +28,37 @@
// Flags: --allow-natives-syntax
var cases = [
- [0.0, 0.0, 0.0, 0,0],
- [undefined, 0.0, NaN, NaN],
- [0.0, undefined, NaN, NaN],
- [NaN, 0.0, NaN, NaN],
- [0.0, NaN, NaN, NaN],
- [-NaN, 0.0, NaN, NaN],
- [0.0, -NaN, NaN, NaN],
- [Infinity, 0.0, Infinity, 0.0],
- [0.0, Infinity, Infinity, 0.0],
- [-Infinity, 0.0, 0.0, -Infinity],
- [0.0, -Infinity, 0.0, -Infinity]
+ [0.0, 0.0, 0.0, 0, 0], [undefined, 0.0, NaN, NaN], [0.0, undefined, NaN, NaN],
+ [NaN, 0.0, NaN, NaN], [0.0, NaN, NaN, NaN], [-NaN, 0.0, NaN, NaN],
+ [0.0, -NaN, NaN, NaN], [Infinity, 0.0, Infinity, 0.0],
+ [0.0, Infinity, Infinity, 0.0], [-Infinity, 0.0, 0.0, -Infinity],
+ [0.0, -Infinity, 0.0, -Infinity]
];
function do_min(a, b) {
- return Math.min(a, b);
-}
-
+ return Math.min(a, b);
+};
+%PrepareFunctionForOptimization(do_min);
function do_max(a, b) {
- return Math.max(a, b);
+ return Math.max(a, b);
}
// Make sure that non-crankshaft results match expectations.
+;
+%PrepareFunctionForOptimization(do_max);
for (i = 0; i < cases.length; ++i) {
- var c = cases[i];
- assertEquals(c[3], do_min(c[0], c[1]));
- assertEquals(c[2], do_max(c[0], c[1]));
+ var c = cases[i];
+ assertEquals(c[3], do_min(c[0], c[1]));
+ assertEquals(c[2], do_max(c[0], c[1]));
}
// Make sure that crankshaft results match expectations.
for (i = 0; i < cases.length; ++i) {
- var c = cases[i];
- %OptimizeFunctionOnNextCall(do_min);
- %OptimizeFunctionOnNextCall(do_max);
- assertEquals(c[3], do_min(c[0], c[1]));
- assertEquals(c[2], do_max(c[0], c[1]));
+ var c = cases[i];
+ %OptimizeFunctionOnNextCall(do_min);
+ %OptimizeFunctionOnNextCall(do_max);
+ assertEquals(c[3], do_min(c[0], c[1]));
+ assertEquals(c[2], do_max(c[0], c[1]));
+ %PrepareFunctionForOptimization(do_min);
+ %PrepareFunctionForOptimization(do_max);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-2110.js b/deps/v8/test/mjsunit/regress/regress-2110.js
index d7f78d26a7..e905708551 100644
--- a/deps/v8/test/mjsunit/regress/regress-2110.js
+++ b/deps/v8/test/mjsunit/regress/regress-2110.js
@@ -32,8 +32,8 @@ var uint8 = new Uint8Array(1);
function test() {
uint8[0] = 0x800000aa;
assertEquals(0xaa, uint8[0]);
-}
-
+};
+%PrepareFunctionForOptimization(test);
test();
test();
test();
@@ -45,8 +45,8 @@ var uint32 = new Uint32Array(1);
function test2() {
uint32[0] = 0x80123456789abcde;
assertEquals(0x789ac000, uint32[0]);
-}
-
+};
+%PrepareFunctionForOptimization(test2);
test2();
test2();
%OptimizeFunctionOnNextCall(test2);
diff --git a/deps/v8/test/mjsunit/regress/regress-2170.js b/deps/v8/test/mjsunit/regress/regress-2170.js
index 99f432dfcf..faf4416a05 100644
--- a/deps/v8/test/mjsunit/regress/regress-2170.js
+++ b/deps/v8/test/mjsunit/regress/regress-2170.js
@@ -35,15 +35,15 @@ function array_fun() {
for (var j = 0; j < a.length; j++) {
x.push(a[j]);
}
- for(var j = 0; j < x.length; j++) {
+ for (var j = 0; j < x.length; j++) {
if (typeof x[j] != 'number') {
throw "foo";
}
x[j] = x[j];
}
}
-}
-
+};
+%PrepareFunctionForOptimization(array_fun);
try {
for (var i = 0; i < 10; ++i) {
array_fun();
diff --git a/deps/v8/test/mjsunit/regress/regress-2193.js b/deps/v8/test/mjsunit/regress/regress-2193.js
index 4ec050e10a..068476b1b0 100644
--- a/deps/v8/test/mjsunit/regress/regress-2193.js
+++ b/deps/v8/test/mjsunit/regress/regress-2193.js
@@ -40,12 +40,15 @@ function MakeClosure() {
// Create two closures that share the same literal boilerplates.
var closure1 = MakeClosure();
+%PrepareFunctionForOptimization(closure1);
var closure2 = MakeClosure();
+%PrepareFunctionForOptimization(closure2);
var expected = [1,2,3,3,4,5,6,7,8,9,bozo];
// Make sure we generate optimized code for the first closure after
// warming it up properly so that the literals boilerplate is generated
// and the optimized code uses CreateArrayLiteralShallow runtime call.
+%PrepareFunctionForOptimization(closure1);
assertEquals(0, closure1(false));
assertEquals(expected, closure1(true));
%OptimizeFunctionOnNextCall(closure1);
@@ -53,6 +56,7 @@ assertEquals(expected, closure1(true));
// Optimize the second closure, which should reuse the optimized code
// from the first closure with the same literal boilerplates.
+%PrepareFunctionForOptimization(closure2);
assertEquals(0, closure2(false));
%OptimizeFunctionOnNextCall(closure2);
assertEquals(expected, closure2(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-2234.js b/deps/v8/test/mjsunit/regress/regress-2234.js
index 8da513e30e..4e55c63fd7 100644
--- a/deps/v8/test/mjsunit/regress/regress-2234.js
+++ b/deps/v8/test/mjsunit/regress/regress-2234.js
@@ -33,8 +33,8 @@ function test(i) {
// Check whether the first cache line has been accidentally overwritten
// with incorrect key.
assertEquals(0, Math.sin(0));
-}
-
+};
+%PrepareFunctionForOptimization(test);
for (i = 0; i < 10000; ++i) {
test(i);
if (i == 0) %OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-2250.js b/deps/v8/test/mjsunit/regress/regress-2250.js
index e2ce546628..a01606a0d8 100644
--- a/deps/v8/test/mjsunit/regress/regress-2250.js
+++ b/deps/v8/test/mjsunit/regress/regress-2250.js
@@ -53,6 +53,7 @@ function test() {
;
}
+%PrepareFunctionForOptimization(test);
eq({}, {});
eq({}, {});
eq(1, 1);
@@ -60,6 +61,7 @@ eq(1, 1);
test();
%OptimizeFunctionOnNextCall(test);
test();
+%PrepareFunctionForOptimization(test);
%OptimizeFunctionOnNextCall(test);
// Second compilation should have noticed that LICM wasn't a good idea, and now
// function should no longer deopt when called.
diff --git a/deps/v8/test/mjsunit/regress/regress-2261.js b/deps/v8/test/mjsunit/regress/regress-2261.js
index 000e07de5b..c480a7cf54 100644
--- a/deps/v8/test/mjsunit/regress/regress-2261.js
+++ b/deps/v8/test/mjsunit/regress/regress-2261.js
@@ -40,8 +40,8 @@
%DebugPrint(arguments[0]);
forceDeopt + 1;
return arguments[0];
- }
-
+ };
+ %PrepareFunctionForOptimization(inner);
assertEquals(1, inner(1));
assertEquals(1, inner(1));
%OptimizeFunctionOnNextCall(inner);
@@ -68,8 +68,8 @@
function outer(x) {
return inner(x);
- }
-
+ };
+ %PrepareFunctionForOptimization(outer);
assertEquals(1, outer(1));
assertEquals(1, outer(1));
%OptimizeFunctionOnNextCall(outer);
@@ -84,7 +84,7 @@
(function () {
var forceDeopt = 0;
- function inner(x,y,z) {
+ function inner(x, y, z) {
"use strict";
x = 3;
// Do not remove this %DebugPrint as it makes sure the deopt happens
@@ -97,13 +97,13 @@
function middle(x) {
"use strict";
x = 2;
- return inner(10*x, 20*x, 30*x) + arguments[0];
+ return inner(10 * x, 20 * x, 30 * x) + arguments[0];
}
function outer(x) {
- return middle(x);
- }
-
+ return middle(x);
+ };
+ %PrepareFunctionForOptimization(outer);
assertEquals(21, outer(1));
assertEquals(21, outer(1));
%OptimizeFunctionOnNextCall(outer);
diff --git a/deps/v8/test/mjsunit/regress/regress-2294.js b/deps/v8/test/mjsunit/regress/regress-2294.js
index 43ba10df03..6679d62654 100644
--- a/deps/v8/test/mjsunit/regress/regress-2294.js
+++ b/deps/v8/test/mjsunit/regress/regress-2294.js
@@ -62,8 +62,8 @@ function test() {
assertEquals(255, clampedArray[0]);
clampedArray[0] = -1000000000000;
assertEquals(0, clampedArray[0]);
-}
-
+};
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-2315.js b/deps/v8/test/mjsunit/regress/regress-2315.js
index 9e40d0d3e3..2ae4b4cb67 100644
--- a/deps/v8/test/mjsunit/regress/regress-2315.js
+++ b/deps/v8/test/mjsunit/regress/regress-2315.js
@@ -30,7 +30,9 @@
var foo = (function() {
return eval("(function bar() { return 1; })");
})();
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-234101.js b/deps/v8/test/mjsunit/regress/regress-234101.js
index 74228dfabe..1f2be03055 100644
--- a/deps/v8/test/mjsunit/regress/regress-234101.js
+++ b/deps/v8/test/mjsunit/regress/regress-234101.js
@@ -33,9 +33,9 @@
// either enhance the gap resolver or make sure that such moves don't happen.
function foo(x) {
- return (x ? NaN : 0.2) + 0.1;
-}
-
+ return (x ? NaN : 0.2) + 0.1;
+};
+%PrepareFunctionForOptimization(foo);
foo(false);
foo(false);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-247688.js b/deps/v8/test/mjsunit/regress/regress-247688.js
index 80e2884c70..d00dba81bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-247688.js
+++ b/deps/v8/test/mjsunit/regress/regress-247688.js
@@ -28,38 +28,38 @@
// Flags: --allow-natives-syntax
var a = {};
-a.x = 1
-a.y = 1.5
+a.x = 1;
+a.y = 1.5;
-var b = {}
+var b = {};
b.x = 1.5;
b.y = 1;
-var c = {}
+var c = {};
c.x = 1.5;
-var d = {}
+var d = {};
d.x = 1.5;
-var e = {}
+var e = {};
e.x = 1.5;
-var f = {}
+var f = {};
f.x = 1.5;
-var g = {}
+var g = {};
g.x = 1.5;
-var h = {}
+var h = {};
h.x = 1.5;
-var i = {}
+var i = {};
i.x = 1.5;
-var o = {}
-var p = {y : 10, z : 1}
+var o = {};
+var p = {y: 10, z: 1};
o.__proto__ = p;
-delete p.z
+delete p.z;
function foo(v, w) {
// Make load via IC in optimized code. Its target will get overwritten by
@@ -68,8 +68,8 @@ function foo(v, w) {
// Make store with transition to make this code dependent on the map.
w.y = 1;
return b.y;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(o, c);
foo(o, d);
foo(o, e);
diff --git a/deps/v8/test/mjsunit/regress/regress-2489.js b/deps/v8/test/mjsunit/regress/regress-2489.js
index 882c4f794a..1feeb3029a 100644
--- a/deps/v8/test/mjsunit/regress/regress-2489.js
+++ b/deps/v8/test/mjsunit/regress/regress-2489.js
@@ -31,8 +31,8 @@
function f(a, b) {
return g("c", "d");
-}
-
+};
+%PrepareFunctionForOptimization(f);
function g(a, b) {
g.constructor.apply(this, arguments);
}
@@ -40,7 +40,7 @@ function g(a, b) {
g.constructor = function(a, b) {
assertEquals("c", a);
assertEquals("d", b);
-}
+};
f("a", "b");
f("a", "b");
diff --git a/deps/v8/test/mjsunit/regress/regress-2499.js b/deps/v8/test/mjsunit/regress/regress-2499.js
index 52aad874db..7fc87fc843 100644
--- a/deps/v8/test/mjsunit/regress/regress-2499.js
+++ b/deps/v8/test/mjsunit/regress/regress-2499.js
@@ -28,13 +28,13 @@
// Flags: --allow-natives-syntax
function foo(word, nBits) {
- return (word[1] >>> nBits) | (word[0] << (32 - nBits));
-}
-
+ return word[1] >>> nBits | word[0] << 32 - nBits;
+};
+%PrepareFunctionForOptimization(foo);
word = [0x1001, 0];
var expected = foo(word, 1);
foo(word, 1);
%OptimizeFunctionOnNextCall(foo);
var optimized = foo(word, 1);
-assertEquals(expected, optimized)
+assertEquals(expected, optimized);
diff --git a/deps/v8/test/mjsunit/regress/regress-2537.js b/deps/v8/test/mjsunit/regress/regress-2537.js
index 1a86000619..4f7b6c6a2a 100644
--- a/deps/v8/test/mjsunit/regress/regress-2537.js
+++ b/deps/v8/test/mjsunit/regress/regress-2537.js
@@ -31,15 +31,15 @@ var large_int = 0x40000000;
function foo(x, expected) {
assertEquals(expected, x); // This succeeds.
- x += 0; // Force int32 representation so that
- // CompareNumericAndBranch is used.
+ x += 0; // Force int32 representation so that
+ // CompareNumericAndBranch is used.
if (3 != x) {
x += 0; // Poor man's "iDef".
// Fails due to Smi-tagging without overflow check.
assertEquals(expected, x);
}
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(1, 1);
foo(3, 3);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-2539.js b/deps/v8/test/mjsunit/regress/regress-2539.js
index 5d263f8912..d2587bb151 100644
--- a/deps/v8/test/mjsunit/regress/regress-2539.js
+++ b/deps/v8/test/mjsunit/regress/regress-2539.js
@@ -33,17 +33,18 @@ dispatcher.func = C;
function A() {
B(10, 11);
-}
-
-function B(x,y) {
- x = 0; y = 0;
+};
+%PrepareFunctionForOptimization(A);
+function B(x, y) {
+ x = 0;
+ y = 0;
dispatcher.func.apply(this, arguments);
assertSame(2, arguments.length);
assertSame(10, arguments[0]);
assertSame(11, arguments[1]);
}
-function C(x,y) {
+function C(x, y) {
assertSame(2, arguments.length);
assertSame(10, arguments[0]);
assertSame(11, arguments[1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-2595.js b/deps/v8/test/mjsunit/regress/regress-2595.js
index c7e95711a8..f4cfee9bb6 100644
--- a/deps/v8/test/mjsunit/regress/regress-2595.js
+++ b/deps/v8/test/mjsunit/regress/regress-2595.js
@@ -27,12 +27,21 @@
// Flags: --allow-natives-syntax
-var p = { f: function () { return "p"; } };
+var p = {
+ f: function() {
+ return 'p';
+ }
+};
var o = Object.create(p);
o.x = true;
delete o.x; // slow case object
-var u = { x: 0, f: function () { return "u"; } }; // object with some other map
+var u = {
+ x: 0,
+ f: function() {
+ return 'u';
+ }
+}; // object with some other map
function F(x) {
return x.f();
@@ -41,8 +50,10 @@ function F(x) {
// First make CALL IC in F go MEGAMORPHIC and ensure that we put the stub
// that calls p.f (guarded by a negative dictionary lookup on the receiver)
// into the stub cache
-assertEquals("p", F(o));
-assertEquals("p", F(o));
+;
+%PrepareFunctionForOptimization(F);
+assertEquals('p', F(o));
+assertEquals('p', F(o));
assertEquals("u", F(u));
assertEquals("p", F(o));
assertEquals("u", F(u));
@@ -53,5 +64,7 @@ assertEquals("u", F(u));
assertEquals("p", F(o));
// Add f to o. o's map will *not* change.
-o.f = function () { return "o"; };
+o.f = function() {
+ return 'o';
+};
assertEquals("o", F(o));
diff --git a/deps/v8/test/mjsunit/regress/regress-2596.js b/deps/v8/test/mjsunit/regress/regress-2596.js
index 6500988bd6..f21079a809 100644
--- a/deps/v8/test/mjsunit/regress/regress-2596.js
+++ b/deps/v8/test/mjsunit/regress/regress-2596.js
@@ -29,7 +29,7 @@
var ab = new ArrayBuffer(8);
var i_view = new Int32Array(ab);
-i_view[0] = %GetHoleNaNUpper()
+i_view[0] = %GetHoleNaNUpper();
i_view[1] = %GetHoleNaNLower();
var doubles = new Float64Array(ab); // kHoleNaN
assertTrue(isNaN(doubles[0]));
@@ -42,8 +42,8 @@ assertTrue(%HasDoubleElements(array));
function boom(index) {
array[index] = doubles[0];
return array[index];
-}
-
+};
+%PrepareFunctionForOptimization(boom);
assertTrue(isNaN(boom(0)));
assertTrue(isNaN(boom(0)));
assertTrue(isNaN(boom(0)));
diff --git a/deps/v8/test/mjsunit/regress/regress-2612.js b/deps/v8/test/mjsunit/regress/regress-2612.js
index 600894845e..7603901498 100644
--- a/deps/v8/test/mjsunit/regress/regress-2612.js
+++ b/deps/v8/test/mjsunit/regress/regress-2612.js
@@ -68,7 +68,9 @@ for (var i = 750; i < 3000; i++) {
source += "x=1; return _0;"
var f = new Function(source);
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index 6b80fbc2af..1cb56ca5c4 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -38,7 +38,10 @@ assertFalse(isAlwaysOptimize());
function f() {
do {
do {
- for (var i = 0; i < 10; i++) %OptimizeOsr();
+ for (var i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(f);
+ }
// Note: this check can't be wrapped in a function, because
// calling that function causes a deopt from lack of call
// feedback.
@@ -70,7 +73,10 @@ function g() {
do {
do {
do {
- for (var i = 0; i < 10; i++) %OptimizeOsr();
+ for (var i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(g);
+ }
var opt_status = %GetOptimizationStatus(g);
assertTrue(
(opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0 ||
diff --git a/deps/v8/test/mjsunit/regress/regress-264203.js b/deps/v8/test/mjsunit/regress/regress-264203.js
index fa00756625..0b066d4744 100644
--- a/deps/v8/test/mjsunit/regress/regress-264203.js
+++ b/deps/v8/test/mjsunit/regress/regress-264203.js
@@ -36,7 +36,9 @@ function foo(x) {
}
return result;
}
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(foo);
foo(0);
foo(0);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-2671-1.js b/deps/v8/test/mjsunit/regress/regress-2671-1.js
index 7937d6a336..d288738775 100644
--- a/deps/v8/test/mjsunit/regress/regress-2671-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-2671-1.js
@@ -36,7 +36,9 @@ function f() {
a[i]++;
assertTrue(isNaN(a[i]));
}
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(f);
f();
f();
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-2671.js b/deps/v8/test/mjsunit/regress/regress-2671.js
index 73a3098d80..6bdff7d5ba 100644
--- a/deps/v8/test/mjsunit/regress/regress-2671.js
+++ b/deps/v8/test/mjsunit/regress/regress-2671.js
@@ -36,7 +36,9 @@ function f() {
a[i] = 1/y;
assertFalse(isNaN(a[i]));
}
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(f);
f();
f();
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-2758.js b/deps/v8/test/mjsunit/regress/regress-2758.js
index ee78844400..9f97e71f59 100644
--- a/deps/v8/test/mjsunit/regress/regress-2758.js
+++ b/deps/v8/test/mjsunit/regress/regress-2758.js
@@ -38,6 +38,7 @@ var functions = [
for (var i = 0; i < 5; ++i) {
for (var j in functions) {
+ %PrepareFunctionForOptimization(functions[j]);
print(functions[i])
assertThrows(functions[j], TypeError)
}
diff --git a/deps/v8/test/mjsunit/regress/regress-2813.js b/deps/v8/test/mjsunit/regress/regress-2813.js
index 97ae43b316..9903e638e0 100644
--- a/deps/v8/test/mjsunit/regress/regress-2813.js
+++ b/deps/v8/test/mjsunit/regress/regress-2813.js
@@ -36,8 +36,8 @@ function foo(x) {
}
}
return 0;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(0, foo(0));
assertEquals(0, foo(0));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-2843.js b/deps/v8/test/mjsunit/regress/regress-2843.js
index 5b28c2d794..f71161c788 100644
--- a/deps/v8/test/mjsunit/regress/regress-2843.js
+++ b/deps/v8/test/mjsunit/regress/regress-2843.js
@@ -37,7 +37,9 @@ function foo(x, fun) {
fun();
return a;
}
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(foo);
assertThrows("foo(1, bailout)");
assertThrows("foo(1, bailout)");
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-298269.js b/deps/v8/test/mjsunit/regress/regress-298269.js
index dd6b6a00fc..ee44e88fee 100644
--- a/deps/v8/test/mjsunit/regress/regress-298269.js
+++ b/deps/v8/test/mjsunit/regress/regress-298269.js
@@ -10,16 +10,22 @@ function Cb(a, trigger) {
g = a.charCodeAt(f);
// This will fail after OSR if Runtime_StringCharCodeAt is modified
// to iterates optimized frames and visit safepoint pointers.
- if (g == "C".charCodeAt(0)) %OptimizeOsr();
+ if (g == "C".charCodeAt(0)) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(Cb);
+ }
}
return g;
}
-%PrepareFunctionForOptimization(Cb);
var s1 = "long string to make cons string 1";
var s2 = "long string to make cons string 2";
+%PrepareFunctionForOptimization(Cb);
Cb(s1 + s2);
+%PrepareFunctionForOptimization(Cb);
Cb(s1);
var s3 = "string for triggering osr in Cb";
+%PrepareFunctionForOptimization(Cb);
Cb(s3 + s3);
+%PrepareFunctionForOptimization(Cb);
Cb(s1 + s2);
diff --git a/deps/v8/test/mjsunit/regress/regress-2987.js b/deps/v8/test/mjsunit/regress/regress-2987.js
index 02927e2227..94c15bd93f 100644
--- a/deps/v8/test/mjsunit/regress/regress-2987.js
+++ b/deps/v8/test/mjsunit/regress/regress-2987.js
@@ -35,7 +35,7 @@ function constructor() {
this.x = 0;
}
-var deopt = { deopt:false };
+var deopt = {deopt: false};
function boogeyman(mode, value) {
var object = new constructor();
if (mode) {
@@ -45,8 +45,8 @@ function boogeyman(mode, value) {
}
deopt.deopt;
assertEquals(value, object.x);
-}
-
+};
+%PrepareFunctionForOptimization(boogeyman);
boogeyman(true, 1);
boogeyman(true, 1);
boogeyman(false, 2);
diff --git a/deps/v8/test/mjsunit/regress/regress-2989.js b/deps/v8/test/mjsunit/regress/regress-2989.js
index 213c0df7b5..236fff3a50 100644
--- a/deps/v8/test/mjsunit/regress/regress-2989.js
+++ b/deps/v8/test/mjsunit/regress/regress-2989.js
@@ -30,9 +30,10 @@ if (isNeverOptimizeLiteMode()) {
(function ArgumentsObjectChange() {
function f(x) {
- x = 42;
- return f.arguments[0];
- }
+ x = 42;
+ return f.arguments[0];
+ };
+ %PrepareFunctionForOptimization(f);
%EnsureFeedbackVectorForFunction(f);
f(0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-3029.js b/deps/v8/test/mjsunit/regress/regress-3029.js
index ae412dff2b..ca9f4767fc 100644
--- a/deps/v8/test/mjsunit/regress/regress-3029.js
+++ b/deps/v8/test/mjsunit/regress/regress-3029.js
@@ -37,8 +37,8 @@ function f() {
function g() {
f();
-}
-
+};
+%PrepareFunctionForOptimization(g);
assertThrows("g()", TypeError);
assertThrows("g()", TypeError);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-3039.js b/deps/v8/test/mjsunit/regress/regress-3039.js
index 3c7f62c16e..42640cec02 100644
--- a/deps/v8/test/mjsunit/regress/regress-3039.js
+++ b/deps/v8/test/mjsunit/regress/regress-3039.js
@@ -28,10 +28,12 @@
// Flags: --allow-natives-syntax
function do_div(x, y) {
- return (x / y) | 0;
+ return x / y | 0;
}
// Preparation.
+;
+%PrepareFunctionForOptimization(do_div);
assertEquals(17, do_div(51, 3));
assertEquals(13, do_div(65, 5));
%OptimizeFunctionOnNextCall(do_div);
diff --git a/deps/v8/test/mjsunit/regress/regress-3158.js b/deps/v8/test/mjsunit/regress/regress-3158.js
index c69127395e..79f41eb966 100644
--- a/deps/v8/test/mjsunit/regress/regress-3158.js
+++ b/deps/v8/test/mjsunit/regress/regress-3158.js
@@ -9,8 +9,8 @@ delete Array.prototype[0];
function foo(a, i) {
return a[i];
-}
-
+};
+%PrepareFunctionForOptimization(foo);
var a = new Array(100000);
a[3] = 'x';
diff --git a/deps/v8/test/mjsunit/regress/regress-3176.js b/deps/v8/test/mjsunit/regress/regress-3176.js
index dbfe8218fd..3a1bfde1cf 100644
--- a/deps/v8/test/mjsunit/regress/regress-3176.js
+++ b/deps/v8/test/mjsunit/regress/regress-3176.js
@@ -16,13 +16,16 @@ function foo(a) {
}
return sum;
}
+%PrepareFunctionForOptimization(foo);
var a = new Int32Array(10);
+%PrepareFunctionForOptimization(foo);
foo(a);
foo(a);
%OptimizeFunctionOnNextCall(foo);
foo(a);
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(a);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-3183.js b/deps/v8/test/mjsunit/regress/regress-3183.js
index 4551621e65..290508226c 100644
--- a/deps/v8/test/mjsunit/regress/regress-3183.js
+++ b/deps/v8/test/mjsunit/regress/regress-3183.js
@@ -42,7 +42,9 @@
function foo() { return bar(arguments[0], arguments[1], arguments[2]); }
function baz(f, deopt) { return foo("x", deopt, f); }
+ %PrepareFunctionForOptimization(baz);
+ %PrepareFunctionForOptimization(baz);
baz(f1, 0);
baz(f2, 0);
%OptimizeFunctionOnNextCall(baz);
@@ -65,7 +67,9 @@
function foo() { return bar(arguments[0], arguments[1]); }
function baz(deopt) { return foo("x", deopt); }
+ %PrepareFunctionForOptimization(baz);
+ %PrepareFunctionForOptimization(baz);
baz(0);
baz(0);
%OptimizeFunctionOnNextCall(baz);
@@ -88,7 +92,9 @@
function foo() { return bar(arguments[0], arguments[1]); }
function baz(deopt) { return foo(0, deopt); }
+ %PrepareFunctionForOptimization(baz);
+ %PrepareFunctionForOptimization(baz);
baz(0);
baz(0);
%OptimizeFunctionOnNextCall(baz);
diff --git a/deps/v8/test/mjsunit/regress/regress-318420.js b/deps/v8/test/mjsunit/regress/regress-318420.js
index 77bef10ec4..ea458fb12e 100644
--- a/deps/v8/test/mjsunit/regress/regress-318420.js
+++ b/deps/v8/test/mjsunit/regress/regress-318420.js
@@ -38,8 +38,9 @@ var obj = {
a[d].apply(e[d], arguments)
}
}
-}
+};
+%PrepareFunctionForOptimization(obj.foo);
obj.foo(1, 2, 3, 4);
obj.foo(1, 2, 3, 4);
%OptimizeFunctionOnNextCall(obj.foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-3204.js b/deps/v8/test/mjsunit/regress/regress-3204.js
index b3161be497..f3e1b37186 100644
--- a/deps/v8/test/mjsunit/regress/regress-3204.js
+++ b/deps/v8/test/mjsunit/regress/regress-3204.js
@@ -7,11 +7,11 @@
// -----------------------------------------------------------------------------
function SmiTaggingCanOverflow(x) {
- x = x | 0;
- if (x == 0) return;
- return x;
-}
-
+ x = x | 0;
+ if (x == 0) return;
+ return x;
+};
+%PrepareFunctionForOptimization(SmiTaggingCanOverflow);
SmiTaggingCanOverflow(2147483647);
SmiTaggingCanOverflow(2147483647);
%OptimizeFunctionOnNextCall(SmiTaggingCanOverflow);
@@ -23,8 +23,8 @@ function ModILeftCanBeNegative() {
var x = 0;
for (var i = -1; i < 0; ++i) x = i % 2;
return x;
-}
-
+};
+%PrepareFunctionForOptimization(ModILeftCanBeNegative);
ModILeftCanBeNegative();
%OptimizeFunctionOnNextCall(ModILeftCanBeNegative);
assertEquals(-1, ModILeftCanBeNegative());
@@ -33,10 +33,10 @@ assertEquals(-1, ModILeftCanBeNegative());
function ModIRightCanBeZero() {
var x = 0;
- for (var i = -1; i <= 0; ++i) x = (2 % i) | 0;
+ for (var i = -1; i <= 0; ++i) x = 2 % i | 0;
return x;
-}
-
+};
+%PrepareFunctionForOptimization(ModIRightCanBeZero);
ModIRightCanBeZero();
%OptimizeFunctionOnNextCall(ModIRightCanBeZero);
ModIRightCanBeZero();
diff --git a/deps/v8/test/mjsunit/regress/regress-320532.js b/deps/v8/test/mjsunit/regress/regress-320532.js
index 7559550779..1b4990a440 100644
--- a/deps/v8/test/mjsunit/regress/regress-320532.js
+++ b/deps/v8/test/mjsunit/regress/regress-320532.js
@@ -30,12 +30,15 @@
function bar() { return new Array(); }
+%PrepareFunctionForOptimization(bar);
bar();
bar();
%OptimizeFunctionOnNextCall(bar);
a = bar();
function foo(len) { return new Array(len); }
+%PrepareFunctionForOptimization(foo);
foo(0);
foo(0);
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
foo(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-3218915.js b/deps/v8/test/mjsunit/regress/regress-3218915.js
index 4b08a6e825..2850092afa 100644
--- a/deps/v8/test/mjsunit/regress/regress-3218915.js
+++ b/deps/v8/test/mjsunit/regress/regress-3218915.js
@@ -37,7 +37,9 @@ function withCommaExpressionInConditional(x) {
}
return (y = x + 1, y > 1) ? 'medium' : 'small';
}
+%PrepareFunctionForOptimization(withCommaExpressionInConditional);
+%PrepareFunctionForOptimization(withCommaExpressionInConditional);
for (var i = 0; i < 5; i++) {
withCommaExpressionInConditional(i);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-323845.js b/deps/v8/test/mjsunit/regress/regress-323845.js
index 4e81657917..300dc928c2 100644
--- a/deps/v8/test/mjsunit/regress/regress-323845.js
+++ b/deps/v8/test/mjsunit/regress/regress-323845.js
@@ -40,8 +40,10 @@ function g(x) {
function f() {
g({});
-}
-
-f(); f(); f();
+};
+%PrepareFunctionForOptimization(f);
+f();
+f();
+f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-330046.js b/deps/v8/test/mjsunit/regress/regress-330046.js
index 24557b4cc6..bd7809ba94 100644
--- a/deps/v8/test/mjsunit/regress/regress-330046.js
+++ b/deps/v8/test/mjsunit/regress/regress-330046.js
@@ -40,6 +40,7 @@ function f(n, x, b) {
}
return sum;
}
+%PrepareFunctionForOptimization(f);
f(10, o3);
f(20, o3);
@@ -51,6 +52,7 @@ f(100000, o3);
// This causes all code for f to be lazily deopted.
o2.a = 5;
+%PrepareFunctionForOptimization(f);
// If OSR did not evict the old code, it will be installed in f here.
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-3307.js b/deps/v8/test/mjsunit/regress/regress-3307.js
index 1fc770d20c..d27d26d205 100644
--- a/deps/v8/test/mjsunit/regress/regress-3307.js
+++ b/deps/v8/test/mjsunit/regress/regress-3307.js
@@ -14,8 +14,8 @@ function f() {
a.x += b.x;
}
return a.x;
-}
-
+};
+%PrepareFunctionForOptimization(f);
new p(0.1); // make 'x' mutable box double field in p.
assertEquals(3, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-331416.js b/deps/v8/test/mjsunit/regress/regress-331416.js
index 0c60fced14..062d7b920f 100644
--- a/deps/v8/test/mjsunit/regress/regress-331416.js
+++ b/deps/v8/test/mjsunit/regress/regress-331416.js
@@ -29,8 +29,9 @@
function load(a, i) {
return a[i];
-}
-load([1, 2, 3], "length");
+};
+%PrepareFunctionForOptimization(load);
+load([1, 2, 3], 'length');
load(3);
load([1, 2, 3], 3);
load(0, 0);
@@ -40,8 +41,9 @@ assertEquals(undefined, load(0, 0));
function store(a, i, x) {
a[i] = x;
-}
-store([1, 2, 3], "length", 3);
+};
+%PrepareFunctionForOptimization(store);
+store([1, 2, 3], 'length', 3);
store(3);
store([1, 2, 3], 3, 3);
store(0, 0, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-333594.js b/deps/v8/test/mjsunit/regress/regress-333594.js
index 6f6dbaafcd..47da85b3c7 100644
--- a/deps/v8/test/mjsunit/regress/regress-333594.js
+++ b/deps/v8/test/mjsunit/regress/regress-333594.js
@@ -34,8 +34,8 @@ var o = { x: {} };
function func() {
return {x: G};
-}
-
+};
+%PrepareFunctionForOptimization(func);
func();
func();
%OptimizeFunctionOnNextCall(func);
diff --git a/deps/v8/test/mjsunit/regress/regress-334708.js b/deps/v8/test/mjsunit/regress/regress-334708.js
index f0291bbdab..185da2e3bd 100644
--- a/deps/v8/test/mjsunit/regress/regress-334708.js
+++ b/deps/v8/test/mjsunit/regress/regress-334708.js
@@ -33,8 +33,8 @@ function foo(x, y) {
function bar(x, y) {
return foo(x + 1, y + 1);
-}
-
+};
+%PrepareFunctionForOptimization(bar);
foo(16, "4");
bar(64, 2);
diff --git a/deps/v8/test/mjsunit/regress/regress-3359.js b/deps/v8/test/mjsunit/regress/regress-3359.js
index 0973797e7e..ce800b86c0 100644
--- a/deps/v8/test/mjsunit/regress/regress-3359.js
+++ b/deps/v8/test/mjsunit/regress/regress-3359.js
@@ -6,7 +6,8 @@
function f() {
return 1 >> Boolean.constructor + 1;
-}
+};
+%PrepareFunctionForOptimization(f);
assertEquals(1, f());
%OptimizeFunctionOnNextCall(f);
assertEquals(1, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-3380.js b/deps/v8/test/mjsunit/regress/regress-3380.js
index 2fae459b3b..8cf60afd19 100644
--- a/deps/v8/test/mjsunit/regress/regress-3380.js
+++ b/deps/v8/test/mjsunit/regress/regress-3380.js
@@ -5,12 +5,12 @@
// Flags: --allow-natives-syntax
function foo(a) {
- return (a[0] >>> 0) > 0;
-}
-
+ return a[0] >>> 0 > 0;
+};
+%PrepareFunctionForOptimization(foo);
var a = new Uint32Array([4]);
var b = new Uint32Array([0x80000000]);
assertTrue(foo(a));
assertTrue(foo(a));
%OptimizeFunctionOnNextCall(foo);
-assertTrue(foo(b))
+assertTrue(foo(b));
diff --git a/deps/v8/test/mjsunit/regress/regress-3392.js b/deps/v8/test/mjsunit/regress/regress-3392.js
index 375f30210c..f5b62740c9 100644
--- a/deps/v8/test/mjsunit/regress/regress-3392.js
+++ b/deps/v8/test/mjsunit/regress/regress-3392.js
@@ -10,8 +10,8 @@ function foo() {
a.b = 1;
}
assertTrue(0 <= a.b);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-343609.js b/deps/v8/test/mjsunit/regress/regress-343609.js
index 520b54e52c..d7bfe63543 100644
--- a/deps/v8/test/mjsunit/regress/regress-343609.js
+++ b/deps/v8/test/mjsunit/regress/regress-343609.js
@@ -27,6 +27,7 @@ function dummy() {
(function () {
var o = {c: 10};
var f1 = get_closure2();
+ %PrepareFunctionForOptimization(f1);
f1(o);
f1(o);
%OptimizeFunctionOnNextCall(f1);
@@ -38,6 +39,7 @@ var o = new Ctor();
function opt() {
(function () {
var f1 = get_closure();
+ %PrepareFunctionForOptimization(f1);
f1(new Ctor());
f1(new Ctor());
%OptimizeFunctionOnNextCall(f1);
diff --git a/deps/v8/test/mjsunit/regress/regress-346343.js b/deps/v8/test/mjsunit/regress/regress-346343.js
index e4c1066313..6e3f7d5a3d 100644
--- a/deps/v8/test/mjsunit/regress/regress-346343.js
+++ b/deps/v8/test/mjsunit/regress/regress-346343.js
@@ -32,10 +32,13 @@ function f(o) {
var y = o.y;
}
}
-f({y:1.1});
-f({y:1.1});
+f({y: 1.1});
+f({y: 1.1});
-function g(x) { f({z:x}); }
+function g(x) {
+ f({z: x});
+};
+%PrepareFunctionForOptimization(g);
g(1);
g(2);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-346587.js b/deps/v8/test/mjsunit/regress/regress-346587.js
index e907002f38..961d5e666a 100644
--- a/deps/v8/test/mjsunit/regress/regress-346587.js
+++ b/deps/v8/test/mjsunit/regress/regress-346587.js
@@ -9,9 +9,9 @@ function bar(obj) {
}
function foo() {
- bar({ x : 'baz' });
-}
-
+ bar({x: 'baz'});
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-347262.js b/deps/v8/test/mjsunit/regress/regress-347262.js
index 76bc34a2cd..5f4b0389d8 100644
--- a/deps/v8/test/mjsunit/regress/regress-347262.js
+++ b/deps/v8/test/mjsunit/regress/regress-347262.js
@@ -35,7 +35,8 @@
function f(x) {
g();
return arguments[0];
- }
+ };
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -50,11 +51,12 @@
}
function f(x) {
- var o1 = { o2 : 1 };
+ var o1 = {o2: 1};
var a = g(o1);
o1.o2 = 3;
return arguments[0] + a[0].o2;
- }
+ };
+ %PrepareFunctionForOptimization(f);
f(0);
f(0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-347542.js b/deps/v8/test/mjsunit/regress/regress-347542.js
index 901d798fb7..3929a6076a 100644
--- a/deps/v8/test/mjsunit/regress/regress-347542.js
+++ b/deps/v8/test/mjsunit/regress/regress-347542.js
@@ -4,7 +4,8 @@
// Flags: --allow-natives-syntax
-function foo() {}
+function foo() {};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-347543.js b/deps/v8/test/mjsunit/regress/regress-347543.js
index ddea23a97f..bbef3895db 100644
--- a/deps/v8/test/mjsunit/regress/regress-347543.js
+++ b/deps/v8/test/mjsunit/regress/regress-347543.js
@@ -7,8 +7,8 @@
function f(a) {
a[5000000] = 256;
assertEquals(256, a[5000000]);
-}
-
+};
+%PrepareFunctionForOptimization(f);
var v1 = new Array(5000001);
var v2 = new Array(10);
f(v1);
diff --git a/deps/v8/test/mjsunit/regress/regress-3476.js b/deps/v8/test/mjsunit/regress/regress-3476.js
index f4333dbbfc..bb45252946 100644
--- a/deps/v8/test/mjsunit/regress/regress-3476.js
+++ b/deps/v8/test/mjsunit/regress/regress-3476.js
@@ -5,7 +5,11 @@
// Flags: --allow-natives-syntax
function MyWrapper(v) {
- return { valueOf: function() { return v } };
+ return {
+ valueOf: function() {
+ return v;
+ }
+ };
}
function f() {
@@ -16,8 +20,8 @@ function f() {
assertEquals("xtrue", "x" + true);
assertEquals("ytrue", new String("y") + true);
assertEquals("ztrue", new MyWrapper("z") + true);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-347904.js b/deps/v8/test/mjsunit/regress/regress-347904.js
index 1a27b054a4..967c8d3412 100644
--- a/deps/v8/test/mjsunit/regress/regress-347904.js
+++ b/deps/v8/test/mjsunit/regress/regress-347904.js
@@ -8,5 +8,7 @@ var v = /abc/;
function f() {
v = 1578221999;
};
+%PrepareFunctionForOptimization(f);
+;
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-347909.js b/deps/v8/test/mjsunit/regress/regress-347909.js
index 90a8e6a759..4cf4d2cb1f 100644
--- a/deps/v8/test/mjsunit/regress/regress-347909.js
+++ b/deps/v8/test/mjsunit/regress/regress-347909.js
@@ -4,7 +4,7 @@
// Flags: --allow-natives-syntax
-var a = {y:1.5};
+var a = {y: 1.5};
a.y = 0;
var b = a.y;
a.y = {};
@@ -12,7 +12,8 @@ var d = 1;
function f() {
d = 0;
return {y: b};
-}
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-347914.js b/deps/v8/test/mjsunit/regress/regress-347914.js
index 170a4f8b52..6ec0ea5bf5 100644
--- a/deps/v8/test/mjsunit/regress/regress-347914.js
+++ b/deps/v8/test/mjsunit/regress/regress-347914.js
@@ -81,6 +81,7 @@ function __f_0(o) {
assertTrue(false);
}
}
+%PrepareFunctionForOptimization(__f_0);
__v_4 = {};
__v_4.size = function() { return 42; }
__v_4.g = function() { return this.size(); };
diff --git a/deps/v8/test/mjsunit/regress/regress-348280.js b/deps/v8/test/mjsunit/regress/regress-348280.js
index 319c270bef..90075ca1e4 100644
--- a/deps/v8/test/mjsunit/regress/regress-348280.js
+++ b/deps/v8/test/mjsunit/regress/regress-348280.js
@@ -4,12 +4,17 @@
// Flags: --allow-natives-syntax
-function baz(f) { f(); }
+function baz(f) {
+ f();
+}
function goo() {}
baz(goo);
baz(goo);
-function bar(p) { if (p == 0) baz(1); }
+function bar(p) {
+ if (p == 0) baz(1);
+};
+%PrepareFunctionForOptimization(bar);
bar(1);
bar(1);
%OptimizeFunctionOnNextCall(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-348512.js b/deps/v8/test/mjsunit/regress/regress-348512.js
index 7d896664c2..ccb23dc55c 100644
--- a/deps/v8/test/mjsunit/regress/regress-348512.js
+++ b/deps/v8/test/mjsunit/regress/regress-348512.js
@@ -27,10 +27,16 @@
// Flags: --allow-natives-syntax
-function h(y) { assertEquals(42, y.u); }
-function g() { h.apply(0, arguments); }
-function f(x) { g({ u : x }); }
-
+function h(y) {
+ assertEquals(42, y.u);
+}
+function g() {
+ h.apply(0, arguments);
+}
+function f(x) {
+ g({u: x});
+};
+%PrepareFunctionForOptimization(f);
f(42);
f(42);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-349885.js b/deps/v8/test/mjsunit/regress/regress-349885.js
index dd3e795260..fa48d22929 100644
--- a/deps/v8/test/mjsunit/regress/regress-349885.js
+++ b/deps/v8/test/mjsunit/regress/regress-349885.js
@@ -8,7 +8,8 @@
function foo(a) {
a[292755462] = new Object();
-}
+};
+%PrepareFunctionForOptimization(foo);
foo(new Array(5));
foo(new Array(5));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-350863.js b/deps/v8/test/mjsunit/regress/regress-350863.js
index 616792b82c..3092197126 100644
--- a/deps/v8/test/mjsunit/regress/regress-350863.js
+++ b/deps/v8/test/mjsunit/regress/regress-350863.js
@@ -27,7 +27,7 @@
//
// Flags: --allow-natives-syntax
-var __v_7 = { };
+var __v_7 = {};
function __f_8(base, condition) {
__v_7[base + 3] = 0;
__v_7[base + 4] = 0;
@@ -38,7 +38,8 @@ function __f_8(base, condition) {
__v_7[base + 0] = 0;
__v_7[base + 18] = 0;
}
-}
+};
+%PrepareFunctionForOptimization(__f_8);
__f_8(1, true);
__f_8(1, false);
%OptimizeFunctionOnNextCall(__f_8);
diff --git a/deps/v8/test/mjsunit/regress/regress-351261.js b/deps/v8/test/mjsunit/regress/regress-351261.js
index 5a634fad12..1e4d7ad43f 100644
--- a/deps/v8/test/mjsunit/regress/regress-351261.js
+++ b/deps/v8/test/mjsunit/regress/regress-351261.js
@@ -9,11 +9,12 @@ function store(a) {
}
function foo() {
- var __v_8 = new Object;
+ var __v_8 = new Object();
var __v_7 = new Array(4999990);
store(__v_8);
store(__v_7);
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-351263.js b/deps/v8/test/mjsunit/regress/regress-351263.js
index 28edbcdb62..5f0819cfe2 100644
--- a/deps/v8/test/mjsunit/regress/regress-351263.js
+++ b/deps/v8/test/mjsunit/regress/regress-351263.js
@@ -29,8 +29,9 @@
var __v_12 = {};
function __f_30(x, sa) {
- return (x >>> sa) | (x << (__v_12 - sa));
-}
+ return x >>> sa | x << __v_12 - sa;
+};
+%PrepareFunctionForOptimization(__f_30);
__f_30(1.4, 1);
__f_30(1.4, 1);
%OptimizeFunctionOnNextCall(__f_30);
diff --git a/deps/v8/test/mjsunit/regress/regress-351319.js b/deps/v8/test/mjsunit/regress/regress-351319.js
index a2afbb6a98..9c61d7f924 100644
--- a/deps/v8/test/mjsunit/regress/regress-351319.js
+++ b/deps/v8/test/mjsunit/regress/regress-351319.js
@@ -30,7 +30,8 @@
function __f_0(a, base) {
a[base] = 1;
a[base] = -1749557862;
-}
+};
+%PrepareFunctionForOptimization(__f_0);
var __v_0 = new Array(1024);
var __v_1 = new Array(128);
__f_0(__v_0, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-351624.js b/deps/v8/test/mjsunit/regress/regress-351624.js
index fc3715b65a..9b67888a3b 100644
--- a/deps/v8/test/mjsunit/regress/regress-351624.js
+++ b/deps/v8/test/mjsunit/regress/regress-351624.js
@@ -7,14 +7,14 @@
var big = 1e10;
var backup = new Float64Array(1);
-function mult0(val){
+function mult0(val) {
var prod = val * big;
backup[0] = prod;
var rounded = Math.round(prod);
assertEquals(prod, backup[0]);
return rounded;
-}
-
+};
+%PrepareFunctionForOptimization(mult0);
var count = 5;
for (var i = 0; i < count; i++) {
if (i == count - 1) %OptimizeFunctionOnNextCall(mult0);
diff --git a/deps/v8/test/mjsunit/regress/regress-352982.js b/deps/v8/test/mjsunit/regress/regress-352982.js
index 5d3ce1c67d..174cf33b34 100644
--- a/deps/v8/test/mjsunit/regress/regress-352982.js
+++ b/deps/v8/test/mjsunit/regress/regress-352982.js
@@ -29,8 +29,10 @@
function __f_4(i1) {
return __v_3[i1] * __v_3[0];
-}
+};
+%PrepareFunctionForOptimization(__f_4);
function __f_3(i1) {
+ %PrepareFunctionForOptimization(__f_4);
__f_4(i1);
__f_4(i1 + 16);
__f_4(i1 + 32);
diff --git a/deps/v8/test/mjsunit/regress/regress-354433.js b/deps/v8/test/mjsunit/regress/regress-354433.js
index 80ea286230..de984d323a 100644
--- a/deps/v8/test/mjsunit/regress/regress-354433.js
+++ b/deps/v8/test/mjsunit/regress/regress-354433.js
@@ -47,6 +47,7 @@ function __f_5(fun,a,b) {
}
function __f_8(a,b) { return a%b };
+%PrepareFunctionForOptimization(__f_8);
__f_5(__f_8, 1 << 30, 1);
__f_5(__f_8, 1, 1 << 30);
diff --git a/deps/v8/test/mjsunit/regress/regress-355486.js b/deps/v8/test/mjsunit/regress/regress-355486.js
index 55362a1341..c7feb21342 100644
--- a/deps/v8/test/mjsunit/regress/regress-355486.js
+++ b/deps/v8/test/mjsunit/regress/regress-355486.js
@@ -4,9 +4,13 @@
// Flags: --allow-natives-syntax
-function f() { var v = arguments[0]; }
-function g() { f(); }
-
+function f() {
+ var v = arguments[0];
+}
+function g() {
+ f();
+};
+%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-355523.js b/deps/v8/test/mjsunit/regress/regress-355523.js
index d61fe844ed..854f53358d 100644
--- a/deps/v8/test/mjsunit/regress/regress-355523.js
+++ b/deps/v8/test/mjsunit/regress/regress-355523.js
@@ -28,9 +28,14 @@
// Flags: --allow-natives-syntax
// This test requires ASAN.
-function __f_4(a, b) { }
-function __f_8(n) { return __f_4(arguments[13], arguments[-10]); }
-function __f_6(a) { return __f_8(0, a); }
+function __f_4(a, b) {}
+function __f_8(n) {
+ return __f_4(arguments[13], arguments[-10]);
+};
+%PrepareFunctionForOptimization(__f_8);
+function __f_6(a) {
+ return __f_8(0, a);
+}
__f_8(0);
__f_8(0);
%OptimizeFunctionOnNextCall(__f_8);
diff --git a/deps/v8/test/mjsunit/regress/regress-3564.js b/deps/v8/test/mjsunit/regress/regress-3564.js
index a0b9eb2994..75f98d781c 100644
--- a/deps/v8/test/mjsunit/regress/regress-3564.js
+++ b/deps/v8/test/mjsunit/regress/regress-3564.js
@@ -5,7 +5,11 @@
// Flags: --allow-natives-syntax
function MyWrapper(v) {
- return { valueOf: function() { return v } };
+ return {
+ valueOf: function() {
+ return v;
+ }
+ };
}
function f() {
@@ -16,8 +20,8 @@ function f() {
assertFalse("a" > "x");
assertFalse("a" > new String("y"));
assertFalse("a" > new MyWrapper("z"));
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-357105.js b/deps/v8/test/mjsunit/regress/regress-357105.js
index d3eefd0f1b..77a718609f 100644
--- a/deps/v8/test/mjsunit/regress/regress-357105.js
+++ b/deps/v8/test/mjsunit/regress/regress-357105.js
@@ -4,9 +4,9 @@
// Flags: --allow-natives-syntax --expose-gc
-var global = { };
+var global = {};
-function do_nothing() { }
+function do_nothing() {}
function f(opt_gc) {
var x = new Array(3);
@@ -14,8 +14,8 @@ function f(opt_gc) {
opt_gc();
global[1] = 15.5;
return x;
-}
-
+};
+%PrepareFunctionForOptimization(f);
gc();
global = f(gc);
global = f(do_nothing);
diff --git a/deps/v8/test/mjsunit/regress/regress-358057.js b/deps/v8/test/mjsunit/regress/regress-358057.js
index c5fe73a032..2eea74cbef 100644
--- a/deps/v8/test/mjsunit/regress/regress-358057.js
+++ b/deps/v8/test/mjsunit/regress/regress-358057.js
@@ -12,8 +12,8 @@ function __f_12(__v_6) {
__v_1 = __v_0[__v_6 + 10];
return __v_1;
}
-}
-
+};
+%PrepareFunctionForOptimization(__f_12);
assertEquals(0xAA, __f_12(-1));
%OptimizeFunctionOnNextCall(__f_12);
assertEquals(0xAA, __f_12(-1));
diff --git a/deps/v8/test/mjsunit/regress/regress-358059.js b/deps/v8/test/mjsunit/regress/regress-358059.js
index 30738f9ae8..7bf44b9231 100644
--- a/deps/v8/test/mjsunit/regress/regress-358059.js
+++ b/deps/v8/test/mjsunit/regress/regress-358059.js
@@ -4,9 +4,14 @@
// Flags: --allow-natives-syntax
-function f(a, b) { return b + (a.x++); }
+function f(a, b) {
+ return b + a.x++;
+};
+%PrepareFunctionForOptimization(f);
var o = {};
-o.__defineGetter__('x', function() { return 1; });
+o.__defineGetter__('x', function() {
+ return 1;
+});
assertEquals(4, f(o, 3));
assertEquals(4, f(o, 3));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-359441.js b/deps/v8/test/mjsunit/regress/regress-359441.js
index d96468c335..f9d010525a 100644
--- a/deps/v8/test/mjsunit/regress/regress-359441.js
+++ b/deps/v8/test/mjsunit/regress/regress-359441.js
@@ -10,8 +10,8 @@ function g() {
function f() {
new g();
-}
-
+};
+%PrepareFunctionForOptimization(f);
function deopt(x) {
%DeoptimizeFunction(f);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-359491.js b/deps/v8/test/mjsunit/regress/regress-359491.js
index d72875a296..e205d61377 100644
--- a/deps/v8/test/mjsunit/regress/regress-359491.js
+++ b/deps/v8/test/mjsunit/regress/regress-359491.js
@@ -25,6 +25,8 @@
}
// Gather type feedback for g, but only on one branch for f.
+ ;
+ %PrepareFunctionForOptimization(g);
g(1);
g(1);
%OptimizeFunctionOnNextCall(g);
@@ -42,10 +44,10 @@
}
// Gather type feedback for both branches.
- f({ a : 1}, {b : 1}, 1);
- f({ c : 1}, {d : 1}, 1);
- f({ a : 1}, {c : 1}, 0);
- f({ b : 1}, {d : 1}, 0);
+ f({a: 1}, {b: 1}, 1);
+ f({c: 1}, {d: 1}, 1);
+ f({a: 1}, {c: 1}, 0);
+ f({b: 1}, {d: 1}, 0);
function g(mode) {
var x = 1e10 | 0;
@@ -53,6 +55,8 @@
}
// Gather type feedback for g, but only on one branch for f.
+ ;
+ %PrepareFunctionForOptimization(g);
g(1);
g(1);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-361608.js b/deps/v8/test/mjsunit/regress/regress-361608.js
index b3cc90cfd4..320d2cec65 100644
--- a/deps/v8/test/mjsunit/regress/regress-361608.js
+++ b/deps/v8/test/mjsunit/regress/regress-361608.js
@@ -14,6 +14,7 @@ function foo() {
f(function() { x = i; });
}
}
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-362128.js b/deps/v8/test/mjsunit/regress/regress-362128.js
index 18ac5db907..94ac7b78e0 100644
--- a/deps/v8/test/mjsunit/regress/regress-362128.js
+++ b/deps/v8/test/mjsunit/regress/regress-362128.js
@@ -29,6 +29,7 @@ var foo = (function () {
return method.apply(receiver, arguments);
}
})();
+%PrepareFunctionForOptimization(foo);
foo("a", "b", "c");
foo("a", "b", "c");
diff --git a/deps/v8/test/mjsunit/regress/regress-363956.js b/deps/v8/test/mjsunit/regress/regress-363956.js
index 76d6728c02..680f65333b 100644
--- a/deps/v8/test/mjsunit/regress/regress-363956.js
+++ b/deps/v8/test/mjsunit/regress/regress-363956.js
@@ -4,8 +4,13 @@
// Flags: --allow-natives-syntax
-function Fuu() { this.x = this.x.x; }
-Fuu.prototype.x = {x: 1}
+function Fuu() {
+ this.x = this.x.x;
+};
+%PrepareFunctionForOptimization(Fuu);
+Fuu.prototype.x = {
+ x: 1
+};
new Fuu();
new Fuu();
%OptimizeFunctionOnNextCall(Fuu);
diff --git a/deps/v8/test/mjsunit/regress/regress-3650-1.js b/deps/v8/test/mjsunit/regress/regress-3650-1.js
index f5aad4f259..6799bc0077 100644
--- a/deps/v8/test/mjsunit/regress/regress-3650-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-3650-1.js
@@ -14,6 +14,7 @@ function f(t) {
}
return result.join('');
}
+%PrepareFunctionForOptimization(f);
var t = {a: "1", b: "2"};
assertEquals("aa11ab12ba21bb22", f(t));
diff --git a/deps/v8/test/mjsunit/regress/regress-3650-2.js b/deps/v8/test/mjsunit/regress/regress-3650-2.js
index aaa6d55b68..6f7c51e55b 100644
--- a/deps/v8/test/mjsunit/regress/regress-3650-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-3650-2.js
@@ -15,6 +15,7 @@ function foo(c) {
for (var p in c) { s++; }
return s;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(2, foo(a));
assertEquals(1, foo(b));
diff --git a/deps/v8/test/mjsunit/regress/regress-368243.js b/deps/v8/test/mjsunit/regress/regress-368243.js
index 6647d12286..73853d347a 100644
--- a/deps/v8/test/mjsunit/regress/regress-368243.js
+++ b/deps/v8/test/mjsunit/regress/regress-368243.js
@@ -12,6 +12,7 @@ function foo(a, c){
}
}
};
+%PrepareFunctionForOptimization(foo);
c = {
"one" : { x : 1},
diff --git a/deps/v8/test/mjsunit/regress/regress-370827.js b/deps/v8/test/mjsunit/regress/regress-370827.js
index e6d5185e70..f07e6d6859 100644
--- a/deps/v8/test/mjsunit/regress/regress-370827.js
+++ b/deps/v8/test/mjsunit/regress/regress-370827.js
@@ -6,15 +6,17 @@
function g(dummy, x) {
var start = "";
- if (x) { start = x + " - "; }
+ if (x) {
+ start = x + ' - ';
+ }
start = start + "array length";
};
function f() {
gc();
g([0.1]);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-379770.js b/deps/v8/test/mjsunit/regress/regress-379770.js
index 6234899c05..031d3f489d 100644
--- a/deps/v8/test/mjsunit/regress/regress-379770.js
+++ b/deps/v8/test/mjsunit/regress/regress-379770.js
@@ -6,7 +6,10 @@
function foo(obj) {
var counter = 1;
- for (var i = 0; i < obj.length; i++) %OptimizeOsr();
+ for (var i = 0; i < obj.length; i++) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(foo);
+ }
counter += obj;
return counter;
}
diff --git a/deps/v8/test/mjsunit/regress/regress-380092.js b/deps/v8/test/mjsunit/regress/regress-380092.js
index fe6b0b7619..ee149da8b0 100644
--- a/deps/v8/test/mjsunit/regress/regress-380092.js
+++ b/deps/v8/test/mjsunit/regress/regress-380092.js
@@ -6,8 +6,8 @@
function many_hoist(o, index) {
return o[index + 33554427];
-}
-
+};
+%PrepareFunctionForOptimization(many_hoist);
var obj = {};
many_hoist(obj, 0);
%OptimizeFunctionOnNextCall(many_hoist);
@@ -15,8 +15,8 @@ many_hoist(obj, 5);
function constant_too_large(o, index) {
return o[index + 1033554433];
-}
-
+};
+%PrepareFunctionForOptimization(constant_too_large);
constant_too_large(obj, 0);
%OptimizeFunctionOnNextCall(constant_too_large);
constant_too_large(obj, 5);
diff --git a/deps/v8/test/mjsunit/regress/regress-381313.js b/deps/v8/test/mjsunit/regress/regress-381313.js
index d2b9d7c11d..46e1f939c6 100644
--- a/deps/v8/test/mjsunit/regress/regress-381313.js
+++ b/deps/v8/test/mjsunit/regress/regress-381313.js
@@ -32,10 +32,10 @@ function f(x, deopt) {
deopt + 0;
- return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9 +
- a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19;
-}
-
+ return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9 + a10 + a11 + a12 +
+ a13 + a14 + a15 + a16 + a17 + a18 + a19;
+};
+%PrepareFunctionForOptimization(f);
f(0.5, 0);
f(0.5, 0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-385054.js b/deps/v8/test/mjsunit/regress/regress-385054.js
index 115bca0d21..0961242a47 100644
--- a/deps/v8/test/mjsunit/regress/regress-385054.js
+++ b/deps/v8/test/mjsunit/regress/regress-385054.js
@@ -8,8 +8,8 @@ function f(x) {
var a = [1, 2];
a[x];
return a[0 - x];
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(0);
f(0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-385565.js b/deps/v8/test/mjsunit/regress/regress-385565.js
index 4281fe0f8e..1403119681 100644
--- a/deps/v8/test/mjsunit/regress/regress-385565.js
+++ b/deps/v8/test/mjsunit/regress/regress-385565.js
@@ -61,6 +61,7 @@ assertTrue(r2 === r3);
%OptimizeFunctionOnNextCall(callsFReceiver);
r1 = callsFReceiver(o1);
+%PrepareFunctionForOptimization(callsFReceiver);
callsFReceiver(o1);
%OptimizeFunctionOnNextCall(callsFReceiver);
r2 = callsFReceiver(o1);
diff --git a/deps/v8/test/mjsunit/regress/regress-386034.js b/deps/v8/test/mjsunit/regress/regress-386034.js
index d770ce91bd..bdf1b1bff0 100644
--- a/deps/v8/test/mjsunit/regress/regress-386034.js
+++ b/deps/v8/test/mjsunit/regress/regress-386034.js
@@ -9,11 +9,13 @@ function f(x) {
for (i = 0; i < 1; i++) {
v.apply(this, arguments);
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
function g() {}
f(g);
f(g);
%OptimizeFunctionOnNextCall(f);
-assertThrows(function() { f('----'); }, TypeError);
+assertThrows(function() {
+ f('----');
+}, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-3865.js b/deps/v8/test/mjsunit/regress/regress-3865.js
index 0d1d02f00d..87d895f5bc 100644
--- a/deps/v8/test/mjsunit/regress/regress-3865.js
+++ b/deps/v8/test/mjsunit/regress/regress-3865.js
@@ -7,7 +7,8 @@
function bar() {
var radix = 10;
return 21 / radix | 0;
-}
+};
+%PrepareFunctionForOptimization(bar);
assertEquals(2, bar());
assertEquals(2, bar());
%OptimizeFunctionOnNextCall(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-3884.js b/deps/v8/test/mjsunit/regress/regress-3884.js
index ecd000f6c7..3321a453e4 100644
--- a/deps/v8/test/mjsunit/regress/regress-3884.js
+++ b/deps/v8/test/mjsunit/regress/regress-3884.js
@@ -19,8 +19,8 @@ function f(x) {
if (x === 1.9 || x === 1.4) {
gc();
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f('some_other_string_1');
diff --git a/deps/v8/test/mjsunit/regress/regress-3969.js b/deps/v8/test/mjsunit/regress/regress-3969.js
index 4659e1caf8..31aeae31ee 100644
--- a/deps/v8/test/mjsunit/regress/regress-3969.js
+++ b/deps/v8/test/mjsunit/regress/regress-3969.js
@@ -18,8 +18,9 @@ var outer = new Outer(inner);
Outer.prototype.boom = function() {
return this.inner.property;
-}
+};
+%PrepareFunctionForOptimization(Outer.prototype.boom);
assertEquals("OK", outer.boom());
assertEquals("OK", outer.boom());
%OptimizeFunctionOnNextCall(Outer.prototype.boom);
diff --git a/deps/v8/test/mjsunit/regress/regress-397.js b/deps/v8/test/mjsunit/regress/regress-397.js
index 0e4143d032..fd347860f9 100644
--- a/deps/v8/test/mjsunit/regress/regress-397.js
+++ b/deps/v8/test/mjsunit/regress/regress-397.js
@@ -35,8 +35,8 @@ function test() {
assertEquals("Infinity", String(Math.pow(-Infinity, 0.5)));
assertEquals(0, Math.pow(-Infinity, -0.5));
-}
-
+};
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-3985.js b/deps/v8/test/mjsunit/regress/regress-3985.js
index 6dbc4bdadd..0c1cecafb8 100644
--- a/deps/v8/test/mjsunit/regress/regress-3985.js
+++ b/deps/v8/test/mjsunit/regress/regress-3985.js
@@ -8,7 +8,8 @@ var shouldThrow = false;
function h() {
try { // Prevent inlining in Crankshaft.
- } catch(e) { }
+ } catch (e) {
+ }
var res = g.arguments[0].x;
if (shouldThrow) {
throw res;
@@ -16,20 +17,22 @@ function h() {
return res;
}
-function g(o) { h(); }
+function g(o) {
+ h();
+}
function f1() {
- var o = { x : 1 };
+ var o = {x: 1};
g(o);
return o.x;
-}
-
+};
+%PrepareFunctionForOptimization(f1);
function f2() {
- var o = { x : 2 };
+ var o = {x: 2};
g(o);
return o.x;
-}
-
+};
+%PrepareFunctionForOptimization(f2);
f1();
f2();
f1();
@@ -37,9 +40,13 @@ f2();
%OptimizeFunctionOnNextCall(f1);
%OptimizeFunctionOnNextCall(f2);
shouldThrow = true;
-try { f1(); } catch(e) {
+try {
+ f1();
+} catch (e) {
assertEquals(e, 1);
}
-try { f2(); } catch(e) {
+try {
+ f2();
+} catch (e) {
assertEquals(e, 2);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-4023.js b/deps/v8/test/mjsunit/regress/regress-4023.js
index 902741f6f5..16bbeccbe8 100644
--- a/deps/v8/test/mjsunit/regress/regress-4023.js
+++ b/deps/v8/test/mjsunit/regress/regress-4023.js
@@ -14,14 +14,16 @@ function Outer() {
}
function KeepMapAlive(o) {
return o.o;
-}
+};
+%PrepareFunctionForOptimization(KeepMapAlive);
function SetInner(o, i) {
o.inner_field = i;
-}
+};
+%PrepareFunctionForOptimization(SetInner);
function Crash(o) {
return o.inner_field.property;
-}
-
+};
+%PrepareFunctionForOptimization(Crash);
var inner = new Inner();
var outer = new Outer();
diff --git a/deps/v8/test/mjsunit/regress/regress-410912.js b/deps/v8/test/mjsunit/regress/regress-410912.js
index 9a2e46d7bf..761b009098 100644
--- a/deps/v8/test/mjsunit/regress/regress-410912.js
+++ b/deps/v8/test/mjsunit/regress/regress-410912.js
@@ -38,7 +38,9 @@ function __f_0(__v_1,__v_0,i) {
__v_1.a = __v_0[i];
gc();
}
+%PrepareFunctionForOptimization(__f_0);
try {
+ %PrepareFunctionForOptimization(__f_0);
__f_0(__v_1,__v_0,0);
__f_0(__v_1,__v_0,0);
%OptimizeFunctionOnNextCall(__f_0);
@@ -84,6 +86,7 @@ function __f_7(o) {
return o.__f_4();
}
try {
+ %PrepareFunctionForOptimization(__f_7);
for (var __v_7 = 0; __v_7 < 5; __v_7++) __f_7(__v_5);
%OptimizeFunctionOnNextCall(__f_7);
__f_7(__v_5);
@@ -111,6 +114,7 @@ gc();
var __v_8;
} catch(e) { print("Caught: " + e); }
function __f_9(n) { return __v_9.charAt(n); }
+%PrepareFunctionForOptimization(__f_9);
try {
for (var __v_7 = 0; __v_7 < 5; __v_7++) {
__v_8 = __f_9(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-4121.js b/deps/v8/test/mjsunit/regress/regress-4121.js
index fcf625a061..a2f5d74953 100644
--- a/deps/v8/test/mjsunit/regress/regress-4121.js
+++ b/deps/v8/test/mjsunit/regress/regress-4121.js
@@ -9,9 +9,9 @@ function literals_sharing_test(warmup, optimize) {
// Ensure small array literals start in specific element kind mode.
assertTrue(%HasSmiElements([]));
assertTrue(%HasSmiElements([1]));
- assertTrue(%HasSmiElements([1,2]));
+ assertTrue(%HasSmiElements([1, 2]));
assertTrue(%HasDoubleElements([1.1]));
- assertTrue(%HasDoubleElements([1.1,2]));
+ assertTrue(%HasDoubleElements([1.1, 2]));
var a = [1, 2, 3];
if (warmup) {
@@ -23,6 +23,8 @@ function literals_sharing_test(warmup, optimize) {
// propagated to the next closure.
assertTrue(%HasDoubleElements(a));
};
+ %PrepareFunctionForOptimization(closure);
+ ;
%EnsureFeedbackVectorForFunction(closure);
if (optimize) %OptimizeFunctionOnNextCall(closure);
closure();
@@ -32,7 +34,7 @@ function literals_sharing_test(warmup, optimize) {
function test() {
var warmup = true;
for (var i = 0; i < 3; i++) {
- print("iter: " + i + ", warmup: "+ warmup);
+ print('iter: ' + i + ', warmup: ' + warmup);
literals_sharing_test(warmup, false);
warmup = false;
}
diff --git a/deps/v8/test/mjsunit/regress/regress-412162.js b/deps/v8/test/mjsunit/regress/regress-412162.js
index 6a7ad0c57f..ad7f954679 100644
--- a/deps/v8/test/mjsunit/regress/regress-412162.js
+++ b/deps/v8/test/mjsunit/regress/regress-412162.js
@@ -6,8 +6,8 @@
function test() {
Math.abs(-NaN).toString();
-}
-
+};
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-416730.js b/deps/v8/test/mjsunit/regress/regress-416730.js
index 8d7f207fd9..70fe623cf7 100644
--- a/deps/v8/test/mjsunit/regress/regress-416730.js
+++ b/deps/v8/test/mjsunit/regress/regress-416730.js
@@ -7,13 +7,11 @@
var d = {x: undefined, y: undefined};
function Crash(left, right) {
- var c = {
- x: right.x - left.x,
- y: right.y - left.y
- };
- return c.x * c.y;
-}
+ var c = {x: right.x - left.x, y: right.y - left.y};
+ return c.x * c.y;
+};
+%PrepareFunctionForOptimization(Crash);
var a = {x: 0.5, y: 0};
var b = {x: 1, y: 0};
diff --git a/deps/v8/test/mjsunit/regress/regress-4173.js b/deps/v8/test/mjsunit/regress/regress-4173.js
index 9aa49be701..8ac75affd7 100644
--- a/deps/v8/test/mjsunit/regress/regress-4173.js
+++ b/deps/v8/test/mjsunit/regress/regress-4173.js
@@ -6,11 +6,12 @@
function Migrator(o) {
return o.foo;
-}
+};
+%PrepareFunctionForOptimization(Migrator);
function Loader(o) {
return o[0];
-}
-
+};
+%PrepareFunctionForOptimization(Loader);
var first_smi_array = [1];
var second_smi_array = [2];
var first_object_array = ["first"];
diff --git a/deps/v8/test/mjsunit/regress/regress-4266.js b/deps/v8/test/mjsunit/regress/regress-4266.js
index f886250a87..4fdc71cd5d 100644
--- a/deps/v8/test/mjsunit/regress/regress-4266.js
+++ b/deps/v8/test/mjsunit/regress/regress-4266.js
@@ -10,8 +10,8 @@ function test() {
} catch (e) {
return e.message;
}
-}
-
+};
+%PrepareFunctionForOptimization(test);
assertEquals("[].foo is not a function", test());
%OptimizeFunctionOnNextCall(test);
assertEquals("[].foo is not a function", test());
diff --git a/deps/v8/test/mjsunit/regress/regress-4267.js b/deps/v8/test/mjsunit/regress/regress-4267.js
index f8cf746723..f1519fa03d 100644
--- a/deps/v8/test/mjsunit/regress/regress-4267.js
+++ b/deps/v8/test/mjsunit/regress/regress-4267.js
@@ -10,6 +10,7 @@ var a = [];
Object.defineProperty(a, "0", {configurable: false, value: 10});
assertEquals(1, a.length);
var setter = ()=>{ a.length = 0; };
+%PrepareFunctionForOptimization(setter);
assertThrows(setter);
assertThrows(setter);
%OptimizeFunctionOnNextCall(setter);
diff --git a/deps/v8/test/mjsunit/regress/regress-4325.js b/deps/v8/test/mjsunit/regress/regress-4325.js
index 27d690d579..64fe43b5fb 100644
--- a/deps/v8/test/mjsunit/regress/regress-4325.js
+++ b/deps/v8/test/mjsunit/regress/regress-4325.js
@@ -5,12 +5,12 @@
// Flags: --allow-natives-syntax --expose-gc
function Inner() {
- this.p1 = 0;
- this.p2 = 3;
+ this.p1 = 0;
+ this.p2 = 3;
}
function Outer() {
- this.p3 = 0;
+ this.p3 = 0;
}
var i1 = new Inner();
@@ -38,8 +38,9 @@ print(o1.p3);
// but in fact o1.inner.map is still Inner.map2!
function loader(o) {
- return o.inner.p2;
-}
+ return o.inner.p2;
+};
+%PrepareFunctionForOptimization(loader);
loader(o2);
loader(o2);
%OptimizeFunctionOnNextCall(loader);
diff --git a/deps/v8/test/mjsunit/regress/regress-435073.js b/deps/v8/test/mjsunit/regress/regress-435073.js
index dbaa612afa..96bffde838 100644
--- a/deps/v8/test/mjsunit/regress/regress-435073.js
+++ b/deps/v8/test/mjsunit/regress/regress-435073.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax --verify-heap
-function test(x) { [x,,]; }
-
+function test(x) {
+ [x, , ];
+};
+%PrepareFunctionForOptimization(test);
test(0);
test(0);
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-435477.js b/deps/v8/test/mjsunit/regress/regress-435477.js
index 0a150002c9..d6eb3d0abc 100644
--- a/deps/v8/test/mjsunit/regress/regress-435477.js
+++ b/deps/v8/test/mjsunit/regress/regress-435477.js
@@ -7,8 +7,8 @@ var a = new Array(128);
function f(a, base) {
a[base] = 2;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(a, undefined);
f("r12", undefined);
f(a, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-436893.js b/deps/v8/test/mjsunit/regress/regress-436893.js
index 38e7b5fb86..c3eb4a2fde 100644
--- a/deps/v8/test/mjsunit/regress/regress-436893.js
+++ b/deps/v8/test/mjsunit/regress/regress-436893.js
@@ -33,5 +33,6 @@ function foo() {
}
// Test passing null or undefined as receiver.
function g() { return foo.apply(null, x()++); }
+%PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
assertThrows(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-4374.js b/deps/v8/test/mjsunit/regress/regress-4374.js
index afae71c595..1d6cc779fd 100644
--- a/deps/v8/test/mjsunit/regress/regress-4374.js
+++ b/deps/v8/test/mjsunit/regress/regress-4374.js
@@ -9,6 +9,7 @@ var f = (function() {
var max = Math.max;
return function f() { return max(0, -1); };
})();
+%PrepareFunctionForOptimization(f);
assertEquals(0, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-437765.js b/deps/v8/test/mjsunit/regress/regress-437765.js
index 698c39d378..63cee8158c 100644
--- a/deps/v8/test/mjsunit/regress/regress-437765.js
+++ b/deps/v8/test/mjsunit/regress/regress-437765.js
@@ -14,8 +14,8 @@ function bar(x, y) {
function baz() {
bar(64, 2);
-}
-
+};
+%PrepareFunctionForOptimization(baz);
baz();
baz();
%OptimizeFunctionOnNextCall(baz);
diff --git a/deps/v8/test/mjsunit/regress/regress-4388.js b/deps/v8/test/mjsunit/regress/regress-4388.js
index 908bcccb4e..a0853f205b 100644
--- a/deps/v8/test/mjsunit/regress/regress-4388.js
+++ b/deps/v8/test/mjsunit/regress/regress-4388.js
@@ -14,6 +14,7 @@ function test_hole_check_for_let(a) {
}
}
}
+%PrepareFunctionForOptimization(test_hole_check_for_let);
assertDoesNotThrow("test_hole_check_for_let(0)");
assertThrows("test_hole_check_for_let(1)", ReferenceError);
%OptimizeFunctionOnNextCall(test_hole_check_for_let)
@@ -29,6 +30,7 @@ function test_hole_check_for_const(a) {
}
}
}
+%PrepareFunctionForOptimization(test_hole_check_for_const);
assertThrows("test_hole_check_for_const(0)", TypeError);
assertThrows("test_hole_check_for_const(1)", ReferenceError);
%OptimizeFunctionOnNextCall(test_hole_check_for_const)
diff --git a/deps/v8/test/mjsunit/regress/regress-446389.js b/deps/v8/test/mjsunit/regress/regress-446389.js
index d6006387e4..892320279f 100644
--- a/deps/v8/test/mjsunit/regress/regress-446389.js
+++ b/deps/v8/test/mjsunit/regress/regress-446389.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function runNearStackLimit(f) { function t() { try { t(); } catch(e) { f(); } }; try { t(); } catch(e) {} }
+%PrepareFunctionForOptimization(__f_3);
%OptimizeFunctionOnNextCall(__f_3);
function __f_3() {
var __v_5 = a[0];
diff --git a/deps/v8/test/mjsunit/regress/regress-447526.js b/deps/v8/test/mjsunit/regress/regress-447526.js
index 9f9396f2ee..f4ce1dd9c8 100644
--- a/deps/v8/test/mjsunit/regress/regress-447526.js
+++ b/deps/v8/test/mjsunit/regress/regress-447526.js
@@ -19,6 +19,7 @@ function foo() {
bar();
}
}
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-449291.js b/deps/v8/test/mjsunit/regress/regress-449291.js
index fb56027b67..721a1f3dc9 100644
--- a/deps/v8/test/mjsunit/regress/regress-449291.js
+++ b/deps/v8/test/mjsunit/regress/regress-449291.js
@@ -4,15 +4,19 @@
// Flags: --allow-natives-syntax
-a = {y:1.5};
+a = {
+ y: 1.5
+};
a.y = 1093445778;
b = a.y;
-c = {y:{}};
+c = {
+ y: {}
+};
function f() {
return {y: b};
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-4493-1.js b/deps/v8/test/mjsunit/regress/regress-4493-1.js
index a24c8b07dc..5c16f99f59 100644
--- a/deps/v8/test/mjsunit/regress/regress-4493-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-4493-1.js
@@ -4,13 +4,20 @@
// Flags: --allow-natives-syntax
-function baz(x, f) { return x.length; };
+function baz(x, f) {
+ return x.length;
+};
function bar(x, y) {
if (y) {}
- baz(x, function() { return x; });
+ baz(x, function() {
+ return x;
+ });
};
-function foo(x) { bar(x, ''); }
+function foo(x) {
+ bar(x, '');
+};
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(['a']);
diff --git a/deps/v8/test/mjsunit/regress/regress-4507.js b/deps/v8/test/mjsunit/regress/regress-4507.js
index a3fe5107be..a36031b1ac 100644
--- a/deps/v8/test/mjsunit/regress/regress-4507.js
+++ b/deps/v8/test/mjsunit/regress/regress-4507.js
@@ -5,14 +5,15 @@
// Flags: --allow-natives-syntax
function broken(value) {
- return Math.floor(value/65536);
+ return Math.floor(value / 65536);
}
function toUnsigned(i) {
return i >>> 0;
}
function outer(i) {
return broken(toUnsigned(i));
-}
+};
+%PrepareFunctionForOptimization(outer);
for (var i = 0; i < 5; i++) outer(0);
broken(0x80000000); // Spice things up with a sprinkling of type feedback.
%OptimizeFunctionOnNextCall(outer);
diff --git a/deps/v8/test/mjsunit/regress/regress-451322.js b/deps/v8/test/mjsunit/regress/regress-451322.js
index b7794f52f0..21871c07cc 100644
--- a/deps/v8/test/mjsunit/regress/regress-451322.js
+++ b/deps/v8/test/mjsunit/regress/regress-451322.js
@@ -9,8 +9,8 @@ var foo = 0;
function bar() {
var baz = 0 - {};
if (foo > 24) return baz * 0;
-}
-
+};
+%PrepareFunctionForOptimization(bar);
bar();
bar();
%OptimizeFunctionOnNextCall(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-4515.js b/deps/v8/test/mjsunit/regress/regress-4515.js
index 81610f08a5..d98377bd98 100644
--- a/deps/v8/test/mjsunit/regress/regress-4515.js
+++ b/deps/v8/test/mjsunit/regress/regress-4515.js
@@ -6,8 +6,8 @@
function f(array) {
return array.length >>> 0;
-}
-
+};
+%PrepareFunctionForOptimization(f);
var a = new Array();
a[4000000000] = "A";
diff --git a/deps/v8/test/mjsunit/regress/regress-451958.js b/deps/v8/test/mjsunit/regress/regress-451958.js
index 33695f2b3e..41cd23bae2 100644
--- a/deps/v8/test/mjsunit/regress/regress-451958.js
+++ b/deps/v8/test/mjsunit/regress/regress-451958.js
@@ -15,6 +15,7 @@ function foo(a) {
for (i = 0; i < 1; i++) ;
}
}
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
@@ -27,5 +28,6 @@ var __v_45;
for (__v_45 = 128; __v_45 < 256; __v_45++) {
}
}
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-4521.js b/deps/v8/test/mjsunit/regress/regress-4521.js
index f9bdafcea8..26462967e7 100644
--- a/deps/v8/test/mjsunit/regress/regress-4521.js
+++ b/deps/v8/test/mjsunit/regress/regress-4521.js
@@ -13,6 +13,7 @@ class B {
class C extends B {
bar() { return super[%DeoptimizeFunction(C.prototype.bar), "foo"]() }
}
+%PrepareFunctionForOptimization(C.prototype.bar);
assertEquals(23, new C().bar());
assertEquals(23, new C().bar());
diff --git a/deps/v8/test/mjsunit/regress/regress-4525.js b/deps/v8/test/mjsunit/regress/regress-4525.js
index b962dc07ca..1439108ea4 100644
--- a/deps/v8/test/mjsunit/regress/regress-4525.js
+++ b/deps/v8/test/mjsunit/regress/regress-4525.js
@@ -25,6 +25,7 @@ function check(x, value, type) {
}
var o = construct(receiver);
+%PrepareFunctionForOptimization(o.bar);
check(o.bar.call(123), Object(123), Number);
check(o.bar.call("a"), Object("a"), String);
check(o.bar.call(undefined), this, Object);
diff --git a/deps/v8/test/mjsunit/regress/regress-457935.js b/deps/v8/test/mjsunit/regress/regress-457935.js
index d34db05de4..54f9134013 100644
--- a/deps/v8/test/mjsunit/regress/regress-457935.js
+++ b/deps/v8/test/mjsunit/regress/regress-457935.js
@@ -4,7 +4,7 @@
// Flags: --allow-natives-syntax
-function dummy(x) { };
+function dummy(x) {};
function g() {
return g.arguments;
@@ -17,8 +17,8 @@ function f(limit) {
o.y = +o.y;
g();
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-460917.js b/deps/v8/test/mjsunit/regress/regress-460917.js
index ed64c9cb78..88e8cd96d1 100644
--- a/deps/v8/test/mjsunit/regress/regress-460917.js
+++ b/deps/v8/test/mjsunit/regress/regress-460917.js
@@ -15,8 +15,10 @@ function boom(a1, a2) {
}
// Prepare type feedback for the "t = a1[0]" load: fast elements.
+;
+%PrepareFunctionForOptimization(boom);
var fast_elem = new Array(1);
-fast_elem[0] = "tagged";
+fast_elem[0] = 'tagged';
boom(fast_elem, [1]);
// Prepare type feedback for the "a2[0] = 0.3" store: double elements.
diff --git a/deps/v8/test/mjsunit/regress/regress-466993.js b/deps/v8/test/mjsunit/regress/regress-466993.js
index 6bf02bbbae..4bc51826e6 100644
--- a/deps/v8/test/mjsunit/regress/regress-466993.js
+++ b/deps/v8/test/mjsunit/regress/regress-466993.js
@@ -5,10 +5,12 @@
// Flags: --allow-natives-syntax
var test = function() {
- var a = {"1": false, "2": false, "3": false, "4": false};
+ var a = {'1': false, '2': false, '3': false, '4': false};
assertEquals(false, a[1]);
a[1] = true;
};
+;
+%PrepareFunctionForOptimization(test);
test();
test();
test();
diff --git a/deps/v8/test/mjsunit/regress/regress-467481.js b/deps/v8/test/mjsunit/regress/regress-467481.js
index dcb12d89b0..185a366e18 100644
--- a/deps/v8/test/mjsunit/regress/regress-467481.js
+++ b/deps/v8/test/mjsunit/regress/regress-467481.js
@@ -8,7 +8,8 @@ function f(a1, a2) {
var v7 = a2[0];
var v8 = a1[0];
a2[0] = 0.3;
-}
+};
+%PrepareFunctionForOptimization(f);
v6 = new Array(1);
v6[0] = "tagged";
f(v6, [1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-4715.js b/deps/v8/test/mjsunit/regress/regress-4715.js
index 33041208e7..aeacf8214a 100644
--- a/deps/v8/test/mjsunit/regress/regress-4715.js
+++ b/deps/v8/test/mjsunit/regress/regress-4715.js
@@ -36,6 +36,7 @@ function foo(object) {
}
return key;
}
+%PrepareFunctionForOptimization(foo);
// Collect type feedback for slow-mode for-in.
foo(training);
diff --git a/deps/v8/test/mjsunit/regress/regress-4788-1.js b/deps/v8/test/mjsunit/regress/regress-4788-1.js
index 347ab5e592..9258f7ba77 100644
--- a/deps/v8/test/mjsunit/regress/regress-4788-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-4788-1.js
@@ -12,6 +12,8 @@ var f = (function() {
return foo;
})();
+%PrepareFunctionForOptimization(f);
+
function deopt(f) {
return {
toString : function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-4788-2.js b/deps/v8/test/mjsunit/regress/regress-4788-2.js
index 5441705d54..6339b0432f 100644
--- a/deps/v8/test/mjsunit/regress/regress-4788-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-4788-2.js
@@ -12,9 +12,11 @@ var f = (function() {
return foo;
})();
+%PrepareFunctionForOptimization(f);
+
function deopt(f) {
return {
- toString : function() {
+ toString: function() {
%DeoptimizeFunction(f);
return "2";
}
diff --git a/deps/v8/test/mjsunit/regress/regress-4800.js b/deps/v8/test/mjsunit/regress/regress-4800.js
index af7cbc0b8f..5885cf67bb 100644
--- a/deps/v8/test/mjsunit/regress/regress-4800.js
+++ b/deps/v8/test/mjsunit/regress/regress-4800.js
@@ -70,6 +70,7 @@ function f(x, len) {
}
}
+%PrepareFunctionForOptimization(f);
f(0, 10);
f(0, 10);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-487981.js b/deps/v8/test/mjsunit/regress/regress-487981.js
index 829c25c59d..f548944ba7 100644
--- a/deps/v8/test/mjsunit/regress/regress-487981.js
+++ b/deps/v8/test/mjsunit/regress/regress-487981.js
@@ -10,13 +10,15 @@ function __f_2(o) {
return o.field.b.x;
}
+%PrepareFunctionForOptimization(__f_2);
+
try {
%OptimizeFunctionOnNextCall(__f_2);
__v_1 = __f_2();
-} catch(e) { }
+} catch (e) { }
function __f_3() { __f_3(/./.test()); };
try {
-__f_3();
-} catch(e) { }
+ __f_3();
+} catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-488398.js b/deps/v8/test/mjsunit/regress/regress-488398.js
index 77ea293a26..756c668b54 100644
--- a/deps/v8/test/mjsunit/regress/regress-488398.js
+++ b/deps/v8/test/mjsunit/regress/regress-488398.js
@@ -12,6 +12,7 @@ function __f_17(__v_16, base) {
__v_16[base + 1] = 1;
__v_16[base + 4] = base + 4;
}
+%PrepareFunctionForOptimization(__f_17);
__f_17(__v_16, true);
__f_17(__v_16, 14);
%OptimizeFunctionOnNextCall(__f_17);
diff --git a/deps/v8/test/mjsunit/regress/regress-4970.js b/deps/v8/test/mjsunit/regress/regress-4970.js
index da0033b34f..b34b53910d 100644
--- a/deps/v8/test/mjsunit/regress/regress-4970.js
+++ b/deps/v8/test/mjsunit/regress/regress-4970.js
@@ -10,6 +10,7 @@ function g() {
f();
}
+%PrepareFunctionForOptimization(g);
assertThrows(g, SyntaxError);
%OptimizeFunctionOnNextCall(g);
assertThrows(g, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4971.js b/deps/v8/test/mjsunit/regress/regress-4971.js
index 041f6c2a57..7866a5fb02 100644
--- a/deps/v8/test/mjsunit/regress/regress-4971.js
+++ b/deps/v8/test/mjsunit/regress/regress-4971.js
@@ -12,8 +12,9 @@
Object.defineProperty(C.prototype, "boom", { get: function() {
if (should_deoptimize_caller) %DeoptimizeFunction(D.prototype.f);
return this.m
- }})
+ }});
+ %PrepareFunctionForOptimization(D.prototype.f);
assertEquals(23, new D().f());
assertEquals(23, new D().f());
%OptimizeFunctionOnNextCall(D.prototype.f);
@@ -30,8 +31,9 @@
Object.defineProperty(C.prototype, "boom", { get: function() {
if (should_deoptimize_caller) %DeoptimizeFunction(D.prototype.f);
return this.m
- }})
+ }});
+ %PrepareFunctionForOptimization(D.prototype.f);
assertEquals(23, new D().f("boom"));
assertEquals(23, new D().f("boom"));
%OptimizeFunctionOnNextCall(D.prototype.f);
diff --git a/deps/v8/test/mjsunit/regress/regress-5006.js b/deps/v8/test/mjsunit/regress/regress-5006.js
index 29f145de0a..48ffb5fbea 100644
--- a/deps/v8/test/mjsunit/regress/regress-5006.js
+++ b/deps/v8/test/mjsunit/regress/regress-5006.js
@@ -4,7 +4,10 @@
// Flags: --allow-natives-syntax
-function foo(x) { return Math.imul(x|0, 2); }
+function foo(x) {
+ return Math.imul(x | 0, 2);
+};
+%PrepareFunctionForOptimization(foo);
print(foo(1));
print(foo(1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-500831.js b/deps/v8/test/mjsunit/regress/regress-500831.js
index 3895c4de04..d86fbf4544 100644
--- a/deps/v8/test/mjsunit/regress/regress-500831.js
+++ b/deps/v8/test/mjsunit/regress/regress-500831.js
@@ -11,9 +11,9 @@ function deepEquals(a, b) {
return true;
}
if (typeof a != typeof b) return false;
- if (typeof a == "number");
- if (typeof a !== "object" && typeof a !== "function")
- return false;
+ if (typeof a == 'number')
+ ;
+ if (typeof a !== "object" && typeof a !== "function") return false;
var objectClass = classOf();
if (b) return false;
if (objectClass === "RegExp") {;
@@ -29,8 +29,8 @@ function deepEquals(a, b) {
}
return true;
}
- if (objectClass == "String" || objectClass == "Number" ||
- objectClass == "Boolean" || objectClass == "Date") {
+ if (objectClass == 'String' || objectClass == 'Number' ||
+ objectClass == 'Boolean' || objectClass == 'Date') {
if (a.valueOf()) return false;
};
}
@@ -88,7 +88,9 @@ function __f_6() {
equals();
__v_9[0] = -1000000000000;
equals(__v_6[0]);
-}
+};
+%PrepareFunctionForOptimization(__f_6);
+__f_6();
__f_6();
-__f_6(); %OptimizeFunctionOnNextCall(__f_6);
+%OptimizeFunctionOnNextCall(__f_6);
__f_6();
diff --git a/deps/v8/test/mjsunit/regress/regress-5033.js b/deps/v8/test/mjsunit/regress/regress-5033.js
index 728094fc6d..4cf292bda8 100644
--- a/deps/v8/test/mjsunit/regress/regress-5033.js
+++ b/deps/v8/test/mjsunit/regress/regress-5033.js
@@ -6,15 +6,13 @@
var test = function() {
var t = Date.now(); // Just any non-constant double value.
- var o = {
- ['p']: 1,
- t
- };
+ var o = {['p']: 1, t};
};
function caller() {
test();
-}
+};
+%PrepareFunctionForOptimization(caller);
caller();
caller();
%OptimizeFunctionOnNextCall(caller);
diff --git a/deps/v8/test/mjsunit/regress/regress-5085.js b/deps/v8/test/mjsunit/regress/regress-5085.js
index 167bfa0f44..e141ded37c 100644
--- a/deps/v8/test/mjsunit/regress/regress-5085.js
+++ b/deps/v8/test/mjsunit/regress/regress-5085.js
@@ -4,31 +4,31 @@
// Flags: --allow-natives-syntax
-g = async function () {
+g = async function() {
await 10;
-}
-assertEquals(undefined, g.prototype)
+};
+assertEquals(undefined, g.prototype);
g();
-assertEquals(undefined, g.prototype)
+assertEquals(undefined, g.prototype);
-gen = function* () {
+gen = function*() {
yield 10;
-}
-assertTrue(gen.prototype != undefined && gen.prototype != null)
-gen()
-assertTrue(gen.prototype != undefined && gen.prototype != null)
+};
+assertTrue(gen.prototype != undefined && gen.prototype != null);
+gen();
+assertTrue(gen.prototype != undefined && gen.prototype != null);
-async_gen = async function* () {
+async_gen = async function*() {
yield 10;
-}
-assertTrue(async_gen.prototype != undefined && async_gen.prototype != null)
-async_gen()
-assertTrue(async_gen.prototype != undefined && async_gen.prototype != null)
+};
+assertTrue(async_gen.prototype != undefined && async_gen.prototype != null);
+async_gen();
+assertTrue(async_gen.prototype != undefined && async_gen.prototype != null);
function foo(x) {
return x instanceof Proxy;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
function test_for_exception() {
caught_exception = false;
try {
@@ -39,7 +39,7 @@ function test_for_exception() {
'Function has non-object prototype \'undefined\' in instanceof check',
e.message);
} finally {
- assertTrue(caught_exception)
+ assertTrue(caught_exception);
}
}
@@ -54,7 +54,7 @@ assertTrue((() => {}) instanceof Proxy);
assertEquals(
new Proxy({}, {
get(o, s) {
- return s
+ return s;
}
}).test,
'test');
@@ -63,6 +63,7 @@ Proxy.__proto__ = {
prototype: {b: 2},
a: 1
};
+
assertEquals(Proxy.prototype, {b: 2});
(function testProxyCreationContext() {
diff --git a/deps/v8/test/mjsunit/regress/regress-5205.js b/deps/v8/test/mjsunit/regress/regress-5205.js
index 0d88f45053..547a1d9d2c 100644
--- a/deps/v8/test/mjsunit/regress/regress-5205.js
+++ b/deps/v8/test/mjsunit/regress/regress-5205.js
@@ -9,6 +9,7 @@
if (o == 'warmup') { return g() }
with (o) { return x }
}
+ %PrepareFunctionForOptimization(f);
function g() {
// Only a marker function serving as weak embedded object.
}
diff --git a/deps/v8/test/mjsunit/regress/regress-5252.js b/deps/v8/test/mjsunit/regress/regress-5252.js
index 5dd0310637..fdc910ccec 100644
--- a/deps/v8/test/mjsunit/regress/regress-5252.js
+++ b/deps/v8/test/mjsunit/regress/regress-5252.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --ignition-osr
+// Flags: --allow-natives-syntax
(function TestNonLoopyLoop() {
function f() {
@@ -13,6 +13,7 @@
}
%PrepareFunctionForOptimization(f);
assertEquals(23, f());
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f());
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5262.js b/deps/v8/test/mjsunit/regress/regress-5262.js
index d980ba8e91..05fc0e513b 100644
--- a/deps/v8/test/mjsunit/regress/regress-5262.js
+++ b/deps/v8/test/mjsunit/regress/regress-5262.js
@@ -12,6 +12,7 @@ function f(osr_and_recurse) {
for (var i = 0; i < 3; ++i) {
if (i == 1) %OptimizeOsr();
}
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f(false); // Trigger tier-up due to recursive call.
boom(this); // Causes a deopt due to below dependency.
diff --git a/deps/v8/test/mjsunit/regress/regress-5275-1.js b/deps/v8/test/mjsunit/regress/regress-5275-1.js
index 542bae0602..5951619e4b 100644
--- a/deps/v8/test/mjsunit/regress/regress-5275-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-5275-1.js
@@ -8,11 +8,11 @@ function foo(x) {
var a = new Array(1);
a[0] = x;
return a;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals([1], foo(1));
assertEquals([1], foo(1));
%OptimizeFunctionOnNextCall(foo);
assertEquals([1], foo(1));
-Array.prototype.__defineSetter__("0", function() {});
+Array.prototype.__defineSetter__('0', function() {});
assertEquals([undefined], foo(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-5275-2.js b/deps/v8/test/mjsunit/regress/regress-5275-2.js
index 2da422de97..04b61143be 100644
--- a/deps/v8/test/mjsunit/regress/regress-5275-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-5275-2.js
@@ -8,11 +8,11 @@ function foo(x) {
var a = new Array(1);
a[0] = x;
return a;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals([1], foo(1));
assertEquals([1], foo(1));
%OptimizeFunctionOnNextCall(foo);
assertEquals([1], foo(1));
-Object.prototype.__defineSetter__("0", function() {});
+Object.prototype.__defineSetter__('0', function() {});
assertEquals([undefined], foo(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-5286.js b/deps/v8/test/mjsunit/regress/regress-5286.js
index 210d986a66..1b5f736122 100644
--- a/deps/v8/test/mjsunit/regress/regress-5286.js
+++ b/deps/v8/test/mjsunit/regress/regress-5286.js
@@ -5,37 +5,45 @@
// Flags: --allow-natives-syntax
(function() {
- function foo(x, y) { return x % y; }
-
- assertEquals(0, foo(2, 2));
- assertEquals(0, foo(4, 4));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(-0, foo(-8, 8));
+function foo(x, y) {
+ return x % y;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo(2, 2));
+assertEquals(0, foo(4, 4));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(-0, foo(-8, 8));
})();
(function() {
- function foo(x, y) { return x % y; }
-
- assertEquals(0, foo(1, 1));
- assertEquals(0, foo(2, 2));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(-0, foo(-3, 3));
+function foo(x, y) {
+ return x % y;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo(1, 1));
+assertEquals(0, foo(2, 2));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(-0, foo(-3, 3));
})();
(function() {
- function foo(x, y) { return x % y; }
-
- assertEquals(0, foo(1, 1));
- assertEquals(0, foo(2, 2));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(-0, foo(-2147483648, -1));
+function foo(x, y) {
+ return x % y;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo(1, 1));
+assertEquals(0, foo(2, 2));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(-0, foo(-2147483648, -1));
})();
(function() {
- function foo(x, y) { return x % y; }
-
- assertEquals(0, foo(1, 1));
- assertEquals(0, foo(2, 2));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(-0, foo(-2147483648, -2147483648));
+function foo(x, y) {
+ return x % y;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo(1, 1));
+assertEquals(0, foo(2, 2));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(-0, foo(-2147483648, -2147483648));
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5332.js b/deps/v8/test/mjsunit/regress/regress-5332.js
index f0a0ef9ac8..5849d6638b 100644
--- a/deps/v8/test/mjsunit/regress/regress-5332.js
+++ b/deps/v8/test/mjsunit/regress/regress-5332.js
@@ -5,27 +5,27 @@
// Flags: --allow-natives-syntax
(function() {
- function foo() {
- var a = new Array(2);
- a[1] = 1.5;
- return a;
- }
-
- assertEquals(undefined, foo()[0]);
- assertEquals(undefined, foo()[0]);
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(undefined, foo()[0]);
+function foo() {
+ var a = new Array(2);
+ a[1] = 1.5;
+ return a;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(undefined, foo()[0]);
+assertEquals(undefined, foo()[0]);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo()[0]);
})();
(function() {
- function foo() {
- var a = Array(2);
- a[1] = 1.5;
- return a;
- }
-
- assertEquals(undefined, foo()[0]);
- assertEquals(undefined, foo()[0]);
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(undefined, foo()[0]);
+function foo() {
+ var a = Array(2);
+ a[1] = 1.5;
+ return a;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(undefined, foo()[0]);
+assertEquals(undefined, foo()[0]);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo()[0]);
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5357.js b/deps/v8/test/mjsunit/regress/regress-5357.js
index 11ada60708..938c15715c 100644
--- a/deps/v8/test/mjsunit/regress/regress-5357.js
+++ b/deps/v8/test/mjsunit/regress/regress-5357.js
@@ -9,8 +9,8 @@ function foo(a) {
a = Math.max(0, a);
a++;
return a;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(0);
foo(0);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-5404.js b/deps/v8/test/mjsunit/regress/regress-5404.js
index eddd1837a8..104aed0a21 100644
--- a/deps/v8/test/mjsunit/regress/regress-5404.js
+++ b/deps/v8/test/mjsunit/regress/regress-5404.js
@@ -8,6 +8,7 @@ function foo(a, b) {
return a + "0123456789012";
}
+%PrepareFunctionForOptimization(foo);
foo("a");
foo("a");
%OptimizeFunctionOnNextCall(foo);
@@ -16,6 +17,7 @@ foo("a");
var a = "a".repeat(%StringMaxLength());
assertThrows(function() { foo(a); }, RangeError);
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(function() { foo(a); }, RangeError);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-556543.js b/deps/v8/test/mjsunit/regress/regress-556543.js
index 9e9bedd6f5..f2d079f7ad 100644
--- a/deps/v8/test/mjsunit/regress/regress-556543.js
+++ b/deps/v8/test/mjsunit/regress/regress-556543.js
@@ -12,6 +12,7 @@ function f() {
}
}
}
-}
+};
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-5636-1.js b/deps/v8/test/mjsunit/regress/regress-5636-1.js
index c1cafe639b..4be9eb5579 100644
--- a/deps/v8/test/mjsunit/regress/regress-5636-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-5636-1.js
@@ -6,10 +6,13 @@
function f(n) {
var a = [];
- function g() { return x }
+ function g() {
+ return x;
+ };
for (var i = 0; i < n; ++i) {
var x = i;
a[i] = g;
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-5636-2.js b/deps/v8/test/mjsunit/regress/regress-5636-2.js
index e76733654d..a03dc06ca6 100644
--- a/deps/v8/test/mjsunit/regress/regress-5636-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-5636-2.js
@@ -7,10 +7,13 @@
function f(n) {
"use asm";
var a = [];
- function g() { return x }
+ function g() {
+ return x;
+ };
for (var i = 0; i < n; ++i) {
var x = i;
a[i] = g;
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-5638.js b/deps/v8/test/mjsunit/regress/regress-5638.js
index 034e4c40be..1df1b6c733 100644
--- a/deps/v8/test/mjsunit/regress/regress-5638.js
+++ b/deps/v8/test/mjsunit/regress/regress-5638.js
@@ -19,6 +19,7 @@ class B extends A {
}
}
}
+%PrepareFunctionForOptimization(B);
var thrower = new Proxy(A, {
get(target, property, receiver) {
diff --git a/deps/v8/test/mjsunit/regress/regress-5638b.js b/deps/v8/test/mjsunit/regress/regress-5638b.js
index b72a80eac5..980432773a 100644
--- a/deps/v8/test/mjsunit/regress/regress-5638b.js
+++ b/deps/v8/test/mjsunit/regress/regress-5638b.js
@@ -18,6 +18,7 @@ class B extends A {
construction_counter++;
}
}
+%PrepareFunctionForOptimization(B);
var superclass_counter = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-572589.js b/deps/v8/test/mjsunit/regress/regress-572589.js
index 1fd755ad1e..03686e6533 100644
--- a/deps/v8/test/mjsunit/regress/regress-572589.js
+++ b/deps/v8/test/mjsunit/regress/regress-572589.js
@@ -7,5 +7,6 @@
"use strict";
eval();
var f = ({x}) => { };
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-5749.js b/deps/v8/test/mjsunit/regress/regress-5749.js
index 42e88321ef..26b17acda9 100644
--- a/deps/v8/test/mjsunit/regress/regress-5749.js
+++ b/deps/v8/test/mjsunit/regress/regress-5749.js
@@ -5,8 +5,9 @@
// Flags: --allow-natives-syntax
function f(x) {
- (x ** 1) === '';
-}
+ x ** 1 === '';
+};
+%PrepareFunctionForOptimization(f);
f();
f();
f();
@@ -14,8 +15,9 @@ f();
f();
function g(x) {
- '' === (x ** 1);
-}
+ '' === x ** 1;
+};
+%PrepareFunctionForOptimization(g);
g();
g();
g();
diff --git a/deps/v8/test/mjsunit/regress/regress-5767.js b/deps/v8/test/mjsunit/regress/regress-5767.js
deleted file mode 100644
index 75c83c027d..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-5767.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-assertEquals("0", Number.MIN_VALUE.toString(35));
diff --git a/deps/v8/test/mjsunit/regress/regress-5790.js b/deps/v8/test/mjsunit/regress/regress-5790.js
index eb405237e1..3e8d3f881b 100644
--- a/deps/v8/test/mjsunit/regress/regress-5790.js
+++ b/deps/v8/test/mjsunit/regress/regress-5790.js
@@ -9,10 +9,12 @@ function foo(a) {
if (a) return arguments[1];
}
+%PrepareFunctionForOptimization(foo);
foo(false);
foo(false);
%OptimizeFunctionOnNextCall(foo);
foo(true, 1);
+%PrepareFunctionForOptimization(foo);
foo(true, 1);
%OptimizeFunctionOnNextCall(foo);
foo(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-5802.js b/deps/v8/test/mjsunit/regress/regress-5802.js
index 57c8198c0c..7c40ea471f 100644
--- a/deps/v8/test/mjsunit/regress/regress-5802.js
+++ b/deps/v8/test/mjsunit/regress/regress-5802.js
@@ -9,11 +9,13 @@
var o = { [Symbol.toPrimitive]: () => "o" };
+ %PrepareFunctionForOptimization(eq);
assertTrue(eq(o, o));
assertTrue(eq(o, o));
%OptimizeFunctionOnNextCall(eq);
assertTrue(eq(o, o));
assertTrue(eq("o", o));
+ %PrepareFunctionForOptimization(eq);
assertTrue(eq(o, "o"));
%OptimizeFunctionOnNextCall(eq);
assertTrue(eq(o, o));
@@ -27,11 +29,13 @@
var o = { [Symbol.toPrimitive]: () => "o" };
+ %PrepareFunctionForOptimization(ne);
assertFalse(ne(o, o));
assertFalse(ne(o, o));
%OptimizeFunctionOnNextCall(ne);
assertFalse(ne(o, o));
assertFalse(ne("o", o));
+ %PrepareFunctionForOptimization(ne);
assertFalse(ne(o, "o"));
%OptimizeFunctionOnNextCall(ne);
assertFalse(ne(o, o));
@@ -47,6 +51,7 @@
var b = {b};
var u = %GetUndetectable();
+ %PrepareFunctionForOptimization(eq);
assertTrue(eq(a, a));
assertTrue(eq(b, b));
assertFalse(eq(a, b));
@@ -61,6 +66,7 @@
assertFalse(eq(a, b));
assertFalse(eq(b, a));
assertTrue(eq(null, u));
+ %PrepareFunctionForOptimization(eq);
assertTrue(eq(undefined, u));
assertTrue(eq(u, null));
assertTrue(eq(u, undefined));
@@ -83,6 +89,7 @@
var b = {b};
var u = %GetUndetectable();
+ %PrepareFunctionForOptimization(ne);
assertFalse(ne(a, a));
assertFalse(ne(b, b));
assertTrue(ne(a, b));
@@ -97,6 +104,7 @@
assertTrue(ne(a, b));
assertTrue(ne(b, a));
assertFalse(ne(null, u));
+ %PrepareFunctionForOptimization(ne);
assertFalse(ne(undefined, u));
assertFalse(ne(u, null));
assertFalse(ne(u, undefined));
diff --git a/deps/v8/test/mjsunit/regress/regress-585041.js b/deps/v8/test/mjsunit/regress/regress-585041.js
index c072ed2a15..4471daf685 100644
--- a/deps/v8/test/mjsunit/regress/regress-585041.js
+++ b/deps/v8/test/mjsunit/regress/regress-585041.js
@@ -14,8 +14,8 @@ function boom(dummy) {
if (dummy) {
f(arr, -2147483648);
}
-}
-
+};
+%PrepareFunctionForOptimization(boom);
boom(false);
%OptimizeFunctionOnNextCall(boom);
boom(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-590074.js b/deps/v8/test/mjsunit/regress/regress-590074.js
index 80ae41b276..f06af30037 100644
--- a/deps/v8/test/mjsunit/regress/regress-590074.js
+++ b/deps/v8/test/mjsunit/regress/regress-590074.js
@@ -23,7 +23,8 @@ function __f_9() {
__v_5 = __f_10();
__v_4 = __f_10();
__f_2(__v_5);
-}
+};
+%PrepareFunctionForOptimization(__f_9);
__f_9();
%OptimizeFunctionOnNextCall(__f_9);
__f_9();
diff --git a/deps/v8/test/mjsunit/regress/regress-592341.js b/deps/v8/test/mjsunit/regress/regress-592341.js
index 2fa475294f..293973a671 100644
--- a/deps/v8/test/mjsunit/regress/regress-592341.js
+++ b/deps/v8/test/mjsunit/regress/regress-592341.js
@@ -10,27 +10,27 @@ function id(a) {
(function LiteralCompareNullDeopt() {
function f() {
- return id(null == %DeoptimizeNow());
- }
-
+ return id(null == %DeoptimizeNow());
+ };
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertTrue(f());
})();
(function LiteralCompareUndefinedDeopt() {
function f() {
- return id(undefined == %DeoptimizeNow());
- }
-
+ return id(undefined == %DeoptimizeNow());
+ };
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertTrue(f());
})();
(function LiteralCompareTypeofDeopt() {
function f() {
- return id("undefined" == typeof(%DeoptimizeNow()));
- }
-
+ return id('undefined' == typeof %DeoptimizeNow());
+ };
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertTrue(f());
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-592353.js b/deps/v8/test/mjsunit/regress/regress-592353.js
index f4e3b6859a..858df09bee 100644
--- a/deps/v8/test/mjsunit/regress/regress-592353.js
+++ b/deps/v8/test/mjsunit/regress/regress-592353.js
@@ -6,5 +6,6 @@
with ({}) {}
f = ({x}) => { };
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f({});
diff --git a/deps/v8/test/mjsunit/regress/regress-5943.js b/deps/v8/test/mjsunit/regress/regress-5943.js
index df21c20e95..0b769cacf9 100644
--- a/deps/v8/test/mjsunit/regress/regress-5943.js
+++ b/deps/v8/test/mjsunit/regress/regress-5943.js
@@ -6,8 +6,8 @@
function createHTML() {
return '' + '<div><div><di';
-}
-
+};
+%PrepareFunctionForOptimization(createHTML);
createHTML();
%OptimizeFunctionOnNextCall(createHTML);
diff --git a/deps/v8/test/mjsunit/regress/regress-5972.js b/deps/v8/test/mjsunit/regress/regress-5972.js
index 68fe3ac720..f4846276ea 100644
--- a/deps/v8/test/mjsunit/regress/regress-5972.js
+++ b/deps/v8/test/mjsunit/regress/regress-5972.js
@@ -9,8 +9,8 @@ var undetectable = %GetUndetectable();
function foo(a) {
const o = a ? foo : undetectable;
return typeof o === 'function';
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertFalse(foo(false));
assertFalse(foo(false));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js b/deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js
index 887c00099a..cd4222cc0c 100644
--- a/deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js
+++ b/deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js
@@ -28,7 +28,8 @@
(function f() {
function assignSloppy() {
f += "x";
- }
+ };
+ %PrepareFunctionForOptimization(assignSloppy);
assertDoesNotThrow(assignSloppy);
assertDoesNotThrow(assignSloppy);
%OptimizeFunctionOnNextCall(assignSloppy);
@@ -37,7 +38,8 @@
function assignStrict() {
'use strict';
f += "x";
- }
+ };
+ %PrepareFunctionForOptimization(assignStrict);
assertThrows(assignStrict, TypeError);
assertThrows(assignStrict, TypeError);
%OptimizeFunctionOnNextCall(assignStrict);
diff --git a/deps/v8/test/mjsunit/regress/regress-599412.js b/deps/v8/test/mjsunit/regress/regress-599412.js
index d5c411d0f1..f6125e5f6a 100644
--- a/deps/v8/test/mjsunit/regress/regress-599412.js
+++ b/deps/v8/test/mjsunit/regress/regress-599412.js
@@ -9,14 +9,16 @@ function h(a) {
print();
}
-function g(a) { return a.length; }
+function g(a) {
+ return a.length;
+}
g('0');
g('1');
function f() {
h(g([]));
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-599710.js b/deps/v8/test/mjsunit/regress/regress-599710.js
index dd1ba8d969..0f768b2c07 100644
--- a/deps/v8/test/mjsunit/regress/regress-599710.js
+++ b/deps/v8/test/mjsunit/regress/regress-599710.js
@@ -4,46 +4,74 @@
// Flags: --allow-natives-syntax
-var f1 = function() { while (1) { } }
+var f1 = function() {
+ while (1) {
+ }
+};
function g1() {
var s = "hey";
- f1 = function() { return true; }
- if (f1()) { return s; }
-}
-
+ f1 = function() {
+ return true;
+ };
+ if (f1()) {
+ return s;
+ }
+};
+%PrepareFunctionForOptimization(g1);
%OptimizeFunctionOnNextCall(g1);
assertEquals("hey", g1());
-var f2 = function() { do { } while (1); }
+var f2 = function() {
+ do {
+ } while (1);
+};
function g2() {
var s = "hey";
- f2 = function() { return true; }
- if (f2()) { return s; }
-}
-
+ f2 = function() {
+ return true;
+ };
+ if (f2()) {
+ return s;
+ }
+};
+%PrepareFunctionForOptimization(g2);
%OptimizeFunctionOnNextCall(g2);
assertEquals("hey", g2());
-var f3 = function() { for (;;); }
+var f3 = function() {
+ for (;;)
+ ;
+};
function g3() {
var s = "hey";
- f3 = function() { return true; }
- if (f3()) { return s; }
-}
-
+ f3 = function() {
+ return true;
+ };
+ if (f3()) {
+ return s;
+ }
+};
+%PrepareFunctionForOptimization(g3);
%OptimizeFunctionOnNextCall(g3);
assertEquals("hey", g3());
-var f4 = function() { for (;;); }
+var f4 = function() {
+ for (;;)
+ ;
+};
function g4() {
var s = "hey";
- f4 = function() { return true; }
- while (f4()) { return s; }
-}
-
+ f4 = function() {
+ return true;
+ };
+ while (f4()) {
+ return s;
+ }
+};
+%PrepareFunctionForOptimization(g4);
%OptimizeFunctionOnNextCall(g4);
assertEquals("hey", g4());
diff --git a/deps/v8/test/mjsunit/regress/regress-606021.js b/deps/v8/test/mjsunit/regress/regress-606021.js
index 54b283efc4..2044c9835d 100644
--- a/deps/v8/test/mjsunit/regress/regress-606021.js
+++ b/deps/v8/test/mjsunit/regress/regress-606021.js
@@ -14,6 +14,7 @@ function foo() {
var f1 = foo();
var f2 = foo();
+%PrepareFunctionForOptimization(f1);
// Both closures point to full code.
f1(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-6063.js b/deps/v8/test/mjsunit/regress/regress-6063.js
index 4a0d16824a..8704066a75 100644
--- a/deps/v8/test/mjsunit/regress/regress-6063.js
+++ b/deps/v8/test/mjsunit/regress/regress-6063.js
@@ -9,8 +9,8 @@ U16[0] = 0xffff;
function foo(a, i) {
return U16[0] === 0xffff;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-6082.js b/deps/v8/test/mjsunit/regress/regress-6082.js
index aec1be0fb7..e1a70cfb95 100644
--- a/deps/v8/test/mjsunit/regress/regress-6082.js
+++ b/deps/v8/test/mjsunit/regress/regress-6082.js
@@ -4,7 +4,10 @@
// Flags: --allow-natives-syntax
-function foo() { return Number.isNaN(); }
+function foo() {
+ return Number.isNaN();
+};
+%PrepareFunctionForOptimization(foo);
assertFalse(foo());
assertFalse(foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-6121.js b/deps/v8/test/mjsunit/regress/regress-6121.js
index c29ba2b067..0a45764611 100644
--- a/deps/v8/test/mjsunit/regress/regress-6121.js
+++ b/deps/v8/test/mjsunit/regress/regress-6121.js
@@ -12,6 +12,7 @@ function foo(o) {
return true;
}
}
+%PrepareFunctionForOptimization(foo);
var o = new Proxy({a:1},{
getOwnPropertyDescriptor(target, property) { throw target; }
diff --git a/deps/v8/test/mjsunit/regress/regress-612146.js b/deps/v8/test/mjsunit/regress/regress-612146.js
index 1bd3f0b1f0..0ceed5ad14 100644
--- a/deps/v8/test/mjsunit/regress/regress-612146.js
+++ b/deps/v8/test/mjsunit/regress/regress-612146.js
@@ -15,18 +15,19 @@ function f() {
return arguments_[0];
}
};
-
+%PrepareFunctionForOptimization(f);
+;
f(0);
f(0);
%OptimizeFunctionOnNextCall(f);
assertEquals(1, f(1));
function g() {
- var a = arguments;
- %DeoptimizeNow();
- return a.length;
-}
-
+ var a = arguments;
+ %DeoptimizeNow();
+ return a.length;
+};
+%PrepareFunctionForOptimization(g);
g(1);
g(1);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-6248.js b/deps/v8/test/mjsunit/regress/regress-6248.js
index 0631892549..e16452df92 100644
--- a/deps/v8/test/mjsunit/regress/regress-6248.js
+++ b/deps/v8/test/mjsunit/regress/regress-6248.js
@@ -16,6 +16,8 @@ class C extends Object {
}
}
}
+
+%PrepareFunctionForOptimization(C);
Object.setPrototypeOf(C, parseInt);
assertSame(sentinelObject, new C());
assertSame(sentinelObject, new C());
diff --git a/deps/v8/test/mjsunit/regress/regress-625121.js b/deps/v8/test/mjsunit/regress/regress-625121.js
index 27ad0f5faf..44f890cd1f 100644
--- a/deps/v8/test/mjsunit/regress/regress-625121.js
+++ b/deps/v8/test/mjsunit/regress/regress-625121.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function test(f) {
+ %PrepareFunctionForOptimization(f);
f(0);
f(NaN);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-632289.js b/deps/v8/test/mjsunit/regress/regress-632289.js
index e216035ab9..f9885051d9 100644
--- a/deps/v8/test/mjsunit/regress/regress-632289.js
+++ b/deps/v8/test/mjsunit/regress/regress-632289.js
@@ -5,7 +5,9 @@
// Flags: --always-opt --allow-natives-syntax
try {
-} catch(e) {; }
+} catch (e) {
+ ;
+}
(function __f_12() {
})();
(function __f_6() {
@@ -15,8 +17,9 @@ try {
try {
} catch (e) {
}
- }
- __f_4();
+ };
+ %PrepareFunctionForOptimization(__f_4);
+ __f_4();
%OptimizeFunctionOnNextCall(__f_4);
- __f_4();
+ __f_4();
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-634-debug.js b/deps/v8/test/mjsunit/regress/regress-634-debug.js
index 17ca828c68..8eaaa4fe09 100644
--- a/deps/v8/test/mjsunit/regress/regress-634-debug.js
+++ b/deps/v8/test/mjsunit/regress/regress-634-debug.js
@@ -35,6 +35,7 @@ function f() {
%SetAllocationTimeout(-1, -1, true);
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-6373.js b/deps/v8/test/mjsunit/regress/regress-6373.js
index 8673e83f22..4aa4641dfb 100644
--- a/deps/v8/test/mjsunit/regress/regress-6373.js
+++ b/deps/v8/test/mjsunit/regress/regress-6373.js
@@ -4,19 +4,19 @@
// Flags: --allow-natives-syntax
-var A = {}
+var A = {};
A[Symbol.hasInstance] = function(x) {
%DeoptimizeFunction(foo);
return 1;
-}
+};
-var a = {}
+var a = {};
function foo(o) {
return o instanceof A;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(a);
foo(a);
assertTrue(foo(a) !== 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-639270.js b/deps/v8/test/mjsunit/regress/regress-639270.js
index 731175dcfc..fe7dd8b84f 100644
--- a/deps/v8/test/mjsunit/regress/regress-639270.js
+++ b/deps/v8/test/mjsunit/regress/regress-639270.js
@@ -8,6 +8,7 @@
var g = (async () => { return JSON.stringify() });
+%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-642409.js b/deps/v8/test/mjsunit/regress/regress-642409.js
index eceb070a7f..72fa103d2c 100644
--- a/deps/v8/test/mjsunit/regress/regress-642409.js
+++ b/deps/v8/test/mjsunit/regress/regress-642409.js
@@ -16,6 +16,7 @@ class SubClass extends SuperClass {
}
}
+%PrepareFunctionForOptimization(SubClass);
new SubClass();
new SubClass();
%OptimizeFunctionOnNextCall(SubClass);
diff --git a/deps/v8/test/mjsunit/regress/regress-653407.js b/deps/v8/test/mjsunit/regress/regress-653407.js
index 66d537ee55..45fa2ebe62 100644
--- a/deps/v8/test/mjsunit/regress/regress-653407.js
+++ b/deps/v8/test/mjsunit/regress/regress-653407.js
@@ -8,19 +8,19 @@
// constructors in turbofan.
class superClass {
- constructor () {}
+ constructor() {}
}
class subClass extends superClass {
- constructor () {
+ constructor() {
super();
}
}
function f() {
- new subClass();
-}
-
+ new subClass();
+};
+%PrepareFunctionForOptimization(f);
f(); // We need this to collect feedback, so that subClass gets inlined in f.
-%OptimizeFunctionOnNextCall(f)
+%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-662845.js b/deps/v8/test/mjsunit/regress/regress-662845.js
index 7740ed10ff..4a2fdbc071 100644
--- a/deps/v8/test/mjsunit/regress/regress-662845.js
+++ b/deps/v8/test/mjsunit/regress/regress-662845.js
@@ -5,10 +5,12 @@
// Flags: --allow-natives-syntax
function foo(x) {
- (function() { x = 1; })()
+ (function() {
+ x = 1;
+ })();
return arguments[0];
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(1, foo(42));
assertEquals(1, foo(42));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-662904.js b/deps/v8/test/mjsunit/regress/regress-662904.js
index 00ab1cd65f..d8275595ed 100644
--- a/deps/v8/test/mjsunit/regress/regress-662904.js
+++ b/deps/v8/test/mjsunit/regress/regress-662904.js
@@ -13,6 +13,7 @@ function foo(a) {
return sum;
}
+%PrepareFunctionForOptimization(foo);
assertEquals("0a", foo());
assertEquals("0a", foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-664087.js b/deps/v8/test/mjsunit/regress/regress-664087.js
index 6739167930..52eadb53d8 100644
--- a/deps/v8/test/mjsunit/regress/regress-664087.js
+++ b/deps/v8/test/mjsunit/regress/regress-664087.js
@@ -8,14 +8,20 @@ function g() {
throw 1;
}
-var v = { valueOf : g };
+var v = {valueOf: g};
function foo(v) {
v++;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
%NeverOptimizeFunction(g);
-assertThrows(function () { foo(v); });
-assertThrows(function () { foo(v); });
+assertThrows(function() {
+ foo(v);
+});
+assertThrows(function() {
+ foo(v);
+});
%OptimizeFunctionOnNextCall(foo);
-assertThrows(function () { foo(v); });
+assertThrows(function() {
+ foo(v);
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-666046.js b/deps/v8/test/mjsunit/regress/regress-666046.js
index 23e991dc17..5cdaa11ab5 100644
--- a/deps/v8/test/mjsunit/regress/regress-666046.js
+++ b/deps/v8/test/mjsunit/regress/regress-666046.js
@@ -21,7 +21,6 @@ A.prototype = proto;
function foo(o) {
return o.a0;
}
-%EnsureFeedbackVectorForFunction(foo);
// Ensure |proto| is in old space.
gc();
@@ -30,6 +29,7 @@ gc();
// Ensure |proto| is marked as "should be fast".
var o = new A();
+%EnsureFeedbackVectorForFunction(foo);
foo(o);
foo(o);
foo(o);
diff --git a/deps/v8/test/mjsunit/regress/regress-6681.js b/deps/v8/test/mjsunit/regress/regress-6681.mjs
index d35ae41ffb..e9f7126cb9 100644
--- a/deps/v8/test/mjsunit/regress/regress-6681.js
+++ b/deps/v8/test/mjsunit/regress/regress-6681.mjs
@@ -1,10 +1,8 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-import * as ns from "./regress-6681.js";
+import * as ns from "./regress-6681.mjs";
export var foo;
assertEquals(false, Reflect.defineProperty(ns, 'foo', {value: 123}));
diff --git a/deps/v8/test/mjsunit/regress/regress-669024.js b/deps/v8/test/mjsunit/regress/regress-669024.js
index 079cb968e1..a5469f357e 100644
--- a/deps/v8/test/mjsunit/regress/regress-669024.js
+++ b/deps/v8/test/mjsunit/regress/regress-669024.js
@@ -4,15 +4,19 @@
// Flags: --allow-natives-syntax
-function h(y) { return y.u; }
+function h(y) {
+ return y.u;
+}
-function g() { return h.apply(0, arguments); }
+function g() {
+ return h.apply(0, arguments);
+}
function f(x) {
- var o = { u : x };
+ var o = {u: x};
return g(o);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(42);
f(0.1);
diff --git a/deps/v8/test/mjsunit/regress/regress-673242.js b/deps/v8/test/mjsunit/regress/regress-673242.js
index ceb60f563c..c2388d2a5b 100644
--- a/deps/v8/test/mjsunit/regress/regress-673242.js
+++ b/deps/v8/test/mjsunit/regress/regress-673242.js
@@ -5,13 +5,15 @@
// Flags: --mark-shared-functions-for-tier-up --allow-natives-syntax --expose-gc
function foo() {
- function bar() {
- }
+ function bar() {};
+ %PrepareFunctionForOptimization(bar);
return bar;
}
// Mark bar's shared function info for tier-up
// (but don't optimize).
+;
+%PrepareFunctionForOptimization(foo);
var bar = foo();
%OptimizeFunctionOnNextCall(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-681383.js b/deps/v8/test/mjsunit/regress/regress-681383.js
index e8c3593574..ba8cf17424 100644
--- a/deps/v8/test/mjsunit/regress/regress-681383.js
+++ b/deps/v8/test/mjsunit/regress/regress-681383.js
@@ -13,6 +13,7 @@ function f(deopt) {
}
}
+%PrepareFunctionForOptimization(f);
f(false);
f(false);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-683617.js b/deps/v8/test/mjsunit/regress/regress-683617.js
index 18da682f2e..0fba26b0b8 100644
--- a/deps/v8/test/mjsunit/regress/regress-683617.js
+++ b/deps/v8/test/mjsunit/regress/regress-683617.js
@@ -10,7 +10,8 @@ function f(deopt) {
if (deopt) {
return it.next().value;
}
-}
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-6907.js b/deps/v8/test/mjsunit/regress/regress-6907.js
index 0749365fed..dc58a58162 100644
--- a/deps/v8/test/mjsunit/regress/regress-6907.js
+++ b/deps/v8/test/mjsunit/regress/regress-6907.js
@@ -13,7 +13,8 @@
try { throw 0 } catch(e) {
return b.forEach(callback);
}
- }
+ };
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-696651.js b/deps/v8/test/mjsunit/regress/regress-696651.js
index 650c9787f6..9a97dde87d 100644
--- a/deps/v8/test/mjsunit/regress/regress-696651.js
+++ b/deps/v8/test/mjsunit/regress/regress-696651.js
@@ -4,8 +4,12 @@
// Flags: --allow-natives-syntax
-function get_a() { return "aaaaaaaaaaaaaa"; }
-function get_b() { return "bbbbbbbbbbbbbb"; }
+function get_a() {
+ return 'aaaaaaaaaaaaaa';
+}
+function get_b() {
+ return 'bbbbbbbbbbbbbb';
+}
function get_string() {
return get_a() + get_b();
@@ -13,8 +17,8 @@ function get_string() {
function prefix(s) {
return s + get_string();
-}
-
+};
+%PrepareFunctionForOptimization(prefix);
prefix("");
prefix("");
%OptimizeFunctionOnNextCall(prefix);
diff --git a/deps/v8/test/mjsunit/regress/regress-698790.js b/deps/v8/test/mjsunit/regress/regress-698790.js
index 8791cb5d17..c4c58adad2 100644
--- a/deps/v8/test/mjsunit/regress/regress-698790.js
+++ b/deps/v8/test/mjsunit/regress/regress-698790.js
@@ -11,8 +11,11 @@ new RegExp(cons_string);
// Same thing but using TF lowering.
-function make_cons_string(s) { return s + "aaaaaaaaaaaaaa"; }
-make_cons_string("");
+function make_cons_string(s) {
+ return s + 'aaaaaaaaaaaaaa';
+};
+%PrepareFunctionForOptimization(make_cons_string);
+make_cons_string('');
%OptimizeFunctionOnNextCall(make_cons_string);
var cons_str = make_cons_string("");
new RegExp(cons_str);
diff --git a/deps/v8/test/mjsunit/regress/regress-709782.js b/deps/v8/test/mjsunit/regress/regress-709782.js
index e33f694ec9..91fbb881ed 100644
--- a/deps/v8/test/mjsunit/regress/regress-709782.js
+++ b/deps/v8/test/mjsunit/regress/regress-709782.js
@@ -5,9 +5,13 @@
// Flags: --allow-natives-syntax
var a = [0];
-function bar(x) { return x; }
-function foo() { return a.reduce(bar); }
-
+function bar(x) {
+ return x;
+}
+function foo() {
+ return a.reduce(bar);
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(0, foo());
assertEquals(0, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-718891.js b/deps/v8/test/mjsunit/regress/regress-718891.js
index 209568dca3..80aa1ec705 100644
--- a/deps/v8/test/mjsunit/regress/regress-718891.js
+++ b/deps/v8/test/mjsunit/regress/regress-718891.js
@@ -24,6 +24,7 @@ function TestDontSelfHealWithDeoptedCode(run_unoptimized, ClosureFactory) {
// so that the closure doesn't leak in a dead register).
(() => {
var optimized_closure = ClosureFactory();
+ %PrepareFunctionForOptimization(optimized_closure);
// Use .call to avoid the CallIC retaining the JSFunction in the
// feedback vector via a weak map, which would mean it wouldn't be
// collected in the minor gc below.
@@ -40,6 +41,7 @@ function TestDontSelfHealWithDeoptedCode(run_unoptimized, ClosureFactory) {
// old->new remember set.
(() => {
var dummy = function() { return 1; };
+ %PrepareFunctionForOptimization(dummy);
%OptimizeFunctionOnNextCall(dummy);
dummy();
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-725858.js b/deps/v8/test/mjsunit/regress/regress-725858.js
index 466673f816..dbb65268b8 100644
--- a/deps/v8/test/mjsunit/regress/regress-725858.js
+++ b/deps/v8/test/mjsunit/regress/regress-725858.js
@@ -7,5 +7,6 @@
function f() {}
var src = 'f(' + '0,'.repeat(0x201f) + ')';
var boom = new Function(src);
+%PrepareFunctionForOptimization(boom);
%OptimizeFunctionOnNextCall(boom);
boom();
diff --git a/deps/v8/test/mjsunit/regress/regress-727662.js b/deps/v8/test/mjsunit/regress/regress-727662.js
index 62b53b4187..d7eed6924b 100644
--- a/deps/v8/test/mjsunit/regress/regress-727662.js
+++ b/deps/v8/test/mjsunit/regress/regress-727662.js
@@ -5,18 +5,18 @@
// Flags: --allow-natives-syntax --expose-gc
(function() {
- function thingo(i, b) {
- var s = b ? "ac" : "abcd";
- i = i >>> 0;
- if (i < s.length) {
- var c = s.charCodeAt(i);
- gc();
- return c;
- }
+function thingo(i, b) {
+ var s = b ? "ac" : "abcd";
+ i = i >>> 0;
+ if (i < s.length) {
+ var c = s.charCodeAt(i);
+ gc();
+ return c;
}
- thingo(0, true);
- thingo(0, true);
- %OptimizeFunctionOnNextCall(thingo);
- thingo(0, true);
-
+};
+%PrepareFunctionForOptimization(thingo);
+thingo(0, true);
+thingo(0, true);
+%OptimizeFunctionOnNextCall(thingo);
+thingo(0, true);
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-730254.js b/deps/v8/test/mjsunit/regress/regress-730254.js
index 2db49ce4bd..92419462df 100644
--- a/deps/v8/test/mjsunit/regress/regress-730254.js
+++ b/deps/v8/test/mjsunit/regress/regress-730254.js
@@ -30,8 +30,9 @@
var __v_0 = {};
__v_0 = new Map();
function __f_0() {
- __v_0[0] --;
-}
+ __v_0[0]--;
+};
+%PrepareFunctionForOptimization(__f_0);
__f_0();
%OptimizeFunctionOnNextCall(__f_0);
__f_0();
diff --git a/deps/v8/test/mjsunit/regress/regress-747075.js b/deps/v8/test/mjsunit/regress/regress-747075.js
index 2816507935..a04b490fd1 100644
--- a/deps/v8/test/mjsunit/regress/regress-747075.js
+++ b/deps/v8/test/mjsunit/regress/regress-747075.js
@@ -9,12 +9,13 @@ r = [
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14
];
-
function f() {
- r2 = r.map(function(y) {return y/64} );
+ r2 = r.map(function(y) {
+ return y / 64;
+ });
assertTrue(r2[0] < 1);
-}
-
+};
+%PrepareFunctionForOptimization(f);
for (let i = 0; i < 1000; ++i) f();
for (let i = 0; i < 1000; ++i) f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-747825.js b/deps/v8/test/mjsunit/regress/regress-747825.js
index f61693e2cd..7be2323f9c 100644
--- a/deps/v8/test/mjsunit/regress/regress-747825.js
+++ b/deps/v8/test/mjsunit/regress/regress-747825.js
@@ -12,6 +12,7 @@ function f() {
g(r);
}
+%PrepareFunctionForOptimization(f);
f(); f(); %OptimizeFunctionOnNextCall(f); // Warm-up.
var re;
diff --git a/deps/v8/test/mjsunit/regress/regress-7510.js b/deps/v8/test/mjsunit/regress/regress-7510.js
index 0dd934573a..4cd562ad18 100644
--- a/deps/v8/test/mjsunit/regress/regress-7510.js
+++ b/deps/v8/test/mjsunit/regress/regress-7510.js
@@ -10,10 +10,12 @@ function foo(a) {
}
}
+%PrepareFunctionForOptimization(foo);
foo([1]);
foo([1]);
%OptimizeFunctionOnNextCall(foo);
foo([1]);
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo([1]);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-7740.js b/deps/v8/test/mjsunit/regress/regress-7740.js
index 01c21891e2..7da0cc794b 100644
--- a/deps/v8/test/mjsunit/regress/regress-7740.js
+++ b/deps/v8/test/mjsunit/regress/regress-7740.js
@@ -10,8 +10,8 @@ x = 42;
function foo(a, b) {
let y = a < a;
if (b) x = y;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(1, false);
foo(1, false);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-774824.js b/deps/v8/test/mjsunit/regress/regress-774824.js
index ca2deccad9..abb3671598 100644
--- a/deps/v8/test/mjsunit/regress/regress-774824.js
+++ b/deps/v8/test/mjsunit/regress/regress-774824.js
@@ -9,8 +9,8 @@ function f() {
a.outOfObjectProperty = undefined;
%DeoptimizeNow();
return !a;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-775888.js b/deps/v8/test/mjsunit/regress/regress-775888.js
index 8aa809e812..270d0719c9 100644
--- a/deps/v8/test/mjsunit/regress/regress-775888.js
+++ b/deps/v8/test/mjsunit/regress/regress-775888.js
@@ -7,10 +7,10 @@
function __f_7586(__v_27535) {
let a = __v_27535.shift();
return a;
-}
-
+};
+%PrepareFunctionForOptimization(__f_7586);
function __f_7587() {
- var __v_27536 = [ 1, 15, 16];
+ var __v_27536 = [1, 15, 16];
__f_7586(__v_27536);
__v_27536.unshift(__v_27536);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-776309.js b/deps/v8/test/mjsunit/regress/regress-776309.js
index 98a38c05e5..9ec4e7ff3b 100644
--- a/deps/v8/test/mjsunit/regress/regress-776309.js
+++ b/deps/v8/test/mjsunit/regress/regress-776309.js
@@ -14,6 +14,7 @@ function f(b) {
%_DeoptimizeNow();
return o.t;
}
+%PrepareFunctionForOptimization(f);
// Finish slack tracking for C.
for (var i = 0; i < 1000; i++) new C();
diff --git a/deps/v8/test/mjsunit/regress/regress-781218.js b/deps/v8/test/mjsunit/regress/regress-781218.js
index f51e99f32b..e1b870bf19 100644
--- a/deps/v8/test/mjsunit/regress/regress-781218.js
+++ b/deps/v8/test/mjsunit/regress/regress-781218.js
@@ -6,7 +6,7 @@
var m = new Map();
-function C() { }
+function C() {}
// Make sure slack tracking kicks in and shrinks the default size to prevent
// any further in-object properties.
@@ -17,10 +17,11 @@ function f(o) {
}
// Warm up {f}.
+;
+%PrepareFunctionForOptimization(f);
f(new C());
f(new C());
-
var o = new C();
%HeapObjectVerify(o);
diff --git a/deps/v8/test/mjsunit/regress/regress-782754.js b/deps/v8/test/mjsunit/regress/regress-782754.js
index 608c48ad9e..87d5bf1ec8 100644
--- a/deps/v8/test/mjsunit/regress/regress-782754.js
+++ b/deps/v8/test/mjsunit/regress/regress-782754.js
@@ -4,11 +4,14 @@
// Flags: --allow-natives-syntax
-let a = [1,2];
-function f(skip) { g(undefined, skip) }
+let a = [1, 2];
+function f(skip) {
+ g(undefined, skip);
+};
+%PrepareFunctionForOptimization(f);
function g(x, skip) {
if (skip) return;
- return a[x+1];
+ return a[x + 1];
}
g(0, false);
g(0, false);
diff --git a/deps/v8/test/mjsunit/regress/regress-783051.js b/deps/v8/test/mjsunit/regress/regress-783051.js
index 0e5af37a21..ee69bedddb 100644
--- a/deps/v8/test/mjsunit/regress/regress-783051.js
+++ b/deps/v8/test/mjsunit/regress/regress-783051.js
@@ -4,7 +4,10 @@
// Flags: --allow-natives-syntax
-function f() { return Math.abs([][0]); }
+function f() {
+ return Math.abs([][0]);
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-791334.js b/deps/v8/test/mjsunit/regress/regress-791334.mjs
index 9f2748fdad..7e79b2e55f 100644
--- a/deps/v8/test/mjsunit/regress/regress-791334.js
+++ b/deps/v8/test/mjsunit/regress/regress-791334.mjs
@@ -2,7 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// MODULE
-
let foo = () => { return this };
assertEquals(undefined, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-791958.js b/deps/v8/test/mjsunit/regress/regress-791958.js
index 443ef6e359..5b7e753efc 100644
--- a/deps/v8/test/mjsunit/regress/regress-791958.js
+++ b/deps/v8/test/mjsunit/regress/regress-791958.js
@@ -4,12 +4,15 @@
// Flags: --allow-natives-syntax
-obj = {m: print};
+obj = {
+ m: print
+};
function foo() {
for (var x = -536870912; x != -536870903; ++x) {
obj.m(-x >= 1000000 ? x % 1000000 : y);
}
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-794822.js b/deps/v8/test/mjsunit/regress/regress-794822.js
index c9b46001d1..bc769e703d 100644
--- a/deps/v8/test/mjsunit/regress/regress-794822.js
+++ b/deps/v8/test/mjsunit/regress/regress-794822.js
@@ -9,11 +9,13 @@ function* opt(arg = () => arg) {
for (;;) {
arg;
yield;
- function inner() { tmp }
+ function inner() {
+ tmp;
+ }
break;
}
-}
-
+};
+%PrepareFunctionForOptimization(opt);
opt();
%OptimizeFunctionOnNextCall(opt);
opt();
diff --git a/deps/v8/test/mjsunit/regress/regress-794825.js b/deps/v8/test/mjsunit/regress/regress-794825.js
index 3709e8eabc..bd4f846fc3 100644
--- a/deps/v8/test/mjsunit/regress/regress-794825.js
+++ b/deps/v8/test/mjsunit/regress/regress-794825.js
@@ -45,8 +45,8 @@ function* opt() {
} else {
yield;
}
-}
-
+};
+%PrepareFunctionForOptimization(opt);
opt();
// Optimize function to trigger the iteration over jump
// table.
diff --git a/deps/v8/test/mjsunit/regress/regress-797581.js b/deps/v8/test/mjsunit/regress/regress-797581.js
index 3dfad4c463..0613d1cd43 100644
--- a/deps/v8/test/mjsunit/regress/regress-797581.js
+++ b/deps/v8/test/mjsunit/regress/regress-797581.js
@@ -21,7 +21,7 @@ function TryToLoadModule(filename, expect_error, token) {
if (expect_error) {
assertTrue(caught_error instanceof SyntaxError);
- assertEquals("Unexpected token " + token, caught_error.message);
+ assertEquals("Unexpected token '" + token + "'", caught_error.message);
} else {
assertEquals(undefined, caught_error);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-802060.js b/deps/v8/test/mjsunit/regress/regress-802060.js
index e975615484..d223cd04a9 100644
--- a/deps/v8/test/mjsunit/regress/regress-802060.js
+++ b/deps/v8/test/mjsunit/regress/regress-802060.js
@@ -7,12 +7,13 @@
function assertEquals(expected, found) {
found.length !== expected.length;
}
-assertEquals([], [])
+assertEquals([], []);
assertEquals("a", "a");
assertEquals([], []);
function f() {
assertEquals(0, undefined);
-}
+};
+%PrepareFunctionForOptimization(f);
try {
f();
} catch (e) {
diff --git a/deps/v8/test/mjsunit/regress/regress-804837.js b/deps/v8/test/mjsunit/regress/regress-804837.js
index 2e5e603a7f..2844ff3b10 100644
--- a/deps/v8/test/mjsunit/regress/regress-804837.js
+++ b/deps/v8/test/mjsunit/regress/regress-804837.js
@@ -6,8 +6,11 @@
var __v_25662 = [, 1.8];
function __f_6214(__v_25668) {
- __v_25662.reduce(() => {1});
-}
+ __v_25662.reduce(() => {
+ 1;
+ });
+};
+%PrepareFunctionForOptimization(__f_6214);
__f_6214();
__f_6214();
%OptimizeFunctionOnNextCall(__f_6214);
diff --git a/deps/v8/test/mjsunit/regress/regress-805768.js b/deps/v8/test/mjsunit/regress/regress-805768.js
index e36106fdb3..e9aea27ba9 100644
--- a/deps/v8/test/mjsunit/regress/regress-805768.js
+++ b/deps/v8/test/mjsunit/regress/regress-805768.js
@@ -10,8 +10,10 @@ function foo() {
return a;
}
-function bar(a) { a[0] = "bazinga!"; }
-
+function bar(a) {
+ a[0] = 'bazinga!';
+};
+%PrepareFunctionForOptimization(bar);
for (var i = 0; i < 5; i++) bar([]);
%OptimizeFunctionOnNextCall(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-818070.js b/deps/v8/test/mjsunit/regress/regress-818070.js
index 7afb695b29..f94a01d561 100644
--- a/deps/v8/test/mjsunit/regress/regress-818070.js
+++ b/deps/v8/test/mjsunit/regress/regress-818070.js
@@ -9,6 +9,7 @@ function f(a) {
}
x = { [Symbol.toPrimitive]: () => FAIL };
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
@@ -19,6 +20,7 @@ function f(a) {
}
x = { [Symbol.toPrimitive]: () => FAIL };
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
@@ -29,6 +31,7 @@ function f(a) {
}
x = { [Symbol.toPrimitive]: () => FAIL };
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-8384.js b/deps/v8/test/mjsunit/regress/regress-8384.js
index bbb0d575e4..c4d85e0b49 100644
--- a/deps/v8/test/mjsunit/regress/regress-8384.js
+++ b/deps/v8/test/mjsunit/regress/regress-8384.js
@@ -4,7 +4,9 @@
// Flags: --allow-natives-syntax
-function assert(cond) { if (!cond) throw "Assert"; }
+function assert(cond) {
+ if (!cond) throw 'Assert';
+}
function Constructor() {
this.padding1 = null;
@@ -42,20 +44,19 @@ function Constructor() {
function f(k) {
var c = k.accumulator | 0;
- k.accumulator = k.array[(k.accumulator + 1 | 0)] | 0;
- k.array[c + 1 | 0] = (-1);
+ k.accumulator = k.array[k.accumulator + 1 | 0] | 0;
+ k.array[c + 1 | 0] = -1;
var head = k.accumulator;
- assert((head + c) & 1);
+ assert(head + c & 1);
while (head >= 0) {
head = k.array[head + 1 | 0];
}
return;
-}
-
+};
+%PrepareFunctionForOptimization(f);
const tmp = new Constructor();
tmp.array = new Int32Array(5);
-for (var i = 1; i < 5; i++)
- tmp.array[i] = i | 0;
+for (var i = 1; i < 5; i++) tmp.array[i] = i | 0;
tmp.accumulator = 0;
f(tmp);
diff --git a/deps/v8/test/mjsunit/regress/regress-838766.js b/deps/v8/test/mjsunit/regress/regress-838766.js
index 1626ee2428..d342f8066b 100644
--- a/deps/v8/test/mjsunit/regress/regress-838766.js
+++ b/deps/v8/test/mjsunit/regress/regress-838766.js
@@ -7,7 +7,8 @@
function foo(x) {
x = x | 2147483648;
return Number.parseInt(x + 65535, 8);
-}
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(-72161, foo());
assertEquals(-72161, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-842612.js b/deps/v8/test/mjsunit/regress/regress-842612.js
index d489f969c5..0ce4c04d1d 100644
--- a/deps/v8/test/mjsunit/regress/regress-842612.js
+++ b/deps/v8/test/mjsunit/regress/regress-842612.js
@@ -8,8 +8,8 @@ var arr = [undefined];
function f() {
assertEquals(0, arr.indexOf(undefined, -1));
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-843062-3.js b/deps/v8/test/mjsunit/regress/regress-843062-3.js
index e9ff907b4a..638a26f4dd 100644
--- a/deps/v8/test/mjsunit/regress/regress-843062-3.js
+++ b/deps/v8/test/mjsunit/regress/regress-843062-3.js
@@ -4,7 +4,10 @@
// Flags: --allow-natives-syntax
-function bar(len) { return new Array(len); }
+function bar(len) {
+ return new Array(len);
+};
+%PrepareFunctionForOptimization(bar);
bar(0);
%OptimizeFunctionOnNextCall(bar);
bar(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-843543.js b/deps/v8/test/mjsunit/regress/regress-843543.js
index 7e7f86b572..50e2de7c59 100644
--- a/deps/v8/test/mjsunit/regress/regress-843543.js
+++ b/deps/v8/test/mjsunit/regress/regress-843543.js
@@ -4,13 +4,15 @@
//
// Flags: --allow-natives-syntax
-const o = {x:9};
+const o = {
+ x: 9
+};
o.__proto__ = Array.prototype;
function foo(o) {
return o.indexOf(undefined);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(-1, foo(o));
assertEquals(-1, foo(o));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-8510.js b/deps/v8/test/mjsunit/regress/regress-8510.js
new file mode 100644
index 0000000000..bbaccd9f7c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-8510.js
@@ -0,0 +1,7 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stress-compaction --throws
+
+load('test/mjsunit/regress/regress-8510.js');
diff --git a/deps/v8/test/mjsunit/regress/regress-8630.js b/deps/v8/test/mjsunit/regress/regress-8630.js
index 4a5aa5409b..205fb1b8e1 100644
--- a/deps/v8/test/mjsunit/regress/regress-8630.js
+++ b/deps/v8/test/mjsunit/regress/regress-8630.js
@@ -7,7 +7,7 @@
// Parameters can't have parentheses (both patterns and identifiers)
assertThrows("( ({x: 1}) ) => {};", SyntaxError);
assertThrows("( (x) ) => {}", SyntaxError);
-assertThrows("( ({x: 1}) = y ) => {}", ReferenceError);
+assertThrows("( ({x: 1}) = y ) => {}", SyntaxError);
assertThrows("( (x) = y ) => {}", SyntaxError);
// Declarations can't have parentheses (both patterns and identifiers)
@@ -21,8 +21,8 @@ assertThrows("var [({x: 1}) = y] = [];", SyntaxError);
assertThrows("var [(x) = y] = [];", SyntaxError);
// Patterns can't have parentheses in assignments either
-assertThrows("[({x: 1}) = y] = [];", ReferenceError);
-assertThrows("({a,b}) = {a:2,b:3}", ReferenceError);
+assertThrows("[({x: 1}) = y] = [];", SyntaxError);
+assertThrows("({a,b}) = {a:2,b:3}", SyntaxError);
// Parentheses are fine around identifiers in assignments though, even inside a
// pattern
diff --git a/deps/v8/test/mjsunit/regress/regress-865310.js b/deps/v8/test/mjsunit/regress/regress-865310.js
index 57f976991a..17fe2cd122 100644
--- a/deps/v8/test/mjsunit/regress/regress-865310.js
+++ b/deps/v8/test/mjsunit/regress/regress-865310.js
@@ -6,15 +6,15 @@
check = function() {
assertEquals(null, check.caller);
-}
+};
var obj = {};
obj.valueOf = check;
function f() {
Number(obj);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-869735.js b/deps/v8/test/mjsunit/regress/regress-869735.js
index dfa7b8385b..6338bc274c 100644
--- a/deps/v8/test/mjsunit/regress/regress-869735.js
+++ b/deps/v8/test/mjsunit/regress/regress-869735.js
@@ -6,8 +6,8 @@
function f() {
return arguments.length;
-}
-
+};
+%PrepareFunctionForOptimization(f);
var a = [];
%OptimizeFunctionOnNextCall(f);
a.length = 81832;
diff --git a/deps/v8/test/mjsunit/regress/regress-8913.js b/deps/v8/test/mjsunit/regress/regress-8913.js
index 7ebdd063f5..72dd492168 100644
--- a/deps/v8/test/mjsunit/regress/regress-8913.js
+++ b/deps/v8/test/mjsunit/regress/regress-8913.js
@@ -11,6 +11,7 @@ foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
foo(1);
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(1);
assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-895691.js b/deps/v8/test/mjsunit/regress/regress-895691.js
index 6f63ac6c9b..7f2176cf3f 100644
--- a/deps/v8/test/mjsunit/regress/regress-895691.js
+++ b/deps/v8/test/mjsunit/regress/regress-895691.js
@@ -4,15 +4,15 @@
// Flags: --allow-natives-syntax
-const n = 2**32;
+const n = 2 ** 32;
const x = new Float32Array();
function f() {
for (var i = 96; i < 100; i += 4) {
x[i] = i + n;
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-897815.js b/deps/v8/test/mjsunit/regress/regress-897815.js
index 40a8c5e1ef..943e8c1260 100644
--- a/deps/v8/test/mjsunit/regress/regress-897815.js
+++ b/deps/v8/test/mjsunit/regress/regress-897815.js
@@ -8,9 +8,11 @@
function __f_19351() {
function __f_19352() {
}
- }
+ };
+ %PrepareFunctionForOptimization(__f_19351);
try {
__f_19350();
- } catch (e) {}
- %OptimizeFunctionOnNextCall(__f_19351)
+ } catch (e) {
+ }
+ %OptimizeFunctionOnNextCall(__f_19351);
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-899115.js b/deps/v8/test/mjsunit/regress/regress-899115.js
index 5b4099792f..4e8af5f3ce 100644
--- a/deps/v8/test/mjsunit/regress/regress-899115.js
+++ b/deps/v8/test/mjsunit/regress/regress-899115.js
@@ -6,8 +6,8 @@
function foo() {
Object.getPrototypeOf([]).includes();
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-900585.js b/deps/v8/test/mjsunit/regress/regress-900585.js
index 8969644f95..456f2c8ee6 100644
--- a/deps/v8/test/mjsunit/regress/regress-900585.js
+++ b/deps/v8/test/mjsunit/regress/regress-900585.js
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-assertThrows("/*for..in*/for(var [x5, functional] = this = function(id) { return id } in false) var x2, x;", ReferenceError);
+assertThrows("/*for..in*/for(var [x5, functional] = this = function(id) { return id } in false) var x2, x;", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-901798.js b/deps/v8/test/mjsunit/regress/regress-901798.js
index 67022a70e8..a9a4d943ca 100644
--- a/deps/v8/test/mjsunit/regress/regress-901798.js
+++ b/deps/v8/test/mjsunit/regress/regress-901798.js
@@ -6,8 +6,8 @@
function f(a) {
return (a >>> 1073741824) + -3;
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertEquals(-3, f(0));
assertEquals(-2, f(1));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-904417.js b/deps/v8/test/mjsunit/regress/regress-904417.js
index dc469cca08..90568dd1ad 100644
--- a/deps/v8/test/mjsunit/regress/regress-904417.js
+++ b/deps/v8/test/mjsunit/regress/regress-904417.js
@@ -6,8 +6,8 @@
function bar(o) {
return o.hello, Object.getPrototypeOf(o);
-}
-
+};
+%PrepareFunctionForOptimization(bar);
var y = { __proto__: {}, hello: 44 };
var z = { hello: 45 };
diff --git a/deps/v8/test/mjsunit/regress/regress-919340.js b/deps/v8/test/mjsunit/regress/regress-919340.js
index 900bf6fde2..d28b505742 100644
--- a/deps/v8/test/mjsunit/regress/regress-919340.js
+++ b/deps/v8/test/mjsunit/regress/regress-919340.js
@@ -7,10 +7,10 @@
var E = 'Σ';
var PI = 123;
function f() {
- print(E = 2, /b/.test(E) || /b/.test(E = 2));
- ((E = 3) * PI);
-}
-
+ print(E = 2, /b/.test(E) || /b/.test(E = 2));
+ (E = 3) * PI;
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-932953.js b/deps/v8/test/mjsunit/regress/regress-932953.js
index 5e211c79d1..59d7499d41 100644
--- a/deps/v8/test/mjsunit/regress/regress-932953.js
+++ b/deps/v8/test/mjsunit/regress/regress-932953.js
@@ -7,18 +7,20 @@
(function NonExtensibleBetweenSetterAndGetter() {
o = {};
o.x = 42;
- o.__defineGetter__("y", function() { });
+ o.__defineGetter__('y', function() {});
Object.preventExtensions(o);
- o.__defineSetter__("y", function() { });
+ o.__defineSetter__('y', function() {});
o.x = 0.1;
})();
(function InterleavedIntegrityLevel() {
o = {};
o.x = 42;
- o.__defineSetter__("y", function() { });
+ o.__defineSetter__('y', function() {});
Object.preventExtensions(o);
- o.__defineGetter__("y", function() { return 44; });
+ o.__defineGetter__('y', function() {
+ return 44;
+ });
Object.seal(o);
o.x = 0.1;
assertEquals(44, o.y);
@@ -41,6 +43,8 @@
}
// Warm up the IC.
+ ;
+ %PrepareFunctionForOptimization(f);
f(o1);
f(o1);
f(o1);
diff --git a/deps/v8/test/mjsunit/regress/regress-936077.js b/deps/v8/test/mjsunit/regress/regress-936077.js
index fcd5254bd2..2bc6dc52c7 100644
--- a/deps/v8/test/mjsunit/regress/regress-936077.js
+++ b/deps/v8/test/mjsunit/regress/regress-936077.js
@@ -7,7 +7,11 @@
function main() {
var obj = {};
- function foo() { return obj[0]; };
+ function foo() {
+ return obj[0];
+ };
+ %PrepareFunctionForOptimization(foo);
+ ;
gc();
obj.x = 10;
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-9383.js b/deps/v8/test/mjsunit/regress/regress-9383.js
new file mode 100644
index 0000000000..8814955bfb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9383.js
@@ -0,0 +1,50 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --throws --cache=code --enable-lazy-source-positions
+
+// Test ensures that the getters and setters are added in the same order in the
+// generated bytecode regardless of the isolate's hash seed. This gets tested
+// because of the use of the code cache.
+var c = {
+ get b() {
+ },
+ get getter() {
+ },
+ set a(n) {
+ },
+ set a(n) {
+ },
+ set setter1(n) {
+ },
+ set setter2(n) {
+ },
+ set setter3(n) {
+ },
+ set setter4(n) {
+ },
+ set setter5(n) {
+ },
+ set setter6(n) {
+ },
+ set setter7(n) {
+ },
+ set setter8(n) {
+ },
+ set setter9(n) {
+ },
+ set setter10(n) {
+ },
+ set setter11(n) {
+ },
+ set setter12(n) {
+ },
+ set setter12(n) {
+ },
+};
+
+for (x in c) {
+ print(x);
+}
+
+throw new Error();
diff --git a/deps/v8/test/mjsunit/regress/regress-940361.js b/deps/v8/test/mjsunit/regress/regress-940361.js
index 28b21ad145..6dae01d8dd 100644
--- a/deps/v8/test/mjsunit/regress/regress-940361.js
+++ b/deps/v8/test/mjsunit/regress/regress-940361.js
@@ -12,8 +12,8 @@ delete re.__proto__.test;
function foo(s) {
return re.test(s);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertTrue(foo('abc'));
assertTrue(foo('abc'));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-9466.js b/deps/v8/test/mjsunit/regress/regress-9466.js
new file mode 100644
index 0000000000..a3ddc0d0a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9466.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const o = [];
+o.__proto__ = {};
+o.constructor = function() {};
+o.constructor[Symbol.species] = function f() {};
+o.__proto__ = Array.prototype;
+assertEquals(o.constructor[Symbol.species], o.concat([1,2,3]).constructor);
diff --git a/deps/v8/test/mjsunit/regress/regress-950328.js b/deps/v8/test/mjsunit/regress/regress-950328.js
index f00e90bd4e..b8f3e56ce4 100644
--- a/deps/v8/test/mjsunit/regress/regress-950328.js
+++ b/deps/v8/test/mjsunit/regress/regress-950328.js
@@ -10,10 +10,10 @@
function f() {
- var o = { __proto__ : a };
+ var o = {__proto__: a};
o.push;
- }
-
+ };
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -35,8 +35,8 @@
arr_proto.__proto__ = i32arr;
obj.__proto__ = arr;
arr_proto.__proto__ = i32arr;
- }
-
+ };
+ %PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
arr[1024] = [];
diff --git a/deps/v8/test/mjsunit/regress/regress-961709-classes-opt.js b/deps/v8/test/mjsunit/regress/regress-961709-classes-opt.js
new file mode 100644
index 0000000000..dfa6446937
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-961709-classes-opt.js
@@ -0,0 +1,34 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+function foo(a, i) {
+ a[i] = 1;
+ return a[i];
+}
+
+class MyArray extends (class C extends Array {
+}){};
+
+o = new MyArray;
+
+%PrepareFunctionForOptimization(foo);
+assertEquals(1, foo(o, 0));
+assertEquals(1, foo(o, 1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo(o, 2));
+assertOptimized(foo);
+
+// Change prototype
+o.__proto__.__proto__ = new Int32Array(3);
+
+
+// Check it still works
+assertEquals(undefined, foo(o, 3));
+assertUnoptimized(foo);
+%PrepareFunctionForOptimization(foo);
+assertEquals(undefined, foo(o, 3));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo(o, 3));
diff --git a/deps/v8/test/mjsunit/regress/regress-961709-classes.js b/deps/v8/test/mjsunit/regress/regress-961709-classes.js
new file mode 100644
index 0000000000..a574321774
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-961709-classes.js
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Flags: --allow-natives-syntax
+
+function foo(a, i) {
+ a[i] = 1;
+ return a[i];
+}
+
+class MyArray extends (class C extends Array {
+}){};
+
+o = new MyArray;
+
+%EnsureFeedbackVectorForFunction(foo);
+// initialize IC
+assertEquals(1, foo(o, 0));
+assertEquals(1, foo(o, 1));
+
+// Change prototype
+o.__proto__.__proto__ = new Int32Array(2);
+
+
+// Check it still works
+assertEquals(undefined, foo(o, 2));
+assertEquals(undefined, foo(o, 2));
diff --git a/deps/v8/test/mjsunit/regress/regress-962.js b/deps/v8/test/mjsunit/regress/regress-962.js
index 85ada0c8ab..794005528c 100644
--- a/deps/v8/test/mjsunit/regress/regress-962.js
+++ b/deps/v8/test/mjsunit/regress/regress-962.js
@@ -52,6 +52,7 @@ F.prototype.foo = function () {
var ctx = new F;
+%PrepareFunctionForOptimization(F.prototype.foo);
for (var i = 0; i < 5; i++) ctx.foo();
%OptimizeFunctionOnNextCall(F.prototype.foo);
ctx.foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-963891.js b/deps/v8/test/mjsunit/regress/regress-963891.js
index 28bf920d1d..32a087a32a 100644
--- a/deps/v8/test/mjsunit/regress/regress-963891.js
+++ b/deps/v8/test/mjsunit/regress/regress-963891.js
@@ -8,7 +8,8 @@ var bar = true;
bar = false;
function foo() {
return !bar;
-}
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(foo(), true);
%OptimizeFunctionOnNextCall(foo);
assertEquals(foo(), true);
diff --git a/deps/v8/test/mjsunit/regress/regress-97116.js b/deps/v8/test/mjsunit/regress/regress-97116.js
index b858ca5e86..924348b3a1 100644
--- a/deps/v8/test/mjsunit/regress/regress-97116.js
+++ b/deps/v8/test/mjsunit/regress/regress-97116.js
@@ -31,15 +31,17 @@
// have a pending lazy deoptimization on the stack.
function deopt() {
- try { } catch (e) { } // Avoid inlining.
+ try {
+ } catch (e) {
+ } // Avoid inlining.
%DeoptimizeFunction(outer);
for (var i = 0; i < 10; i++) gc(); // Force code flushing.
}
function outer(should_deopt) {
inner(should_deopt);
-}
-
+};
+%PrepareFunctionForOptimization(outer);
function inner(should_deopt) {
if (should_deopt) deopt();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-97116b.js b/deps/v8/test/mjsunit/regress/regress-97116b.js
index 91e7d6e0ca..908109ad9b 100644
--- a/deps/v8/test/mjsunit/regress/regress-97116b.js
+++ b/deps/v8/test/mjsunit/regress/regress-97116b.js
@@ -38,8 +38,8 @@ function deopt() {
function outer(should_deopt) {
inner(should_deopt);
-}
-
+};
+%PrepareFunctionForOptimization(outer);
function inner(should_deopt) {
if (should_deopt) deopt();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-977870.js b/deps/v8/test/mjsunit/regress/regress-977870.js
new file mode 100644
index 0000000000..89175cc618
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-977870.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+ v_0 = {};
+ Object.defineProperty(v_0, '0', {});
+ v_0.p_0 = 0;
+ assertArrayEquals(['0', 'p_0'],
+ Object.getOwnPropertyNames(v_0));
+ assertArrayEquals(['0', 'p_0'],
+ Object.getOwnPropertyNames(v_0));
+}
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-980891.js b/deps/v8/test/mjsunit/regress/regress-980891.js
new file mode 100644
index 0000000000..7d6593286f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-980891.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let str = "";
+
+// Many named captures force the resulting named capture backing store into
+// large object space.
+for (var i = 0; i < 0x2000; i++) str += "(?<a"+i+">)|";
+str += "(?<b>)";
+
+const regexp = new RegExp(str);
+const result = "xxx".match(regexp);
+
+assertNotNull(result);
diff --git a/deps/v8/test/mjsunit/regress/regress-981236.js b/deps/v8/test/mjsunit/regress/regress-981236.js
new file mode 100644
index 0000000000..e6ff956946
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-981236.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var count = 0;
+function keyedSta(a) {
+ a[0] = {
+ valueOf() {
+ count += 1;
+ return 42n;
+ }
+ };
+};
+
+array1 = keyedSta(new BigInt64Array(1));
+var r = keyedSta(new BigInt64Array());
+assertEquals(count, 2);
diff --git a/deps/v8/test/mjsunit/regress/regress-982702.js b/deps/v8/test/mjsunit/regress/regress-982702.js
new file mode 100644
index 0000000000..43a32d576c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-982702.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A {
+ static #foo = 3;
+ constructor() {
+ print(A.prototype.#foo);
+ }
+}
+
+assertThrows(() => new A(), TypeError);
+
+class B {
+ static #foo = 3;
+ constructor() {
+ B.prototype.#foo = 2;
+ }
+}
+
+assertThrows(() => new B(), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js b/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js
index cb77cdfb61..9c0ddc9994 100644
--- a/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js
+++ b/deps/v8/test/mjsunit/regress/regress-add-minus-zero.js
@@ -29,9 +29,12 @@
var o = { a: 0 };
-function f(x) { return -o.a + 0; };
-
-assertEquals("Infinity", String(1/f()));
-assertEquals("Infinity", String(1/f()));
+function f(x) {
+ return -o.a + 0;
+};
+%PrepareFunctionForOptimization(f);
+;
+assertEquals('Infinity', String(1 / f()));
+assertEquals('Infinity', String(1 / f()));
%OptimizeFunctionOnNextCall(f);
-assertEquals("Infinity", String(1/f()));
+assertEquals('Infinity', String(1 / f()));
diff --git a/deps/v8/test/mjsunit/regress/regress-alloc-smi-check.js b/deps/v8/test/mjsunit/regress/regress-alloc-smi-check.js
index 295048a13e..8fcc793253 100644
--- a/deps/v8/test/mjsunit/regress/regress-alloc-smi-check.js
+++ b/deps/v8/test/mjsunit/regress/regress-alloc-smi-check.js
@@ -8,8 +8,8 @@ var x = {};
function f(a) {
a[200000000] = x;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(new Array(100000));
f([]);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js b/deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js
index 2a30dc87a3..d79e997847 100644
--- a/deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js
+++ b/deps/v8/test/mjsunit/regress/regress-arg-materialize-store.js
@@ -9,13 +9,13 @@ function f() {
}
function g(deopt) {
- var o = { x : 2 };
+ var o = {x: 2};
f();
o.x = 1;
deopt + 0;
return o.x;
-}
-
+};
+%PrepareFunctionForOptimization(g);
g(0);
g(0);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js b/deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js
index 95c2c00262..5525a4b5f7 100644
--- a/deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js
+++ b/deps/v8/test/mjsunit/regress/regress-arguments-liveness-analysis.js
@@ -4,17 +4,20 @@
// Flags: --allow-natives-syntax
-function r(v) { return v.f }
-function h() { }
+function r(v) {
+ return v.f;
+}
+function h() {}
function y(v) {
var x = arguments;
h.apply(r(v), x);
};
-
-y({f:3});
-y({f:3});
-y({f:3});
+%PrepareFunctionForOptimization(y);
+;
+y({f: 3});
+y({f: 3});
+y({f: 3});
%OptimizeFunctionOnNextCall(y);
-y({ f : 3, u : 4 });
+y({f: 3, u: 4});
diff --git a/deps/v8/test/mjsunit/regress/regress-arm64-spillslots.js b/deps/v8/test/mjsunit/regress/regress-arm64-spillslots.js
index 1791b24843..827090b367 100644
--- a/deps/v8/test/mjsunit/regress/regress-arm64-spillslots.js
+++ b/deps/v8/test/mjsunit/regress/regress-arm64-spillslots.js
@@ -23,8 +23,8 @@ function Process(input) {
var ret = [];
ret.push(Inlined(input[0], 1, 2));
return ret;
-}
-
+};
+%PrepareFunctionForOptimization(Process);
var input = [new Message("TEST PASS")];
Process(input);
diff --git a/deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js b/deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js
index 9a0d35d3aa..ac8ffe5cd5 100644
--- a/deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js
+++ b/deps/v8/test/mjsunit/regress/regress-array-pop-deopt.js
@@ -27,13 +27,13 @@
// Flags: --allow-natives-syntax
-var o = [6,7,8,9];
+var o = [6, 7, 8, 9];
function f(b) {
var v = o.pop() + b;
return v;
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertEquals(10, f(1));
assertEquals(9, f(1));
assertEquals(8, f(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-bce-underflow.js b/deps/v8/test/mjsunit/regress/regress-bce-underflow.js
index daa776005e..0ab7cdf3c8 100644
--- a/deps/v8/test/mjsunit/regress/regress-bce-underflow.js
+++ b/deps/v8/test/mjsunit/regress/regress-bce-underflow.js
@@ -15,6 +15,7 @@ function f(a, i, bool) {
}
return result;
}
+%PrepareFunctionForOptimization(f);
function f2(a, c, x, i, d) {
return a[x + c] + a[x - 0] + a[i - d];
diff --git a/deps/v8/test/mjsunit/regress/regress-binop.js b/deps/v8/test/mjsunit/regress/regress-binop.js
index 7a8b41924d..0824804092 100644
--- a/deps/v8/test/mjsunit/regress/regress-binop.js
+++ b/deps/v8/test/mjsunit/regress/regress-binop.js
@@ -37,6 +37,7 @@ assertEquals(e31, -e31*(-1));
var x = {toString : function() {return 1}}
function add(a,b){return a+b;}
+%PrepareFunctionForOptimization(add);
add(1,x);
add(1,x);
%OptimizeFunctionOnNextCall(add);
@@ -85,6 +86,7 @@ function test(fun,check_fun,a,b,does_throw) {
function minus(a,b) { return a-b };
function check_minus(a,b) { return a-b };
function mod(a,b) { return a%b };
+%PrepareFunctionForOptimization(mod);
function check_mod(a,b) { return a%b };
test(minus,check_minus,1,2);
@@ -124,16 +126,20 @@ test(mod,check_mod,1,2);
test(mod,check_mod,1,2);
test(mod,check_mod,1<<30,1);
+%PrepareFunctionForOptimization(mod);
%OptimizeFunctionOnNextCall(mod);
test(mod,check_mod,1<<30,1);
test(mod,check_mod,1,1<<30);
+%PrepareFunctionForOptimization(mod);
%OptimizeFunctionOnNextCall(mod);
test(mod,check_mod,1,1<<30);
test(mod,check_mod,1<<30,-(1<<30));
+%PrepareFunctionForOptimization(mod);
%OptimizeFunctionOnNextCall(mod);
test(mod,check_mod,1<<30,-(1<<30));
test(mod,check_mod,1,{},2);
+%PrepareFunctionForOptimization(mod);
%OptimizeFunctionOnNextCall(mod);
test(mod,check_mod,1,{},2);
@@ -172,6 +178,7 @@ function string_add(a,i) {
var d = [0.1, ,0.3];
return a + d[i];
}
+%PrepareFunctionForOptimization(string_add);
string_add(1.1, 0);
string_add("", 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js b/deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js
index a306e5d9d8..fe2f368eed 100644
--- a/deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js
+++ b/deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js
@@ -28,22 +28,27 @@
// Flags: --allow-natives-syntax
function f() {
- return 1;
+ return 1;
}
function C1(f) {
- this.f = f;
+ this.f = f;
}
var o1 = new C1(f);
-var o2 = {__proto__: new C1(f) }
+var o2 = {__proto__: new C1(f)};
function foo(o) {
- return o.f();
-}
+ return o.f();
+};
+%PrepareFunctionForOptimization(foo);
foo(o1);
foo(o1);
foo(o2);
foo(o1);
-var o3 = new C1(function() { return 2; });
+var o3 = new C1(function() {
+ return 2;
+});
%OptimizeFunctionOnNextCall(foo);
assertEquals(1, foo(o2));
-o2.__proto__.f = function() { return 3; };
+o2.__proto__.f = function() {
+ return 3;
+};
assertEquals(3, foo(o2));
diff --git a/deps/v8/test/mjsunit/regress/regress-captured-object-no-dummy-use.js b/deps/v8/test/mjsunit/regress/regress-captured-object-no-dummy-use.js
index cdf548d5fa..880d70279c 100644
--- a/deps/v8/test/mjsunit/regress/regress-captured-object-no-dummy-use.js
+++ b/deps/v8/test/mjsunit/regress/regress-captured-object-no-dummy-use.js
@@ -5,13 +5,16 @@
// Flags: --allow-natives-syntax
var global = "10.1";
-function f() { }
-function g(a) { this.d = a; }
+function f() {}
+function g(a) {
+ this.d = a;
+}
function h() {
var x = new f();
global.dummy = this;
var y = new g(x);
-}
+};
+%PrepareFunctionForOptimization(h);
h();
h();
%OptimizeFunctionOnNextCall(h);
diff --git a/deps/v8/test/mjsunit/regress/regress-charat-empty.js b/deps/v8/test/mjsunit/regress/regress-charat-empty.js
index 6548ad5b2f..32d60eda65 100644
--- a/deps/v8/test/mjsunit/regress/regress-charat-empty.js
+++ b/deps/v8/test/mjsunit/regress/regress-charat-empty.js
@@ -6,8 +6,9 @@
(() => {
function f(s) {
return s.charAt();
- }
- f("");
+ };
+ %PrepareFunctionForOptimization(f);
+ f('');
f("");
%OptimizeFunctionOnNextCall(f);
f("");
diff --git a/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js b/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js
index 3791c35f71..d89147d451 100644
--- a/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js
+++ b/deps/v8/test/mjsunit/regress/regress-check-eliminate-loop-phis.js
@@ -15,6 +15,7 @@ function f() {
return result;
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js b/deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js
index 7795ae9a30..cec9467592 100644
--- a/deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js
+++ b/deps/v8/test/mjsunit/regress/regress-clobbered-fp-regs.js
@@ -37,7 +37,7 @@ function store(a, x, y) {
var f7 = 0.7 * y;
var f8 = 0.8 * y;
a[0] = x;
- var sum = (f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8);
+ var sum = f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8;
assertEquals(1, y);
var expected = 3.6;
if (Math.abs(expected - sum) > 0.01) {
@@ -46,9 +46,11 @@ function store(a, x, y) {
}
// Generate TransitionElementsKindStub.
+;
+%PrepareFunctionForOptimization(store);
store([1], 1, 1);
store([1], 1.1, 1);
store([1], 1.1, 1);
%OptimizeFunctionOnNextCall(store);
// This will trap on allocation site in TransitionElementsKindStub.
-store([1], 1, 1)
+store([1], 1, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js b/deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js
index 0f8ffe307d..748591346b 100644
--- a/deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js
+++ b/deps/v8/test/mjsunit/regress/regress-compare-constant-doubles.js
@@ -51,6 +51,7 @@ function prepare(base) {
right = 0.5 * base;
}
+%PrepareFunctionForOptimization(foo);
prepare(21);
assertEquals("ok", foo());
assertEquals("ok", foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-convert-hole.js b/deps/v8/test/mjsunit/regress/regress-convert-hole.js
index 1e9c3f3138..6e12acafb2 100644
--- a/deps/v8/test/mjsunit/regress/regress-convert-hole.js
+++ b/deps/v8/test/mjsunit/regress/regress-convert-hole.js
@@ -43,6 +43,7 @@ function f_store(test, test2, a, i) {
}
var a1 = [0, 0, 0, {}];
+%PrepareFunctionForOptimization(f_store);
f_store(true, false, a1, 0);
f_store(true, true, a1, 0);
f_store(false, false, a1, 1);
@@ -72,6 +73,7 @@ function f_call(f, test, test2, i) {
return d;
}
+%PrepareFunctionForOptimization(f_call);
f_call(test_arg(1.5), true, false, 0);
f_call(test_arg(2.5), true, true, 0);
f_call(test_arg(1), false, false, 1);
@@ -100,6 +102,7 @@ function f_external(test, test2, test3, a, i) {
}
var a2 = new Int32Array(10);
+%PrepareFunctionForOptimization(f_external);
f_external(true, false, true, a2, 0);
f_external(true, true, true, a2, 0);
f_external(false, false, true, a2, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-convert-hole2.js b/deps/v8/test/mjsunit/regress/regress-convert-hole2.js
index 02ef4dd1dd..bd89077f19 100644
--- a/deps/v8/test/mjsunit/regress/regress-convert-hole2.js
+++ b/deps/v8/test/mjsunit/regress/regress-convert-hole2.js
@@ -34,6 +34,7 @@ function f(a, i, l) {
return l + v;
}
+%PrepareFunctionForOptimization(f);
assertEquals("test1.5", f(a, 0, "test"));
assertEquals("test1.5", f(a, 0, "test"));
%OptimizeFunctionOnNextCall(f);
@@ -51,6 +52,7 @@ function f2(b, a1, a2) {
return "test" + v + x;
}
+%PrepareFunctionForOptimization(f2);
f2(true, [1.4,1.8,,1.9], [1.4,1.8,,1.9]);
f2(true, [1.4,1.8,,1.9], [1.4,1.8,,1.9]);
f2(false, [1.4,1.8,,1.9], [1.4,1.8,,1.9]);
@@ -63,6 +65,7 @@ function t_smi(a) {
a[0] = 1.5;
}
+%PrepareFunctionForOptimization(t_smi);
t_smi([1,,3]);
t_smi([1,,3]);
t_smi([1,,3]);
@@ -77,6 +80,7 @@ function t(b) {
b[1] = {};
}
+%PrepareFunctionForOptimization(t);
t([1.4, 1.6,,1.8, NaN]);
t([1.4, 1.6,,1.8, NaN]);
%OptimizeFunctionOnNextCall(t);
diff --git a/deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js b/deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js
index fa3db92928..2a7464254f 100644
--- a/deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js
+++ b/deps/v8/test/mjsunit/regress/regress-copy-hole-to-field.js
@@ -28,30 +28,30 @@
// Flags: --allow-natives-syntax
// Copy a hole from HOLEY_DOUBLE to double field.
-var a = [1.5,,1.7];
-var o = {a:1.8};
+var a = [1.5, , 1.7];
+var o = {a: 1.8};
-function f1(o,a,i) {
+function f1(o, a, i) {
o.a = a[i];
-}
-
-f1(o,a,0);
-f1(o,a,0);
+};
+%PrepareFunctionForOptimization(f1);
+f1(o, a, 0);
+f1(o, a, 0);
assertEquals(1.5, o.a);
%OptimizeFunctionOnNextCall(f1);
-f1(o,a,1);
+f1(o, a, 1);
assertEquals(undefined, o.a);
// Copy a hole from HOLEY_SMI to smi field.
-var a = [1,,3];
-var o = {ab:5};
+var a = [1, , 3];
+var o = {ab: 5};
-function f2(o,a,i) {
+function f2(o, a, i) {
o.ab = a[i];
-}
-
-f2(o,a,0);
-f2(o,a,0);
+};
+%PrepareFunctionForOptimization(f2);
+f2(o, a, 0);
+f2(o, a, 0);
%OptimizeFunctionOnNextCall(f2);
-f2(o,a,1);
+f2(o, a, 1);
assertEquals(undefined, o.ab);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-125148.js b/deps/v8/test/mjsunit/regress/regress-crbug-125148.js
index 0f7bcd8cab..2f70d5b3d6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-125148.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-125148.js
@@ -44,6 +44,7 @@ C = Object.create(B);
function bar(x) { return x.foo(); }
+%PrepareFunctionForOptimization(bar);
assertEquals(111, bar(C));
assertEquals(111, bar(C));
ToDictionaryMode(B);
@@ -61,6 +62,7 @@ C = Object.create(B);
function boo(x) { return x.baz; }
+%PrepareFunctionForOptimization(boo);
assertEquals(111, boo(C));
assertEquals(111, boo(C));
ToDictionaryMode(B);
@@ -83,6 +85,7 @@ function fuu(x) {
return setterValue;
}
+%PrepareFunctionForOptimization(fuu);
assertEquals(111, fuu(C));
assertEquals(111, fuu(C));
ToDictionaryMode(B);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-134055.js b/deps/v8/test/mjsunit/regress/regress-crbug-134055.js
index 9b658fb6f6..c6d1151f4e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-134055.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-134055.js
@@ -29,8 +29,8 @@
function crash(obj) {
return obj.foo;
-}
-
+};
+%PrepareFunctionForOptimization(crash);
function base(number_of_properties) {
var result = new Array();
for (var i = 0; i < number_of_properties; i++) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-134609.js b/deps/v8/test/mjsunit/regress/regress-crbug-134609.js
index da7d85dcb6..01129ae687 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-134609.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-134609.js
@@ -27,18 +27,19 @@
// Flags: --allow-natives-syntax --inline-accessors
-var forceDeopt = {x:0};
+var forceDeopt = {x: 0};
-var objectWithGetterProperty = (function (value) {
+var objectWithGetterProperty = function(value) {
var obj = {};
- Object.defineProperty(obj, "getterProperty", {
+ Object.defineProperty(obj, 'getterProperty', {
get: function foo() {
forceDeopt.x;
return value;
- },
+ }
});
+
return obj;
-})("bad");
+}('bad');
function test() {
var iAmContextAllocated = "good";
@@ -46,9 +47,11 @@ function test() {
return iAmContextAllocated;
// Make sure that the local variable is context allocated.
- function unused() { iAmContextAllocated; }
-}
-
+ function unused() {
+ iAmContextAllocated;
+ }
+};
+%PrepareFunctionForOptimization(test);
assertEquals("good", test());
assertEquals("good", test());
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-138887.js b/deps/v8/test/mjsunit/regress/regress-crbug-138887.js
index 8d8e1694b6..bd65e3b2d9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-138887.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-138887.js
@@ -40,9 +40,11 @@ function factory(worker) {
var f1 = factory(worker1);
var f2 = factory(f1);
+%PrepareFunctionForOptimization(f1);
assertEquals(11, f2(1)); // Result: 1 + f1(0) == 1 + 10.
assertEquals(11, f2(1));
%OptimizeFunctionOnNextCall(f1);
+%PrepareFunctionForOptimization(f2);
assertEquals(10, f1(0)); // Terminates immediately -> returns 10.
%OptimizeFunctionOnNextCall(f2);
assertEquals(102, f2(1000)); // 1 + f1(999) == 1 + 1 + worker1(998) == 102
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-140083.js b/deps/v8/test/mjsunit/regress/regress-crbug-140083.js
index e38192cd8a..95548023d2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-140083.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-140083.js
@@ -29,16 +29,19 @@
// Test that the absence of a setter in a compound/count operation works.
-Object.defineProperty(Object.prototype, "foo",
- { get: function() { return 123; } });
+Object.defineProperty(Object.prototype, 'foo', {
+ get: function() {
+ return 123;
+ }
+});
function bar(o) {
o.foo += 42;
o.foo++;
-}
-
+};
+%PrepareFunctionForOptimization(bar);
var baz = {};
bar(baz);
bar(baz);
-%OptimizeFunctionOnNextCall(bar)
+%OptimizeFunctionOnNextCall(bar);
bar(baz);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-142218.js b/deps/v8/test/mjsunit/regress/regress-crbug-142218.js
index 373f83bca3..cc7d45f6bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-142218.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-142218.js
@@ -32,8 +32,8 @@ a = new Array(length);
function insert_element(key) {
a[key] = 42;
-}
-
+};
+%PrepareFunctionForOptimization(insert_element);
insert_element(1);
%OptimizeFunctionOnNextCall(insert_element);
insert_element(new Object());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-145961.js b/deps/v8/test/mjsunit/regress/regress-crbug-145961.js
index eb88945e0f..ec981ed4aa 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-145961.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-145961.js
@@ -31,8 +31,8 @@ function test() {
var a = new Int32Array(2);
var x = a[0];
return Math.min(x, x);
-}
-
+};
+%PrepareFunctionForOptimization(test);
assertEquals(0, test());
assertEquals(0, test());
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-147475.js b/deps/v8/test/mjsunit/regress/regress-crbug-147475.js
index 180744c730..2f0e750d5a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-147475.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-147475.js
@@ -40,8 +40,10 @@ function factory(worker) {
var f1 = factory(worker1);
var f2 = factory(f1);
+%PrepareFunctionForOptimization(f1);
assertEquals(11, f2(1));
%OptimizeFunctionOnNextCall(f1);
+%PrepareFunctionForOptimization(f2);
assertEquals(10, f1(0));
%OptimizeFunctionOnNextCall(f2);
assertEquals(102, f2(2));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-150545.js b/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
index fb21f3d57d..04c42a6f35 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
@@ -46,7 +46,10 @@
function outer() {
inner(1,2,3);
- for (var i = 0; i < 3; i++) %OptimizeOsr();
+ for (var i = 0; i < 3; i++) {
+ %OptimizeOsr();
+ %PrepareFunctionForOptimization(outer);
+ }
}
%PrepareFunctionForOptimization(outer);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-150729.js b/deps/v8/test/mjsunit/regress/regress-crbug-150729.js
index 15aa587d18..cb0493ed00 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-150729.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-150729.js
@@ -32,7 +32,8 @@ function burn() {
i = [t, 1];
var M = [i[0], Math.cos(t) + i[7074959]];
t += .05;
-}
+};
+%PrepareFunctionForOptimization(burn);
for (var j = 0; j < 5; j++) {
if (j == 2) %OptimizeFunctionOnNextCall(burn);
burn();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-163530.js b/deps/v8/test/mjsunit/regress/regress-crbug-163530.js
index 7abae14a8b..32c6f5ae4a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-163530.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-163530.js
@@ -44,6 +44,7 @@
return arguments.length;
};
+ %PrepareFunctionForOptimization(object.a);
assertSame(0, object.a());
assertSame(0, object.a());
%OptimizeFunctionOnNextCall(object.a);
@@ -71,6 +72,7 @@
return arguments.length;
};
+ %PrepareFunctionForOptimization(object.a);
assertSame(8, object.a());
assertSame(8, object.a());
%OptimizeFunctionOnNextCall(object.a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-173907.js b/deps/v8/test/mjsunit/regress/regress-crbug-173907.js
index 9f92fefa78..df5055f104 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-173907.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-173907.js
@@ -34,7 +34,9 @@ var O = 0;
var result = new Float64Array(2);
function spill() {
- try { } catch (e) { }
+ try {
+ } catch (e) {
+ }
}
function buggy() {
@@ -45,13 +47,13 @@ function buggy() {
spill(); // At this point initial values for phi1 and phi2 are spilled.
var xmm1 = v;
- var xmm2 = v*v*v;
- var xmm3 = v*v*v*v;
- var xmm4 = v*v*v*v*v;
- var xmm5 = v*v*v*v*v*v;
- var xmm6 = v*v*v*v*v*v*v;
- var xmm7 = v*v*v*v*v*v*v*v;
- var xmm8 = v*v*v*v*v*v*v*v*v;
+ var xmm2 = v * v * v;
+ var xmm3 = v * v * v * v;
+ var xmm4 = v * v * v * v * v;
+ var xmm5 = v * v * v * v * v * v;
+ var xmm6 = v * v * v * v * v * v * v;
+ var xmm7 = v * v * v * v * v * v * v * v;
+ var xmm8 = v * v * v * v * v * v * v * v * v;
// All registers are blocked and phis for phi1 and phi2 are spilled because
// their left (incoming) value is spilled, there are no free registers,
@@ -73,10 +75,10 @@ function buggy() {
// Now we want to get values of phi1 and phi2. However we would like to
// do it in a way that does not produce any uses of phi1&phi2 that have
// a register beneficial policy. How? We just hide these uses behind phis.
- result[0] = (O === 0) ? phi1 : phi2;
- result[1] = (O !== 0) ? phi1 : phi2;
-}
-
+ result[0] = O === 0 ? phi1 : phi2;
+ result[1] = O !== 0 ? phi1 : phi2;
+};
+%PrepareFunctionForOptimization(buggy);
function test() {
buggy();
assertArrayEquals([X + K, X - K], result);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-173907b.js b/deps/v8/test/mjsunit/regress/regress-crbug-173907b.js
index 4ecfd64eaf..f01ac131df 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-173907b.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-173907b.js
@@ -45,13 +45,13 @@ function buggy() {
spill(); // At this point initial values for phi1 and phi2 are spilled.
var xmm1 = v;
- var xmm2 = v*v*v;
- var xmm3 = v*v*v*v;
- var xmm4 = v*v*v*v*v;
- var xmm5 = v*v*v*v*v*v;
- var xmm6 = v*v*v*v*v*v*v;
- var xmm7 = v*v*v*v*v*v*v*v;
- var xmm8 = v*v*v*v*v*v*v*v*v;
+ var xmm2 = v * v * v;
+ var xmm3 = v * v * v * v;
+ var xmm4 = v * v * v * v * v;
+ var xmm5 = v * v * v * v * v * v;
+ var xmm6 = v * v * v * v * v * v * v;
+ var xmm7 = v * v * v * v * v * v * v * v;
+ var xmm8 = v * v * v * v * v * v * v * v * v;
// All registers are blocked and phis for phi1 and phi2 are spilled because
// their left (incoming) value is spilled, there are no free registers,
@@ -73,10 +73,10 @@ function buggy() {
// Now we want to get values of phi1 and phi2. However we would like to
// do it in a way that does not produce any uses of phi1&phi2 that have
// a register beneficial policy. How? We just hide these uses behind phis.
- result[0] = (O === 0) ? phi1 : phi2;
- result[1] = (O !== 0) ? phi1 : phi2;
-}
-
+ result[0] = O === 0 ? phi1 : phi2;
+ result[1] = O !== 0 ? phi1 : phi2;
+};
+%PrepareFunctionForOptimization(buggy);
function test() {
buggy();
assertArrayEquals([X + K, X - K], result);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-173974.js b/deps/v8/test/mjsunit/regress/regress-crbug-173974.js
index 905bd6058a..c273dd65e6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-173974.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-173974.js
@@ -29,8 +29,9 @@
function f() {
var count = "";
- count[0] --;
-}
+ count[0]--;
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-196583.js b/deps/v8/test/mjsunit/regress/regress-crbug-196583.js
index c486158e22..0111353af1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-196583.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-196583.js
@@ -29,20 +29,31 @@
var a = 1;
a.__proto__.f = 1;
-a.__proto__.f = function() { return 1; }
+a.__proto__.f = function() {
+ return 1;
+};
// Create some polymorphism.
function B() {}
-B.prototype = {f: function() { return 2; }};
+B.prototype = {
+ f: function() {
+ return 2;
+ }
+};
var b = new B();
function C() {}
-C.prototype = {g: "foo", f: function() { return 3; }};
+C.prototype = {
+ g: 'foo',
+ f: function() {
+ return 3;
+ }
+};
var c = new C();
function crash(obj) {
return obj.f();
-}
-
+};
+%PrepareFunctionForOptimization(crash);
for (var i = 0; i < 2; i++) {
crash(a);
crash(b);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-217858.js b/deps/v8/test/mjsunit/regress/regress-crbug-217858.js
index 26414c2b5a..598325fc92 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-217858.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-217858.js
@@ -9,7 +9,10 @@ function f() {
r[r] = function() {};
}
-for (var i = 0; i < 300; i++) {
- f();
- if (i == 150) %OptimizeOsr();
+function g() {
+ for (var i = 0; i < 300; i++) {
+ f();
+ if (i == 150) %OptimizeOsr();
+ }
}
+%PrepareFunctionForOptimization(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-233737.js b/deps/v8/test/mjsunit/regress/regress-crbug-233737.js
index df8aa15c93..8d8d79ec3a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-233737.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-233737.js
@@ -34,8 +34,8 @@ assertTrue(%HasHoleyElements(a));
function hole(i) {
return a[i] << 0;
-}
-
+};
+%PrepareFunctionForOptimization(hole);
assertEquals(1, hole(0));
assertEquals(1, hole(0));
%OptimizeFunctionOnNextCall(hole);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-240032.js b/deps/v8/test/mjsunit/regress/regress-crbug-240032.js
index 7ce95d34bd..08eff6a546 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-240032.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-240032.js
@@ -41,7 +41,8 @@ o.func = mk();
// Optimize object comparison with new-space RHS.
function cmp(o, f) {
return f === o.func;
-}
+};
+%PrepareFunctionForOptimization(cmp);
assertTrue(cmp(o, o.func));
assertTrue(cmp(o, o.func));
%OptimizeFunctionOnNextCall(cmp);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-242502.js b/deps/v8/test/mjsunit/regress/regress-crbug-242502.js
index 8ee764029d..6024744256 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-242502.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-242502.js
@@ -33,14 +33,14 @@ function f() {
function call(o) {
return o['']();
-}
-
+};
+%PrepareFunctionForOptimization(call);
function test() {
- var o1 = %ToFastProperties(Object.create({ foo:1 }, { '': { value:f }}));
- var o2 = %ToFastProperties(Object.create({ bar:1 }, { '': { value:f }}));
- var o3 = %ToFastProperties(Object.create({ baz:1 }, { '': { value:f }}));
- var o4 = %ToFastProperties(Object.create({ qux:1 }, { '': { value:f }}));
- var o5 = %ToFastProperties(Object.create({ loo:1 }, { '': { value:f }}));
+ var o1 = %ToFastProperties(Object.create({foo: 1}, {'': {value: f}}));
+ var o2 = %ToFastProperties(Object.create({bar: 1}, {'': {value: f}}));
+ var o3 = %ToFastProperties(Object.create({baz: 1}, {'': {value: f}}));
+ var o4 = %ToFastProperties(Object.create({qux: 1}, {'': {value: f}}));
+ var o5 = %ToFastProperties(Object.create({loo: 1}, {'': {value: f}}));
// Called twice on o1 to turn monomorphic.
assertEquals(23, call(o1));
assertEquals(23, call(o1));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-242870.js b/deps/v8/test/mjsunit/regress/regress-crbug-242870.js
index 7183375ca8..cf93a3591d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-242870.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-242870.js
@@ -30,9 +30,9 @@
var non_const_true = true;
function f() {
- return (non_const_true || true && g());
-}
-
+ return non_const_true || true && g();
+};
+%PrepareFunctionForOptimization(f);
function g() {
for (;;) {}
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-242924.js b/deps/v8/test/mjsunit/regress/regress-crbug-242924.js
index 68ad7c6fd4..439f3883d0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-242924.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-242924.js
@@ -28,21 +28,21 @@
// Flags: --allow-natives-syntax --expose-gc --gc-global
function f() {
- return [,{}];
-}
-
-assertEquals([,{}], f());
-assertEquals([,{}], f());
+ return [, {}];
+};
+%PrepareFunctionForOptimization(f);
+assertEquals([, {}], f());
+assertEquals([, {}], f());
%OptimizeFunctionOnNextCall(f);
-assertEquals([,{}], f());
+assertEquals([, {}], f());
gc();
function g() {
- return [[,1.5],{}];
-}
-
-assertEquals([[,1.5],{}], g());
-assertEquals([[,1.5],{}], g());
+ return [[, 1.5], {}];
+};
+%PrepareFunctionForOptimization(g);
+assertEquals([[, 1.5], {}], g());
+assertEquals([[, 1.5], {}], g());
%OptimizeFunctionOnNextCall(g);
-assertEquals([[,1.5],{}], g());
+assertEquals([[, 1.5], {}], g());
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-243868.js b/deps/v8/test/mjsunit/regress/regress-crbug-243868.js
index 106d9cc78b..a542465bc4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-243868.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-243868.js
@@ -30,12 +30,14 @@
var non_const_true = true;
function f(o) {
- return (non_const_true && (o.val == null || false));
+ return non_const_true && (o.val == null || false);
}
// Create an object with a constant function in another realm.
+;
+%PrepareFunctionForOptimization(f);
var realm = Realm.create();
-var realmObject = Realm.eval(realm, "function g() {}; var o = { val:g }; o;")
+var realmObject = Realm.eval(realm, 'function g() {}; var o = { val:g }; o;');
// Make the CompareNil IC in the function monomorphic.
assertFalse(f(realmObject));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-244461.js b/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
index 2afb76ac12..fca334ce0d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
@@ -30,8 +30,8 @@
function foo(arg) {
var a = arg();
return a;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(Array);
foo(Array);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-245424.js b/deps/v8/test/mjsunit/regress/regress-crbug-245424.js
index 005c8baba9..56e22cd81b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-245424.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-245424.js
@@ -28,13 +28,11 @@
// Flags: --allow-natives-syntax
function boom() {
- var a = {
- foo: "bar",
- foo: "baz"
- };
- return a;
-}
+ var a = {foo: 'bar', foo: 'baz'};
+ return a;
+};
+%PrepareFunctionForOptimization(boom);
assertEquals("baz", boom().foo);
assertEquals("baz", boom().foo);
%OptimizeFunctionOnNextCall(boom);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-245480.js b/deps/v8/test/mjsunit/regress/regress-crbug-245480.js
index b8557c9d91..1d052f0cac 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-245480.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-245480.js
@@ -38,8 +38,8 @@ function assertHoley(obj, name_opt) {
function create_array(arg) {
return new Array(arg);
-}
-
+};
+%PrepareFunctionForOptimization(create_array);
obj = create_array(0);
assertHoley(obj);
create_array(0);
@@ -49,9 +49,9 @@ assertHoley(obj);
// The code below would assert in debug or crash in release
function f(length) {
- return new Array(length)
-}
-
+ return new Array(length);
+};
+%PrepareFunctionForOptimization(f);
f(0);
f(0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-258519.js b/deps/v8/test/mjsunit/regress/regress-crbug-258519.js
index b2015a8426..2f4601b89c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-258519.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-258519.js
@@ -28,16 +28,18 @@
// Flags: --allow-natives-syntax
var a = {
- compare_null: function(x) { return null != x; },
+ compare_null: function(x) {
+ return null != x;
+ },
kaboom: function() {}
-}
+};
function crash(x) {
var b = a;
b.compare_null(x) && b.kaboom();
return "ok";
-}
-
+};
+%PrepareFunctionForOptimization(crash);
assertEquals("ok", crash(null));
assertEquals("ok", crash(null));
%OptimizeFunctionOnNextCall(crash);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-263276.js b/deps/v8/test/mjsunit/regress/regress-crbug-263276.js
index 05aa94cc85..0842fe35ec 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-263276.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-263276.js
@@ -35,8 +35,8 @@ array2.bar = true;
function bad(array) {
array[array.length] = 1;
-}
-
+};
+%PrepareFunctionForOptimization(bad);
bad(array1);
bad(array1);
bad(array2); // Length is now 1.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-272564.js b/deps/v8/test/mjsunit/regress/regress-crbug-272564.js
index 5475298a88..1af856381e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-272564.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-272564.js
@@ -33,14 +33,14 @@ function Bb(w) {
function ce(a, b) {
"number" == typeof a && (a = (b ? Math.round(a) : a) + "px");
- return a
+ return a;
}
function pe(a, b, c) {
if (b instanceof Bb) b = b.width;
a.width = ce(b, !0);
-}
-
+};
+%PrepareFunctionForOptimization(pe);
var a = new Bb(1);
var b = new Bb(5);
pe(a, b, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-274438.js b/deps/v8/test/mjsunit/regress/regress-crbug-274438.js
index 5d6817d129..1e37d2badd 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-274438.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-274438.js
@@ -28,12 +28,12 @@
// Flags: --allow-natives-syntax
function f(a, b) {
- var x = { a:a };
- switch(b) { case "string": }
- var y = { b:b };
+ var x = {a: a};
+ switch (b) { case 'string': }
+ var y = {b: b};
return y;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f("a", "b");
f("a", "b");
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-280333.js b/deps/v8/test/mjsunit/regress/regress-crbug-280333.js
index ca3fdc7746..2071a3042f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-280333.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-280333.js
@@ -40,6 +40,7 @@ function foo(x, fun) {
return 0;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(0, foo(1, funky));
assertEquals(0, foo(1, funky));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-285355.js b/deps/v8/test/mjsunit/regress/regress-crbug-285355.js
index ebd480a710..5bcd27ac6d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-285355.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-285355.js
@@ -35,9 +35,9 @@ function inverted_index() {
function crash(array) {
return array[~inverted_index()] = 2;
-}
-
+};
+%PrepareFunctionForOptimization(crash);
assertEquals(2, crash(new Array(1)));
assertEquals(2, crash(new Array(1)));
-%OptimizeFunctionOnNextCall(crash)
+%OptimizeFunctionOnNextCall(crash);
assertEquals(2, crash(new Array(1)));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-305309.js b/deps/v8/test/mjsunit/regress/regress-crbug-305309.js
index cd89bedc11..85affd616d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-305309.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-305309.js
@@ -41,7 +41,8 @@ var a = new Ctor();
function Two(x) {
return x.two;
-}
+};
+%PrepareFunctionForOptimization(Two);
assertEquals(2, Two(a));
assertEquals(2, Two(a));
b2.constant_function = "no longer constant!";
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-306851.js b/deps/v8/test/mjsunit/regress/regress-crbug-306851.js
index 77b711a656..125486da4b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-306851.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-306851.js
@@ -32,8 +32,12 @@ function Counter() {
};
Object.defineProperty(Counter.prototype, 'count', {
- get: function() { return this.value; },
- set: function(value) { this.value = value; }
+ get: function() {
+ return this.value;
+ },
+ set: function(value) {
+ this.value = value;
+ }
});
var obj = new Counter();
@@ -41,8 +45,8 @@ var obj = new Counter();
function bummer() {
obj.count++;
return obj.count;
-}
-
+};
+%PrepareFunctionForOptimization(bummer);
assertEquals(1, bummer());
assertEquals(2, bummer());
assertEquals(3, bummer());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-309623.js b/deps/v8/test/mjsunit/regress/regress-crbug-309623.js
index c77611af46..ef35a67d63 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-309623.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-309623.js
@@ -38,8 +38,8 @@ assertTrue(%HasSmiElements(a));
function foo(i) {
a[0] = u[i];
return a[0];
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(u[0], foo(0));
assertEquals(u[0], foo(0));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-315252.js b/deps/v8/test/mjsunit/regress/regress-crbug-315252.js
index 51454bf285..670a092b9c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-315252.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-315252.js
@@ -28,27 +28,29 @@
// Flags: --allow-natives-syntax
function f(a, b, c) {
- this.a = a;
- this.b = b;
- this.c = c;
+ this.a = a;
+ this.b = b;
+ this.c = c;
}
var o3 = new f(1, 2, 3.5);
var o4 = new f(1, 2.5, 3);
var o1 = new f(1.5, 2, 3);
var o2 = new f(1.5, 2, 3);
function migrate(o) {
- return o.a;
+ return o.a;
}
// Use migrate to stabilize o1, o2 and o4 in [double, double, smi].
migrate(o4);
migrate(o1);
migrate(o2);
function store_transition(o) {
- o.d = 1;
+ o.d = 1;
}
// Optimize "store_transition" to transition from [double, double, smi] to
// [double, double, smi, smi]. This adds a dependency on the
// [double, double, smi] map.
+;
+%PrepareFunctionForOptimization(store_transition);
store_transition(o4);
store_transition(o1);
store_transition(o2);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-318671.js b/deps/v8/test/mjsunit/regress/regress-crbug-318671.js
index 54a7d5eeb6..e04d37ffd0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-318671.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-318671.js
@@ -27,8 +27,10 @@
// Flags: --allow-natives-syntax
-function add(x, y) { return x + y; }
-
+function add(x, y) {
+ return x + y;
+};
+%PrepareFunctionForOptimization(add);
print(add({ a: 1 }, "a"));
print(add({ b: 1 }, "b"));
print(add({ c: 1 }, "c"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-319835.js b/deps/v8/test/mjsunit/regress/regress-crbug-319835.js
index 48f871f0a0..66cbd21e20 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-319835.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-319835.js
@@ -27,7 +27,9 @@
// Flags: --allow-natives-syntax
-try {} catch(e) {} // No need to optimize the top level.
+try {
+} catch (e) {
+} // No need to optimize the top level.
var size = 0x20000;
var a = new Float64Array(size);
@@ -37,8 +39,8 @@ function store(a, index) {
for (var i = 0; i < 1; i++) {
a[index + offset] = 0xcc;
}
-}
-
+};
+%PrepareFunctionForOptimization(store);
store(training, -0x20000000);
store(training, -0x20000000 + 1);
store(training, -0x20000000);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-319860.js b/deps/v8/test/mjsunit/regress/regress-crbug-319860.js
index e0fd4812d0..1ba21c9fb9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-319860.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-319860.js
@@ -43,6 +43,6 @@ read(a, 0);
%OptimizeFunctionOnNextCall(read);
// Segfault maybe?
-for (var i = 0; i > -1000000; --i) {
- read(a, i);
+for (var i = 0; i > -100000; i -= 987) {
+ assertEquals(0, read(a, i));
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-323942.js b/deps/v8/test/mjsunit/regress/regress-crbug-323942.js
index 15af494b0f..3e255ec432 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-323942.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-323942.js
@@ -30,16 +30,26 @@
"use strict";
// Function is defined on the prototype chain.
-var holder = { f: function() { return 42; } };
-var receiver = { };
-receiver.__proto__ = { };
+var holder = {
+ f: function() {
+ return 42;
+ }
+};
+var receiver = {};
+receiver.__proto__ = {};
receiver.__proto__.__proto__ = holder;
// Inline two levels.
-function h(o) { return o.f.apply(this, arguments); }
-function g(o) { return h(o); }
+function h(o) {
+ return o.f.apply(this, arguments);
+}
+function g(o) {
+ return h(o);
+}
// Collect type information for apply call.
+;
+%PrepareFunctionForOptimization(g);
assertEquals(42, g(receiver));
assertEquals(42, g(receiver));
@@ -50,7 +60,9 @@ receiver.__proto__.__proto__ = {};
// Lookup of o.f during graph creation fails.
%OptimizeFunctionOnNextCall(g);
-assertThrows(function() { g(receiver); });
+assertThrows(function() {
+ g(receiver);
+});
// Put function back.
receiver.__proto__.__proto__ = holder;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-329709.js b/deps/v8/test/mjsunit/regress/regress-crbug-329709.js
index c5316f391c..98919d6faa 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-329709.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-329709.js
@@ -28,14 +28,17 @@
// Flags: --allow-natives-syntax
function boom(x) {
- switch(x) {
- case 1: return "one";
- case 1500000000: return "non-smi int32";
- default: return "default";
+ switch (x) {
+ case 1:
+ return 'one';
+ case 1500000000:
+ return 'non-smi int32';
+ default:
+ return 'default';
}
-}
-
+};
+%PrepareFunctionForOptimization(boom);
assertEquals("one", boom(1));
assertEquals("one", boom(1));
-%OptimizeFunctionOnNextCall(boom)
+%OptimizeFunctionOnNextCall(boom);
assertEquals("non-smi int32", boom(1500000000));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-336148.js b/deps/v8/test/mjsunit/regress/regress-crbug-336148.js
index 8157c9fcc1..87895f9007 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-336148.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-336148.js
@@ -32,6 +32,7 @@ function f(o) {
if (true) return o.v && a;
}
+%PrepareFunctionForOptimization(f);
f({});
f({});
%OptimizeFunctionOnNextCall(f);
@@ -43,6 +44,8 @@ function f2() { return 1 || 2; };
function f3() { return 0 && 2; };
function f4() { return 0 || 2; };
+[f1, f2, f3, f4].forEach(function(f) { %PrepareFunctionForOptimization(f); });
+
function test() {
assertEquals(2, f1());
assertEquals(1, f2());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-340064.js b/deps/v8/test/mjsunit/regress/regress-crbug-340064.js
index f2ab1d6675..ff71ac50ef 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-340064.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-340064.js
@@ -29,8 +29,8 @@
function f(v) {
return v.length;
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertEquals(4, f("test"));
assertEquals(4, f("test"));
assertEquals(undefined, f(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-344186.js b/deps/v8/test/mjsunit/regress/regress-crbug-344186.js
index 6486f3864f..7f002b874d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-344186.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-344186.js
@@ -10,7 +10,8 @@ function fun(base) {
array[base - 95] = 1;
array[base - 99] = 2;
array[base + 4] = 3;
-}
+};
+%PrepareFunctionForOptimization(fun);
fun(100);
%OptimizeFunctionOnNextCall(fun);
fun(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-345715.js b/deps/v8/test/mjsunit/regress/regress-crbug-345715.js
index a3753417df..8f974473f2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-345715.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-345715.js
@@ -4,10 +4,14 @@
// Flags: --allow-natives-syntax
-a = {y:1.5};
+a = {
+ y: 1.5
+};
a.y = 0;
b = a.y;
-c = {y:{}};
+c = {
+ y: {}
+};
function f() {
return 1;
@@ -15,11 +19,11 @@ function f() {
function g() {
var e = {y: b};
- var d = {x:f()};
- var d = {x:f()};
+ var d = {x: f()};
+ var d = {x: f()};
return [e, d];
-}
-
+};
+%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-345820.js b/deps/v8/test/mjsunit/regress/regress-crbug-345820.js
index bdd0af9b12..af7f001d92 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-345820.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-345820.js
@@ -12,7 +12,8 @@ function __f_7(N) {
for (var i = -1; i < N; i++) {
__v_6[i] = i;
}
-}
+};
+%PrepareFunctionForOptimization(__f_7);
__f_7(1);
%OptimizeFunctionOnNextCall(__f_7);
__f_7(__v_6.length);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-346636.js b/deps/v8/test/mjsunit/regress/regress-crbug-346636.js
index 247f8be482..5d8b773c81 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-346636.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-346636.js
@@ -6,15 +6,15 @@
function assertSame(expected, found) {
if (found === expected) {
- if (expected !== 0 || (1 / expected) == (1 / found)) return;
+ if (expected !== 0 || 1 / expected == 1 / found) return;
}
return;
};
function foo(x) {
return x.bar;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
function getter1() {
assertSame(this, this);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-347903.js b/deps/v8/test/mjsunit/regress/regress-crbug-347903.js
index 62572da095..31abee9359 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-347903.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-347903.js
@@ -11,9 +11,11 @@ function f() {
var b = new Array(84632);
var c = new Array(84632);
return [a, b, c];
-}
-f(); f();
+};
+%PrepareFunctionForOptimization(f);
+f();
+f();
%OptimizeFunctionOnNextCall(f);
-for(var i = 0; i < 10; i++) {
+for (var i = 0; i < 10; i++) {
f();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-349079.js b/deps/v8/test/mjsunit/regress/regress-crbug-349079.js
index b1076ea435..1ee5ece6f7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-349079.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-349079.js
@@ -16,7 +16,8 @@ function crash() {
assertEquals(b, Math.max(b++, c++));
assertEquals(c, Math.min(b++, c++));
assertEquals(b, Math.max(b++, a++));
-}
+};
+%PrepareFunctionForOptimization(crash);
crash();
crash();
%OptimizeFunctionOnNextCall(crash);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-349465.js b/deps/v8/test/mjsunit/regress/regress-crbug-349465.js
index 21887504cc..5de6b93984 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-349465.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-349465.js
@@ -8,7 +8,8 @@ function f(a, base) {
a[base] = 1;
a[base + 4] = 2;
a[base] = 3;
-}
+};
+%PrepareFunctionForOptimization(f);
var a1 = new Array(1024);
var a2 = new Array(128);
f(a1, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-349878.js b/deps/v8/test/mjsunit/regress/regress-crbug-349878.js
index 5ed048ff54..ca37e63435 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-349878.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-349878.js
@@ -27,6 +27,7 @@ function g() {
return h(b);
}
+%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-350434.js b/deps/v8/test/mjsunit/regress/regress-crbug-350434.js
index 8a9a8e5301..9df2c9bf1e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-350434.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-350434.js
@@ -13,7 +13,7 @@ var p = new Ctor();
function crash(o, timeout) {
- var s = "4000111222"; // Outside Smi range.
+ var s = '4000111222'; // Outside Smi range.
%SetAllocationTimeout(100000, timeout);
// This allocates a heap number, causing a GC, triggering lazy deopt.
var end = s >>> 0;
@@ -21,8 +21,8 @@ function crash(o, timeout) {
// This creates a map dependency, which gives the GC a reason to trigger
// a lazy deopt when that map dies.
o.bar = 2;
-}
-
+};
+%PrepareFunctionForOptimization(crash);
crash(o, 100000);
crash(o, 100000);
crash(p, 100000);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-351320.js b/deps/v8/test/mjsunit/regress/regress-crbug-351320.js
index 3ffef0ba82..e5b90d3ede 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-351320.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-351320.js
@@ -6,7 +6,9 @@
var result = 0;
var o1 = {};
-o2 = {y:1.5};
+o2 = {
+ y: 1.5
+};
o2.y = 0;
o3 = o2.y;
@@ -14,8 +16,8 @@ function crash() {
for (var i = 0; i < 10; i++) {
result += o1.x + o3.foo;
}
-}
-
+};
+%PrepareFunctionForOptimization(crash);
crash();
%OptimizeFunctionOnNextCall(crash);
crash();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-351658.js b/deps/v8/test/mjsunit/regress/regress-crbug-351658.js
index ae6b50ec87..3e53c744ef 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-351658.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-351658.js
@@ -10,5 +10,5 @@ try {
f();
assertUnreachable();
} catch(e) {
- assertTrue(e instanceof ReferenceError);
+ assertTrue(e instanceof SyntaxError);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-352058.js b/deps/v8/test/mjsunit/regress/regress-crbug-352058.js
index e270d83007..1bf27edb5b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-352058.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-352058.js
@@ -9,8 +9,8 @@ var v2 = this;
function f() {
v2 = [1.2, 2.3];
v0 = [12, 23];
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-352929.js b/deps/v8/test/mjsunit/regress/regress-crbug-352929.js
index a5872c1258..78e6351ffd 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-352929.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-352929.js
@@ -9,7 +9,7 @@ array = new Int32Array(100);
var dummy2 = new Int32Array(100);
array[-17] = 0;
-function fun(base,cond) {
+function fun(base, cond) {
array[base - 1] = 1;
array[base - 2] = 2;
if (cond) {
@@ -19,11 +19,12 @@ function fun(base,cond) {
array[base - 6] = 5;
array[base - 100] = 777;
}
-}
-fun(5,true);
-fun(7,false);
+};
+%PrepareFunctionForOptimization(fun);
+fun(5, true);
+fun(7, false);
%OptimizeFunctionOnNextCall(fun);
-fun(7,false);
+fun(7, false);
for (var i = 0; i < dummy.length; i++) {
assertEquals(0, dummy[i]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-354391.js b/deps/v8/test/mjsunit/regress/regress-crbug-354391.js
index e652bd3d0f..33517f6511 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-354391.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-354391.js
@@ -6,8 +6,8 @@
function load(a, i) {
return a[i];
-}
-
+};
+%PrepareFunctionForOptimization(load);
function f2(a, b, c, d, index) {
return load(arguments, index);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-357330.js b/deps/v8/test/mjsunit/regress/regress-crbug-357330.js
index b3edf00843..b6ee249744 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-357330.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-357330.js
@@ -10,6 +10,7 @@ function f(foo) {
if (null != g) {}
};
+%PrepareFunctionForOptimization(f);
f(1.4);
f(1.4);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-374838.js b/deps/v8/test/mjsunit/regress/regress-crbug-374838.js
index 614b4d9a87..f116bb425f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-374838.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-374838.js
@@ -12,8 +12,8 @@ function foo() {
a.shift();
}
return result;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
assertEquals(1, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-380512.js b/deps/v8/test/mjsunit/regress/regress-crbug-380512.js
index af78ba7183..7134245195 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-380512.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-380512.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function f() { [].lastIndexOf(42); }
-
+function f() {
+ [].lastIndexOf(42);
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-381534.js b/deps/v8/test/mjsunit/regress/regress-crbug-381534.js
index 2aa3929677..4a3d319ab5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-381534.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-381534.js
@@ -7,32 +7,34 @@
var obj = {};
function f(v) {
- var v1 = -(4/3);
+ var v1 = -(4 / 3);
var v2 = 1;
- var arr = new Array(+0, true, 0, -0, false, undefined, null, "0", obj, v1, -(4/3), -1.3333333333333, "str", v2, 1, false);
- assertEquals(10, arr.lastIndexOf(-(4/3)));
- assertEquals(9, arr.indexOf(-(4/3)));
+ var arr = new Array(
+ +0, true, 0, -0, false, undefined, null, '0', obj, v1, -(4 / 3),
+ -1.3333333333333, 'str', v2, 1, false);
+ assertEquals(10, arr.lastIndexOf(-(4 / 3)));
+ assertEquals(9, arr.indexOf(-(4 / 3)));
assertEquals(10, arr.lastIndexOf(v));
assertEquals(9, arr.indexOf(v));
assertEquals(8, arr.lastIndexOf(obj));
assertEquals(8, arr.indexOf(obj));
-}
-
+};
+%PrepareFunctionForOptimization(f);
function g(v, x, index) {
- var arr = new Array({}, x-1.1, x-2, x-3.1);
+ var arr = new Array({}, x - 1.1, x - 2, x - 3.1);
assertEquals(index, arr.indexOf(0));
assertEquals(index, arr.lastIndexOf(0));
assertEquals(index, arr.indexOf(v));
assertEquals(index, arr.lastIndexOf(v));
-}
-
-f(-(4/3));
-f(-(4/3));
+};
+%PrepareFunctionForOptimization(g);
+f(-(4 / 3));
+f(-(4 / 3));
%OptimizeFunctionOnNextCall(f);
-f(-(4/3));
+f(-(4 / 3));
g(0, 2, 2);
g(0, 3.1, 3);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-382513.js b/deps/v8/test/mjsunit/regress/regress-crbug-382513.js
index 59d2dcac72..5b2e14caf3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-382513.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-382513.js
@@ -4,7 +4,10 @@
// Flags: --allow-natives-syntax
-function foo() { return [+0,false].indexOf(-(4/3)); }
+function foo() {
+ return [+0, false].indexOf(-(4 / 3));
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-387636.js b/deps/v8/test/mjsunit/regress/regress-crbug-387636.js
index 1e50ace45a..c4ab3f27ff 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-387636.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-387636.js
@@ -6,8 +6,8 @@
function f() {
[].indexOf(0x40000000);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-390918.js b/deps/v8/test/mjsunit/regress/regress-crbug-390918.js
index 4c202b3a9b..d19e983101 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-390918.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-390918.js
@@ -10,8 +10,8 @@ function f(scale) {
for (var i = 0; i < 2; i++) {
arr[2 * scale] = 0;
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
f({});
f({});
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-397662.js b/deps/v8/test/mjsunit/regress/regress-crbug-397662.js
new file mode 100644
index 0000000000..15e8631b48
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-397662.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --mock-arraybuffer-allocator --allow-natives-syntax
+
+var a = new Uint8Array(%MaxSmi() >> 1);
+a.x = 1;
+assertThrows(()=>Object.entries(a), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-405517.js b/deps/v8/test/mjsunit/regress/regress-crbug-405517.js
index 578e76aded..23632a560a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-405517.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-405517.js
@@ -5,11 +5,11 @@
// Flags: --allow-natives-syntax --gc-interval=203
function f() {
- var e = [0];
- Object.preventExtensions(e);
- for (var i = 0; i < 4; i++) e.shift();
-}
-
+ var e = [0];
+ Object.preventExtensions(e);
+ for (var i = 0; i < 4; i++) e.shift();
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-407946.js b/deps/v8/test/mjsunit/regress/regress-crbug-407946.js
index d5687cca34..33da171a5c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-407946.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-407946.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function f(n) { return [0].indexOf((n - n) + 0); }
-
+function f(n) {
+ return [0].indexOf(n - n + 0);
+};
+%PrepareFunctionForOptimization(f);
assertEquals(0, f(.1));
assertEquals(0, f(.1));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412208.js b/deps/v8/test/mjsunit/regress/regress-crbug-412208.js
index a194f855b4..a214f70407 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-412208.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412208.js
@@ -10,6 +10,7 @@ function f() {
return non_const_true || (f() = this);
}
+%PrepareFunctionForOptimization(f);
assertTrue(f());
assertTrue(f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412210.js b/deps/v8/test/mjsunit/regress/regress-crbug-412210.js
index 6ec7d62379..8f9c13cd6d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-412210.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412210.js
@@ -7,6 +7,7 @@
function f(x) {
return (x ? "" >> 0 : "") + /a/;
};
-
+%PrepareFunctionForOptimization(f);
+;
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412215.js b/deps/v8/test/mjsunit/regress/regress-crbug-412215.js
index ad926fc4a2..c1da1f0847 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-412215.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412215.js
@@ -15,6 +15,7 @@ function f( ) {
d = 357;
return {foo: b};
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -27,6 +28,7 @@ function g(obj) {
return obj.foo.length;
}
+%PrepareFunctionForOptimization(g);
g(dummy);
g(dummy);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412319.js b/deps/v8/test/mjsunit/regress/regress-crbug-412319.js
index 158fc59ae6..3a72887c5e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-412319.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412319.js
@@ -5,11 +5,12 @@
// Flags: --allow-natives-syntax
function __f_6() {
- var __v_7 = [0];
- Object.preventExtensions(__v_7);
- for (var __v_6 = -2; __v_6 < 19; __v_6++) __v_7.shift();
- __f_7(__v_7);
-}
+ var __v_7 = [0];
+ Object.preventExtensions(__v_7);
+ for (var __v_6 = -2; __v_6 < 19; __v_6++) __v_7.shift();
+ __f_7(__v_7);
+};
+%PrepareFunctionForOptimization(__f_6);
__f_6();
__f_6();
%OptimizeFunctionOnNextCall(__f_6);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-417508.js b/deps/v8/test/mjsunit/regress/regress-crbug-417508.js
index 589fb88443..705ab3609d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-417508.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-417508.js
@@ -7,23 +7,51 @@
function foo(x) {
var k = "value";
return x[k] = 1;
-}
+};
+%PrepareFunctionForOptimization(foo);
var obj = {};
-Object.defineProperty(obj, "value", {set: function(x) { throw "nope"; }});
-try { foo(obj); } catch(e) {}
-try { foo(obj); } catch(e) {}
+Object.defineProperty(obj, 'value', {
+ set: function(x) {
+ throw 'nope';
+ }
+});
+try {
+ foo(obj);
+} catch (e) {
+}
+try {
+ foo(obj);
+} catch (e) {
+}
%OptimizeFunctionOnNextCall(foo);
-try { foo(obj); } catch(e) {}
+try {
+ foo(obj);
+} catch (e) {
+}
function bar(x) {
var k = "value";
return (x[k] = 1) ? "ok" : "nope";
-}
+};
+%PrepareFunctionForOptimization(bar);
var obj2 = {};
-Object.defineProperty(obj2, "value",
- {set: function(x) { throw "nope"; return true; } });
+Object.defineProperty(obj2, 'value', {
+ set: function(x) {
+ throw 'nope';
+ return true;
+ }
+});
-try { bar(obj2); } catch(e) {}
-try { bar(obj2); } catch(e) {}
+try {
+ bar(obj2);
+} catch (e) {
+}
+try {
+ bar(obj2);
+} catch (e) {
+}
%OptimizeFunctionOnNextCall(bar);
-try { bar(obj2); } catch(e) {}
+try {
+ bar(obj2);
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-425519.js b/deps/v8/test/mjsunit/regress/regress-crbug-425519.js
index d08e7b91aa..2534b8520b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-425519.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-425519.js
@@ -6,8 +6,8 @@
function load(a, i) {
return a[i];
-}
-
+};
+%PrepareFunctionForOptimization(load);
load([]);
load(0);
load("x", 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-433332.js b/deps/v8/test/mjsunit/regress/regress-crbug-433332.js
index d763243b2c..2a69a3996a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-433332.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-433332.js
@@ -10,6 +10,7 @@ function f(foo) {
if (null != g) {}
};
+%PrepareFunctionForOptimization(f);
f(1.4);
f(1.4);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js
index 4dbb41b7d4..e179d5e7dd 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-476477-2.js
@@ -8,8 +8,8 @@ function foo(x) {
var s = Math.floor(x / 3600);
Math.floor(s);
return s % 24;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(12345678);
foo(12345678);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-478612.js b/deps/v8/test/mjsunit/regress/regress-crbug-478612.js
index 3419722cd0..cbf068292e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-478612.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-478612.js
@@ -5,14 +5,17 @@
// Flags: --allow-natives-syntax
// This is used to force binary operations below to have tagged representation.
-var z = {valueOf: function() { return 3; }};
-
+var z = {
+ valueOf: function() {
+ return 3;
+ }
+};
function f() {
var y = -2;
return (1 & z) - y++;
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertEquals(3, f());
assertEquals(3, f());
%OptimizeFunctionOnNextCall(f);
@@ -21,9 +24,9 @@ assertEquals(3, f());
function g() {
var y = 2;
- return (1 & z) | y++;
-}
-
+ return 1 & z | y++;
+};
+%PrepareFunctionForOptimization(g);
assertEquals(3, g());
assertEquals(3, g());
%OptimizeFunctionOnNextCall(g);
@@ -32,9 +35,9 @@ assertEquals(3, g());
function h() {
var y = 3;
- return (3 & z) & y++;
-}
-
+ return 3 & z & y++;
+};
+%PrepareFunctionForOptimization(h);
assertEquals(3, h());
assertEquals(3, h());
%OptimizeFunctionOnNextCall(h);
@@ -43,9 +46,9 @@ assertEquals(3, h());
function i() {
var y = 2;
- return (1 & z) ^ y++;
-}
-
+ return 1 & z ^ y++;
+};
+%PrepareFunctionForOptimization(i);
assertEquals(3, i());
assertEquals(3, i());
%OptimizeFunctionOnNextCall(i);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-485410.js b/deps/v8/test/mjsunit/regress/regress-crbug-485410.js
index bb11c82908..8c2fa1e84f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-485410.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-485410.js
@@ -8,15 +8,16 @@ var doubles = new Float64Array(1);
function ToHeapNumber(i) {
doubles[0] = i;
return doubles[0];
-}
+};
+%PrepareFunctionForOptimization(ToHeapNumber);
for (var i = 0; i < 3; i++) ToHeapNumber(i);
%OptimizeFunctionOnNextCall(ToHeapNumber);
ToHeapNumber(1);
function Fail(a, i, v) {
a[i] = v;
-}
-
+};
+%PrepareFunctionForOptimization(Fail);
for (var i = 0; i < 3; i++) Fail(new Array(1), 1, i);
%OptimizeFunctionOnNextCall(Fail);
// 1050 > JSObject::kMaxGap, causing stub failure and runtime call.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-485548-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-485548-1.js
index 6a25dcd2df..ffb564d790 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-485548-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-485548-1.js
@@ -5,15 +5,19 @@
// Flags: --allow-natives-syntax --expose-gc
var inner = new Array();
-inner.a = {x:1};
+inner.a = {
+ x: 1
+};
inner[0] = 1.5;
-inner.b = {x:2};
+inner.b = {
+ x: 2
+};
assertTrue(%HasDoubleElements(inner));
function foo(o) {
return o.field.a.x;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
var outer = {};
outer.field = inner;
foo(outer);
@@ -23,7 +27,11 @@ foo(outer);
foo(outer);
// Generalize representation of field "a" of inner object.
-var v = { get x() { return 0x7fffffff; } };
+var v = {
+ get x() {
+ return 0x7fffffff;
+ }
+};
inner.a = v;
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-485548-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-485548-2.js
index 02c6326d12..c85bb59815 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-485548-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-485548-2.js
@@ -5,15 +5,19 @@
// Flags: --allow-natives-syntax --expose-gc
var inner = new Array();
-inner.a = {x:1};
+inner.a = {
+ x: 1
+};
inner[0] = 1.5;
-inner.b = {x:2};
+inner.b = {
+ x: 2
+};
assertTrue(%HasDoubleElements(inner));
function foo(o) {
return o.field.b.x;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
var outer = {};
outer.field = inner;
foo(outer);
@@ -23,7 +27,11 @@ foo(outer);
foo(outer);
// Generalize representation of field "b" of inner object.
-var v = { get x() { return 0x7fffffff; } };
+var v = {
+ get x() {
+ return 0x7fffffff;
+ }
+};
inner.b = v;
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-487608.js b/deps/v8/test/mjsunit/regress/regress-crbug-487608.js
index c1eafce5ef..b5fb0dd872 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-487608.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-487608.js
@@ -14,8 +14,8 @@ function foo(index) {
result += a[index];
result += inlined(a, index);
return result;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(0);
foo(0);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-489293.js b/deps/v8/test/mjsunit/regress/regress-crbug-489293.js
index bcfc702df3..ef71a18bdc 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-489293.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-489293.js
@@ -8,9 +8,10 @@
function f() {
var x = 0;
for (var y = 0; y < 0; ++y) {
- x = (x + y) | 0;
+ x = x + y | 0;
}
return unbound;
-}
+};
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertThrows(f, ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-490021.js b/deps/v8/test/mjsunit/regress/regress-crbug-490021.js
index 745c0a8010..36b113fbf8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-490021.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-490021.js
@@ -7,8 +7,8 @@
var global = new Object(3);
function f() {
global[0] = global[0] >>> 15.5;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-500435.js b/deps/v8/test/mjsunit/regress/regress-crbug-500435.js
index acc17ac5ec..1f603a9a37 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-500435.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-500435.js
@@ -16,6 +16,7 @@ function foo(a) {
}
}
+%PrepareFunctionForOptimization(foo);
foo([1,2]);
foo([2,3]);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-500497.js b/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
index 4917193261..b77fa4f39a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
@@ -11,14 +11,16 @@ var global = []; // Used to keep some objects alive.
function Ctor() {
var result = {a: {}, b: {}, c: {}, d: {}, e: {}, f: {}, g: {}};
return result;
-}
-
+};
+%PrepareFunctionForOptimization(Ctor);
gc();
for (var i = 0; i < 120; i++) {
// Make the "a" property long-lived, while everything else is short-lived.
global.push(Ctor().a);
- (function FillNewSpace() { new Array(10000); })();
+ (function FillNewSpace() {
+ new Array(10000);
+ })();
}
// The bad situation is only triggered if Ctor wasn't optimized too early.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-500824.js b/deps/v8/test/mjsunit/regress/regress-crbug-500824.js
index 08d0d107ca..15f32229c4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-500824.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-500824.js
@@ -19,5 +19,6 @@ var f = (function(v) {
};
})(get_thrower()).fun;
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-505354.js b/deps/v8/test/mjsunit/regress/regress-crbug-505354.js
index 61c40c44da..6237cfa6ac 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-505354.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-505354.js
@@ -8,7 +8,9 @@ function f() {
"use strict";
try {
for (let i = 0; i < 10; i++) {}
- } catch(e) {}
-}
+ } catch (e) {
+ }
+};
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-510738.js b/deps/v8/test/mjsunit/regress/regress-crbug-510738.js
index 0e154a9a94..013f9b4ecf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-510738.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-510738.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function check(f, result) {
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(result, f());
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-513471.js b/deps/v8/test/mjsunit/regress/regress-crbug-513471.js
index 48c793e512..11afbe8af5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-513471.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-513471.js
@@ -4,7 +4,9 @@
// Flags: --allow-natives-syntax
-var g = (function*(){});
+var g = function*() {};
+;
+%PrepareFunctionForOptimization(g);
var f = g();
%OptimizeFunctionOnNextCall(g);
f.next();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-522895.js b/deps/v8/test/mjsunit/regress/regress-crbug-522895.js
index b2c9dc929c..8258524e13 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-522895.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-522895.js
@@ -10,6 +10,7 @@ var body =
" while (i-- > 31) {" +
" %OptimizeOsr(); " +
" j = 9; " +
+ " %PrepareFunctionForOptimization(bar1); " +
" while (j-- > 7);" +
" } " +
" return i; " +
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-523213.js b/deps/v8/test/mjsunit/regress/regress-crbug-523213.js
index 15b16bb4f9..00fc08e720 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-523213.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-523213.js
@@ -8,14 +8,14 @@ var v1 = [];
var v2 = [];
v1.__proto__ = v2;
-function f(){
+function f() {
var a = [];
- for(var i=0; i<2; i++){
+ for (var i = 0; i < 2; i++) {
a.push([]);
a = v2;
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-523307.js b/deps/v8/test/mjsunit/regress/regress-crbug-523307.js
index f2909675b2..8d5d4bca88 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-523307.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-523307.js
@@ -8,8 +8,8 @@ function f(x) {
var c = x * x << 366;
var a = c + c;
return a;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-527364.js b/deps/v8/test/mjsunit/regress/regress-crbug-527364.js
index 914bed01ab..2391ca0b20 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-527364.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-527364.js
@@ -22,5 +22,6 @@ function run_close_to_stack_limit(f) {
}
var boom = module().f;
+%PrepareFunctionForOptimization(boom);
%OptimizeFunctionOnNextCall(boom)
run_close_to_stack_limit(boom);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-530598.js b/deps/v8/test/mjsunit/regress/regress-crbug-530598.js
index f38552377f..07f48f02cf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-530598.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-530598.js
@@ -10,6 +10,7 @@ var f1 = (function() {
function f() { return g(); }
return f;
})();
+%PrepareFunctionForOptimization(f1);
assertThrows("f1()");
%OptimizeFunctionOnNextCall(f1);
assertThrows("f1()");
@@ -20,6 +21,7 @@ var f2 = (function() {
function f(a) { return a || g(); }
return f;
})();
+%PrepareFunctionForOptimization(f2);
assertTrue(f2(true));
%OptimizeFunctionOnNextCall(f2);
assertTrue(f2(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-537444.js b/deps/v8/test/mjsunit/regress/regress-crbug-537444.js
index d6fe6b89a3..0aca33c165 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-537444.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-537444.js
@@ -17,8 +17,8 @@ function g(x) {
function h(x) {
var z = g(x, 1);
return z + 1;
-}
-
+};
+%PrepareFunctionForOptimization(h);
h(1);
h(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-551287.js b/deps/v8/test/mjsunit/regress/regress-crbug-551287.js
index a85deef4bb..1517b5658e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-551287.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-551287.js
@@ -4,14 +4,18 @@
// Flags: --allow-natives-syntax
-function f() { do { } while (true); }
+function f() {
+ do {
+ } while (true);
+}
function boom(x) {
- switch(x) {
+ switch (x) {
case 1:
- case f(): return;
+ case f():
+ return;
}
-}
-
-%OptimizeFunctionOnNextCall(boom)
+};
+%PrepareFunctionForOptimization(boom);
+%OptimizeFunctionOnNextCall(boom);
boom(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-557807.js b/deps/v8/test/mjsunit/regress/regress-crbug-557807.js
index a96bc99003..384579fd82 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-557807.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-557807.js
@@ -4,8 +4,13 @@
// Flags: --allow-natives-syntax
-function bar() { return { __proto__: this }; }
-function foo(a) { a[0] = 0.3; }
+function bar() {
+ return {__proto__: this};
+}
+function foo(a) {
+ a[0] = 0.3;
+};
+%PrepareFunctionForOptimization(foo);
foo(bar());
%OptimizeFunctionOnNextCall(foo);
foo(bar());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-571064.js b/deps/v8/test/mjsunit/regress/regress-crbug-571064.js
index a28a3833b1..7b9666ca79 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-571064.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-571064.js
@@ -12,7 +12,8 @@ function CallFunc(a) {
}
function CallFuncWithPrototype() {
CallFunc(prototype);
-}
+};
+%PrepareFunctionForOptimization(CallFuncWithPrototype);
CallFunc([]);
CallFunc([]);
%OptimizeFunctionOnNextCall(CallFuncWithPrototype);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-573858.js b/deps/v8/test/mjsunit/regress/regress-crbug-573858.js
index 270df5a64a..744176ca78 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-573858.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-573858.js
@@ -4,13 +4,24 @@
// Flags: --allow-natives-syntax
-var throw_type_error = Object.getOwnPropertyDescriptor(
- (function() {"use strict"}).__proto__, "caller").get;
+var throw_type_error = Object
+ .getOwnPropertyDescriptor(
+ function() {
+ 'use strict';
+ }.__proto__,
+ 'caller')
+ .get;
-function create_initial_map() { this instanceof throw_type_error }
+function create_initial_map() {
+ this instanceof throw_type_error;
+};
+%PrepareFunctionForOptimization(create_initial_map);
%OptimizeFunctionOnNextCall(create_initial_map);
assertThrows(create_initial_map);
-function test() { new throw_type_error }
+function test() {
+ new throw_type_error();
+};
+%PrepareFunctionForOptimization(test);
%OptimizeFunctionOnNextCall(test);
assertThrows(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-577112.js b/deps/v8/test/mjsunit/regress/regress-crbug-577112.js
index 504f921a33..0e8d20f5bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-577112.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-577112.js
@@ -8,7 +8,8 @@ Array.prototype.__proto__ = null;
var prototype = Array.prototype;
function f() {
prototype.lastIndexOf({});
-}
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-589792.js b/deps/v8/test/mjsunit/regress/regress-crbug-589792.js
index f735afceae..bba887a87a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-589792.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-589792.js
@@ -16,5 +16,6 @@ var boom = (function(stdlib, foreign, heap) {
}
return foo
})(this, 0, new ArrayBuffer(256));
+%PrepareFunctionForOptimization(boom);
%OptimizeFunctionOnNextCall(boom);
boom(0, 0x1000);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js
index 73118eb20e..a974b5541b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js
@@ -9,10 +9,12 @@ var p = {foo: 1.5}
function g(x) { return x.foo === +x.foo; }
+%PrepareFunctionForOptimization(g);
assertEquals(false, g(o));
assertEquals(false, g(o));
%OptimizeFunctionOnNextCall(g);
assertEquals(false, g(o)); // Still fine here.
assertEquals(true, g(p));
+%PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
assertEquals(false, g(o)); // Confused by type feedback.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js
index cae1d9db5b..33ab7d83e7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function f(x) { return x === +x; }
-
+function f(x) {
+ return x === +x;
+};
+%PrepareFunctionForOptimization(f);
assertEquals(false, f(undefined));
assertEquals(false, f(undefined));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-595615.js b/deps/v8/test/mjsunit/regress/regress-crbug-595615.js
index f5d0ee5344..8b009efd85 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-595615.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-595615.js
@@ -8,8 +8,18 @@
function f(o) {
return o.x();
+};
+%PrepareFunctionForOptimization(f);
+try {
+ f({x: 1});
+} catch (e) {
+}
+try {
+ f({x: 1});
+} catch (e) {
}
-try { f({ x: 1 }); } catch(e) {}
-try { f({ x: 1 }); } catch(e) {}
%OptimizeFunctionOnNextCall(f);
-try { f({ x: 1 }); } catch(e) {}
+try {
+ f({x: 1});
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-598998.js b/deps/v8/test/mjsunit/regress/regress-crbug-598998.js
index a2a02623f5..3a9143cec2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-598998.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-598998.js
@@ -20,8 +20,8 @@ function g(x) {
function h(x) {
g(x, 1);
-}
-
+};
+%PrepareFunctionForOptimization(h);
%NeverOptimizeFunction(deopt_function);
h(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599003.js b/deps/v8/test/mjsunit/regress/regress-crbug-599003.js
index da29455661..06427582ae 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-599003.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599003.js
@@ -23,8 +23,8 @@ function g1() {
function g2() {
return new A();
-}
-
+};
+%PrepareFunctionForOptimization(g2);
var o = g1();
%OptimizeFunctionOnNextCall(g2);
g2();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-601617.js b/deps/v8/test/mjsunit/regress/regress-crbug-601617.js
index e1a5cbd2c7..bd47605096 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-601617.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-601617.js
@@ -15,7 +15,7 @@ function g(o) {
}
function f1() {
- var o = { x : 1 };
+ var o = {x: 1};
var res = g(o);
return res;
}
@@ -23,8 +23,8 @@ function f1() {
function f0() {
"use strict";
return f1(5);
-}
-
+};
+%PrepareFunctionForOptimization(f0);
%NeverOptimizeFunction(h);
f0();
f0();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-602595.js b/deps/v8/test/mjsunit/regress/regress-crbug-602595.js
index 7f6d478e05..aaad9d77d6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-602595.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-602595.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax --turbo-escape
-function f(a) { return [a] }
-
+function f(a) {
+ return [a];
+};
+%PrepareFunctionForOptimization(f);
assertEquals([23], f(23));
assertEquals([42], f(42));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-604680.js b/deps/v8/test/mjsunit/regress/regress-crbug-604680.js
index 8835807e86..b26935bfc7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-604680.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-604680.js
@@ -15,20 +15,20 @@ function g(o) {
}
function f1() {
- var o = { x : 42 };
+ var o = {x: 42};
var res = g(o);
return 1;
}
-function f0(a, b) {
+function f0(a, b) {
"use strict";
return f1(5);
-}
-
+};
+%PrepareFunctionForOptimization(f0);
function boom(b) {
if (b) throw new Error("boom!");
-}
-
+};
+%PrepareFunctionForOptimization(boom);
%NeverOptimizeFunction(h);
f0();
f0();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-608278.js b/deps/v8/test/mjsunit/regress/regress-crbug-608278.js
index c8d2fcc745..e18605c577 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-608278.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-608278.js
@@ -7,7 +7,7 @@
"use strict";
function h() {
- var stack = (new Error("boom")).stack;
+ var stack = new Error('boom').stack;
print(stack);
%DeoptimizeFunction(f1);
%DeoptimizeFunction(f2);
@@ -27,8 +27,8 @@ function f1() {
var o = {};
o.__defineGetter__('p', g);
o.p;
-}
-
+};
+%PrepareFunctionForOptimization(f1);
f1();
f1();
%OptimizeFunctionOnNextCall(f1);
@@ -39,8 +39,8 @@ function f2() {
var o = {};
o.__defineSetter__('q', g);
o.q = 1;
-}
-
+};
+%PrepareFunctionForOptimization(f2);
f2();
f2();
%OptimizeFunctionOnNextCall(f2);
@@ -53,8 +53,8 @@ function A() {
function f3() {
new A();
-}
-
+};
+%PrepareFunctionForOptimization(f3);
f3();
f3();
%OptimizeFunctionOnNextCall(f3);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-613494.js b/deps/v8/test/mjsunit/regress/regress-crbug-613494.js
index 6fcc1e94f4..b6f2ebb3b3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-613494.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-613494.js
@@ -6,8 +6,11 @@
function f() {
var bound = 0;
- function g() { return bound }
-}
+ function g() {
+ return bound;
+ }
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-613919.js b/deps/v8/test/mjsunit/regress/regress-crbug-613919.js
index cbd3e43b96..3124565bd7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-613919.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-613919.js
@@ -11,7 +11,8 @@ function g(a) {
}
function f() {
return g(false);
-}
+};
+%PrepareFunctionForOptimization(f);
assertEquals(23, f());
assertEquals(23, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-614292.js b/deps/v8/test/mjsunit/regress/regress-crbug-614292.js
index 3a67c17f60..e0d3517cbe 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-614292.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-614292.js
@@ -6,8 +6,8 @@
function foo() {
return [] | 0 && values[0] || false;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
try {
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-614644.js b/deps/v8/test/mjsunit/regress/regress-crbug-614644.js
index d219cd3b92..a2bfb1f60f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-614644.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-614644.js
@@ -7,9 +7,9 @@
function f(a, x) {
a.shift(2, a.length = 2);
a[0] = x;
-}
-
-f([ ], 1.1);
+};
+%PrepareFunctionForOptimization(f);
+f([], 1.1);
f([1], 1.1);
%OptimizeFunctionOnNextCall(f);
f([1], 1.1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js
index 75abe3c2e1..66acd91bd6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js
@@ -6,13 +6,13 @@
// Make the Object prototype have dictionary properties.
for (var i = 0; i < 2000; i++) {
- Object.prototype['X'+i] = true;
+ Object.prototype['X' + i] = true;
}
function boom(a1) {
return a1[0];
-}
-
+};
+%PrepareFunctionForOptimization(boom);
var a = new Array(1);
a[0] = 0.1;
boom(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js
index 27e5d2d9da..9e4e245a44 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js
@@ -6,13 +6,13 @@
// Make the Array prototype have dictionary properties.
for (var i = 0; i < 2000; i++) {
- Array.prototype['X'+i] = true;
+ Array.prototype['X' + i] = true;
}
function boom(a1) {
return a1[0];
-}
-
+};
+%PrepareFunctionForOptimization(boom);
var a = new Array(1);
a[0] = 0.1;
boom(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-617567.js b/deps/v8/test/mjsunit/regress/regress-crbug-617567.js
index f0c696e14b..d883543872 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-617567.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-617567.js
@@ -17,8 +17,8 @@ var v3 = {};
function f() {
v3 = v2;
g();
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertThrows(g);
%OptimizeFunctionOnNextCall(f);
assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-621816.js b/deps/v8/test/mjsunit/regress/regress-crbug-621816.js
index 8ef291a00c..64032a6566 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-621816.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-621816.js
@@ -9,9 +9,10 @@ function f() {
o.a = 1;
}
function g() {
- var o = { ['a']: function(){} };
+ var o = {['a']: function() {}};
f();
-}
+};
+%PrepareFunctionForOptimization(g);
f();
f();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-621868.js b/deps/v8/test/mjsunit/regress/regress-crbug-621868.js
index dcd7b8755b..f582481d08 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-621868.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-621868.js
@@ -10,10 +10,10 @@ function f(a) { // First parameter is tagged.
function g() {
f();
- var d = {x : f()};
+ var d = {x: f()};
return [d];
-}
-
+};
+%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-624747.js b/deps/v8/test/mjsunit/regress/regress-crbug-624747.js
index 7927263f8e..e0acbfafbf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-624747.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-624747.js
@@ -10,13 +10,13 @@ function bar() {
try {
unref;
} catch (e) {
- return (1 instanceof TypeError) && unref(); // Call in tail position!
+ return 1 instanceof TypeError && unref(); // Call in tail position!
}
}
function foo() {
return bar(); // Call in tail position!
-}
-
+};
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-624919.js b/deps/v8/test/mjsunit/regress/regress-crbug-624919.js
index 5a2b100daf..d4cff87afc 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-624919.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-624919.js
@@ -6,8 +6,8 @@
function f(a, b, c, d, e) {
if (a && (b, c ? d() : e())) return 0;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-625547.js b/deps/v8/test/mjsunit/regress/regress-crbug-625547.js
index 20eb85db5e..0c3e022d80 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-625547.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-625547.js
@@ -10,12 +10,14 @@ var v2 = {};
v2 = 0;
gc();
-var minus_zero = {z:-0.0}.z;
+var minus_zero = {z: -0.0}.z;
var nan = undefined + 1;
function f() {
v1 = minus_zero;
v2 = nan;
};
+%PrepareFunctionForOptimization(f);
+;
%OptimizeFunctionOnNextCall(f);
f();
gc(); // Boom!
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-627828.js b/deps/v8/test/mjsunit/regress/regress-crbug-627828.js
index 12c59a37c6..9389cc0bdb 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-627828.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-627828.js
@@ -12,8 +12,14 @@
return "x";
}
};
- return { [o]() { return 23 } };
- }
+
+ return {
+ [o]() {
+ return 23;
+ }
+ };
+ };
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f().x());
assertEquals(23, f().x());
%OptimizeFunctionOnNextCall(f);
@@ -25,14 +31,12 @@
// happens in the object literal.
Object.defineProperty(Object.prototype, 'x_proto', {
- get: function () {
+ get: function() {
return 21;
},
- set: function () {
- }
+ set: function() {}
});
-
function f() {
var o = {
toString: function() {
@@ -40,8 +44,14 @@
return "x_proto";
}
};
- return { [o]() { return 23 } };
- }
+
+ return {
+ [o]() {
+ return 23;
+ }
+ };
+ };
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f().x_proto());
assertEquals(23, f().x_proto());
%OptimizeFunctionOnNextCall(f);
@@ -59,11 +69,16 @@
return "y";
}
};
+
class C {
- [o]() { return 42 };
+ [o]() {
+ return 42;
+ }
}
+
return new C();
- }
+ };
+ %PrepareFunctionForOptimization(g);
assertEquals(42, g().y());
assertEquals(42, g().y());
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-629062.js b/deps/v8/test/mjsunit/regress/regress-crbug-629062.js
index 228ae6d2d5..8234481105 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-629062.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-629062.js
@@ -5,9 +5,9 @@
// Flags: --allow-natives-syntax
function foo(x) {
- return 1 + ((1 == 0) && undefined);
-}
-
+ return 1 + (1 == 0 && undefined);
+};
+%PrepareFunctionForOptimization(foo);
foo(false);
foo(false);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-629435.js b/deps/v8/test/mjsunit/regress/regress-crbug-629435.js
index b73f601c71..1a126bef52 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-629435.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-629435.js
@@ -14,6 +14,7 @@ bar([]);
function foo() {
var x = -0;
bar(x + 1);
-}
+};
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-629823.js b/deps/v8/test/mjsunit/regress/regress-crbug-629823.js
index bbf74b80af..55d3cfeb2c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-629823.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-629823.js
@@ -4,7 +4,7 @@
// Flags: --allow-natives-syntax
-var o = {}
+var o = {};
function bar() {
o[0] = +o[0];
o = /\u23a1|__v_4/;
@@ -12,6 +12,9 @@ function bar() {
bar();
bar();
bar();
-function foo() { bar(); }
+function foo() {
+ bar();
+};
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-630923.js b/deps/v8/test/mjsunit/regress/regress-crbug-630923.js
index ff0d2dd05e..05ccc0a8f8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-630923.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-630923.js
@@ -10,7 +10,8 @@ function bar(o) {
}
function foo() {
bar(o);
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631027.js b/deps/v8/test/mjsunit/regress/regress-crbug-631027.js
index f3d04b8efd..6fa1f7738c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631027.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631027.js
@@ -7,6 +7,7 @@
function f() {
with ({ value:"foo" }) { return value; }
}
+%PrepareFunctionForOptimization(f);
assertEquals("foo", f());
%OptimizeFunctionOnNextCall(f);
assertEquals("foo", f());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js
index bd40dcd3a2..6647882ad7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x < x; }
+function foo(x) {
+ return x < x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js
index 1c4fccaac1..d7456c430b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x << x; }
+function foo(x) {
+ return x << x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js
index a03a125ede..beccf8dce4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x >> x; }
+function foo(x) {
+ return x >> x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js
index f710bd0149..35fea62fe0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x >>> x; }
+function foo(x) {
+ return x >>> x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js
index 7a784481ee..3ee5f0f96b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x & x; }
+function foo(x) {
+ return x & x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js
index 829bf900b6..d58c47b274 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x | x; }
+function foo(x) {
+ return x | x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js
index 1257d797ae..6057e5e99e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x ^ x; }
+function foo(x) {
+ return x ^ x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js
index ce46b27886..bda54c3796 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x > x; }
+function foo(x) {
+ return x > x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js
index 4258b15508..b0b93feaae 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x >= x; }
+function foo(x) {
+ return x >= x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js
index 7e8cdf8f56..4d6360a2b5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x <= x; }
+function foo(x) {
+ return x <= x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js
index acdedcba13..de4e121b53 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x + x; }
+function foo(x) {
+ return x + x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js
index d17772f17c..91023445e9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x / x; }
+function foo(x) {
+ return x / x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js
index 7d03fa8551..61d638be7c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x * x; }
+function foo(x) {
+ return x * x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js
index 474110b53d..322d0d010c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x % x; }
+function foo(x) {
+ return x % x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js
index ad472e0722..acdcc514a5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js
@@ -4,11 +4,16 @@
// Flags: --allow-natives-syntax
-function foo(x) { return x - x; }
+function foo(x) {
+ return x - x;
+}
foo(1);
foo(2);
-function bar(x) { foo(x); }
+function bar(x) {
+ foo(x);
+};
+%PrepareFunctionForOptimization(bar);
%OptimizeFunctionOnNextCall(bar);
assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-635923.js b/deps/v8/test/mjsunit/regress/regress-crbug-635923.js
index 5101b7973e..7ce504fba8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-635923.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-635923.js
@@ -4,9 +4,14 @@
// Flags: --allow-natives-syntax --turbo-filter=f
-function f(x) { return x + 23 }
-function g(x) { return f(x) + 42 }
-
+function f(x) {
+ return x + 23;
+};
+%PrepareFunctionForOptimization(f);
+function g(x) {
+ return f(x) + 42;
+};
+%PrepareFunctionForOptimization(g);
assertEquals(23, f(0));
assertEquals(24, f(1));
assertEquals(67, g(2));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-638551.js b/deps/v8/test/mjsunit/regress/regress-crbug-638551.js
index 46f307e559..a793032f2a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-638551.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-638551.js
@@ -17,4 +17,5 @@ gc(); // Make sure that ...
gc(); // ... code flushing ...
gc(); // ... clears code ...
gc(); // ... attached to {g}.
+%PrepareFunctionForOptimization(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-640497.js b/deps/v8/test/mjsunit/regress/regress-crbug-640497.js
index 3b90d1bcfe..eb028029e4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-640497.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-640497.js
@@ -5,14 +5,19 @@
// Flags: --allow-natives-syntax --turbo-escape
// Warm up {g} with arrays and strings.
-function g(v) { return v.length; }
+function g(v) {
+ return v.length;
+}
assertEquals(1, g("x"));
assertEquals(2, g("xy"));
assertEquals(1, g([1]));
-assertEquals(2, g([1,2]));
+assertEquals(2, g([1, 2]));
// Inline into {f}, where we see only an array.
-function f() { assertEquals(0, g([])); }
+function f() {
+ assertEquals(0, g([]));
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-642056.js b/deps/v8/test/mjsunit/regress/regress-crbug-642056.js
index ca9fc78ef6..74b279e712 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-642056.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-642056.js
@@ -6,9 +6,9 @@
function f(o) {
return o.x instanceof Array;
-}
-
-var o = { x : 1.5 };
+};
+%PrepareFunctionForOptimization(f);
+var o = {x: 1.5};
o.x = 0;
f(o);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-643073.js b/deps/v8/test/mjsunit/regress/regress-crbug-643073.js
index 1301ddd184..b2ed7abbac 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-643073.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-643073.js
@@ -4,10 +4,12 @@
// Flags: --allow-natives-syntax
-for (i in [0,0]) {}
+for (i in [0, 0]) {
+}
function foo() {
i = 0;
return i < 0;
-}
+};
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-644245.js b/deps/v8/test/mjsunit/regress/regress-crbug-644245.js
index 86714902c0..937e5bdda0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-644245.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-644245.js
@@ -12,6 +12,7 @@ function f() {
}
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-644689-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-644689-1.js
index 49bf902047..dd49e14246 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-644689-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-644689-1.js
@@ -6,8 +6,10 @@
for (var i = 0; i < 1024; ++i) Object.prototype["i" + i] = i;
-function foo() { [].push(1); }
-
+function foo() {
+ [].push(1);
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-644689-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-644689-2.js
index 03831b15a8..958ad72b51 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-644689-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-644689-2.js
@@ -6,8 +6,10 @@
for (var i = 0; i < 1024; ++i) Object.prototype["i" + i] = i;
-function foo() { [1].pop(); }
-
+function foo() {
+ [1].pop();
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-645103.js b/deps/v8/test/mjsunit/regress/regress-crbug-645103.js
index b81b9f2b04..04f04e26cd 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-645103.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-645103.js
@@ -11,6 +11,7 @@ class Subclass extends Base {
super();
}
}
+%PrepareFunctionForOptimization(Subclass);
new Subclass();
new Subclass();
%OptimizeFunctionOnNextCall(Subclass);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-645438.js b/deps/v8/test/mjsunit/regress/regress-crbug-645438.js
index ff171524a0..a8ab1c383d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-645438.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-645438.js
@@ -4,13 +4,15 @@
// Flags: --allow-natives-syntax
-function n(x,y){
- y = (y-(0x80000000|0)|0);
- return (x/y)|0;
+function n(x, y) {
+ y = y - (0x80000000 | 0) | 0;
+ return x / y | 0;
};
+%PrepareFunctionForOptimization(n);
+;
var x = -0x80000000;
var y = 0x7fffffff;
-n(x,y);
-n(x,y);
+n(x, y);
+n(x, y);
%OptimizeFunctionOnNextCall(n);
-assertEquals(x, n(x,y));
+assertEquals(x, n(x, y));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-647217.js b/deps/v8/test/mjsunit/regress/regress-crbug-647217.js
index ab45c9afa9..2beae1ecba 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-647217.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-647217.js
@@ -7,6 +7,12 @@
var source = "return 1" + new Array(2048).join(' + a') + "";
eval("function g(a) {" + source + "}");
-function f(a) { return g(a) }
+function f(a) {
+ return g(a);
+};
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
-try { f(0) } catch(e) {}
+try {
+ f(0);
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-647887.js b/deps/v8/test/mjsunit/regress/regress-crbug-647887.js
index 84e598d5aa..e9ef5b3891 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-647887.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-647887.js
@@ -10,5 +10,6 @@ function f(obj) {
return key === undefined;
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertFalse(f({ foo:0 }));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-648539.js b/deps/v8/test/mjsunit/regress/regress-crbug-648539.js
index c12f16dce4..78a9ba0e78 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-648539.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-648539.js
@@ -10,7 +10,8 @@ function f() {
}
function g() {
return f();
-}
+};
+%PrepareFunctionForOptimization(g);
assertThrows(g, TypeError);
assertThrows(g, TypeError);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-648737.js b/deps/v8/test/mjsunit/regress/regress-crbug-648737.js
index e78cb20d6f..c70d92edd4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-648737.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-648737.js
@@ -7,8 +7,9 @@
function f(str) {
var s = "We turn {" + str + "} into a ConsString now";
return s.length;
-}
-assertEquals(33, f("a"));
+};
+%PrepareFunctionForOptimization(f);
+assertEquals(33, f('a'));
assertEquals(33, f("b"));
%OptimizeFunctionOnNextCall(f);
assertEquals(33, f("c"));
@@ -17,8 +18,9 @@ function g(str) {
var s = "We also try to materalize {" + str + "} when deopting";
%DeoptimizeNow();
return s.length;
-}
-assertEquals(43, g("a"));
+};
+%PrepareFunctionForOptimization(g);
+assertEquals(43, g('a'));
assertEquals(43, g("b"));
%OptimizeFunctionOnNextCall(g);
assertEquals(43, g("c"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-650404.js b/deps/v8/test/mjsunit/regress/regress-crbug-650404.js
index ebf14e69b3..e01a97457c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-650404.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-650404.js
@@ -23,10 +23,14 @@ var bomb = c4(2, 2);
function reader(o, i) {
// Dummy try-catch, so that TurboFan is used to optimize this.
- try {} catch(e) {}
+ try {
+ } catch (e) {
+ }
return o[i];
}
// Optimize reader!
+;
+%PrepareFunctionForOptimization(reader);
for (var i = 0; i < 3; i++) reader(bomb, 0);
%OptimizeFunctionOnNextCall(reader);
reader(bomb, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-654723.js b/deps/v8/test/mjsunit/regress/regress-crbug-654723.js
index fa81233522..6d61d90213 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-654723.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-654723.js
@@ -8,8 +8,8 @@ var k = "0101010101010101" + "01010101";
function foo(s) {
return k + s;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo("a");
foo("a");
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-655004.js b/deps/v8/test/mjsunit/regress/regress-crbug-655004.js
index 1cba1efc82..28c1bff2f0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-655004.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-655004.js
@@ -8,7 +8,8 @@ function foo(a) {
a.x = 0;
if (a.x === 0) a[1] = 0.1;
a.x = {};
-}
+};
+%PrepareFunctionForOptimization(foo);
foo(new Array(1));
foo(new Array(1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-656037.js b/deps/v8/test/mjsunit/regress/regress-crbug-656037.js
index 47d09aaa4b..a7372e229a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-656037.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-656037.js
@@ -6,8 +6,8 @@
function foo(a) {
return a.push(true);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
var a = [];
assertEquals(1, foo(a));
assertEquals(2, foo(a));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-656275.js b/deps/v8/test/mjsunit/regress/regress-crbug-656275.js
index 74b29c1458..dafcb2a5b6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-656275.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-656275.js
@@ -6,8 +6,10 @@
var a = 1;
-function foo(x) { a = Math.fround(x + 1); }
-
+function foo(x) {
+ a = Math.fround(x + 1);
+};
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-657478.js b/deps/v8/test/mjsunit/regress/regress-crbug-657478.js
index 0827a7a1c7..2fdfd352c3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-657478.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-657478.js
@@ -6,6 +6,7 @@
function foo(o) { return %_ToLength(o.length); }
+%PrepareFunctionForOptimization(foo);
foo(new Array(4));
foo(new Array(Math.pow(2, 32) - 1));
foo({length: 10});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-658185.js b/deps/v8/test/mjsunit/regress/regress-crbug-658185.js
index 60de8d6458..1b3d6a8bbd 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-658185.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-658185.js
@@ -6,14 +6,15 @@
var t = 0;
function foo() {
- var o = {x:1};
- var p = {y:2.5, x:0};
+ var o = {x: 1};
+ var p = {y: 2.5, x: 0};
o = [];
for (var i = 0; i < 2; ++i) {
t = o.x;
o = p;
}
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-658691.js b/deps/v8/test/mjsunit/regress/regress-crbug-658691.js
index 4f226e78bb..6f8fef6d91 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-658691.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-658691.js
@@ -15,8 +15,10 @@ function f(a, b, c) {
// The {g} function is compiled using Ignition.
// 1) The call to {f} requires arguments adaptation.
// 2) The call to {f} is not in tail position.
+;
+%PrepareFunctionForOptimization(f);
function g() {
- return f() + "-no-tail";
+ return f() + '-no-tail';
}
assertEquals("true-no-tail", g());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-659475-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-659475-1.js
index 2648203b8c..dd138dc74f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-659475-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-659475-1.js
@@ -14,11 +14,13 @@ function Check() {
n.xyz = 0x826852f4;
}
+%PrepareFunctionForOptimization(Ctor);
Ctor();
Ctor();
%OptimizeFunctionOnNextCall(Ctor);
Ctor();
+%PrepareFunctionForOptimization(Check);
Check();
Check();
%OptimizeFunctionOnNextCall(Check);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-659475-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-659475-2.js
index 49e02fde00..6ae3f49ac3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-659475-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-659475-2.js
@@ -15,11 +15,13 @@ function Check() {
n.xyz = 0x826852f4;
}
+%PrepareFunctionForOptimization(Ctor);
Ctor();
Ctor();
%OptimizeFunctionOnNextCall(Ctor);
Ctor();
+%PrepareFunctionForOptimization(Check);
Check();
Check();
%OptimizeFunctionOnNextCall(Check);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-659915a.js b/deps/v8/test/mjsunit/regress/regress-crbug-659915a.js
index 507a0f134c..1e6a12dca5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-659915a.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-659915a.js
@@ -17,6 +17,7 @@ function h(a) {
function boom() { return g(1) }
+%PrepareFunctionForOptimization(boom);
assertEquals(1, h(1));
assertEquals(2, boom());
assertEquals(3, boom());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-659915b.js b/deps/v8/test/mjsunit/regress/regress-crbug-659915b.js
index 6fdb13f491..16f9ffcf3c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-659915b.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-659915b.js
@@ -5,16 +5,25 @@
// Flags: --allow-natives-syntax
(function() {
- var x = 23;
- function f() { return x; }
- function g() { [x] = [x + 1]; }
- function h() { g(); return x; }
+var x = 23;
+function f() {
+ return x;
+}
+function g() {
+ [x] = [x + 1];
+}
+function h() {
+ g();
+ return x;
+}
- function boom() { return h() }
-
- assertEquals(24, boom());
- assertEquals(25, boom());
- assertEquals(26, boom());
- %OptimizeFunctionOnNextCall(boom);
- assertEquals(27, boom());
+function boom() {
+ return h();
+};
+%PrepareFunctionForOptimization(boom);
+assertEquals(24, boom());
+assertEquals(25, boom());
+assertEquals(26, boom());
+%OptimizeFunctionOnNextCall(boom);
+assertEquals(27, boom());
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-660379.js b/deps/v8/test/mjsunit/regress/regress-crbug-660379.js
index 84634628d6..6c65829e86 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-660379.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-660379.js
@@ -13,9 +13,10 @@
try {
g(); // Right at the end of try.
} catch (e) {
- assertEquals("boom", e)
+ assertEquals('boom', e);
}
- }
+ };
+ %PrepareFunctionForOptimization(f);
assertDoesNotThrow(f);
assertDoesNotThrow(f);
%OptimizeFunctionOnNextCall(f);
@@ -34,7 +35,8 @@
} catch (e) {
assertUnreachable();
}
- }
+ };
+ %PrepareFunctionForOptimization(f);
assertThrows(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-661949.js b/deps/v8/test/mjsunit/regress/regress-crbug-661949.js
index e083cae349..08093a56fe 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-661949.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-661949.js
@@ -4,11 +4,14 @@
// Flags: --allow-natives-syntax
-var foo = (function() {
+var foo = function() {
'use asm';
- function foo() { ''[0]; }
+ function foo() {
+ ''[0];
+ };
+ %PrepareFunctionForOptimization(foo);
return foo;
-})();
+}();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-662367.js b/deps/v8/test/mjsunit/regress/regress-crbug-662367.js
index fcaf545427..700531b58c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-662367.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-662367.js
@@ -9,7 +9,8 @@ var zero = 0;
(function ConstantFoldZeroDivZero() {
function f() {
return 0 / zero;
- }
+ };
+ %PrepareFunctionForOptimization(f);
assertTrue(isNaN(f()));
assertTrue(isNaN(f()));
%OptimizeFunctionOnNextCall(f);
@@ -19,7 +20,8 @@ var zero = 0;
(function ConstantFoldMinusZeroDivZero() {
function f() {
return -0 / zero;
- }
+ };
+ %PrepareFunctionForOptimization(f);
assertTrue(isNaN(f()));
assertTrue(isNaN(f()));
%OptimizeFunctionOnNextCall(f);
@@ -29,7 +31,8 @@ var zero = 0;
(function ConstantFoldNaNDivZero() {
function f() {
return NaN / 0;
- }
+ };
+ %PrepareFunctionForOptimization(f);
assertTrue(isNaN(f()));
assertTrue(isNaN(f()));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-662410.js b/deps/v8/test/mjsunit/regress/regress-crbug-662410.js
index f1cbc6b824..5818a932e0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-662410.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-662410.js
@@ -4,7 +4,9 @@
// Flags: --allow-natives-syntax
-function g(v) { return v.constructor; }
+function g(v) {
+ return v.constructor;
+}
g({});
g({});
@@ -15,7 +17,7 @@ function f() {
i = i + 1;
g(i);
} while (i < 1);
-}
-
+};
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-662830.js b/deps/v8/test/mjsunit/regress/regress-crbug-662830.js
index eec1da2193..27ba39d8e6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-662830.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-662830.js
@@ -11,6 +11,7 @@ function f() {
function g() {
try { f(); } catch(e) { }
+ %PrepareFunctionForOptimization(g);
for (var i = 0; i < 3; ++i) if (i === 1) %OptimizeOsr();
%_DeoptimizeNow();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-663340.js b/deps/v8/test/mjsunit/regress/regress-crbug-663340.js
index 37d285dc73..1343921437 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-663340.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-663340.js
@@ -7,10 +7,11 @@
var expected = undefined;
function foo() {
- var a = [0,,{}];
+ var a = [0, , {}];
a.shift();
assertEquals(expected, a[0]);
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -22,10 +23,11 @@ Array.prototype[1] = expected;
foo();
function bar() {
- var a = [0,,{}];
+ var a = [0, , {}];
a.shift();
assertEquals(expected, a[0]);
-}
+};
+%PrepareFunctionForOptimization(bar);
bar();
bar();
%OptimizeFunctionOnNextCall(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-663402.js b/deps/v8/test/mjsunit/regress/regress-crbug-663402.js
index 5368bd6e80..4349972578 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-663402.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-663402.js
@@ -23,6 +23,7 @@ function emit_f(size) {
// a single instruction's immediate field (2^12).
var kLength = 701;
emit_f(kLength);
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-663750.js b/deps/v8/test/mjsunit/regress/regress-crbug-663750.js
index 1b3b531a58..885bb9be08 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-663750.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-663750.js
@@ -11,6 +11,7 @@ function foo(a) {
this.x = 0;
delete x;
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -19,6 +20,7 @@ assertEquals(undefined, v);
Object.freeze(this);
+%PrepareFunctionForOptimization(foo);
foo(4);
foo(5);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-664084.js b/deps/v8/test/mjsunit/regress/regress-crbug-664084.js
index 79e221e495..55eb4d2ead 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-664084.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-664084.js
@@ -6,8 +6,8 @@
function foo() {
return +({} + 1);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(NaN, foo());
assertEquals(NaN, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-664942.js b/deps/v8/test/mjsunit/regress/regress-crbug-664942.js
index 3b76990511..70416375b9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-664942.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-664942.js
@@ -6,7 +6,8 @@
function foo() {
return 'x'[0];
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
assertEquals("x", foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-665793.js b/deps/v8/test/mjsunit/regress/regress-crbug-665793.js
index d445d5194e..8ff8460c61 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-665793.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-665793.js
@@ -6,7 +6,8 @@
function foo() {
return 'x'[1];
-}
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo());
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-667689.js b/deps/v8/test/mjsunit/regress/regress-crbug-667689.js
index e83c40eeda..174a7d99c9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-667689.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-667689.js
@@ -5,12 +5,18 @@
// Flags: --allow-natives-syntax
function foo() {}
-foo.__defineGetter__(undefined, function() {})
+foo.__defineGetter__(undefined, function() {});
function bar() {}
-function baz(x) { return x instanceof bar };
+function baz(x) {
+ return x instanceof bar;
+};
+%PrepareFunctionForOptimization(baz);
+;
%OptimizeFunctionOnNextCall(baz);
baz();
Object.setPrototypeOf(bar, null);
-bar[Symbol.hasInstance] = function() { return true };
+bar[Symbol.hasInstance] = function() {
+ return true;
+};
assertTrue(baz());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-669451.js b/deps/v8/test/mjsunit/regress/regress-crbug-669451.js
index 0bbc1c1a96..7354ce3eb4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-669451.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-669451.js
@@ -6,10 +6,20 @@
function foo() {
var a = [,];
- a[0] = {}
+ a[0] = {};
a[0].toString = FAIL;
+};
+%PrepareFunctionForOptimization(foo);
+try {
+ foo();
+} catch (e) {
+}
+try {
+ foo();
+} catch (e) {
}
-try { foo(); } catch (e) {}
-try { foo(); } catch (e) {}
%OptimizeFunctionOnNextCall(foo);
-try { foo(); } catch (e) {}
+try {
+ foo();
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-669850.js b/deps/v8/test/mjsunit/regress/regress-crbug-669850.js
index 50f5940f08..7330f788cd 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-669850.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-669850.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
eval('function f(a) { return [' + new Array(1 << 17) + ',a] }');
+%PrepareFunctionForOptimization(f);
assertEquals(23, f(23)[1 << 17]);
assertEquals(42, f(42)[1 << 17]);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-671576.js b/deps/v8/test/mjsunit/regress/regress-crbug-671576.js
index b9c7bccede..94710b3b51 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-671576.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-671576.js
@@ -8,6 +8,7 @@ function f() {
for (var i of [NaN].keys());
}
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-672792.js b/deps/v8/test/mjsunit/regress/regress-crbug-672792.js
index d458f7c6d5..91af1e78fc 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-672792.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-672792.js
@@ -7,6 +7,8 @@
// Generate a function {f} containing a large array literal of doubles.
eval("function f() { return [" + String("0.1,").repeat(65535) + "] }");
+%PrepareFunctionForOptimization(f);
+
// Running the function once will initialize the boilerplate.
assertEquals(65535, f().length);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-679202.js b/deps/v8/test/mjsunit/regress/regress-crbug-679202.js
index e4350224fd..7300490dc1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-679202.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-679202.js
@@ -6,8 +6,10 @@
var x = Object.prototype;
-function f() { return x <= x; }
-
+function f() {
+ return x <= x;
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-679378.js b/deps/v8/test/mjsunit/regress/regress-crbug-679378.js
index fded13fc81..f6bba5dca1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-679378.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-679378.js
@@ -6,12 +6,19 @@
var x = {};
x.__defineGetter__('0', () => 0);
-x.a = {v: 1.51};
+x.a = {
+ v: 1.51
+};
var y = {};
-y.a = {u:"OK"};
+y.a = {
+ u: 'OK'
+};
-function foo(o) { return o.a.u; }
+function foo(o) {
+ return o.a.u;
+};
+%PrepareFunctionForOptimization(foo);
foo(y);
foo(y);
foo(x);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-681983.js b/deps/v8/test/mjsunit/regress/regress-crbug-681983.js
index eab161c97c..a812a82c3f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-681983.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-681983.js
@@ -14,5 +14,6 @@ function f() {
return g(-1);
}
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertEquals(4294967295, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-684208.js b/deps/v8/test/mjsunit/regress/regress-crbug-684208.js
index 7055a7eb82..26d66d1386 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-684208.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-684208.js
@@ -9,6 +9,7 @@ function foo() {
%_DeoptimizeNow();
return a[2];
}
+%PrepareFunctionForOptimization(foo);
assertSame(undefined, foo());
assertSame(undefined, foo());
%OptimizeFunctionOnNextCall(foo)
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-685050.js b/deps/v8/test/mjsunit/regress/regress-crbug-685050.js
index 2560e2ee61..72d9db33e8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-685050.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-685050.js
@@ -14,6 +14,7 @@ bar([1, 2, 3]);
function foo() {
bar([1, 2, 3]);
bar(a);
-}
+};
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-685506.js b/deps/v8/test/mjsunit/regress/regress-crbug-685506.js
index 1be9c02bc0..087e98988a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-685506.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-685506.js
@@ -11,14 +11,15 @@ function init() {
for (var __v_1 = 0; __v_1 < 10016; __v_1++) {
a.push({});
}
- a.map(function() {}) + "";
+ a.map(function() {}) + '';
}
init();
function foo() {
a.push((a + "!", 23));
return a;
-}
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(23, foo()[10016]);
assertEquals(23, foo()[10017]);
assertEquals(23, foo()[10018]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-685634.js b/deps/v8/test/mjsunit/regress/regress-crbug-685634.js
index 2032744539..9b4d70d349 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-685634.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-685634.js
@@ -6,7 +6,10 @@
"use strict";
-function foo(f) { return f.apply(this, arguments); }
+function foo(f) {
+ return f.apply(this, arguments);
+};
+%PrepareFunctionForOptimization(foo);
function bar() {}
foo(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-685680.js b/deps/v8/test/mjsunit/regress/regress-crbug-685680.js
index 3c23e414cb..ff15965304 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-685680.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-685680.js
@@ -7,8 +7,8 @@
function foo(s) {
s = s + '0123456789012';
return s.indexOf('0');
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(0, foo('0'));
assertEquals(0, foo('0'));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-686102.js b/deps/v8/test/mjsunit/regress/regress-crbug-686102.js
index 13f4503541..7aa0a356c2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-686102.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-686102.js
@@ -8,7 +8,8 @@ var a = [];
Object.freeze(a);
function foo() {
return a.length;
-}
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(0, foo());
assertEquals(0, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-686427.js b/deps/v8/test/mjsunit/regress/regress-crbug-686427.js
index b1827a4d68..8ee3664fad 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-686427.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-686427.js
@@ -7,7 +7,8 @@
function f(a, base) {
a[base + 4] = 23;
return a;
-}
+};
+%PrepareFunctionForOptimization(f);
var i = 1073741824;
assertEquals(23, f({}, 1)[1 + 4]);
assertEquals(23, f([], 2)[2 + 4]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-686737.js b/deps/v8/test/mjsunit/regress/regress-crbug-686737.js
index aab7113c6a..5c85aac0d1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-686737.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-686737.js
@@ -4,9 +4,14 @@
// Flags: --allow-natives-syntax
-Object.prototype.__defineGetter__(0, () => { throw Error() });
-var a = [,0.1];
-function foo(i) { a[i]; }
+Object.prototype.__defineGetter__(0, () => {
+ throw Error();
+});
+var a = [, 0.1];
+function foo(i) {
+ a[i];
+};
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-687029.js b/deps/v8/test/mjsunit/regress/regress-crbug-687029.js
index c3ca01fad1..056e252583 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-687029.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-687029.js
@@ -7,7 +7,8 @@
function foo(x) {
x = Math.clz32(x);
return "a".indexOf("a", x);
-}
+};
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-687063.js b/deps/v8/test/mjsunit/regress/regress-crbug-687063.js
index 8c579331fb..c887f706c7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-687063.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-687063.js
@@ -19,9 +19,10 @@ function foo() {
return undefined;
}
});
- return lhs < rhs;
-}
+ return lhs < rhs;
+};
+%PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertEquals([Symbol.toPrimitive, 'valueOf', 'toString'], actual);
assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-687990.js b/deps/v8/test/mjsunit/regress/regress-crbug-687990.js
index 24100854c4..da82f2ec09 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-687990.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-687990.js
@@ -12,5 +12,6 @@ var foo = (function() {
return function() { o.x = null; }
})();
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-694416.js b/deps/v8/test/mjsunit/regress/regress-crbug-694416.js
index 14303f25bf..fe1017854a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-694416.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-694416.js
@@ -9,8 +9,8 @@ var boom = 42;
function foo(name) {
return this[name];
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(23, foo('good'));
assertEquals(23, foo('good'));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-694709.js b/deps/v8/test/mjsunit/regress/regress-crbug-694709.js
index 407b47732c..945024b60d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-694709.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-694709.js
@@ -6,7 +6,8 @@
function f(primitive) {
return primitive.__proto__;
-}
+};
+%PrepareFunctionForOptimization(f);
assertEquals(Symbol.prototype, f(Symbol()));
assertEquals(Symbol.prototype, f(Symbol()));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-696622.js b/deps/v8/test/mjsunit/regress/regress-crbug-696622.js
index 5b8d2e3ffe..79c4144101 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-696622.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-696622.js
@@ -8,6 +8,7 @@ class C {}
class D extends C { constructor() { super(...unresolved, 75) } }
D.__proto__ = null;
+%PrepareFunctionForOptimization(D);
assertThrows(() => new D(), TypeError);
assertThrows(() => new D(), TypeError);
%OptimizeFunctionOnNextCall(D);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-698607.js b/deps/v8/test/mjsunit/regress/regress-crbug-698607.js
index c9df4257f1..07d53cc21f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-698607.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-698607.js
@@ -6,16 +6,16 @@
function assertSame(expected, found) {
if (found === expected) {
- } else if ((expected !== expected) && (found !== found)) {
+ } else if (expected !== expected && found !== found) {
}
}
function foo() {
- var x = {var: 0.5};
+ var x = {var : 0.5};
assertSame(x, x.val);
return () => x;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-699282.js b/deps/v8/test/mjsunit/regress/regress-crbug-699282.js
index 726b1f7f32..21d96cbb22 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-699282.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-699282.js
@@ -5,7 +5,10 @@
// Flags: --allow-natives-syntax
var v = 1;
-function foo() { return Math.floor(-v / 125); }
+function foo() {
+ return Math.floor(-v / 125);
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(-1, foo());
%OptimizeFunctionOnNextCall(foo);
assertEquals(-1, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-700733.js b/deps/v8/test/mjsunit/regress/regress-crbug-700733.js
index 0a130fa34b..5683e7ac8d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-700733.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-700733.js
@@ -27,6 +27,7 @@
var obj = {};
obj.load_boom = smi_arr;
+ %PrepareFunctionForOptimization(do_keyed_load);
do_keyed_load(arrs);
do_keyed_load(arrs);
%OptimizeFunctionOnNextCall(do_keyed_load);
@@ -57,6 +58,7 @@
var obj = {};
obj.store_boom = smi_arr;
+ %PrepareFunctionForOptimization(do_keyed_store);
do_keyed_store(arrs);
do_keyed_store(arrs);
%OptimizeFunctionOnNextCall(do_keyed_store);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-703610.js b/deps/v8/test/mjsunit/regress/regress-crbug-703610.js
index a8901d56d1..3115fe78a4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-703610.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-703610.js
@@ -9,7 +9,8 @@ fun.prototype = 42;
new fun();
function f() {
return fun.prototype;
-}
+};
+%PrepareFunctionForOptimization(f);
assertEquals(42, f());
assertEquals(42, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-706642.js b/deps/v8/test/mjsunit/regress/regress-crbug-706642.js
index 4467c68495..a10cdc0509 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-706642.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-706642.js
@@ -31,6 +31,7 @@ var observer = new Proxy(A, {
}
});
+%PrepareFunctionForOptimization(B);
Reflect.construct(B, [], observer);
Reflect.construct(B, [], observer);
%OptimizeFunctionOnNextCall(B);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-708050-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-708050-1.js
index be26e10c84..4b4ae40090 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-708050-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-708050-1.js
@@ -4,14 +4,14 @@
// Flags: --allow-natives-syntax
-var v = {}
+var v = {};
function foo() {
v[0] = 5;
v[-0] = 27;
return v[0];
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(27, foo());
%OptimizeFunctionOnNextCall(foo);
assertEquals(27, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-708050-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-708050-2.js
index a7d9b29c2e..a8c50a4e62 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-708050-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-708050-2.js
@@ -10,8 +10,8 @@ function foo() {
v[0] = 5;
v[-0] = 27;
return v[0];
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(27, foo());
%OptimizeFunctionOnNextCall(foo);
assertEquals(27, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-709537.js b/deps/v8/test/mjsunit/regress/regress-crbug-709537.js
index 088c65b332..05c38eb211 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-709537.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-709537.js
@@ -6,7 +6,8 @@
function foo() {
return { 0: {}, x: {} };
-}
+};
+%PrepareFunctionForOptimization(foo);
var ref = foo();
assertEquals(ref, foo());
assertEquals(ref, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-709753.js b/deps/v8/test/mjsunit/regress/regress-crbug-709753.js
index e4d4d4ed37..315d8c6451 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-709753.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-709753.js
@@ -4,9 +4,11 @@
// Flags: --allow-natives-syntax
-function foo(a, i) { a[i].x; }
-
-var a = [,0.1];
+function foo(a, i) {
+ a[i].x;
+};
+%PrepareFunctionForOptimization(foo);
+var a = [, 0.1];
foo(a, 1);
foo(a, 1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-711166.js b/deps/v8/test/mjsunit/regress/regress-crbug-711166.js
index 7f4acb963d..93fbaf894a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-711166.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-711166.js
@@ -4,18 +4,25 @@
// Flags: --allow-natives-syntax
-'use strict'
+'use strict';
function g() {
var x = 1;
- try { undefined.x } catch (e) { x = e; }
- (function() { x });
+ try {
+ undefined.x;
+ } catch (e) {
+ x = e;
+ }
+ (function() {
+ x;
+ });
return x;
}
function f(a) {
var args = arguments;
assertInstanceof(g(), TypeError);
return args.length;
-}
+};
+%PrepareFunctionForOptimization(f);
assertEquals(1, f(0));
assertEquals(1, f(0));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-712802.js b/deps/v8/test/mjsunit/regress/regress-crbug-712802.js
index e23519e179..de99a589a2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-712802.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-712802.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function foo(...args) { return Array.isArray(args); }
-
+function foo(...args) {
+ return Array.isArray(args);
+};
+%PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715151.js b/deps/v8/test/mjsunit/regress/regress-crbug-715151.js
index c0b2c5dba8..3bd99ec4a4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-715151.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-715151.js
@@ -8,7 +8,8 @@ function foo() {
var a = [0];
Object.preventExtensions(a);
return a.pop();
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715404.js b/deps/v8/test/mjsunit/regress/regress-crbug-715404.js
index 8ff2d00ba0..0667d201ec 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-715404.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-715404.js
@@ -4,7 +4,10 @@
// Flags: --allow-natives-syntax
-function foo() { Array(-1); }
+function foo() {
+ Array(-1);
+};
+%PrepareFunctionForOptimization(foo);
assertThrows(foo, RangeError);
assertThrows(foo, RangeError);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715862.js b/deps/v8/test/mjsunit/regress/regress-crbug-715862.js
index 60e836ddc4..b64ff19691 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-715862.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-715862.js
@@ -8,8 +8,8 @@ function f(a) {
a.x = 0;
a[1] = 0.1;
a.x = {};
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(new Array(1));
f(new Array());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-719479.js b/deps/v8/test/mjsunit/regress/regress-crbug-719479.js
index dac49de3b7..40e3895175 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-719479.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-719479.js
@@ -15,9 +15,10 @@ function bar(expected, found) {
};
bar([{}, 6, NaN], [1.8, , NaN]);
function foo() {
- var a = [1,2,3,4];
+ var a = [1, 2, 3, 4];
bar(a.length, a.length);
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-722756.js b/deps/v8/test/mjsunit/regress/regress-crbug-722756.js
index b4d82ad939..aa5a7352e3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-722756.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-722756.js
@@ -7,31 +7,33 @@
var array = [[{}], [1.1]];
function transition() {
- for(var i = 0; i < array.length; i++){
+ for (var i = 0; i < array.length; i++) {
var arr = array[i];
arr[0] = {};
}
}
-var double_arr2 = [1.1,2.2];
+var double_arr2 = [1.1, 2.2];
var flag = 0;
function swap() {
- try {} catch(e) {} // Prevent Crankshaft from inlining this.
+ try {
+ } catch (e) {
+ } // Prevent Crankshaft from inlining this.
if (flag == 1) {
array[1] = double_arr2;
}
}
var expected = 6.176516726456e-312;
-function f(){
+function f() {
swap();
double_arr2[0] = 1;
transition();
double_arr2[1] = expected;
-}
-
-for(var i = 0; i < 3; i++) {
+};
+%PrepareFunctionForOptimization(f);
+for (var i = 0; i < 3; i++) {
f();
}
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-723455.js b/deps/v8/test/mjsunit/regress/regress-crbug-723455.js
index 85f5e3c1d5..d42b15bdcf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-723455.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-723455.js
@@ -8,8 +8,8 @@ function f(a) {
a.x = 0;
a[0] = 0.1;
a.x = {};
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(new Array(1));
f(new Array(1));
f(new Array());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-724153.js b/deps/v8/test/mjsunit/regress/regress-crbug-724153.js
index e167bec2f5..a571f8e0bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-724153.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-724153.js
@@ -11,6 +11,7 @@
}
src += 'c) { return a + c })';
var f = eval(src);
+ %PrepareFunctionForOptimization(f);
assertEquals(NaN, f(1));
assertEquals(NaN, f(2));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-724608.js b/deps/v8/test/mjsunit/regress/regress-crbug-724608.js
index cd91211f07..54ffddd9f4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-724608.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-724608.js
@@ -6,7 +6,8 @@
function foo(x) {
return {['p']: 0, x};
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
var a = {['p']: ''};
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-725201.js b/deps/v8/test/mjsunit/regress/regress-crbug-725201.js
index c540b6dffc..bedad8fc55 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-725201.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-725201.js
@@ -8,6 +8,7 @@ function __f_1() {
function __f_2() {
Array.prototype.__proto__ = { 77e4 : null };
}
+ %PrepareFunctionForOptimization(__f_2);
__f_2();
%OptimizeFunctionOnNextCall(__f_2);
__f_2();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-729573-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-729573-1.js
index 1b596abe2f..8d1ecf3d42 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-729573-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-729573-1.js
@@ -5,63 +5,69 @@
// Flags: --allow-natives-syntax
(function() {
- function foo() {
- var a = foo.bind(this);
- %DeoptimizeNow();
- if (!a) return a;
- return 0;
- }
-
- assertEquals(0, foo());
- assertEquals(0, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo());
+function foo() {
+ var a = foo.bind(this);
+ %DeoptimizeNow();
+ if (!a) return a;
+ return 0;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo());
+assertEquals(0, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo());
})();
(function() {
- "use strict";
-
- function foo() {
- var a = foo.bind(this);
- %DeoptimizeNow();
- if (!a) return a;
- return 0;
- }
+"use strict";
- assertEquals(0, foo());
- assertEquals(0, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo());
+function foo() {
+ var a = foo.bind(this);
+ %DeoptimizeNow();
+ if (!a) return a;
+ return 0;
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(0, foo());
+assertEquals(0, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo());
})();
(function() {
- function foo() {
- var a = foo.bind(this);
- %DeoptimizeNow();
- if (!a) return a;
- return 0;
- }
- foo.prototype = {custom: "prototype"};
+function foo() {
+ var a = foo.bind(this);
+ %DeoptimizeNow();
+ if (!a) return a;
+ return 0;
+};
+%PrepareFunctionForOptimization(foo);
+foo.prototype = {
+ custom: 'prototype'
+};
- assertEquals(0, foo());
- assertEquals(0, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo());
+assertEquals(0, foo());
+assertEquals(0, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo());
})();
(function() {
- "use strict";
+"use strict";
- function foo() {
- var a = foo.bind(this);
- %DeoptimizeNow();
- if (!a) return a;
- return 0;
- }
- foo.prototype = {custom: "prototype"};
+function foo() {
+ var a = foo.bind(this);
+ %DeoptimizeNow();
+ if (!a) return a;
+ return 0;
+};
+%PrepareFunctionForOptimization(foo);
+foo.prototype = {
+ custom: 'prototype'
+};
- assertEquals(0, foo());
- assertEquals(0, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0, foo());
+assertEquals(0, foo());
+assertEquals(0, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo());
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-729573-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-729573-2.js
index 45486ec2a3..16910a3a3d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-729573-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-729573-2.js
@@ -12,10 +12,12 @@ function bar(x) {
function foo(f) {
var a = bar.bind(42, 1);
return f() ? 0 : a;
+};
+%PrepareFunctionForOptimization(foo);
+function t() {
+ return true;
}
-function t() { return true; }
-
assertEquals(0, foo(t));
assertEquals(0, foo(t));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-732169.js b/deps/v8/test/mjsunit/regress/regress-crbug-732169.js
index 41dc9d2f38..913b86288c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-732169.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-732169.js
@@ -7,6 +7,7 @@
(function TestGeneratorMaterialization() {
function* f([x]) { yield x }
// No warm-up of {f} to trigger soft deopt.
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
var gen = f([23]);
assertEquals("[object Generator]", gen.toString());
@@ -22,6 +23,7 @@
return gen;
}
function h() { f() }
+ %PrepareFunctionForOptimization(h);
// Enough warm-up to make {p} an in-object property.
for (var i = 0; i < 100; ++i) { g(); h(); }
%OptimizeFunctionOnNextCall(h);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-736575.js b/deps/v8/test/mjsunit/regress/regress-crbug-736575.js
index 3622b09b97..7edf684166 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-736575.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-736575.js
@@ -5,9 +5,9 @@
// Flags: --allow-natives-syntax
function f() {
- return [...[/*hole*/, 2.3]];
-}
-
+ return [...[, /*hole*/ 2.3]];
+};
+%PrepareFunctionForOptimization(f);
assertEquals(undefined, f()[0]);
assertEquals(undefined, f()[0]);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-736633.js b/deps/v8/test/mjsunit/regress/regress-crbug-736633.js
index 4c9f0cc97d..90badabd47 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-736633.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-736633.js
@@ -13,8 +13,10 @@ function f(x) {
case 2147483647:
return x + 1;
}
+
return 0;
-}
+};
+%PrepareFunctionForOptimization(f);
assertEquals(1, f(0));
assertEquals(2, f(1));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-740116.js b/deps/v8/test/mjsunit/regress/regress-crbug-740116.js
index dc6fb6401c..de894a21cf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-740116.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-740116.js
@@ -5,7 +5,10 @@
// Flags: --allow-natives-syntax
(function TestReflectGetPrototypeOfOnPrimitive() {
- function f() { return Reflect.getPrototypeOf(""); }
+ function f() {
+ return Reflect.getPrototypeOf('');
+ };
+ %PrepareFunctionForOptimization(f);
assertThrows(f, TypeError);
assertThrows(f, TypeError);
%OptimizeFunctionOnNextCall(f);
@@ -13,7 +16,10 @@
})();
(function TestObjectGetPrototypeOfOnPrimitive() {
- function f() { return Object.getPrototypeOf(""); }
+ function f() {
+ return Object.getPrototypeOf('');
+ };
+ %PrepareFunctionForOptimization(f);
assertSame(String.prototype, f());
assertSame(String.prototype, f());
%OptimizeFunctionOnNextCall(f);
@@ -21,7 +27,10 @@
})();
(function TestDunderProtoOnPrimitive() {
- function f() { return "".__proto__; }
+ function f() {
+ return ''.__proto__;
+ };
+ %PrepareFunctionForOptimization(f);
assertSame(String.prototype, f());
assertSame(String.prototype, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-741078.js b/deps/v8/test/mjsunit/regress/regress-crbug-741078.js
index 8dfea72617..2dd071b807 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-741078.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-741078.js
@@ -4,8 +4,8 @@
// Flags: --allow-natives-syntax --expose-gc
-function* gen() {}
-
+function* gen() {};
+%PrepareFunctionForOptimization(gen);
(function warmup() {
for (var i = 0; i < 100; ++i) {
var g = gen();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-747062.js b/deps/v8/test/mjsunit/regress/regress-crbug-747062.js
index 7e0e92ad7e..bd82ec69be 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-747062.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-747062.js
@@ -5,7 +5,10 @@
// Flags: --allow-natives-syntax
(function TestNonCallableForEach() {
- function foo() { [].forEach(undefined) }
+ function foo() {
+ [].forEach(undefined);
+ };
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -13,7 +16,14 @@
})();
(function TestNonCallableForEachCaught() {
- function foo() { try { [].forEach(undefined) } catch(e) { return e } }
+ function foo() {
+ try {
+ [].forEach(undefined);
+ } catch (e) {
+ return e;
+ }
+ };
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), TypeError);
assertInstanceof(foo(), TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -21,7 +31,10 @@
})();
(function TestNonCallableMap() {
- function foo() { [].map(undefined); }
+ function foo() {
+ [].map(undefined);
+ };
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -29,7 +42,14 @@
})();
(function TestNonCallableMapCaught() {
- function foo() { try { [].map(undefined) } catch(e) { return e } }
+ function foo() {
+ try {
+ [].map(undefined);
+ } catch (e) {
+ return e;
+ }
+ };
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), TypeError);
assertInstanceof(foo(), TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -37,7 +57,10 @@
})();
(function TestNonCallableFilter() {
- function foo() { [].filter(undefined); }
+ function foo() {
+ [].filter(undefined);
+ };
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo, TypeError);
assertThrows(foo, TypeError);
%OptimizeFunctionOnNextCall(foo);
@@ -45,7 +68,14 @@
})();
(function TestNonCallableFilterCaught() {
- function foo() { try { [].filter(undefined) } catch(e) { return e } }
+ function foo() {
+ try {
+ [].filter(undefined);
+ } catch (e) {
+ return e;
+ }
+ };
+ %PrepareFunctionForOptimization(foo);
assertInstanceof(foo(), TypeError);
assertInstanceof(foo(), TypeError);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-747979.js b/deps/v8/test/mjsunit/regress/regress-crbug-747979.js
index bbdea1ddf5..add85635a8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-747979.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-747979.js
@@ -8,8 +8,8 @@ function f(a) {
%HeapObjectVerify(a);
a[1] = 0;
%HeapObjectVerify(a);
-}
-
+};
+%PrepareFunctionForOptimization(f);
function foo() {}
var arr1 = [0];
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-751715.js b/deps/v8/test/mjsunit/regress/regress-crbug-751715.js
index 68241ebb18..f1e20a4fe1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-751715.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-751715.js
@@ -8,6 +8,7 @@ class Base {}
class Derived extends Base {
constructor() { super(); }
}
+%PrepareFunctionForOptimization(Derived);
var proxy = new Proxy(Base, { get() {} });
assertDoesNotThrow(() => Reflect.construct(Derived, []));
assertThrows(() => Reflect.construct(Derived, [], proxy), TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-752481.js b/deps/v8/test/mjsunit/regress/regress-crbug-752481.js
index d0cc6a32a2..3cf181dd53 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-752481.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-752481.js
@@ -7,6 +7,7 @@
const A = class A {}
function test(foo) {
+ %PrepareFunctionForOptimization(foo);
assertThrows(foo);
assertThrows(foo);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-752826.js b/deps/v8/test/mjsunit/regress/regress-crbug-752826.js
index d0e124ed2b..8cb6111350 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-752826.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-752826.js
@@ -15,7 +15,8 @@ function g() {
}
function f() {
return g(23, 42);
-}
+};
+%PrepareFunctionForOptimization(f);
assertEquals(42, f());
assertEquals(42, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-752846.js b/deps/v8/test/mjsunit/regress/regress-crbug-752846.js
index c2095dc57f..9daf3fbbe3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-752846.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-752846.js
@@ -17,5 +17,5 @@ for (let val of values) {
var proto = Object.getPrototypeOf(val);
var proxy = new Proxy({}, {});
- Object.setPrototypeOf(proto, proxy);
+ Object.setPrototypeOf(proxy, proto);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-755044.js b/deps/v8/test/mjsunit/regress/regress-crbug-755044.js
index 45f0e84661..6032e9a6d1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-755044.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-755044.js
@@ -4,12 +4,13 @@
// Flags: --allow-natives-syntax
-function foo(f){
+function foo(f) {
f.caller;
}
function bar(f) {
new foo(f);
-}
+};
+%PrepareFunctionForOptimization(bar);
bar(function() {});
%OptimizeFunctionOnNextCall(bar);
bar(function() {});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-757199.js b/deps/v8/test/mjsunit/regress/regress-crbug-757199.js
index d2e9502b39..879f6b3046 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-757199.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-757199.js
@@ -16,12 +16,12 @@ function g(v) {
}
function f() {
g(obj1);
-}
-
+};
+%PrepareFunctionForOptimization(f);
obj1.x = 0;
f();
-obj1.__defineGetter__("x", function() {});
+obj1.__defineGetter__('x', function() {});
g(obj2);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-762874-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-762874-1.js
index ab1b7c1578..0925b61555 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-762874-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-762874-1.js
@@ -10,8 +10,8 @@ const s = 'A'.repeat(maxLength);
function foo(s) {
let x = s.indexOf("", maxLength);
return x === maxLength;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertTrue(foo(s));
assertTrue(foo(s));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-762874-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-762874-2.js
index 6d301b5ae3..4daea83ba0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-762874-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-762874-2.js
@@ -10,8 +10,8 @@ const s = 'A'.repeat(maxLength);
function foo(s) {
let x = s.lastIndexOf("", maxLength);
return x === maxLength;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertTrue(foo(s));
assertTrue(foo(s));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-766635.js b/deps/v8/test/mjsunit/regress/regress-crbug-766635.js
index ae0de0a600..9676936206 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-766635.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-766635.js
@@ -4,29 +4,80 @@
// Flags: --allow-natives-syntax
-function classOf() {; }
-function PrettyPrint(value) { return ""; }
-function fail() { }
-function deepEquals(a, b) { if (a === b) { if (a === 0)1 / b; return true; } if (typeof a != typeof b) return false; if (typeof a == "number") return isNaN(); if (typeof a !== "object" && typeof a !== "function") return false; var objectClass = classOf(); if (b) return false; if (objectClass === "RegExp") {; } if (objectClass === "Function") return false; if (objectClass === "Array") { var elementCount = 0; if (a.length != b.length) { return false; } for (var i = 0; i < a.length; i++) { if (a[i][i]) return false; } return true; } if (objectClass == "String" || objectClass == "Number" || objectClass == "Boolean" || objectClass == "Date") { if (a.valueOf()) return false; }; }
-assertSame = function assertSame() { if (found === expected) { if (1 / found) return; } else if ((expected !== expected) && (found !== found)) { return; }; }; assertEquals = function assertEquals(expected, found, name_opt) { if (!deepEquals(found, expected)) { fail(PrettyPrint(expected),); } };
+function classOf() {
+ ;
+}
+function PrettyPrint(value) {
+ return '';
+}
+function fail() {}
+function deepEquals(a, b) {
+ if (a === b) {
+ if (a === 0) 1 / b;
+ return true;
+ }
+ if (typeof a != typeof b) return false;
+ if (typeof a == 'number') return isNaN();
+ if (typeof a !== 'object' && typeof a !== 'function') return false;
+ var objectClass = classOf();
+ if (b) return false;
+ if (objectClass === 'RegExp') {
+ ;
+ }
+ if (objectClass === 'Function') return false;
+ if (objectClass === 'Array') {
+ var elementCount = 0;
+ if (a.length != b.length) {
+ return false;
+ }
+ for (var i = 0; i < a.length; i++) {
+ if (a[i][i]) return false;
+ }
+ return true;
+ }
+ if (objectClass == 'String' || objectClass == 'Number' ||
+ objectClass == 'Boolean' || objectClass == 'Date') {
+ if (a.valueOf()) return false;
+ };
+}
+assertSame = function assertSame() {
+ if (found === expected) {
+ if (1 / found) return;
+ } else if (expected !== expected && found !== found) {
+ return;
+ };
+};
+assertEquals = function assertEquals(expected, found, name_opt) {
+ if (!deepEquals(found, expected)) {
+ fail(PrettyPrint(expected));
+ }
+};
var __v_3 = {};
function __f_0() {
assertEquals();
}
try {
__f_0();
-} catch(e) {; }
+} catch (e) {
+ ;
+}
__v_2 = 0;
-o2 = {y:1.5};
+o2 = {
+ y: 1.5
+};
o2.y = 0;
o3 = o2.y;
function __f_1() {
for (var __v_1 = 0; __v_1 < 10; __v_1++) {
__v_2 += __v_3.x + o3.foo;
- [ 3].filter(__f_9);
+ [3].filter(__f_9);
}
-}
+};
+%PrepareFunctionForOptimization(__f_1);
__f_1();
%OptimizeFunctionOnNextCall(__f_1);
__f_1();
-function __f_9(){ "use __f_9"; assertEquals( this); }
+function __f_9() {
+ 'use __f_9';
+ assertEquals(this);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-768080.js b/deps/v8/test/mjsunit/regress/regress-crbug-768080.js
index cfd1fc1f35..1e32c13d02 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-768080.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-768080.js
@@ -11,7 +11,8 @@
}
function f() {
return new g();
- }
+ };
+ %PrepareFunctionForOptimization(f);
new C(); // Warm-up!
assertThrows(f, TypeError);
assertThrows(f, TypeError);
@@ -29,7 +30,8 @@
}
function f() {
return new g();
- }
+ };
+ %PrepareFunctionForOptimization(f);
new C(); // Warm-up!
assertThrows(f, TypeError);
assertThrows(f, TypeError);
@@ -43,7 +45,8 @@
}
function f() {
return new g();
- }
+ };
+ %PrepareFunctionForOptimization(f);
assertThrows(f, TypeError);
assertThrows(f, TypeError);
%OptimizeFunctionOnNextCall(f);
@@ -56,7 +59,8 @@
}
function f() {
return g();
- }
+ };
+ %PrepareFunctionForOptimization(f);
assertThrows(f, TypeError);
assertThrows(f, TypeError);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-768367.js b/deps/v8/test/mjsunit/regress/regress-crbug-768367.js
index d1041f32ce..c04937b0ad 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-768367.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-768367.js
@@ -6,8 +6,10 @@
const o = {};
-function foo() { return o[4294967295]; }
-
+function foo() {
+ return o[4294967295];
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo());
assertEquals(undefined, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-769852.js b/deps/v8/test/mjsunit/regress/regress-crbug-769852.js
index 120ea0109e..9fb96244d6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-769852.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-769852.js
@@ -7,8 +7,9 @@
function f(o) {
function g() {}
Object.keys(o).forEach(suite => g());
-}
+};
+%PrepareFunctionForOptimization(f);
assertDoesNotThrow(() => f({}));
-assertDoesNotThrow(() => f({ x:0 }));
+assertDoesNotThrow(() => f({x: 0}));
%OptimizeFunctionOnNextCall(f);
-assertDoesNotThrow(() => f({ x:0 }));
+assertDoesNotThrow(() => f({x: 0}));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-770543.js b/deps/v8/test/mjsunit/regress/regress-crbug-770543.js
index 5397a499c3..ac8301ce21 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-770543.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-770543.js
@@ -10,7 +10,8 @@
Object.getOwnPropertyDescriptor(g, "caller");
};
[0].forEach(g);
- }
+ };
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -23,7 +24,8 @@
}
function f() {
[0].forEach(g);
- }
+ };
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-770581.js b/deps/v8/test/mjsunit/regress/regress-crbug-770581.js
index 64edec97cd..c07b9fa50b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-770581.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-770581.js
@@ -6,12 +6,12 @@
function f(callback) {
[Object].forEach(callback);
-}
-
+};
+%PrepareFunctionForOptimization(f);
function message_of_f() {
try {
f("a teapot");
- } catch(e) {
+ } catch (e) {
return String(e);
}
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-771971.js b/deps/v8/test/mjsunit/regress/regress-crbug-771971.js
index cb40db5aa3..cd3aa737b2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-771971.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-771971.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function f() { Object.is(); }
-
+function f() {
+ Object.is();
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772610.js b/deps/v8/test/mjsunit/regress/regress-crbug-772610.js
index d68ebbf2be..6681966032 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-772610.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772610.js
@@ -11,6 +11,7 @@ function f() {
%_DeoptimizeNow();
return o.length;
}
+%PrepareFunctionForOptimization(f);
assertEquals(1, f());
assertEquals(1, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772672.js b/deps/v8/test/mjsunit/regress/regress-crbug-772672.js
index 86e738344a..02ba84da30 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-772672.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772672.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function foo() { return new Array(120 * 1024); }
-
+function foo() {
+ return new Array(120 * 1024);
+};
+%PrepareFunctionForOptimization(foo);
foo()[0] = 0.1;
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772689.js b/deps/v8/test/mjsunit/regress/regress-crbug-772689.js
index 32e220daa7..d0f753c059 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-772689.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772689.js
@@ -9,13 +9,13 @@ const A = class A extends Array {
super();
this.y = 1;
}
-}
+};
function foo(x) {
var a = new A();
if (x) return a.y;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(false));
assertEquals(undefined, foo(false));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-772720.js b/deps/v8/test/mjsunit/regress/regress-crbug-772720.js
index 3e359f6c16..dc83bfb4b7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-772720.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-772720.js
@@ -10,6 +10,7 @@ function f() {
local += 'abcdefghijkl' + (0 + global);
global += 'abcdefghijkl';
}
+%PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-774459.js b/deps/v8/test/mjsunit/regress/regress-crbug-774459.js
index 4263c3252d..dc6e57c8f1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-774459.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-774459.js
@@ -5,16 +5,16 @@
// Flags: --allow-natives-syntax
(function() {
- const m = new Map();
- const k = Math.pow(2, 31) - 1;
- m.set(k, 1);
+const m = new Map();
+const k = Math.pow(2, 31) - 1;
+m.set(k, 1);
- function foo(m, k) {
- return m.get(k | 0);
- }
-
- assertEquals(1, foo(m, k));
- assertEquals(1, foo(m, k));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(1, foo(m, k));
+function foo(m, k) {
+ return m.get(k | 0);
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(1, foo(m, k));
+assertEquals(1, foo(m, k));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo(m, k));
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-776511.js b/deps/v8/test/mjsunit/regress/regress-crbug-776511.js
index f757bc2cc2..62805c9c7d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-776511.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-776511.js
@@ -26,6 +26,7 @@ function __getRandomProperty(obj, seed) {
};
return __v_59904.filter(__v_59909);
};
+ %PrepareFunctionForOptimization(__v_59906);
print(__v_59906());
__v_59904[__getRandomProperty(__v_59904, 366855)] = this, gc();
print(__v_59906());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-779367.js b/deps/v8/test/mjsunit/regress/regress-crbug-779367.js
index 3836b34fc1..6784a490a5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-779367.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-779367.js
@@ -6,8 +6,8 @@
function g(o) {
return o.x;
-}
-
+};
+%PrepareFunctionForOptimization(g);
Object.defineProperty(g, 'x', {set(v) {}});
g.prototype = 1;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js
index fb3f7da54e..50693eb3ac 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781116-1.js
@@ -9,13 +9,17 @@ function baz(obj, store) {
}
function bar(store) {
baz(Array.prototype, store);
-}
+};
+%PrepareFunctionForOptimization(bar);
bar(false);
bar(false);
%OptimizeFunctionOnNextCall(bar);
bar(true);
-function foo() { [].push(); }
+function foo() {
+ [].push();
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js
index 0af8d6f1a8..3deb4a5b6d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781116-2.js
@@ -9,13 +9,17 @@ function baz(obj, store) {
}
function bar(store) {
baz(Object.prototype, store);
-}
+};
+%PrepareFunctionForOptimization(bar);
bar(false);
bar(false);
%OptimizeFunctionOnNextCall(bar);
bar(true);
-function foo() { [].push(); }
+function foo() {
+ [].push();
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js
index 6048fb9250..4617c38c36 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781506-1.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function foo(a) { return a[0]; }
-
+function foo(a) {
+ return a[0];
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo(x => x));
assertEquals(undefined, foo({}));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js
index 71801df14a..a7239f7000 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781506-2.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function foo(o) { return o[0]; }
-
+function foo(o) {
+ return o[0];
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo({}));
Array.prototype[0] = 0;
assertEquals(undefined, foo({}));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js
index 70b29896f7..b73dac9a0f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781506-3.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function foo(a, i) { return a[i] + 0.5; }
-
+function foo(a, i) {
+ return a[i] + 0.5;
+};
+%PrepareFunctionForOptimization(foo);
foo({}, 1);
Array.prototype.unshift(1.5);
assertTrue(Number.isNaN(foo({}, 1)));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-781583.js b/deps/v8/test/mjsunit/regress/regress-crbug-781583.js
index fd14ad7bce..f2c85fff94 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-781583.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-781583.js
@@ -6,8 +6,8 @@
function* generator(a) {
a.pop().next();
-}
-
+};
+%PrepareFunctionForOptimization(generator);
function prepareGenerators(n) {
var a = [{ next: () => 0 }];
for (var i = 0; i < n; ++i) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-786723.js b/deps/v8/test/mjsunit/regress/regress-crbug-786723.js
index d4e0957c5e..f6f8c24f66 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-786723.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-786723.js
@@ -10,6 +10,8 @@ function f() {
o.x = 1;
return Object.create(o);
};
+ %PrepareFunctionForOptimization(g);
+ ;
gc();
o.x = 10;
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js
index 0d51f8a4a0..cc4a80c0d3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-791245-1.js
@@ -4,14 +4,14 @@
// Flags: --allow-natives-syntax
-const s = new Map;
+const s = new Map();
function foo(s) {
const i = s[Symbol.iterator]();
i.next();
return i;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
console.log(foo(s));
console.log(foo(s));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js
index 6734ed2baa..9f40a110b9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-791245-2.js
@@ -4,14 +4,14 @@
// Flags: --allow-natives-syntax
-const s = new Set;
+const s = new Set();
function foo(s) {
const i = s[Symbol.iterator]();
i.next();
return i;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
console.log(foo(s));
console.log(foo(s));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-801627.js b/deps/v8/test/mjsunit/regress/regress-crbug-801627.js
index 0e51fff2b1..cfaa2b72e0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-801627.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-801627.js
@@ -19,6 +19,7 @@ class Derived extends Base {
// Feed a bound function as new.target
// to the profiler, so HeapObjectMatcher
// can find it.
+%PrepareFunctionForOptimization(Derived);
Reflect.construct(Derived, [], Object.bind());
%OptimizeFunctionOnNextCall(Derived);
new Derived();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-802333.js b/deps/v8/test/mjsunit/regress/regress-crbug-802333.js
index 35d762187b..63697bac5d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-802333.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-802333.js
@@ -5,21 +5,19 @@
// Flags: --allow-natives-syntax
function deferred_func() {
- class C {
- method1() {
-
- }
- }
+ class C {
+ method1() {}
+ }
}
let bound = (a => a).bind(this, 0);
function opt() {
- deferred_func.prototype; // ReduceJSLoadNamed
-
- return bound();
-}
+ deferred_func.prototype; // ReduceJSLoadNamed
+ return bound();
+};
+%PrepareFunctionForOptimization(opt);
assertEquals(0, opt());
%OptimizeFunctionOnNextCall(opt);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-819086.js b/deps/v8/test/mjsunit/regress/regress-crbug-819086.js
index 85f76a1fb5..edc13ec63c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-819086.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-819086.js
@@ -6,8 +6,8 @@
function foo() {
return [...[, -Infinity]];
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo()[0];
foo()[0];
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-819298.js b/deps/v8/test/mjsunit/regress/regress-crbug-819298.js
index 0c28aeb48c..f4ebd75eda 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-819298.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-819298.js
@@ -8,8 +8,8 @@ var a = new Int32Array(2);
function foo(base) {
a[base - 91] = 1;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
foo("");
foo("");
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-820820.js b/deps/v8/test/mjsunit/regress/regress-crbug-820820.js
index bdee558991..1dd93839b1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-820820.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-820820.js
@@ -17,6 +17,7 @@ function foo() {
try { undefined[0] = bar(); } catch (e) { }
Math.min(bar(), bar(), bar());
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-822284.js b/deps/v8/test/mjsunit/regress/regress-crbug-822284.js
index 97a38259e3..e27e12f69f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-822284.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-822284.js
@@ -10,11 +10,11 @@ function foo(a) {
}
// Add '1' to the number to string table (as SeqString).
+%PrepareFunctionForOptimization(foo);
String.fromCharCode(49);
-
// Turn the SeqString into a ThinString via forced internalization.
const o = {};
-o[(1).toString()] = 1;
+o[1..toString()] = 1;
assertEquals(49, foo(1));
assertEquals(49, foo(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-825045.js b/deps/v8/test/mjsunit/regress/regress-crbug-825045.js
index 34af20897a..85367f0118 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-825045.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-825045.js
@@ -4,10 +4,16 @@
// Flags: --allow-natives-syntax
-const obj = new class A extends (async function (){}.constructor) {};
+const obj = new class A extends async function
+() {}
+.constructor {}
+();
delete obj.name;
Number.prototype.__proto__ = obj;
-function foo() { return obj.bind(); }
+function foo() {
+ return obj.bind();
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-879560.js b/deps/v8/test/mjsunit/regress/regress-crbug-879560.js
index a17deadfcd..da5c6d9ec0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-879560.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-879560.js
@@ -7,8 +7,10 @@
function foo() {
var x = 1;
x = undefined;
- while (x--) ;
-}
+ while (x--)
+ ;
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-879898.js b/deps/v8/test/mjsunit/regress/regress-crbug-879898.js
index c97001ae5c..b4e5c88a0f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-879898.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-879898.js
@@ -6,7 +6,8 @@
function foo() {
return Symbol.toPrimitive++;
-}
+};
+%PrepareFunctionForOptimization(foo);
assertThrows(foo);
%OptimizeFunctionOnNextCall(foo);
assertThrows(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-880207.js b/deps/v8/test/mjsunit/regress/regress-crbug-880207.js
index 09796a9ff4..6778b5e1b0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-880207.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-880207.js
@@ -7,8 +7,8 @@
(function TestOptimizedFastExpm1MinusZero() {
function foo() {
return Object.is(Math.expm1(-0), -0);
- }
-
+ };
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo());
@@ -17,12 +17,12 @@
(function TestOptimizedExpm1MinusZeroSlowPath() {
function f(x) {
return Object.is(Math.expm1(x), -0);
- }
-
+ };
+ %PrepareFunctionForOptimization(f);
function g() {
return f(-0);
- }
-
+ };
+ %PrepareFunctionForOptimization(g);
f(0);
// Compile function optimistically for numbers (with fast inlined
// path for Math.expm1).
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-884933.js b/deps/v8/test/mjsunit/regress/regress-crbug-884933.js
index 447d303bbf..229e0b0163 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-884933.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-884933.js
@@ -6,80 +6,80 @@
// Test Uint8 -> Word64 conversions.
(function() {
- function bar(x, y) {
- return x + y;
- }
-
- bar(0.1, 0.2);
- bar(0.1, 0.2);
-
- function foo(dv) {
- return bar(dv.getUint8(0, true), 0xFFFFFFFF);
- }
-
- const dv = new DataView(new ArrayBuffer(8));
- assertEquals(0xFFFFFFFF, foo(dv));
- assertEquals(0xFFFFFFFF, foo(dv));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0xFFFFFFFF, foo(dv));
+function bar(x, y) {
+ return x + y;
+}
+
+bar(0.1, 0.2);
+bar(0.1, 0.2);
+
+function foo(dv) {
+ return bar(dv.getUint8(0, true), 0xFFFFFFFF);
+};
+%PrepareFunctionForOptimization(foo);
+const dv = new DataView(new ArrayBuffer(8));
+assertEquals(0xFFFFFFFF, foo(dv));
+assertEquals(0xFFFFFFFF, foo(dv));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0xFFFFFFFF, foo(dv));
})();
// Test Int8 -> Word64 conversions.
(function() {
- function bar(x, y) {
- return x + y;
- }
-
- bar(0.1, 0.2);
- bar(0.1, 0.2);
-
- function foo(dv) {
- return bar(dv.getInt8(0, true), 0xFFFFFFFF);
- }
-
- const dv = new DataView(new ArrayBuffer(8));
- assertEquals(0xFFFFFFFF, foo(dv));
- assertEquals(0xFFFFFFFF, foo(dv));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0xFFFFFFFF, foo(dv));
+function bar(x, y) {
+ return x + y;
+}
+
+bar(0.1, 0.2);
+bar(0.1, 0.2);
+
+function foo(dv) {
+ return bar(dv.getInt8(0, true), 0xFFFFFFFF);
+};
+%PrepareFunctionForOptimization(foo);
+const dv = new DataView(new ArrayBuffer(8));
+assertEquals(0xFFFFFFFF, foo(dv));
+assertEquals(0xFFFFFFFF, foo(dv));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0xFFFFFFFF, foo(dv));
})();
// Test Uint16 -> Word64 conversions.
(function() {
- function bar(x, y) {
- return x + y;
- }
-
- bar(0.1, 0.2);
- bar(0.1, 0.2);
-
- function foo(dv) {
- return bar(dv.getUint16(0, true), 0xFFFFFFFF);
- }
-
- const dv = new DataView(new ArrayBuffer(8));
- assertEquals(0xFFFFFFFF, foo(dv));
- assertEquals(0xFFFFFFFF, foo(dv));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0xFFFFFFFF, foo(dv));
+function bar(x, y) {
+ return x + y;
+}
+
+bar(0.1, 0.2);
+bar(0.1, 0.2);
+
+function foo(dv) {
+ return bar(dv.getUint16(0, true), 0xFFFFFFFF);
+};
+%PrepareFunctionForOptimization(foo);
+const dv = new DataView(new ArrayBuffer(8));
+assertEquals(0xFFFFFFFF, foo(dv));
+assertEquals(0xFFFFFFFF, foo(dv));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0xFFFFFFFF, foo(dv));
})();
// Test Int16 -> Word64 conversions.
(function() {
- function bar(x, y) {
- return x + y;
- }
-
- bar(0.1, 0.2);
- bar(0.1, 0.2);
-
- function foo(dv) {
- return bar(dv.getInt16(0, true), 0xFFFFFFFF);
- }
-
- const dv = new DataView(new ArrayBuffer(8));
- assertEquals(0xFFFFFFFF, foo(dv));
- assertEquals(0xFFFFFFFF, foo(dv));
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(0xFFFFFFFF, foo(dv));
+function bar(x, y) {
+ return x + y;
+}
+
+bar(0.1, 0.2);
+bar(0.1, 0.2);
+
+function foo(dv) {
+ return bar(dv.getInt16(0, true), 0xFFFFFFFF);
+};
+%PrepareFunctionForOptimization(foo);
+const dv = new DataView(new ArrayBuffer(8));
+assertEquals(0xFFFFFFFF, foo(dv));
+assertEquals(0xFFFFFFFF, foo(dv));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0xFFFFFFFF, foo(dv));
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-890243.js b/deps/v8/test/mjsunit/regress/regress-crbug-890243.js
index 0d889b2787..e6a9ebca89 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-890243.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-890243.js
@@ -5,23 +5,28 @@
// Flags: --allow-natives-syntax
// We need a SpeculativeNumberAdd with Number feedback.
-function bar(x) { return x + x; }
+function bar(x) {
+ return x + x;
+}
bar(0.1);
// We also need an indirection via an object field such
// that only after escape analysis TurboFan can figure
// out that the value `y` is actually a Number in the
// safe integer range.
-function baz(y) { return {y}; }
-baz(null); baz(0);
+function baz(y) {
+ return {y};
+}
+baz(null);
+baz(0);
// Now we can put all of that together to get a kRepBit
// use of a kWord64 value (on 64-bit architectures).
function foo(o) {
return !baz(bar(o.x)).y;
-}
-
-assertFalse(foo({x:1}));
-assertFalse(foo({x:1}));
+};
+%PrepareFunctionForOptimization(foo);
+assertFalse(foo({x: 1}));
+assertFalse(foo({x: 1}));
%OptimizeFunctionOnNextCall(foo);
-assertFalse(foo({x:1}));
+assertFalse(foo({x: 1}));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-891627.js b/deps/v8/test/mjsunit/regress/regress-crbug-891627.js
index afe4093c96..b21c25f415 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-891627.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-891627.js
@@ -18,6 +18,7 @@ bar(0.1);
return bar(x | -1) == 4294967295;
}
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1));
assertFalse(foo(0));
%OptimizeFunctionOnNextCall(foo);
@@ -35,6 +36,7 @@ bar(0.1);
makeFoo(0); // Defeat the function context specialization.
const foo = makeFoo(1);
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(1));
assertFalse(foo(0));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-895199.js b/deps/v8/test/mjsunit/regress/regress-crbug-895199.js
index 7975ffc699..b70d4cefeb 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-895199.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-895199.js
@@ -10,8 +10,9 @@ function foo() {
a[1] = 25.1234;
%DeoptimizeNow();
return a[2];
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
-foo()
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-899524.js b/deps/v8/test/mjsunit/regress/regress-crbug-899524.js
index 32d28c9b09..9c1cca7e4b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-899524.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-899524.js
@@ -4,7 +4,7 @@
// Flags: --allow-natives-syntax
-function empty() { }
+function empty() {}
function baz(expected, found) {
var start = "";
@@ -16,16 +16,17 @@ function baz(expected, found) {
}
}
-baz([1], new (class A extends Array {}));
+baz([1], new class A extends Array {}());
(function () {
"use strict";
function bar() {
- baz([1,2], arguments);
+ baz([1, 2], arguments);
}
function foo() {
- bar(2147483648,-[]);
- }
+ bar(2147483648, -[]);
+ };
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-900674.js b/deps/v8/test/mjsunit/regress/regress-crbug-900674.js
index 7549b36a4e..22a9eadfc4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-900674.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-900674.js
@@ -6,7 +6,8 @@
function foo() {
let val = Promise.resolve().then();
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-902395.js b/deps/v8/test/mjsunit/regress/regress-crbug-902395.js
index 79aaecf6fa..129a23490d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-902395.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-902395.js
@@ -5,33 +5,30 @@
// Flags: --allow-natives-syntax
function opt() {
- try{
- Object.seal({})
- }finally{
- try{
+ try {
+ Object.seal({});
+ } finally {
+ try {
// Carefully crafted by clusterfuzz to alias the temporary object literal
// register with the below dead try block's context register.
- (
- {
- toString(){
- }
- }
- ).apply(-1).x( )
- }
- finally{
- if(2.2)
- {
- return
+ ({toString() {}})
+ .
+
+ apply(-1)
+ .x();
+ } finally {
+ if (2.2) {
+ return;
}
// This code should be dead.
- try{
- Reflect.construct
- }finally{
+ try {
+ Reflect.construct;
+ } finally {
}
}
}
-}
-
+};
+%PrepareFunctionForOptimization(opt);
opt();
%OptimizeFunctionOnNextCall(opt);
opt();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-903043.js b/deps/v8/test/mjsunit/regress/regress-crbug-903043.js
index a877e6e12a..7337a69d07 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-903043.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-903043.js
@@ -5,35 +5,35 @@
// Flags: --allow-natives-syntax
(function() {
- function foo() {
- const x = 1e-1;
- return Object.is(-0, x * (-1e-308));
- }
-
- assertFalse(foo());
- assertFalse(foo());
- %OptimizeFunctionOnNextCall(foo);
- assertFalse(foo());
+function foo() {
+ const x = 1e-1;
+ return Object.is(-0, x * -1e-308);
+};
+%PrepareFunctionForOptimization(foo);
+assertFalse(foo());
+assertFalse(foo());
+%OptimizeFunctionOnNextCall(foo);
+assertFalse(foo());
})();
(function() {
- function foo(x) {
- return Object.is(-0, x * (-1e-308));
- }
-
- assertFalse(foo(1e-1));
- assertFalse(foo(1e-1));
- %OptimizeFunctionOnNextCall(foo);
- assertFalse(foo(1e-1));
+function foo(x) {
+ return Object.is(-0, x * -1e-308);
+};
+%PrepareFunctionForOptimization(foo);
+assertFalse(foo(1e-1));
+assertFalse(foo(1e-1));
+%OptimizeFunctionOnNextCall(foo);
+assertFalse(foo(1e-1));
})();
(function() {
- function foo(x) {
- return Object.is(-0, x);
- }
-
- assertFalse(foo(1e-1 * (-1e-308)));
- assertFalse(foo(1e-1 * (-1e-308)));
- %OptimizeFunctionOnNextCall(foo);
- assertFalse(foo(1e-1 * (-1e-308)));
+function foo(x) {
+ return Object.is(-0, x);
+};
+%PrepareFunctionForOptimization(foo);
+assertFalse(foo(1e-1 * -1e-308));
+assertFalse(foo(1e-1 * -1e-308));
+%OptimizeFunctionOnNextCall(foo);
+assertFalse(foo(1e-1 * -1e-308));
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-905457.js b/deps/v8/test/mjsunit/regress/regress-crbug-905457.js
index 3a97a87520..ca820a745e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-905457.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-905457.js
@@ -5,45 +5,45 @@
// Flags: --allow-natives-syntax
(function() {
- function foo(x) {
- return Math.abs(Math.min(+x, 0));
- }
-
- assertEquals(NaN, foo());
- assertEquals(NaN, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(NaN, foo());
+function foo(x) {
+ return Math.abs(Math.min(+x, 0));
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(NaN, foo());
+assertEquals(NaN, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(NaN, foo());
})();
(function() {
- function foo(x) {
- return Math.abs(Math.min(-x, 0));
- }
-
- assertEquals(NaN, foo());
- assertEquals(NaN, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(NaN, foo());
+function foo(x) {
+ return Math.abs(Math.min(-x, 0));
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(NaN, foo());
+assertEquals(NaN, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(NaN, foo());
})();
(function() {
- function foo(x) {
- return Math.abs(Math.max(0, +x));
- }
-
- assertEquals(NaN, foo());
- assertEquals(NaN, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(NaN, foo());
+function foo(x) {
+ return Math.abs(Math.max(0, +x));
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(NaN, foo());
+assertEquals(NaN, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(NaN, foo());
})();
(function() {
- function foo(x) {
- return Math.abs(Math.max(0, -x));
- }
-
- assertEquals(NaN, foo());
- assertEquals(NaN, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(NaN, foo());
+function foo(x) {
+ return Math.abs(Math.max(0, -x));
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(NaN, foo());
+assertEquals(NaN, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(NaN, foo());
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-906043.js b/deps/v8/test/mjsunit/regress/regress-crbug-906043.js
index dbc283fa9f..eb10ec2b34 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-906043.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-906043.js
@@ -21,6 +21,7 @@ a3.fill(3.3);
var a4 = [1.1];
+%PrepareFunctionForOptimization(fun);
for (let i = 0; i < 3; i++) fun(...a4);
%OptimizeFunctionOnNextCall(fun);
fun(...a4);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-906220.js b/deps/v8/test/mjsunit/regress/regress-crbug-906220.js
index 580ff59bdd..9085f68f9b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-906220.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-906220.js
@@ -4,8 +4,10 @@
// Flags: --allow-natives-syntax
-function foo() { new Array().pop(); }
-
+function foo() {
+ new Array().pop();
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo());
assertEquals(undefined, foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-906870.js b/deps/v8/test/mjsunit/regress/regress-crbug-906870.js
index d94ee67a4c..7dc86bffaf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-906870.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-906870.js
@@ -5,45 +5,45 @@
// Flags: --allow-natives-syntax
(function() {
- function foo() {
- return Infinity / Math.max(-0, +0);
- }
-
- assertEquals(+Infinity, foo());
- assertEquals(+Infinity, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(+Infinity, foo());
+function foo() {
+ return Infinity / Math.max(-0, +0);
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(+Infinity, foo());
+assertEquals(+Infinity, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(+Infinity, foo());
})();
(function() {
- function foo() {
- return Infinity / Math.max(+0, -0);
- }
-
- assertEquals(+Infinity, foo());
- assertEquals(+Infinity, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(+Infinity, foo());
+function foo() {
+ return Infinity / Math.max(+0, -0);
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(+Infinity, foo());
+assertEquals(+Infinity, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(+Infinity, foo());
})();
(function() {
- function foo() {
- return Infinity / Math.min(-0, +0);
- }
-
- assertEquals(-Infinity, foo());
- assertEquals(-Infinity, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(-Infinity, foo());
+function foo() {
+ return Infinity / Math.min(-0, +0);
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(-Infinity, foo());
+assertEquals(-Infinity, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(-Infinity, foo());
})();
(function() {
- function foo() {
- return Infinity / Math.min(+0, -0);
- }
-
- assertEquals(-Infinity, foo());
- assertEquals(-Infinity, foo());
- %OptimizeFunctionOnNextCall(foo);
- assertEquals(-Infinity, foo());
+function foo() {
+ return Infinity / Math.min(+0, -0);
+};
+%PrepareFunctionForOptimization(foo);
+assertEquals(-Infinity, foo());
+assertEquals(-Infinity, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(-Infinity, foo());
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-908309.js b/deps/v8/test/mjsunit/regress/regress-crbug-908309.js
index c2d939001d..8a2b463cd5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-908309.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-908309.js
@@ -5,23 +5,29 @@
// Flags: --allow-natives-syntax
const p = Object.defineProperty(Promise.resolve(), 'then', {
- value() { return 0; }
+ value() {
+ return 0;
+ }
});
(function() {
- function foo() { return p.catch().catch(); }
-
- assertThrows(foo, TypeError);
- assertThrows(foo, TypeError);
- %OptimizeFunctionOnNextCall(foo);
- assertThrows(foo, TypeError);
+function foo() {
+ return p.catch().catch();
+};
+%PrepareFunctionForOptimization(foo);
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, TypeError);
})();
(function() {
- function foo() { return p.finally().finally(); }
-
- assertThrows(foo, TypeError);
- assertThrows(foo, TypeError);
- %OptimizeFunctionOnNextCall(foo);
- assertThrows(foo, TypeError);
+function foo() {
+ return p.finally().finally();
+};
+%PrepareFunctionForOptimization(foo);
+assertThrows(foo, TypeError);
+assertThrows(foo, TypeError);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, TypeError);
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-913296.js b/deps/v8/test/mjsunit/regress/regress-crbug-913296.js
index 3fab06607f..feb2eaf1be 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-913296.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-913296.js
@@ -6,8 +6,8 @@
function foo(trigger) {
return Object.is((trigger ? -0 : 0) - 0, -0);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertFalse(foo(false));
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-930948.js b/deps/v8/test/mjsunit/regress/regress-crbug-930948.js
index 06dcf40646..3c5975bf7a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-930948.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-930948.js
@@ -7,7 +7,8 @@
// This checks that TransitionAndStoreNumberElement silences NaNs.
function foo() {
return [undefined].map(Math.asin);
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -16,7 +17,8 @@ foo();
// This checks that TransitionAndStoreElement silences NaNs.
function bar(b) {
return [undefined].map(x => b ? Math.asin(x) : "string");
-}
+};
+%PrepareFunctionForOptimization(bar);
bar(true);
bar(false);
bar(true);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-931664.js b/deps/v8/test/mjsunit/regress/regress-crbug-931664.js
index b4fc85367e..e1f14955bd 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-931664.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-931664.js
@@ -14,6 +14,7 @@ function opt(){
finally{}
}
}
+%PrepareFunctionForOptimization(opt);
opt();
%OptimizeFunctionOnNextCall(opt);
opt();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-934166.js b/deps/v8/test/mjsunit/regress/regress-crbug-934166.js
index b23026f8f0..bb445490f0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-934166.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-934166.js
@@ -7,6 +7,7 @@
{
function f() {
for(let i = 0; i < 10; ++i){
+ %PrepareFunctionForOptimization(f);
try{
// Carefully constructed by a fuzzer to use a new register for s(), whose
// write is dead due to the unconditional throw after s()=N, but which is
@@ -17,6 +18,6 @@
%OptimizeOsr();
}
}
- %PrepareFunctionForOptimization(f);
+ %EnsureFeedbackVectorForFunction(f);
f();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-935932.js b/deps/v8/test/mjsunit/regress/regress-crbug-935932.js
index a34b7743e0..1a60ec9276 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-935932.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-935932.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function test(func, expect) {
+ %PrepareFunctionForOptimization(func);
assertTrue(func() == expect);
%OptimizeFunctionOnNextCall(func);
assertTrue(func() == expect);
@@ -50,6 +51,7 @@ test(check_v4, true);
function testIn(index, array) {
return index in array;
}
+ %PrepareFunctionForOptimization(testIn);
let a = [];
a.__proto__ = [0,1,2];
@@ -65,6 +67,7 @@ test(check_v4, true);
%ClearFunctionFeedback(testIn);
%DeoptimizeFunction(testIn);
+ %PrepareFunctionForOptimization(testIn);
// First load will set IC to Load handle with allow hole to undefined conversion false.
assertTrue(testIn(0, a));
@@ -75,6 +78,7 @@ test(check_v4, true);
// Repeat the same testing for access out-of-bounds of the array, but in bounds of it's prototype.
%ClearFunctionFeedback(testIn);
%DeoptimizeFunction(testIn);
+ %PrepareFunctionForOptimization(testIn);
assertTrue(testIn(2, a));
assertTrue(testIn(2, a));
@@ -83,6 +87,7 @@ test(check_v4, true);
%ClearFunctionFeedback(testIn);
%DeoptimizeFunction(testIn);
+ %PrepareFunctionForOptimization(testIn);
assertTrue(testIn(2, a));
%OptimizeFunctionOnNextCall(testIn);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-936302.js b/deps/v8/test/mjsunit/regress/regress-crbug-936302.js
index c8d3c136a1..4969fe3e31 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-936302.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-936302.js
@@ -5,21 +5,21 @@
// Flags: --allow-natives-syntax --opt
(function() {
- 'use strict';
+'use strict';
- function baz() {
- 'use asm';
- function f() {}
- return {f: f};
- }
+function baz() {
+ 'use asm';
+ function f() {}
+ return {f: f};
+}
- function foo(x) {
- baz(x);
- %DeoptimizeFunction(foo);
- }
-
- foo();
- foo();
- %OptimizeFunctionOnNextCall(foo);
- foo();
+function foo(x) {
+ baz(x);
+ %DeoptimizeFunction(foo);
+};
+%PrepareFunctionForOptimization(foo);
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-937618.js b/deps/v8/test/mjsunit/regress/regress-crbug-937618.js
index 71ea8a8507..5e004f7a39 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-937618.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-937618.js
@@ -4,10 +4,12 @@
// Flags: --allow-natives-syntax
-let target = {0:42, a:42};
+let target = {0: 42, a: 42};
let proxy = new Proxy(target, {
- has: function() { return false; },
+ has: function() {
+ return false;
+ }
});
Object.preventExtensions(target);
@@ -18,16 +20,17 @@ function testLookupElementInProxy() {
// 9.5.7 [[HasProperty]] 9. states that if the trap returns false, and the
// target hasOwnProperty, and the target is non-extensible, throw a type error.
-
+;
+%PrepareFunctionForOptimization(testLookupElementInProxy);
assertThrows(testLookupElementInProxy, TypeError);
assertThrows(testLookupElementInProxy, TypeError);
%OptimizeFunctionOnNextCall(testLookupElementInProxy);
assertThrows(testLookupElementInProxy, TypeError);
-function testLookupPropertyInProxy(){
+function testLookupPropertyInProxy() {
"a" in proxy;
-}
-
+};
+%PrepareFunctionForOptimization(testLookupPropertyInProxy);
assertThrows(testLookupPropertyInProxy, TypeError);
assertThrows(testLookupPropertyInProxy, TypeError);
%OptimizeFunctionOnNextCall(testLookupPropertyInProxy);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-937649.js b/deps/v8/test/mjsunit/regress/regress-crbug-937649.js
index 5cc5a3db9a..5dd794db47 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-937649.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-937649.js
@@ -4,14 +4,15 @@
// Flags: --allow-natives-syntax
(function() {
- function foo(x) {
- const i = x > 0;
- const dv = new DataView(ab);
- return dv.getUint16(i);
- }
- const ab = new ArrayBuffer(2);
- foo(0);
- foo(0);
- %OptimizeFunctionOnNextCall(foo);
- foo(0);
+function foo(x) {
+ const i = x > 0;
+ const dv = new DataView(ab);
+ return dv.getUint16(i);
+};
+%PrepareFunctionForOptimization(foo);
+const ab = new ArrayBuffer(2);
+foo(0);
+foo(0);
+%OptimizeFunctionOnNextCall(foo);
+foo(0);
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-941743.js b/deps/v8/test/mjsunit/regress/regress-crbug-941743.js
index 8fc4ad4322..eaac4c4c4d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-941743.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-941743.js
@@ -5,13 +5,14 @@
// Flags: --allow-natives-syntax --noenable-slow-asserts
// This call ensures that TurboFan won't inline array constructors.
-Array(2**30);
+Array(2 ** 30);
// Set up a fast holey smi array, and generate optimized code.
-let a = [1, 2, ,,, 3];
+let a = [1, 2, , , , 3];
function mapping(a) {
return a.map(v => v);
-}
+};
+%PrepareFunctionForOptimization(mapping);
mapping(a);
mapping(a);
%OptimizeFunctionOnNextCall(mapping);
@@ -19,8 +20,8 @@ mapping(a);
// Now lengthen the array, but ensure that it points to a non-dictionary
// backing store.
-a.length = (32 * 1024 * 1024)-1;
-a.fill(1,0);
+a.length = 32 * 1024 * 1024 - 1;
+a.fill(1, 0);
a.push(2);
a.length += 500;
// Now, the non-inlined array constructor should produce an array with
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-942068.js b/deps/v8/test/mjsunit/regress/regress-crbug-942068.js
index 9994d9c524..a0b68d476c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-942068.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-942068.js
@@ -6,8 +6,8 @@
function foo(index, array) {
return index in array;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
let arr = [];
arr.__proto__ = [0];
assertFalse(foo(0, {}));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-944865.js b/deps/v8/test/mjsunit/regress/regress-crbug-944865.js
index 06c8919a5d..630563f0c9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-944865.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-944865.js
@@ -6,9 +6,10 @@
function foo() {
const r = {e: NaN, g: undefined, c: undefined};
- const u = {__proto__: {}, e: new Set(), g: 0, c: undefined};
+ const u = {__proto__: {}, e: new Set(), g: 0, c: undefined};
return r;
-}
+};
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
const o = foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-951400.js b/deps/v8/test/mjsunit/regress/regress-crbug-951400.js
index f43a1be897..17ed9d4e62 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-951400.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-951400.js
@@ -7,8 +7,8 @@
function foo(arr) {
gc();
eval(arr);
-}
-
+};
+%PrepareFunctionForOptimization(foo);
try {
foo("tag`Hello${tag}`");
} catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-961522.js b/deps/v8/test/mjsunit/regress/regress-crbug-961522.js
index c7e1eb8bb5..aba7e6165a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-961522.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-961522.js
@@ -10,7 +10,8 @@
}
function foo() {
arr.reduce(inlined);
- }
+ };
+ %PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js
index 4cc40c5127..c03d5d5be1 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js
@@ -10,6 +10,17 @@ function foo() {
return a[0];
}
+function bar() {
+ const a = new Array(10);
+ a[0] = 1;
+ return a[0];
+}
+
Object.setPrototypeOf(Array.prototype, new Int8Array());
+%EnsureFeedbackVectorForFunction(foo);
assertEquals(undefined, foo());
assertEquals(undefined, foo());
+
+%EnsureFeedbackVectorForFunction(bar);
+assertEquals(undefined, bar());
+assertEquals(undefined, bar());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js
index dcbf8dcb2d..0201411d6f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js
@@ -8,12 +8,25 @@ function foo() {
const a = [];
a[0] = 1;
return a[0];
-}
+};
+
+function bar() {
+ const a = new Array(10);
+ a[0] = 1;
+ return a[0];
+};
-%EnsureFeedbackVectorForFunction(foo);
Object.setPrototypeOf(Array.prototype, new Int8Array());
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo());
assertEquals(undefined, foo());
%OptimizeFunctionOnNextCall(foo);
assertEquals(undefined, foo());
assertOptimized(foo);
+
+%PrepareFunctionForOptimization(bar);
+assertEquals(undefined, bar());
+assertEquals(undefined, bar());
+%OptimizeFunctionOnNextCall(bar);
+assertEquals(undefined, bar());
+assertOptimized(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-964833.js b/deps/v8/test/mjsunit/regress/regress-crbug-964833.js
index 094f86cefa..d5683b7b8a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-964833.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-964833.js
@@ -26,6 +26,7 @@ function f() {
}
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-966450.js b/deps/v8/test/mjsunit/regress/regress-crbug-966450.js
new file mode 100644
index 0000000000..3ba5ebaaff
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-966450.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let prop = "someName";
+function foo(a, b, v) { return a[b] = 0 }
+try {
+ foo("", prop);
+} catch(e) {}
+var target = {};
+var traps = { set() {return 42} };
+var proxy = new Proxy(target, traps);
+Object.defineProperty(target, prop, { value: 0 });
+try {
+ foo(proxy, prop);
+} catch (e) { }
+foo(proxy, prop, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-967101.js b/deps/v8/test/mjsunit/regress/regress-crbug-967101.js
new file mode 100644
index 0000000000..18365f98ab
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-967101.js
@@ -0,0 +1,48 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For packed sealed object.
+function packedStore() {
+ let a = Object.seal([""]);
+ a[0] = 0;
+ assertEquals(a[0], 0);
+}
+
+packedStore();
+packedStore();
+
+// For holey sealed object.
+function holeyStore() {
+ let a = Object.seal([, ""]);
+ a[0] = 0;
+ assertEquals(a[0], undefined);
+}
+
+holeyStore();
+holeyStore();
+
+// Make sure IC store for holey is consistent.
+let a = Object.seal([, ""]);
+function foo() {
+ a[1] = 0;
+}
+
+foo();
+foo();
+function bar() {
+ a[0] = 1;
+}
+assertEquals(a, [, 0]);
+bar();
+assertEquals(a, [, 0]);
+bar();
+assertEquals(a, [, 0]);
+function baz() {
+ a[2] = 2;
+}
+assertEquals(a, [, 0]);
+baz();
+assertEquals(a, [, 0]);
+baz();
+assertEquals(a, [, 0]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-967434.js b/deps/v8/test/mjsunit/regress/regress-crbug-967434.js
index bd7b4073e8..3c08d98b96 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-967434.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-967434.js
@@ -17,16 +17,20 @@ function f2(h, h_eval) {
// constructor, and one in the impossible branch of the bi-morphic store
// site.
o.x = h_eval;
-}
-
+};
function f3(h) {
+ %PrepareFunctionForOptimization(f2);
f2(h, h());
%OptimizeFunctionOnNextCall(f2);
f2(h, h());
}
-function g1() { return {}; };
-function g2() { return 4.2; };
+function g1() {
+ return {};
+};
+function g2() {
+ return 4.2;
+};
f3(g1);
f3(g2);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-969368.js b/deps/v8/test/mjsunit/regress/regress-crbug-969368.js
new file mode 100644
index 0000000000..cfc60a3279
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-969368.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function Module() {
+ 'use asm';
+ function f() {}
+ function g() {
+ var x = 0.0;
+ table[x & 3]();
+ }
+ var table = [f, f, f, f];
+ return { g: g };
+}
+var m = Module();
+assertDoesNotThrow(m.g);
+assertFalse(%IsAsmWasmCode(Module));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-969498.js b/deps/v8/test/mjsunit/regress/regress-crbug-969498.js
new file mode 100644
index 0000000000..4dddcb3bd5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-969498.js
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let global = new WebAssembly.Global({value: 'f32', mutable: true}, 2e66);
+global.value = 2e66;
+
+// Custom additional test case:
+const kRoundsDown = 3.4028235677973362e+38;
+const kRoundsToInf = 3.4028235677973366e+38;
+var floats = new Float32Array([kRoundsDown, kRoundsToInf]);
+assertNotEquals(Infinity, floats[0]);
+assertEquals(Infinity, floats[1]);
+floats.set([kRoundsDown, kRoundsToInf]);
+assertNotEquals(Infinity, floats[0]);
+assertEquals(Infinity, floats[1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-971782.js b/deps/v8/test/mjsunit/regress/regress-crbug-971782.js
new file mode 100644
index 0000000000..d979bc941a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-971782.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(dv) {
+ for (let i = -1; i < 1; ++i) {
+ dv.setUint16(i % 1);
+ }
+}
+
+const dv = new DataView(new ArrayBuffer(2));
+%PrepareFunctionForOptimization(foo);
+foo(dv);
+foo(dv);
+%OptimizeFunctionOnNextCall(foo);
+foo(dv);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-976256.js b/deps/v8/test/mjsunit/regress/regress-crbug-976256.js
new file mode 100644
index 0000000000..990e9319db
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-976256.js
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(r) {
+ return r.finally();
+}
+
+const resolution = Promise.resolve();
+foo(resolution);
+
+function bar() {
+ try {
+ foo(undefined);
+ } catch (e) {}
+}
+
+%PrepareFunctionForOptimization(bar);
+bar();
+bar();
+%OptimizeFunctionOnNextCall(bar);
+bar();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-976598.js b/deps/v8/test/mjsunit/regress/regress-crbug-976598.js
new file mode 100644
index 0000000000..0e1a787eaa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-976598.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ return { value: NaN };
+}
+
+%PrepareFunctionForOptimization(f);
+f();
+f();
+
+let x = { value: "Y" };
+
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-976934.js b/deps/v8/test/mjsunit/regress/regress-crbug-976934.js
new file mode 100644
index 0000000000..4c31615933
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-976934.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function Module(stdlib, imports, heap) {
+ "use asm";
+
+ var fround = stdlib.Math.fround;
+
+ function f() {
+ var x = fround(-1.7976931348623157e+308);
+ return fround(x);
+ }
+
+ return { f: f };
+}
+
+var m = Module(this);
+assertEquals(-Infinity, m.f());
+assertTrue(%IsAsmWasmCode(Module));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-977012.js b/deps/v8/test/mjsunit/regress/regress-crbug-977012.js
new file mode 100644
index 0000000000..4e94953bd4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-977012.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo(arg) {
+ var ret = { x: arg };
+ ret.__defineSetter__("y", function() { });
+ return ret;
+}
+
+// v1 creates a map with a Smi field, v2 deprecates v1's map.
+let v1 = foo(10);
+let v2 = foo(10.5);
+
+// Trigger a PrepareForDataProperty on v1, which also triggers an update to
+// dictionary due to the different accessors on v1 and v2's y property.
+v1.x = 20.5;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-977089.js b/deps/v8/test/mjsunit/regress/regress-crbug-977089.js
new file mode 100644
index 0000000000..d7db660bfe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-977089.js
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+// This function was carefully constructed by Clusterfuzz to execute a certain
+// sequence of transitions. Thus, it may no longer test anything useful if
+// the transition logic changes.
+//
+// The more stable unit test testing the same bug is:
+// test-field-type-tracking/NormalizeToMigrationTarget
+var foo = function() {
+
+ function f1(arg) {
+ var ret = { x: arg };
+ ret.__defineGetter__("y", function() { });
+ return ret;
+ }
+ // Create v1 with a map with properties: {x:Smi, y:AccessorPair}
+ let v1 = f1(10);
+ // Create a map with properties: {x:Double, y:AccessorPair}, deprecating the
+ // previous map.
+ let v2 = f1(10.5);
+
+ // Access x on v1 to a function that reads x, which triggers it to update its
+ // map. This update transitions v1 to slow mode as there is already a "y"
+ // transition with a different accessor.
+ //
+ // Note that the parent function `foo` can't be an IIFE, as then this callsite
+ // would use the NoFeedback version of the LdaNamedProperty bytecode, and this
+ // doesn't trigger the map update.
+ v1.x;
+
+ // Create v3 which overwrites a non-accessor with an accessor, triggering it
+ // to normalize, and picking up the same cached normalized map as v1. However,
+ // v3's map is not a migration target and v1's is (as it was migrated to when
+ // updating v1), so the migration target bit doesn't match. This should be
+ // fine and shouldn't trigger any DCHECKs.
+ let v3 = { z:1 };
+ v3.__defineGetter__("z", function() {});
+};
+
+%EnsureFeedbackVectorForFunction(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-979023.js b/deps/v8/test/mjsunit/regress/regress-crbug-979023.js
new file mode 100644
index 0000000000..0d31be3cb4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-979023.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo(arg) {
+ var ret = { x: arg };
+ Object.defineProperty(ret, "y", {
+ get: function () { },
+ configurable: true
+ });
+ return ret;
+}
+let v0 = foo(10);
+let v1 = foo(10.5);
+Object.defineProperty(v0, "y", {
+ get: function () { },
+ configurable: true
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-979401.js b/deps/v8/test/mjsunit/regress/regress-crbug-979401.js
new file mode 100644
index 0000000000..2ef50848db
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-979401.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let min_fields = 1015;
+let max_fields = 1025;
+
+let static_fields_src = "";
+let instance_fields_src = "";
+for (let i = 0; i < max_fields; i++) {
+ static_fields_src += " static f" + i + "() {}\n";
+ instance_fields_src += " g" + i + "() {}\n";
+
+ if (i >= min_fields) {
+ let src1 = "class A {\n" + static_fields_src + "}\n";
+ eval(src1);
+ let src2 = "class B {\n" + instance_fields_src + "}\n";
+ eval(src2);
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-980168.js b/deps/v8/test/mjsunit/regress/regress-crbug-980168.js
new file mode 100644
index 0000000000..163d22b5ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-980168.js
@@ -0,0 +1,56 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap
+
+// seal then freeze.
+(function () {
+ const v1 = Object.seal(Object);
+ const v3 = Object();
+ const v4 = Object(Object);
+ v3.__proto__ = v4;
+ const v6 = Object.freeze(Object);
+})();
+
+// preventExtensions then freeze.
+(function () {
+ const v1 = Object.preventExtensions(Object);
+ const v3 = Object();
+ const v4 = Object(Object);
+ v3.__proto__ = v4;
+ const v6 = Object.freeze(Object);
+})();
+
+// preventExtensions then seal.
+(function () {
+ const v1 = Object.preventExtensions(Object);
+ const v3 = Object();
+ const v4 = Object(Object);
+ v3.__proto__ = v4;
+ const v6 = Object.seal(Object);
+})();
+
+// freeze.
+(function () {
+ const v3 = Object();
+ const v4 = Object(Object);
+ v3.__proto__ = v4;
+ const v6 = Object.freeze(Object);
+})();
+
+// seal.
+(function () {
+ const v3 = Object();
+ const v4 = Object(Object);
+ v3.__proto__ = v4;
+ const v6 = Object.seal(Object);
+})();
+
+// preventExtensions.
+(function () {
+ const v3 = Object();
+ const v4 = Object(Object);
+ v3.__proto__ = v4;
+ const v6 = Object.preventExtensions(Object);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-980292.js b/deps/v8/test/mjsunit/regress/regress-crbug-980292.js
new file mode 100644
index 0000000000..779c1a13d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-980292.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+let v2 = Object;
+const v4 = new Proxy(Object,v2);
+const v6 = (9).__proto__;
+v6.__proto__ = v4;
+function v8(v9,v10,v11) {
+ let v14 = 0;
+ do {
+ const v16 = (0x1337).prototype;
+ v14++;
+ } while (v14 < 24);
+}
+const v7 = [1,2,3,4];
+const v17 = v7.findIndex(v8);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-984344.js b/deps/v8/test/mjsunit/regress/regress-crbug-984344.js
new file mode 100644
index 0000000000..fea2c2d642
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-984344.js
@@ -0,0 +1,34 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function largeAllocToTriggerGC() {
+ for (let i = 0; i < 16; i++) {
+ let ab = new ArrayBuffer(1024 * 1024 * 10);
+ }
+}
+
+function foo() {
+ eval('function bar(a) {}' +
+ '(function() {' +
+ ' for (let c = 0; c < 505; c++) {' +
+ ' while (Promise >= 0xDEADBEEF) {' +
+ ' Array.prototype.slice.call(bar, bar, bar);' +
+ ' }' +
+ ' for (let i = 0; i < 413; i++) {' +
+ ' }' +
+ ' }' +
+ '})();' +
+ 'largeAllocToTriggerGC();');
+}
+
+
+foo();
+foo();
+foo();
+// Don't prepare until here to allow function to be flushed.
+%PrepareFunctionForOptimization(foo);
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-985660.js b/deps/v8/test/mjsunit/regress/regress-crbug-985660.js
new file mode 100644
index 0000000000..49ddf93919
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-985660.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+try {
+ Object.defineProperty(Number.prototype, "v", {
+ get: constructor
+ });
+} catch (e) {}
+
+function foo(obj) {
+ return obj.v;
+}
+
+%PrepareFunctionForOptimization(foo);
+%OptimizeFunctionOnNextCall(foo);
+foo(3);
+%PrepareFunctionForOptimization(foo);
+%OptimizeFunctionOnNextCall(foo);
+foo(3);
+foo(4);
diff --git a/deps/v8/test/mjsunit/regress/regress-deep-proto.js b/deps/v8/test/mjsunit/regress/regress-deep-proto.js
index 6b1387447d..2d3bb9db4e 100644
--- a/deps/v8/test/mjsunit/regress/regress-deep-proto.js
+++ b/deps/v8/test/mjsunit/regress/regress-deep-proto.js
@@ -29,8 +29,8 @@
function poly(x) {
return x.foo;
-}
-
+};
+%PrepareFunctionForOptimization(poly);
var one = {foo: 0};
var two = {foo: 0, bar: 1};
var three = {bar: 0};
diff --git a/deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js b/deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js
index 8bebbe27f5..31375e176a 100644
--- a/deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js
+++ b/deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js
@@ -4,9 +4,11 @@
// Flags: --allow-natives-syntax
-function f(a,b,c,d) { return [a, ...(%DeoptimizeNow(), [b,c]), d]; }
-
-assertEquals([1,2,3,4], f(1,2,3,4));
-assertEquals([1,2,3,4], f(1,2,3,4));
+function f(a, b, c, d) {
+ return [a, ...(%DeoptimizeNow(), [b, c]), d];
+};
+%PrepareFunctionForOptimization(f);
+assertEquals([1, 2, 3, 4], f(1, 2, 3, 4));
+assertEquals([1, 2, 3, 4], f(1, 2, 3, 4));
%OptimizeFunctionOnNextCall(f);
-assertEquals([1,2,3,4], f(1,2,3,4));
+assertEquals([1, 2, 3, 4], f(1, 2, 3, 4));
diff --git a/deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js b/deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js
index 59094d3aeb..47264b6bea 100644
--- a/deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js
+++ b/deps/v8/test/mjsunit/regress/regress-deopt-store-effect.js
@@ -28,36 +28,40 @@
// Flags: --allow-natives-syntax
// Test deopt after generic store with effect context.
-var pro = { x : 1 }
-var a = {}
-a.__proto__ = pro
-delete pro.x
+var pro = {x: 1};
+var a = {};
+a.__proto__ = pro;
+delete pro.x;
function g(o) {
return 7 + (o.z = 1, 20);
-}
-
+};
+%PrepareFunctionForOptimization(g);
g(a);
g(a);
%OptimizeFunctionOnNextCall(g);
-Object.defineProperty(pro, "z", {
- set: function(v) { %DeoptimizeFunction(g); },
- get: function() { return 20; }
+Object.defineProperty(pro, 'z', {
+ set: function(v) {
+ %DeoptimizeFunction(g);
+ },
+ get: function() {
+ return 20;
+ }
});
assertEquals(27, g(a));
// Test deopt after polymorphic as monomorphic store with effect context.
-var i = { z : 2, r : 1 }
-var j = { z : 2 }
-var p = { a : 10 }
-var pp = { a : 20, b : 1 }
+var i = {z: 2, r: 1};
+var j = {z: 2};
+var p = {a: 10};
+var pp = {a: 20, b: 1};
function bar(o, p) {
return 7 + (o.z = 1, p.a);
-}
-
+};
+%PrepareFunctionForOptimization(bar);
bar(i, p);
bar(i, p);
bar(j, p);
@@ -66,15 +70,15 @@ assertEquals(27, bar(i, pp));
// Test deopt after polymorphic store with effect context.
-var i = { r : 1, z : 2 }
-var j = { z : 2 }
-var p = { a : 10 }
-var pp = { a : 20, b : 1 }
+var i = {r: 1, z: 2};
+var j = {z: 2};
+var p = {a: 10};
+var pp = {a: 20, b: 1};
function bar1(o, p) {
return 7 + (o.z = 1, p.a);
-}
-
+};
+%PrepareFunctionForOptimization(bar1);
bar1(i, p);
bar1(i, p);
bar1(j, p);
diff --git a/deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js b/deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js
index ed63133c0f..b4bd9e3e0c 100644
--- a/deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js
+++ b/deps/v8/test/mjsunit/regress/regress-deoptimize-constant-keyed-load.js
@@ -4,18 +4,21 @@
// Flags: --allow-natives-syntax
-var o = { };
-o.__defineGetter__("progressChanged", function() { %DeoptimizeFunction(f); return 10; })
+var o = {};
+o.__defineGetter__('progressChanged', function() {
+ %DeoptimizeFunction(f);
+ return 10;
+});
function g(a, b, c) {
return a + b + c;
}
function f() {
- var t="progressChanged";
+ var t = 'progressChanged';
return g(1, o[t], 100);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-double-canonicalization.js b/deps/v8/test/mjsunit/regress/regress-double-canonicalization.js
index 2b345d2bb7..6fb7ebd222 100644
--- a/deps/v8/test/mjsunit/regress/regress-double-canonicalization.js
+++ b/deps/v8/test/mjsunit/regress/regress-double-canonicalization.js
@@ -6,16 +6,16 @@
var ab = new ArrayBuffer(8);
var i_view = new Int32Array(ab);
-i_view[0] = %GetHoleNaNUpper()
+i_view[0] = %GetHoleNaNUpper();
i_view[1] = %GetHoleNaNLower();
-var hole_nan = (new Float64Array(ab))[0];
+var hole_nan = new Float64Array(ab)[0];
var array = [];
function write() {
array[0] = hole_nan;
-}
-
+};
+%PrepareFunctionForOptimization(write);
write();
%OptimizeFunctionOnNextCall(write);
write();
diff --git a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
index b95b619d39..f371c994aa 100644
--- a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
+++ b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
@@ -35,6 +35,7 @@ if (!%IsConcurrentRecompilationSupported()) {
}
function test(fun) {
+ %PrepareFunctionForOptimization(fun);
fun();
fun();
// Mark for concurrent optimization.
diff --git a/deps/v8/test/mjsunit/regress/regress-empty-fixed-double-array.js b/deps/v8/test/mjsunit/regress/regress-empty-fixed-double-array.js
index 1db9e2b3e5..8da3208c6c 100644
--- a/deps/v8/test/mjsunit/regress/regress-empty-fixed-double-array.js
+++ b/deps/v8/test/mjsunit/regress/regress-empty-fixed-double-array.js
@@ -7,8 +7,8 @@
function f(a, x) {
a.shift();
a[0] = x;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f([1], 1.1);
f([1], 1.1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js b/deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js
index dbd4762fcd..e6079153f5 100644
--- a/deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js
+++ b/deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js
@@ -4,19 +4,19 @@
// Flags: --allow-natives-syntax
-var x = Object.getOwnPropertyDescriptor({get x() {}}, "x").get;
+var x = Object.getOwnPropertyDescriptor({get x() {}}, 'x').get;
function f(o, b) {
if (b) {
return o instanceof x;
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
function g() {
return new x();
-}
-
+};
+%PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
-assertThrows(()=>g());
+assertThrows(() => g());
diff --git a/deps/v8/test/mjsunit/regress/regress-escape-preserve-smi-representation.js b/deps/v8/test/mjsunit/regress/regress-escape-preserve-smi-representation.js
index fd899d64e2..bace1ef238 100644
--- a/deps/v8/test/mjsunit/regress/regress-escape-preserve-smi-representation.js
+++ b/deps/v8/test/mjsunit/regress/regress-escape-preserve-smi-representation.js
@@ -5,15 +5,22 @@
// Flags: --allow-natives-syntax
function deepEquals(a, b) {
- if (a === b) { if (a === 0) return (1 / a) === (1 / b); return true; }
+ if (a === b) {
+ if (a === 0) return 1 / a === 1 / b;
+ return true;
+ }
if (typeof a != typeof b) return false;
if (typeof a == "number") return isNaN(a) && isNaN(b);
if (typeof a !== "object" && typeof a !== "function") return false;
- if (objectClass === "RegExp") { return (a.toString() === b.toString()); }
+ if (objectClass === 'RegExp') {
+ return a.toString() === b.toString();
+ }
if (objectClass === "Function") return false;
if (objectClass === "Array") {
var elementsCount = 0;
- if (a.length != b.length) { return false; }
+ if (a.length != b.length) {
+ return false;
+ }
for (var i = 0; i < a.length; i++) {
if (!deepEquals(a[i], b[i])) return false;
}
@@ -21,14 +28,14 @@ function deepEquals(a, b) {
}
}
-
-function __f_1(){
+function __f_1() {
var __v_0 = [];
- for(var i=0; i<2; i++){
- __v_0.push([])
+ for (var i = 0; i < 2; i++) {
+ __v_0.push([]);
deepEquals(2, __v_0.length);
}
-}
+};
+%PrepareFunctionForOptimization(__f_1);
__f_1();
%OptimizeFunctionOnNextCall(__f_1);
__f_1();
diff --git a/deps/v8/test/mjsunit/regress/regress-et-clobbers-doubles.js b/deps/v8/test/mjsunit/regress/regress-et-clobbers-doubles.js
index 47fa47925f..817985022d 100644
--- a/deps/v8/test/mjsunit/regress/regress-et-clobbers-doubles.js
+++ b/deps/v8/test/mjsunit/regress/regress-et-clobbers-doubles.js
@@ -28,12 +28,12 @@
// Flags: --allow-natives-syntax
function t_smi(a) {
a[0] = 1.5;
-}
-
-t_smi([1,,3]);
-t_smi([1,,3]);
-t_smi([1,,3]);
+};
+%PrepareFunctionForOptimization(t_smi);
+t_smi([1, , 3]);
+t_smi([1, , 3]);
+t_smi([1, , 3]);
%OptimizeFunctionOnNextCall(t_smi);
-var ta = [1,,3];
+var ta = [1, , 3];
t_smi(ta);
-assertEquals([1.5,,3], ta);
+assertEquals([1.5, , 3], ta);
diff --git a/deps/v8/test/mjsunit/regress/regress-fast-literal-transition.js b/deps/v8/test/mjsunit/regress/regress-fast-literal-transition.js
index 2dc6a971d4..90c0bb796a 100644
--- a/deps/v8/test/mjsunit/regress/regress-fast-literal-transition.js
+++ b/deps/v8/test/mjsunit/regress/regress-fast-literal-transition.js
@@ -37,11 +37,15 @@
// final --stress-opt run.
function f(x) {
- switch(x) {
- case 1: return 1.4;
- case 2: return 1.5;
- case 3: return {};
- default: gc();
+ switch (x) {
+ case 1:
+ return 1.4;
+ case 2:
+ return 1.5;
+ case 3:
+ return {};
+ default:
+ gc();
}
}
@@ -50,6 +54,8 @@ function g(x) {
}
// Step 1: Optimize g() to contain a PACKED_DOUBLE_ELEMENTS boilerplate.
+;
+%PrepareFunctionForOptimization(g);
assertEquals([1.1, 1.2, 1.3, 1.4], g(1));
assertEquals([1.1, 1.2, 1.3, 1.5], g(2));
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-filter-contexts.js b/deps/v8/test/mjsunit/regress/regress-filter-contexts.js
index d2abe00325..5869a3ff0a 100644
--- a/deps/v8/test/mjsunit/regress/regress-filter-contexts.js
+++ b/deps/v8/test/mjsunit/regress/regress-filter-contexts.js
@@ -4,7 +4,10 @@
// Flags: --allow-natives-syntax
-function f() { return f.x; }
+function f() {
+ return f.x;
+};
+%PrepareFunctionForOptimization(f);
f.__proto__ = null;
f.prototype = "";
diff --git a/deps/v8/test/mjsunit/regress/regress-force-constant-representation.js b/deps/v8/test/mjsunit/regress/regress-force-constant-representation.js
index 4ec2a6a799..77b2f8a8c3 100644
--- a/deps/v8/test/mjsunit/regress/regress-force-constant-representation.js
+++ b/deps/v8/test/mjsunit/regress/regress-force-constant-representation.js
@@ -8,8 +8,8 @@
var a = [{}];
function f(a) {
a.push(Infinity);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(a);
f(a);
f(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-force-representation.js b/deps/v8/test/mjsunit/regress/regress-force-representation.js
index 8f6746b7f2..706660abf3 100644
--- a/deps/v8/test/mjsunit/regress/regress-force-representation.js
+++ b/deps/v8/test/mjsunit/regress/regress-force-representation.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function optimize(crankshaft_test) {
+ %PrepareFunctionForOptimization(crankshaft_test);
crankshaft_test();
crankshaft_test();
%OptimizeFunctionOnNextCall(crankshaft_test);
diff --git a/deps/v8/test/mjsunit/regress/regress-freeze.js b/deps/v8/test/mjsunit/regress/regress-freeze.js
index 6f3de2a4c9..d57accea4a 100644
--- a/deps/v8/test/mjsunit/regress/regress-freeze.js
+++ b/deps/v8/test/mjsunit/regress/regress-freeze.js
@@ -28,7 +28,11 @@
// Flags: --allow-natives-syntax
// CountOperation
-function f(o) { o.x++ };
+function f(o) {
+ o.x++;
+};
+%PrepareFunctionForOptimization(f);
+;
var o = {x: 5};
Object.freeze(o);
f(o);
@@ -38,7 +42,11 @@ f(o);
assertEquals(5, o.x);
// Compound Assignment
-function f2(o) { o.x+=3 };
+function f2(o) {
+ o.x += 3;
+};
+%PrepareFunctionForOptimization(f2);
+;
f2(o);
f2(o);
%OptimizeFunctionOnNextCall(f2);
diff --git a/deps/v8/test/mjsunit/regress/regress-fundecl.js b/deps/v8/test/mjsunit/regress/regress-fundecl.js
index fddb5895eb..b4e8856785 100644
--- a/deps/v8/test/mjsunit/regress/regress-fundecl.js
+++ b/deps/v8/test/mjsunit/regress/regress-fundecl.js
@@ -32,13 +32,15 @@
function h(a, b) {
var r = a + b;
- function X() { return 42; }
+ function X() {
+ return 42;
+ }
return r + X();
-}
-
-for (var i = 0; i < 5; i++) h(1,2);
+};
+%PrepareFunctionForOptimization(h);
+for (var i = 0; i < 5; i++) h(1, 2);
%OptimizeFunctionOnNextCall(h);
-assertEquals(45, h(1,2));
+assertEquals(45, h(1, 2));
assertEquals("foo742", h("foo", 7));
diff --git a/deps/v8/test/mjsunit/regress/regress-grow-deopt.js b/deps/v8/test/mjsunit/regress/regress-grow-deopt.js
index df3a83fe8b..281ae02a00 100644
--- a/deps/v8/test/mjsunit/regress/regress-grow-deopt.js
+++ b/deps/v8/test/mjsunit/regress/regress-grow-deopt.js
@@ -6,8 +6,8 @@
function f(a, v) {
a[a.length] = v;
-}
-
+};
+%PrepareFunctionForOptimization(f);
var a = [1.4];
f(a, 1);
f(a, 2);
diff --git a/deps/v8/test/mjsunit/regress/regress-grow-store-smi-check.js b/deps/v8/test/mjsunit/regress/regress-grow-store-smi-check.js
index 381141d523..c632997a69 100644
--- a/deps/v8/test/mjsunit/regress/regress-grow-store-smi-check.js
+++ b/deps/v8/test/mjsunit/regress/regress-grow-store-smi-check.js
@@ -35,15 +35,15 @@ function test(crc32) {
var c = i;
for (var j = 0; j < 8; j++) {
if (c & 1) {
- c = -306674912 ^ ((c >> 1) & 0x7fffffff);
+ c = -306674912 ^ c >> 1 & 0x7fffffff;
} else {
- c = (c >> 1) & 0x7fffffff;
+ c = c >> 1 & 0x7fffffff;
}
}
crc32[i] = c;
}
-}
-
+};
+%PrepareFunctionForOptimization(test);
var a = [0.5];
for (var i = 0; i < 256; ++i) a[i] = i;
diff --git a/deps/v8/test/mjsunit/regress/regress-gvn-ftt.js b/deps/v8/test/mjsunit/regress/regress-gvn-ftt.js
index dee5765438..7a71fc3cba 100644
--- a/deps/v8/test/mjsunit/regress/regress-gvn-ftt.js
+++ b/deps/v8/test/mjsunit/regress/regress-gvn-ftt.js
@@ -21,6 +21,7 @@ function f(o, value) {
var obj = {o: a1};
+%PrepareFunctionForOptimization(f);
f(obj, a1);
f(obj, a1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-hoist-load-named-field.js b/deps/v8/test/mjsunit/regress/regress-hoist-load-named-field.js
index 7df07a04ce..83f35cfdce 100644
--- a/deps/v8/test/mjsunit/regress/regress-hoist-load-named-field.js
+++ b/deps/v8/test/mjsunit/regress/regress-hoist-load-named-field.js
@@ -40,6 +40,7 @@ function f(o, a) {
return v;
}
+%PrepareFunctionForOptimization(f);
f({y:1.4}, [1]);
f({y:1.6}, [1]);
%OptimizeFunctionOnNextCall(f);
@@ -59,6 +60,7 @@ function f2(o) {
var o1 = { x: 1.5 };
var o2 = { y: 1, x: 1 };
+%PrepareFunctionForOptimization(f2);
f2(o1);
f2(o1);
f2(o2);
diff --git a/deps/v8/test/mjsunit/regress/regress-indirect-push-unchecked.js b/deps/v8/test/mjsunit/regress/regress-indirect-push-unchecked.js
index dca7e96d4e..73742fa0ec 100644
--- a/deps/v8/test/mjsunit/regress/regress-indirect-push-unchecked.js
+++ b/deps/v8/test/mjsunit/regress/regress-indirect-push-unchecked.js
@@ -8,8 +8,8 @@ var a = [1.5];
function p() {
Array.prototype.push.call(a, 1.7);
-}
-
+};
+%PrepareFunctionForOptimization(p);
p();
p();
p();
diff --git a/deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js b/deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js
index bd8fa31102..56b066abc7 100644
--- a/deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js
+++ b/deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js
@@ -7,12 +7,12 @@
// This tests that inlining a constructor call to a function which cannot be
// used as a constructor (e.g. arrow function) still throws correctly.
-var g = () => {}
+var g = () => {};
function f() {
return new g();
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertThrows(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-inline-class-constructor.js b/deps/v8/test/mjsunit/regress/regress-inline-class-constructor.js
index 1d77176758..6f3d6279a4 100644
--- a/deps/v8/test/mjsunit/regress/regress-inline-class-constructor.js
+++ b/deps/v8/test/mjsunit/regress/regress-inline-class-constructor.js
@@ -6,14 +6,14 @@
"use strict";
-var B = class extends Int32Array { }
+var B = class extends Int32Array {};
function f(b) {
if (b) {
null instanceof B;
}
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
f();
@@ -22,7 +22,7 @@ f();
function f2() {
return new B();
-}
-
+};
+%PrepareFunctionForOptimization(f2);
%OptimizeFunctionOnNextCall(f2);
f2();
diff --git a/deps/v8/test/mjsunit/regress/regress-inline-constant-load.js b/deps/v8/test/mjsunit/regress/regress-inline-constant-load.js
index 303639c74f..64f626d469 100644
--- a/deps/v8/test/mjsunit/regress/regress-inline-constant-load.js
+++ b/deps/v8/test/mjsunit/regress/regress-inline-constant-load.js
@@ -11,17 +11,19 @@ function foo(x) {
return x.bar;
}
-Object.defineProperty(o1, "bar", {value:200});
+Object.defineProperty(o1, 'bar', {value: 200});
foo(o1);
foo(o1);
function f(b) {
var o = o2;
- if (b) { return foo(o) }
-}
-
+ if (b) {
+ return foo(o);
+ }
+};
+%PrepareFunctionForOptimization(f);
f(false);
%OptimizeFunctionOnNextCall(f);
assertEquals(undefined, f(false));
-Object.defineProperty(o2, "bar", {value: 100});
+Object.defineProperty(o2, 'bar', {value: 100});
assertEquals(100, f(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-inline-getter-near-stack-limit.js b/deps/v8/test/mjsunit/regress/regress-inline-getter-near-stack-limit.js
index d459a7a8d3..e458302e67 100644
--- a/deps/v8/test/mjsunit/regress/regress-inline-getter-near-stack-limit.js
+++ b/deps/v8/test/mjsunit/regress/regress-inline-getter-near-stack-limit.js
@@ -6,14 +6,24 @@
function runNearStackLimit(f) {
function t() {
- try { t(); } catch(e) { f(); }
+ try {
+ t();
+ } catch (e) {
+ f();
+ }
};
- try { t(); } catch(e) {}
+ try {
+ t();
+ } catch (e) {
+ }
}
-function g(x) { return x.bar; }
-function f1() { }
-function f2() { }
+function g(x) {
+ return x.bar;
+};
+%PrepareFunctionForOptimization(g);
+function f1() {}
+function f2() {}
var x = Object.defineProperty({}, "bar", { get: f1 });
g(x);
@@ -21,4 +31,6 @@ g(x);
var y = Object.defineProperty({}, "bar", { get: f2 });
g(y);
%OptimizeFunctionOnNextCall(g);
-runNearStackLimit(function() { g(y); });
+runNearStackLimit(function() {
+ g(y);
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-inlining-function-literal-context.js b/deps/v8/test/mjsunit/regress/regress-inlining-function-literal-context.js
index 9b7f7ac768..d73dab5f4d 100644
--- a/deps/v8/test/mjsunit/regress/regress-inlining-function-literal-context.js
+++ b/deps/v8/test/mjsunit/regress/regress-inlining-function-literal-context.js
@@ -29,9 +29,9 @@
function mkbaz(x) {
function baz() {
- return function () {
+ return function() {
return [x];
- }
+ };
}
return baz;
}
@@ -44,6 +44,8 @@ function foo() {
}
// Tenure.
+;
+%PrepareFunctionForOptimization(foo);
gc();
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-int32-truncation.js b/deps/v8/test/mjsunit/regress/regress-int32-truncation.js
index dec4ac1195..438be59276 100644
--- a/deps/v8/test/mjsunit/regress/regress-int32-truncation.js
+++ b/deps/v8/test/mjsunit/regress/regress-int32-truncation.js
@@ -35,8 +35,8 @@ function f(i, b) {
}
var x = a >> 3;
return a;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(1, false);
f(1, true);
%OptimizeFunctionOnNextCall(f);
@@ -52,8 +52,8 @@ function f2(b) {
}
var x = a >> 3;
return a;
-}
-
+};
+%PrepareFunctionForOptimization(f2);
f2(false);
f2(true);
%OptimizeFunctionOnNextCall(f2);
diff --git a/deps/v8/test/mjsunit/regress/regress-is-smi-repr.js b/deps/v8/test/mjsunit/regress/regress-is-smi-repr.js
index e9f2b516b5..d9a4d34623 100644
--- a/deps/v8/test/mjsunit/regress/regress-is-smi-repr.js
+++ b/deps/v8/test/mjsunit/regress/regress-is-smi-repr.js
@@ -12,6 +12,7 @@ function g() { global = this; }
Object.defineProperty(Number.prototype, "prop", { get: g });
function f(s) { s.prop; }
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js b/deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js
index f2ead686c6..5a151faadc 100644
--- a/deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js
+++ b/deps/v8/test/mjsunit/regress/regress-keyed-access-string-length.js
@@ -29,8 +29,8 @@
function f(i) {
return "abc"[i];
-}
-
+};
+%PrepareFunctionForOptimization(f);
f("length");
f("length");
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining.js b/deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining.js
index 6cda168dff..5dbac6b171 100644
--- a/deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining.js
+++ b/deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining.js
@@ -7,10 +7,12 @@
"use strict";
function f1(d) {
return 1 + f2(f3(d));
+};
+%PrepareFunctionForOptimization(f1);
+function f2(v) {
+ return v;
}
-function f2(v) { return v; }
-
function f3(d) {
if (d) %DeoptimizeFunction(f1);
return 2;
diff --git a/deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining2.js b/deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining2.js
index 7b73b14232..2cbd050fe5 100644
--- a/deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining2.js
+++ b/deps/v8/test/mjsunit/regress/regress-lazy-deopt-inlining2.js
@@ -7,10 +7,12 @@
"use strict";
function f1(d) {
return 1 + f2(1, f3(d), d);
+};
+%PrepareFunctionForOptimization(f1);
+function f2(v0, v1, v2) {
+ return v1;
}
-function f2(v0, v1, v2) { return v1; }
-
function f3(d) {
if (d) %DeoptimizeFunction(f1);
return 2;
diff --git a/deps/v8/test/mjsunit/regress/regress-load-elements.js b/deps/v8/test/mjsunit/regress/regress-load-elements.js
index 68cdc8e8a1..e1aebfc66a 100644
--- a/deps/v8/test/mjsunit/regress/regress-load-elements.js
+++ b/deps/v8/test/mjsunit/regress/regress-load-elements.js
@@ -27,13 +27,13 @@
// Flags: --allow-natives-syntax
-function bad_func(o,a) {
+function bad_func(o, a) {
for (var i = 0; i < 1; ++i) {
o.prop = 0;
var x = a[0];
}
-}
-
+};
+%PrepareFunctionForOptimization(bad_func);
o = new Object();
a = {};
a[0] = 1;
diff --git a/deps/v8/test/mjsunit/regress/regress-load-field-by-index.js b/deps/v8/test/mjsunit/regress/regress-load-field-by-index.js
index c572c1ee36..2c4cff3111 100644
--- a/deps/v8/test/mjsunit/regress/regress-load-field-by-index.js
+++ b/deps/v8/test/mjsunit/regress/regress-load-field-by-index.js
@@ -14,6 +14,7 @@ function f(o) {
return result;
}
+%PrepareFunctionForOptimization(f);
f(o);
f(o);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js b/deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js
index 8c85c1380f..6698e40c02 100644
--- a/deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js
+++ b/deps/v8/test/mjsunit/regress/regress-loop-var-assign-without-block-scope.js
@@ -8,6 +8,7 @@ function f() {
for (i = 0; i < 2; i++)
var x = i, // var x that's assigned on each iteration
y = y||(()=>x), // single arrow function that returns x
+ z0 = (%PrepareFunctionForOptimization(y)), // prepare function for optimization
z = (%OptimizeFunctionOnNextCall(y), y()); // optimize y on first iteration
return y()
};
diff --git a/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js b/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
index ece96b3ff0..769e82b35a 100644
--- a/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
@@ -45,6 +45,7 @@ function g() {
}
var fun = g();
+%PrepareFunctionForOptimization(fun);
fun(false, c);
fun(false, c);
fun(false, c);
diff --git a/deps/v8/test/mjsunit/regress/regress-mul-canoverflow.js b/deps/v8/test/mjsunit/regress/regress-mul-canoverflow.js
index e3e21caec8..02b1622485 100644
--- a/deps/v8/test/mjsunit/regress/regress-mul-canoverflow.js
+++ b/deps/v8/test/mjsunit/regress/regress-mul-canoverflow.js
@@ -28,11 +28,14 @@
// Flags: --allow-natives-syntax
function boom(a) {
- return ((a | 0) * (a | 0)) | 0;
-}
+ return (a | 0) * (a | 0) | 0;
+};
+%PrepareFunctionForOptimization(boom);
function boom_unoptimized(a) {
- try {} catch(_) {}
- return ((a | 0) * (a | 0)) | 0;
+ try {
+ } catch (_) {
+ }
+ return (a | 0) * (a | 0) | 0;
}
boom(1, 1);
@@ -41,5 +44,5 @@ boom(2, 2);
%OptimizeFunctionOnNextCall(boom);
var big_int = 0x5F00000F;
var expected = boom_unoptimized(big_int);
-var actual = boom(big_int)
+var actual = boom(big_int);
assertEquals(expected, actual);
diff --git a/deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js b/deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js
index 4203ac48da..f5db0cd6b5 100644
--- a/deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js
+++ b/deps/v8/test/mjsunit/regress/regress-mul-canoverflowb.js
@@ -28,11 +28,12 @@
// Flags: --allow-natives-syntax
function boom(a) {
- return ((a | 0) * (a | 0)) | 0;
-}
+ return (a | 0) * (a | 0) | 0;
+};
+%PrepareFunctionForOptimization(boom);
%NeverOptimizeFunction(boom_unoptimized);
function boom_unoptimized(a) {
- return ((a | 0) * (a | 0)) | 0;
+ return (a | 0) * (a | 0) | 0;
}
boom(1, 1);
@@ -41,5 +42,5 @@ boom(2, 2);
%OptimizeFunctionOnNextCall(boom);
var big_int = 0x5F00000F;
var expected = boom_unoptimized(big_int);
-var actual = boom(big_int)
+var actual = boom(big_int);
assertEquals(expected, actual);
diff --git a/deps/v8/test/mjsunit/regress/regress-no-dummy-use-for-arguments-object.js b/deps/v8/test/mjsunit/regress/regress-no-dummy-use-for-arguments-object.js
index 658d776ea3..3b3b4c19a4 100644
--- a/deps/v8/test/mjsunit/regress/regress-no-dummy-use-for-arguments-object.js
+++ b/deps/v8/test/mjsunit/regress/regress-no-dummy-use-for-arguments-object.js
@@ -13,8 +13,8 @@ var global = "";
function f() {
global.dummy = this;
g({});
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-ntl.js b/deps/v8/test/mjsunit/regress/regress-ntl.js
index 993599e552..7cba1143ad 100644
--- a/deps/v8/test/mjsunit/regress/regress-ntl.js
+++ b/deps/v8/test/mjsunit/regress/regress-ntl.js
@@ -20,6 +20,7 @@ function mod1() {
}
var f = mod1();
+%PrepareFunctionForOptimization(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
assertThrows(f);
@@ -36,6 +37,7 @@ function bug2() {
}
}
+%PrepareFunctionForOptimization(bug2);
assertThrows(bug2);
%OptimizeFunctionOnNextCall(bug2);
assertThrows(bug2);
diff --git a/deps/v8/test/mjsunit/regress/regress-omit-checks.js b/deps/v8/test/mjsunit/regress/regress-omit-checks.js
index e5d5074988..cd60aacbdd 100644
--- a/deps/v8/test/mjsunit/regress/regress-omit-checks.js
+++ b/deps/v8/test/mjsunit/regress/regress-omit-checks.js
@@ -27,11 +27,11 @@
// Flags: --allow-natives-syntax
-var a = {x:1};
-var a_deprecate = {x:1};
+var a = {x: 1};
+var a_deprecate = {x: 1};
a_deprecate.x = 1.5;
function create() {
- return {__proto__:a, y:1};
+ return {__proto__: a, y: 1};
}
var b1 = create();
var b2 = create();
@@ -41,15 +41,19 @@ var b4 = create();
function set(b) {
b.x = 5;
b.z = 10;
-}
-
+};
+%PrepareFunctionForOptimization(set);
set(b1);
set(b2);
%OptimizeFunctionOnNextCall(set);
set(b3);
var called = false;
a.x = 1.5;
-Object.defineProperty(a, "z", {set:function(v) { called = true; }});
+Object.defineProperty(a, 'z', {
+ set: function(v) {
+ called = true;
+ }
+});
set(b4);
assertTrue(called);
assertEquals(undefined, b4.z);
diff --git a/deps/v8/test/mjsunit/regress/regress-opt-typeof-null.js b/deps/v8/test/mjsunit/regress/regress-opt-typeof-null.js
index e4721a18c5..250e03e6fa 100644
--- a/deps/v8/test/mjsunit/regress/regress-opt-typeof-null.js
+++ b/deps/v8/test/mjsunit/regress/regress-opt-typeof-null.js
@@ -7,6 +7,7 @@
function f() {
return typeof null === "object";
};
-
+%PrepareFunctionForOptimization(f);
+;
%OptimizeFunctionOnNextCall(f);
assertTrue(f());
diff --git a/deps/v8/test/mjsunit/regress/regress-parseint.js b/deps/v8/test/mjsunit/regress/regress-parseint.js
index 05501f31fd..a500cc6c45 100644
--- a/deps/v8/test/mjsunit/regress/regress-parseint.js
+++ b/deps/v8/test/mjsunit/regress/regress-parseint.js
@@ -6,12 +6,12 @@
function f(string, radix) {
// Use a phi to force radix into heap number representation.
- radix = (radix == 0) ? radix : (radix >> 0);
+ radix = radix == 0 ? radix : radix >> 0;
if (radix != 2) return NaN;
return %StringParseInt(string, radix);
-}
-
-assertEquals(2, (-4294967294) >> 0);
+};
+%PrepareFunctionForOptimization(f);
+assertEquals(2, -4294967294 >> 0);
assertEquals(3, f("11", -4294967294));
assertEquals(NaN, f("11", -2147483650));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-phi-truncation.js b/deps/v8/test/mjsunit/regress/regress-phi-truncation.js
index 940efe3357..09db57fd05 100644
--- a/deps/v8/test/mjsunit/regress/regress-phi-truncation.js
+++ b/deps/v8/test/mjsunit/regress/regress-phi-truncation.js
@@ -28,6 +28,7 @@
// Flags: --allow-natives-syntax
function test(fun, expectation) {
+ %PrepareFunctionForOptimization(fun);
assertEquals(1, fun(1));
%OptimizeFunctionOnNextCall(fun);
assertEquals(expectation, fun(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-polymorphic-load.js b/deps/v8/test/mjsunit/regress/regress-polymorphic-load.js
index 2545e85f60..a6aa746fd2 100644
--- a/deps/v8/test/mjsunit/regress/regress-polymorphic-load.js
+++ b/deps/v8/test/mjsunit/regress/regress-polymorphic-load.js
@@ -29,10 +29,10 @@
function f(o) {
return o.x;
-}
-
-var o1 = {x:1};
-var o2 = {__proto__: {x:2}};
+};
+%PrepareFunctionForOptimization(f);
+var o1 = {x: 1};
+var o2 = {__proto__: {x: 2}};
f(o2);
f(o2);
diff --git a/deps/v8/test/mjsunit/regress/regress-polymorphic-store.js b/deps/v8/test/mjsunit/regress/regress-polymorphic-store.js
index 4723a7f434..bab5eb016a 100644
--- a/deps/v8/test/mjsunit/regress/regress-polymorphic-store.js
+++ b/deps/v8/test/mjsunit/regress/regress-polymorphic-store.js
@@ -28,7 +28,9 @@
// Flags: --allow-natives-syntax
var o1 = {};
-o1.f1 = function() { return 10; };
+o1.f1 = function() {
+ return 10;
+};
o1.x = 5;
o1.y = 2;
var o2 = {};
@@ -37,8 +39,8 @@ o2.y = 5;
function store(o, v) {
o.y = v;
-}
-
+};
+%PrepareFunctionForOptimization(store);
store(o2, 0);
store(o1, 0);
store(o2, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-smi-math-floor-round.js b/deps/v8/test/mjsunit/regress/regress-smi-math-floor-round.js
index 7c033a3bd0..841594bb2e 100644
--- a/deps/v8/test/mjsunit/regress/regress-smi-math-floor-round.js
+++ b/deps/v8/test/mjsunit/regress/regress-smi-math-floor-round.js
@@ -30,18 +30,18 @@
function f(o) {
return Math.floor(o.x_smi) + 1;
-}
-
-assertEquals(2, f({x_smi:1}));
-assertEquals(2, f({x_smi:1}));
+};
+%PrepareFunctionForOptimization(f);
+assertEquals(2, f({x_smi: 1}));
+assertEquals(2, f({x_smi: 1}));
%OptimizeFunctionOnNextCall(f);
-assertEquals(2, f({x_smi:1}));
+assertEquals(2, f({x_smi: 1}));
function f2(o) {
return Math.floor(o.x_tagged) + 1;
-}
-
-var o = {x_tagged:{}};
+};
+%PrepareFunctionForOptimization(f2);
+var o = {x_tagged: {}};
o.x_tagged = 1.4;
assertEquals(2, f2(o));
assertEquals(2, f2(o));
@@ -50,17 +50,17 @@ assertEquals(2, f2(o));
function f3(o) {
return Math.round(o.x_smi) + 1;
-}
-
-assertEquals(2, f3({x_smi:1}));
-assertEquals(2, f3({x_smi:1}));
+};
+%PrepareFunctionForOptimization(f3);
+assertEquals(2, f3({x_smi: 1}));
+assertEquals(2, f3({x_smi: 1}));
%OptimizeFunctionOnNextCall(f3);
-assertEquals(2, f3({x_smi:1}));
+assertEquals(2, f3({x_smi: 1}));
function f4(o) {
return Math.round(o.x_tagged) + 1;
-}
-
+};
+%PrepareFunctionForOptimization(f4);
assertEquals(2, f4(o));
assertEquals(2, f4(o));
%OptimizeFunctionOnNextCall(f4);
diff --git a/deps/v8/test/mjsunit/regress/regress-sqrt.js b/deps/v8/test/mjsunit/regress/regress-sqrt.js
index f2a7e55242..2e5b454214 100644
--- a/deps/v8/test/mjsunit/regress/regress-sqrt.js
+++ b/deps/v8/test/mjsunit/regress/regress-sqrt.js
@@ -32,8 +32,8 @@
function f(x) {
return Math.sqrt(x);
-}
-
+};
+%PrepareFunctionForOptimization(f);
var x = 7.0506280066499245e-233;
var a = f(x);
diff --git a/deps/v8/test/mjsunit/regress/regress-store-heapobject.js b/deps/v8/test/mjsunit/regress/regress-store-heapobject.js
index 9f2a1b8ffa..f4e34c81de 100644
--- a/deps/v8/test/mjsunit/regress/regress-store-heapobject.js
+++ b/deps/v8/test/mjsunit/regress/regress-store-heapobject.js
@@ -19,8 +19,8 @@ function f(bool) {
store(o, 1);
}
return o;
-}
-
+};
+%PrepareFunctionForOptimization(f);
f(false);
f(false);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js b/deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js
index a02a2778b6..0bc180aec0 100644
--- a/deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js
+++ b/deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js
@@ -4,7 +4,11 @@
// Flags: --allow-natives-syntax
-var thrower = { [Symbol.toPrimitive]: function() { FAIL } };
+var thrower = {
+ [Symbol.toPrimitive]: function() {
+ FAIL;
+ }
+};
function testTrace(func) {
try {
@@ -17,8 +21,10 @@ function testTrace(func) {
testTrace(String.fromCharCode);
-function foo(x) { return String.fromCharCode(x); }
-
+function foo(x) {
+ return String.fromCharCode(x);
+};
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(2);
testTrace(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js b/deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js
index 2e14aa027c..c4e364851d 100644
--- a/deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js
+++ b/deps/v8/test/mjsunit/regress/regress-stringAt-boundsCheck.js
@@ -7,10 +7,10 @@
(() => {
function f(u) {
for (var j = 0; j < 20; ++j) {
- print("" + u.codePointAt());
+ print('' + u.codePointAt());
}
- }
-
+ };
+ %PrepareFunctionForOptimization(f);
f("test");
f("foo");
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js b/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
index c51fa8f98b..2e2cfd465c 100644
--- a/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
+++ b/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
@@ -19,8 +19,8 @@ function get_closure() {
return x;
}
}
-%PrepareFunctionForOptimization(get_closure);
var f1 = get_closure();
+%PrepareFunctionForOptimization(f1);
f1(new Ctor(), false);
f1(new Ctor(), false);
@@ -28,6 +28,7 @@ f1(new Ctor(), false);
// Kick off concurrent recompilation and OSR.
var o = new Ctor();
+%PrepareFunctionForOptimization(f1);
f1(o, true);
// Flush the optimizing compiler's queue.
@@ -37,4 +38,5 @@ f1(o, true);
o.c = 2.2;
var f2 = get_closure();
+%PrepareFunctionForOptimization(f2);
f2(new Ctor(), true);
diff --git a/deps/v8/test/mjsunit/regress/regress-typedarray-length.js b/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
index 0dde61fc27..22ede7c096 100644
--- a/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
+++ b/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
@@ -9,8 +9,8 @@ a.__proto__ = null;
function get(a) {
return a.length;
-}
-
+};
+%PrepareFunctionForOptimization(get);
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
@@ -19,8 +19,9 @@ assertEquals(undefined, get(a));
get = function(a) {
return a.byteLength;
-}
-
+};
+;
+%PrepareFunctionForOptimization(get);
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
@@ -29,8 +30,9 @@ assertEquals(undefined, get(a));
get = function(a) {
return a.byteOffset;
-}
-
+};
+;
+%PrepareFunctionForOptimization(get);
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
@@ -38,74 +40,82 @@ assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
(function() {
- "use strict";
+"use strict";
- class MyTypedArray extends Int32Array {
- get length() {
- return "length";
- }
- }
-
- a = new MyTypedArray();
-
- get = function(a) {
- return a.length;
+class MyTypedArray extends Int32Array {
+ get length() {
+ return "length";
}
+}
- assertEquals("length", get(a));
- assertEquals("length", get(a));
- assertEquals("length", get(a));
- %OptimizeFunctionOnNextCall(get);
- assertEquals("length", get(a));
+a = new MyTypedArray();
- a.__proto__ = null;
+get = function(a) {
+ return a.length;
+};
+;
+%PrepareFunctionForOptimization(get);
+assertEquals("length", get(a));
+assertEquals("length", get(a));
+assertEquals("length", get(a));
+%OptimizeFunctionOnNextCall(get);
+assertEquals("length", get(a));
- get = function(a) {
- return a.length;
- }
+a.__proto__ = null;
- assertEquals(undefined, get(a));
- assertEquals(undefined, get(a));
- assertEquals(undefined, get(a));
- %OptimizeFunctionOnNextCall(get);
- assertEquals(undefined, get(a));
+get = function(a) {
+ return a.length;
+};
+;
+%PrepareFunctionForOptimization(get);
+assertEquals(undefined, get(a));
+assertEquals(undefined, get(a));
+assertEquals(undefined, get(a));
+%OptimizeFunctionOnNextCall(get);
+assertEquals(undefined, get(a));
})();
(function() {
- "use strict";
+"use strict";
- class MyTypedArray extends Int32Array {
- constructor(length) {
- super(length);
- }
+class MyTypedArray extends Int32Array {
+ constructor(length) {
+ super(length);
}
+}
- a = new MyTypedArray(1024);
-
- get = function(a) {
- return a.length;
- }
+a = new MyTypedArray(1024);
- assertEquals(1024, get(a));
- assertEquals(1024, get(a));
- assertEquals(1024, get(a));
- %OptimizeFunctionOnNextCall(get);
- assertEquals(1024, get(a));
+get = function(a) {
+ return a.length;
+};
+;
+%PrepareFunctionForOptimization(get);
+assertEquals(1024, get(a));
+assertEquals(1024, get(a));
+assertEquals(1024, get(a));
+%OptimizeFunctionOnNextCall(get);
+assertEquals(1024, get(a));
})();
(function() {
- "use strict";
- var a = new Uint8Array(4);
- Object.defineProperty(a, "length", {get: function() { return "blah"; }});
- get = function(a) {
- return a.length;
+"use strict";
+var a = new Uint8Array(4);
+Object.defineProperty(a, 'length', {
+ get: function() {
+ return 'blah';
}
-
- assertEquals("blah", get(a));
- assertEquals("blah", get(a));
- assertEquals("blah", get(a));
- %OptimizeFunctionOnNextCall(get);
- assertEquals("blah", get(a));
+});
+get = function(a) {
+ return a.length;
+};
+;
+%PrepareFunctionForOptimization(get);
+assertEquals("blah", get(a));
+assertEquals("blah", get(a));
+assertEquals("blah", get(a));
+%OptimizeFunctionOnNextCall(get);
+assertEquals("blah", get(a));
})();
// Ensure we can delete length, byteOffset, byteLength.
@@ -120,8 +130,9 @@ a = new Int32Array(100);
get = function(a) {
return a.length;
-}
-
+};
+;
+%PrepareFunctionForOptimization(get);
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
@@ -130,8 +141,9 @@ assertEquals(undefined, get(a));
get = function(a) {
return a.byteLength;
-}
-
+};
+;
+%PrepareFunctionForOptimization(get);
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
@@ -140,8 +152,9 @@ assertEquals(undefined, get(a));
get = function(a) {
return a.byteOffset;
-}
-
+};
+;
+%PrepareFunctionForOptimization(get);
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
assertEquals(undefined, get(a));
diff --git a/deps/v8/test/mjsunit/regress/regress-undefined-nan.js b/deps/v8/test/mjsunit/regress/regress-undefined-nan.js
index 0e9b3d3f4a..3d14126490 100644
--- a/deps/v8/test/mjsunit/regress/regress-undefined-nan.js
+++ b/deps/v8/test/mjsunit/regress/regress-undefined-nan.js
@@ -6,18 +6,20 @@
function loader(dst, src, i) {
dst[i] = src[i];
-}
-
+};
+%PrepareFunctionForOptimization(loader);
var ab = new ArrayBuffer(8);
var i_view = new Int32Array(ab);
-i_view[0] = %GetHoleNaNUpper()
+i_view[0] = %GetHoleNaNUpper();
i_view[1] = %GetHoleNaNLower();
var f_view = new Float64Array(ab);
var fixed_double_elements = new Float64Array(1);
-function opt_store() { fixed_double_elements[0] = f_view[0]; }
-
+function opt_store() {
+ fixed_double_elements[0] = f_view[0];
+};
+%PrepareFunctionForOptimization(opt_store);
opt_store();
opt_store();
%OptimizeFunctionOnNextCall(opt_store);
diff --git a/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js b/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js
index 5a0bc38c07..400434c612 100644
--- a/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js
+++ b/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js
@@ -6,15 +6,17 @@
var ab = new ArrayBuffer(8);
var i_view = new Int32Array(ab);
-i_view[0] = %GetHoleNaNUpper()
+i_view[0] = %GetHoleNaNUpper();
i_view[1] = %GetHoleNaNLower();
var f_view = new Float64Array(ab);
var fixed_double_elements = new Float64Array(1);
fixed_double_elements[0] = f_view[0];
-function A(src) { this.x = src[0]; }
-
+function A(src) {
+ this.x = src[0];
+};
+%PrepareFunctionForOptimization(A);
new A(fixed_double_elements);
new A(fixed_double_elements);
@@ -22,8 +24,10 @@ new A(fixed_double_elements);
var obj = new A(fixed_double_elements);
-function move_x(dst, obj) { dst[0] = obj.x; }
-
+function move_x(dst, obj) {
+ dst[0] = obj.x;
+};
+%PrepareFunctionForOptimization(move_x);
var doubles = [0.5];
move_x(doubles, obj);
move_x(doubles, obj);
diff --git a/deps/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js b/deps/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js
index 9e6ec9db07..b11ef177ad 100644
--- a/deps/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js
+++ b/deps/v8/test/mjsunit/regress/regress-undefined-store-keyed-fast-element.js
@@ -29,8 +29,8 @@
function f(v) {
return [0.0, 0.1, 0.2, v];
-}
-
+};
+%PrepareFunctionForOptimization(f);
assertEquals([0.0, 0.1, 0.2, NaN], f(NaN));
assertEquals([0.0, 0.1, 0.2, NaN], f(NaN));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js b/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
index b7fa2b1a75..2b34159c14 100644
--- a/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
+++ b/deps/v8/test/mjsunit/regress/regress-unlink-closures-on-deopt.js
@@ -13,6 +13,8 @@ function foo() {
let g1 = foo();
let g2 = foo();
+%PrepareFunctionForOptimization(g1);
+%PrepareFunctionForOptimization(g2);
g1({ f : 1});
g1({ f : 2});
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-4839.js b/deps/v8/test/mjsunit/regress/regress-v8-4839.js
index 120685b1de..fc33deb858 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-4839.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-4839.js
@@ -4,59 +4,71 @@
// Flags: --allow-natives-syntax
-function dummy() { }
+function dummy() {}
(function InlinedFunctionTestContext() {
- var f = function() { }
+ var f = function() {};
function g() {
- var s = "hey";
- dummy(); // Force a deopt point.
- if (f()) return s;
- }
-
+ var s = 'hey';
+ dummy(); // Force a deopt point.
+ if (f()) return s;
+ };
+ %PrepareFunctionForOptimization(g);
g();
g();
g();
%OptimizeFunctionOnNextCall(g);
- f = function() { return true; }
+ f = function() {
+ return true;
+ };
assertEquals("hey", g());
})();
(function InlinedConstructorReturnTestContext() {
- function c() { return 1; }
+ function c() {
+ return 1;
+ }
- var f = function() { return !(new c()); }
+ var f = function() {
+ return !new c();
+ };
function g() {
- var s = "hey";
- dummy(); // Force a deopt point.
- if (f()) return s;
- }
-
+ var s = 'hey';
+ dummy(); // Force a deopt point.
+ if (f()) return s;
+ };
+ %PrepareFunctionForOptimization(g);
g();
g();
g();
%OptimizeFunctionOnNextCall(g);
- f = function() { return true; }
+ f = function() {
+ return true;
+ };
assertEquals("hey", g());
})();
(function InlinedConstructorNoReturnTestContext() {
- function c() { }
+ function c() {}
- var f = function() { return !(new c()); }
+ var f = function() {
+ return !new c();
+ };
function g() {
- var s = "hey";
- dummy(); // Force a deopt point.
- if (f()) return s;
- }
-
+ var s = 'hey';
+ dummy(); // Force a deopt point.
+ if (f()) return s;
+ };
+ %PrepareFunctionForOptimization(g);
g();
g();
g();
%OptimizeFunctionOnNextCall(g);
- f = function() { return true; }
+ f = function() {
+ return true;
+ };
assertEquals("hey", g());
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5254-1.js b/deps/v8/test/mjsunit/regress/regress-v8-5254-1.js
index 624c85f477..c0d9f335de 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-5254-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5254-1.js
@@ -4,22 +4,22 @@
// Flags: --allow-natives-syntax
-var foo = (function() {
+var foo = function() {
"use asm";
var a = new Uint16Array(2);
a[0] = 32815;
a[1] = 32114;
function foo() {
- var x = a[0]|0;
- var y = a[1]|0;
- if (x < 0) x = 4294967296 + x|0;
- if (y < 0) y = 4294967296 + y|0;
+ var x = a[0] | 0;
+ var y = a[1] | 0;
+ if (x < 0) x = 4294967296 + x | 0;
+ if (y < 0) y = 4294967296 + y | 0;
return x >= y;
- }
-
+ };
+ %PrepareFunctionForOptimization(foo);
return foo;
-})();
+}();
assertTrue(foo());
assertTrue(foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5254-2.js b/deps/v8/test/mjsunit/regress/regress-v8-5254-2.js
index f486fa8aa3..6efde90516 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-5254-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5254-2.js
@@ -4,22 +4,22 @@
// Flags: --allow-natives-syntax
-var foo = (function() {
+var foo = function() {
"use asm";
var a = new Uint8Array(2);
a[0] = 128;
a[1] = 127;
function foo() {
- var x = a[0]|0;
- var y = a[1]|0;
- if (x < 0) x = 4294967296 + x|0;
- if (y < 0) y = 4294967296 + y|0;
+ var x = a[0] | 0;
+ var y = a[1] | 0;
+ if (x < 0) x = 4294967296 + x | 0;
+ if (y < 0) y = 4294967296 + y | 0;
return x >= y;
- }
-
+ };
+ %PrepareFunctionForOptimization(foo);
return foo;
-})();
+}();
assertTrue(foo());
assertTrue(foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5255-1.js b/deps/v8/test/mjsunit/regress/regress-v8-5255-1.js
index cd14d63792..55555dcd07 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-5255-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5255-1.js
@@ -6,8 +6,8 @@
function foo(x) {
return (x ? true : "7") >> 0;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5255-2.js b/deps/v8/test/mjsunit/regress/regress-v8-5255-2.js
index 5ae57ce64a..2d7d29f7c4 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-5255-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5255-2.js
@@ -6,8 +6,8 @@
function foo(x) {
return (x ? true : "7") << 0;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5255-3.js b/deps/v8/test/mjsunit/regress/regress-v8-5255-3.js
index 004d6874ad..f37c0f575f 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-5255-3.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5255-3.js
@@ -6,8 +6,8 @@
function foo(x) {
return (x ? true : "7") >>> 0;
-}
-
+};
+%PrepareFunctionForOptimization(foo);
assertEquals(1, foo(1));
assertEquals(1, foo(1));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5697.js b/deps/v8/test/mjsunit/regress/regress-v8-5697.js
index d7c1679fa1..9dec917f70 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-5697.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5697.js
@@ -4,25 +4,34 @@
// Flags: --allow-natives-syntax --opt
-function load(o) { return o.x; }
-
+function load(o) {
+ return o.x;
+};
for (var x = 0; x < 1000; ++x) {
+ %PrepareFunctionForOptimization(load);
load({x});
load({x});
%OptimizeFunctionOnNextCall(load);
- try { load(); } catch (e) { }
+ try {
+ load();
+ } catch (e) {
+ }
}
assertOptimized(load);
-
-function store(o) { o.x = -1; }
-
+function store(o) {
+ o.x = -1;
+};
for (var x = 0; x < 1000; ++x) {
+ %PrepareFunctionForOptimization(store);
store({x});
store({x});
%OptimizeFunctionOnNextCall(store);
- try { store(); } catch (e) { }
+ try {
+ store();
+ } catch (e) {
+ }
}
assertOptimized(store);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-6515.js b/deps/v8/test/mjsunit/regress/regress-v8-6515.js
new file mode 100644
index 0000000000..7d7e759e0c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-6515.js
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// These patterns shouldn't generate code of excessive size.
+assertNull(/\b\B\b\B\b\B\b\B\b\B\b\B\b\B\b\B\b\B/.exec(" aa "));
+assertNull(/\b\b\b\b\b\b\b\b\b\B\B\B\B\B\B\B\B\B/.exec(" aa "));
+assertNull(/\b\B$\b\B$\b\B$\b\B$\b\B$\b\B$\b\B$/.exec(" aa "));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-6906.js b/deps/v8/test/mjsunit/regress/regress-v8-6906.js
index 72aa9858d2..cb16a15af2 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-6906.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-6906.js
@@ -4,8 +4,8 @@
// Flags: --allow-natives-syntax
-function f() {}
-
+function f() {};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-7848.js b/deps/v8/test/mjsunit/regress/regress-v8-7848.js
new file mode 100644
index 0000000000..fa673cff46
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-7848.js
@@ -0,0 +1,26 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Should never be called in this test.
+Error.prepareStackTrace = () => 299792458;
+
+{
+ const that_realm = Realm.create();
+ const result = Realm.eval(that_realm,
+ "() => { Error.prepareStackTrace = () => 42; return new Error(); }")();
+ assertEquals(42, result.stack);
+}
+{
+ const that_realm = Realm.create();
+ const result = Realm.eval(that_realm,
+ "() => { Error.prepareStackTrace = () => 42; " +
+ "class MyError extends Error {}; return new MyError(); }")();
+ assertEquals(42, result.stack);
+}
+{
+ const that_realm = Realm.create();
+ const result = Realm.eval(that_realm,
+ "() => { Error.prepareStackTrace = () => 42; return {}; }")();
+ assertFalse("stack" in result);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-8070.js b/deps/v8/test/mjsunit/regress/regress-v8-8070.js
index a75230da1f..5ce8fddc92 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-8070.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-8070.js
@@ -17,6 +17,7 @@ function foo(a) {
}
const a = [1, 2, 3];
+%PrepareFunctionForOptimization(foo);
assertTrue(foo(a));
assertTrue(foo(a));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-8770.js b/deps/v8/test/mjsunit/regress/regress-v8-8770.js
new file mode 100644
index 0000000000..9a3b442389
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-8770.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const re = /^.*?Y((?=X?).)*Y$/s;
+const sult = "YABCY";
+const result = re.exec(sult);
+
+assertNotNull(result);
+assertArrayEquals([sult, "C"], result);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9233.js b/deps/v8/test/mjsunit/regress/regress-v8-9233.js
index 9164f940bf..ef3240bee8 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-9233.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9233.js
@@ -10,8 +10,10 @@ o1.y = 999;
// o2 will share map with o1 in its initial state
var o2 = { x: 1 };
-function f() { return o2.x; }
-
+function f() {
+ return o2.x;
+};
+%PrepareFunctionForOptimization(f);
assertEquals(1, f());
assertEquals(1, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9394-2.js b/deps/v8/test/mjsunit/regress/regress-v8-9394-2.js
new file mode 100644
index 0000000000..8034eb951c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9394-2.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --enable-lazy-source-positions --stress-lazy-source-positions
+
+function test() {
+ function f() {
+ with ({}) {
+ // This is a non-assigning shadowing access to value. If both f and test
+ // are fully parsed or both are preparsed, then this is resolved during
+ // scope analysis to the outer value, and the outer value knows it can be
+ // shadowed. If test is fully parsed and f is preparsed, value here
+ // doesn't resolve to anything during partial analysis, and the outer
+ // value does not know it can be shadowed.
+ return value;
+ }
+ }
+ var value = 2;
+ var status = f();
+ return value;
+}
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9394.js b/deps/v8/test/mjsunit/regress/regress-v8-9394.js
new file mode 100644
index 0000000000..e59f39a369
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9394.js
@@ -0,0 +1,83 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+(function testMaybeAssignedWithShadowing() {
+
+ function foo() {
+ let a = 0;
+ let g;
+
+ with ({}) {
+ g = function g() {
+ // Increment a, should set it maybe_assigned but doesn't in the bug.
+ ++a;
+ }
+ // Shadowing the outer 'a' with a dynamic one.
+ a;
+ }
+
+ return function () {
+ // The access to a would be context specialized (to 2 since it's after the
+ // second call) if maybe_assigned were incorrectly not set.
+ g(a);
+ return a;
+ }
+ };
+
+ f = foo();
+ %PrepareFunctionForOptimization(f);
+ assertEquals(f(), 1);
+ assertEquals(f(), 2);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(f(), 3);
+
+})();
+
+// Same test as above, just with more shadowing (including dynamic->dynamic
+// shadowing) and skipping over scopes with shadows.
+(function testMaybeAssignedWithDeeplyNestedShadowing() {
+
+ function foo() {
+ let a = 0;
+ let g;
+
+ // Increment a, should set it maybe_assigned but doesn't in the bug.
+ with ({}) {
+ with ({}) {
+ with ({}) {
+ with ({}) {
+ with ({}) {
+ g = function g() { ++a; }
+ // Shadow the second dynamic 'a'.
+ a;
+ }
+ // Shadowing the first dynamic 'a'.
+ a;
+ }
+ // Skip shadowing here
+ }
+ // Skip shadowing here
+ }
+ // Shadowing the outer 'a' with a dynamic one.
+ a;
+ }
+
+ return function () {
+ // The access to a would be context specialized (to 2 since it's after the
+ // second call) if maybe_assigned were incorrectly not set.
+ g(a);
+ return a;
+ }
+ };
+
+ f = foo();
+ %PrepareFunctionForOptimization(f);
+ assertEquals(f(), 1);
+ assertEquals(f(), 2);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(f(), 3);
+
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9460.js b/deps/v8/test/mjsunit/regress/regress-v8-9460.js
new file mode 100644
index 0000000000..b9db175a56
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9460.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var arr = [0, 1];
+
+assertThrows(
+ () => Object.defineProperty(arr, 'length', {value: 1, configurable: true}),
+ TypeError);
+assertEquals(2, arr.length);
+
+assertThrows(
+ () => Object.defineProperty(arr, 'length', {value: 2, configurable: true}),
+ TypeError);
+assertEquals(2, arr.length);
+
+assertThrows(
+ () => Object.defineProperty(arr, 'length', {value: 3, configurable: true}),
+ TypeError);
+assertEquals(2, arr.length);
diff --git a/deps/v8/test/mjsunit/regress/regress_967104.js b/deps/v8/test/mjsunit/regress/regress_967104.js
new file mode 100644
index 0000000000..78c24b0154
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress_967104.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that arrays with non-writable length are handled correctly
+
+arr = new Array();
+Object.defineProperty(arr, "length", {value: 3, writable: false});
+function foo(i, v) { arr[i] = v; }
+foo(3);
+foo(3, 3);
+assertEquals(arr[3], undefined);
diff --git a/deps/v8/test/mjsunit/regress/string-next-encoding.js b/deps/v8/test/mjsunit/regress/string-next-encoding.js
index 27b99a9c9e..d66bab8c12 100644
--- a/deps/v8/test/mjsunit/regress/string-next-encoding.js
+++ b/deps/v8/test/mjsunit/regress/string-next-encoding.js
@@ -12,8 +12,8 @@ function f() {
assertEquals("�", i.next().value);
assertEquals("𝌆", i.next().value);
assertSame(undefined, i.next().value);
-}
-
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/typed-array-lifetime.js b/deps/v8/test/mjsunit/regress/typed-array-lifetime.js
index db9a216531..8f63ec6816 100644
--- a/deps/v8/test/mjsunit/regress/typed-array-lifetime.js
+++ b/deps/v8/test/mjsunit/regress/typed-array-lifetime.js
@@ -15,6 +15,7 @@ var foo = (function () {
}
})();
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-02256.js b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
index 791d2a1d2d..199626b3c3 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-02256.js
@@ -936,6 +936,7 @@ function __f_30(x) {
return 0;
}
try {
+ %PrepareFunctionForOptimization(__f_30);
assertEquals(0, __f_30(0));
assertEquals(0, __f_30(0));
%OptimizeFunctionOnNextCall(__f_30);
@@ -956,6 +957,7 @@ function __f_33() {
__f_32({});
}
try {
+ %PrepareFunctionForOptimization(__f_33);
__f_33();
__f_33();
__f_33();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-02256b.js b/deps/v8/test/mjsunit/regress/wasm/regress-02256b.js
index 120643896d..249e96dfef 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-02256b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-02256b.js
@@ -472,6 +472,7 @@ function __f_30(x) {
return 0;
}
try {
+ %PrepareFunctionForOptimization(__f_30);
assertEquals(0, __f_30(0));
assertEquals(0, __f_30(0));
%OptimizeFunctionOnNextCall(__f_30);
@@ -492,6 +493,7 @@ function __f_33() {
__f_32({});
}
try {
+ %PrepareFunctionForOptimization(__f_33);
__f_33();
__f_33();
__f_33();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9425.js b/deps/v8/test/mjsunit/regress/wasm/regress-9425.js
new file mode 100644
index 0000000000..eb2ca1552a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9425.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --experimental-wasm-threads
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+builder.addMemory(1, 1, /*exp*/ false, /*shared*/ true);
+
+builder.addFunction('test', kSig_v_v).addBody([
+ kExprI32Const, 0, //
+ kExprI64Const, 0, //
+ kExprI64Const, 0, //
+ kAtomicPrefix, kExprI64AtomicWait, 3, 0, //
+ kExprDrop, //
+]);
+
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9447.js b/deps/v8/test/mjsunit/regress/wasm/regress-9447.js
new file mode 100644
index 0000000000..80d64b9b2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9447.js
@@ -0,0 +1,37 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-experimental-wasm-bigint
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+// Generate a re-exported function that wraps a JavaScript callable, but with a
+// function signature that is incompatible (i.e. i64 return type) with JS.
+var fun1 = (function GenerateFun1() {
+ let builder = new WasmModuleBuilder();
+ function fun() { return 0 }
+ let fun_index = builder.addImport("m", "fun", kSig_l_v)
+ builder.addExport("fun", fun_index);
+ let instance = builder.instantiate({ m: { fun: fun }});
+ return instance.exports.fun;
+})();
+
+// Generate an exported function that calls the above re-export from another
+// module, still with a function signature that is incompatible with JS.
+var fun2 = (function GenerateFun2() {
+ let builder = new WasmModuleBuilder();
+ let fun_index = builder.addImport("m", "fun", kSig_l_v)
+ builder.addFunction('main', kSig_v_v)
+ .addBody([
+ kExprCallFunction, fun_index,
+ kExprDrop
+ ])
+ .exportFunc();
+ let instance = builder.instantiate({ m: { fun: fun1 }});
+ return instance.exports.main;
+})();
+
+// Both exported functions should throw, no matter how often they get wrapped.
+assertThrows(fun1, TypeError, /wasm function signature contains illegal type/);
+assertThrows(fun2, TypeError, /wasm function signature contains illegal type/);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-968078.js b/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
new file mode 100644
index 0000000000..2935ea05e3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-968078.js
@@ -0,0 +1,47 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+ function repeat(value, length) {
+ var arr = new Array(length);
+ for (let i = 0; i < length; i++) {
+ arr[i] = value;
+ }
+ return arr;
+ }
+ function br_table(block_index, length, def_block) {
+ const bytes = new Binary();
+ bytes.emit_bytes([kExprBrTable]);
+ // Functions count (different than the count in the functions section.
+ bytes.emit_u32v(length);
+ bytes.emit_bytes(repeat(block_index, length));
+ bytes.emit_bytes([def_block]);
+ return Array.from(bytes.trunc_buffer());
+ }
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(12, 12, false);
+ builder.addFunction("foo", kSig_v_iii)
+ .addBody([].concat([
+ kExprBlock, kWasmStmt,
+ kExprGetLocal, 0x2,
+ kExprI32Const, 0x01,
+ kExprI32And,
+ // Generate a test branch (which has 32k limited reach).
+ kExprIf, kWasmStmt,
+ kExprGetLocal, 0x0,
+ kExprI32Const, 0x01,
+ kExprI32And,
+ kExprBrIf, 0x1,
+ kExprGetLocal, 0x0,
+ // Emit a br_table that is long enough to make the test branch go out of range.
+ ], br_table(0x1, 9000, 0x00), [
+ kExprEnd,
+ kExprEnd,
+ ])).exportFunc();
+ builder.instantiate();
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-980007.js b/deps/v8/test/mjsunit/regress/wasm/regress-980007.js
new file mode 100644
index 0000000000..5715c6f815
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-980007.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addFunction(undefined, kSig_i_i).addBody([
+ kExprI64Const, 0x01,
+ kExprI32ConvertI64,
+ kExprI32Const, 0x80, 0x80, 0x80, 0x80, 0x78,
+ kExprI32Sub,
+]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-985154.js b/deps/v8/test/mjsunit/regress/wasm/regress-985154.js
new file mode 100644
index 0000000000..30f34d55be
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-985154.js
@@ -0,0 +1,34 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function TestSloppynessPropagates() {
+ let f = (function() {
+ function Module() {
+ "use asm";
+ function f() {}
+ return {f: f}
+ }
+ return Module;
+ })()().f;
+ let p = Object.getOwnPropertyNames(f);
+ assertArrayEquals(["length", "name", "arguments", "caller", "prototype"], p);
+ assertEquals(null, f.arguments);
+ assertEquals(null, f.caller);
+})();
+
+(function TestStrictnessPropagates() {
+ let f = (function() {
+ "use strict";
+ function Module() {
+ "use asm";
+ function f() {}
+ return {f: f}
+ }
+ return Module;
+ })()().f;
+ let p = Object.getOwnPropertyNames(f);
+ assertArrayEquals(["length", "name", "prototype"], p);
+ assertThrows(() => f.arguments, TypeError);
+ assertThrows(() => f.caller, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/sealed-array-reduce.js b/deps/v8/test/mjsunit/sealed-array-reduce.js
new file mode 100644
index 0000000000..a572aa2cc4
--- /dev/null
+++ b/deps/v8/test/mjsunit/sealed-array-reduce.js
@@ -0,0 +1,1431 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+/**
+ * @fileoverview Test reduce and reduceRight
+ */
+
+function clone(v) {
+ // Shallow-copies arrays, returns everything else verbatim.
+ if (v instanceof Array) {
+ // Shallow-copy an array.
+ var newArray = new Array(v.length);
+ for (var i in v) {
+ newArray[i] = v[i];
+ }
+ return newArray;
+ }
+ return v;
+}
+
+
+// Creates a callback function for reduce/reduceRight that tests the number
+// of arguments and otherwise behaves as "func", but which also
+// records all calls in an array on the function (as arrays of arguments
+// followed by result).
+function makeRecorder(func, testName) {
+ var record = [];
+ var f = function recorder(a, b, i, s) {
+ assertEquals(4, arguments.length,
+ testName + "(number of arguments: " + arguments.length + ")");
+ assertEquals("number", typeof(i), testName + "(index must be number)");
+ assertEquals(s[i], b, testName + "(current argument is at index)");
+ if (record.length > 0) {
+ var prevRecord = record[record.length - 1];
+ var prevResult = prevRecord[prevRecord.length - 1];
+ assertEquals(prevResult, a,
+ testName + "(prev result -> current input)");
+ }
+ var args = [clone(a), clone(b), i, clone(s)];
+ var result = func.apply(this, arguments);
+ args.push(clone(result));
+ record.push(args);
+ return result;
+ };
+ f.record = record;
+ return f;
+}
+
+
+function testReduce(type,
+ testName,
+ expectedResult,
+ expectedCalls,
+ array,
+ combine,
+ init) {
+ var rec = makeRecorder(combine);
+ var result;
+ if (arguments.length > 6) {
+ result = array[type](rec, init);
+ } else {
+ result = array[type](rec);
+ }
+ var calls = rec.record;
+ assertEquals(expectedCalls.length, calls.length,
+ testName + " (number of calls)");
+ for (var i = 0; i < expectedCalls.length; i++) {
+ assertEquals(expectedCalls[i], calls[i],
+ testName + " (call " + (i + 1) + ")");
+ }
+ assertEquals(expectedResult, result, testName + " (result)");
+}
+
+
+function sum(a, b) { return Number(a) + Number(b); }
+function prod(a, b) { return Number(a) * Number(b); }
+function dec(a, b, i, arr) { return Number(a) + Number(b) * Math.pow(10, arr.length - i - 1); }
+function accumulate(acc, elem, i) { acc[i] = elem; return acc; }
+
+// ---- Test Reduce[Left]
+
+var simpleArray = ['2',4,6];
+Object.seal(simpleArray);
+
+testReduce("reduce", "SimpleReduceSum", 12,
+ [[0, '2', 0, simpleArray, 2],
+ [2, 4, 1, simpleArray, 6],
+ [6, 6, 2, simpleArray, 12]],
+ simpleArray, sum, 0);
+
+testReduce("reduce", "SimpleReduceProd", 48,
+ [[1, '2', 0, simpleArray, 2],
+ [2, 4, 1, simpleArray, 8],
+ [8, 6, 2, simpleArray, 48]],
+ simpleArray, prod, 1);
+
+testReduce("reduce", "SimpleReduceDec", 246,
+ [[0, '2', 0, simpleArray, 200],
+ [200, 4, 1, simpleArray, 240],
+ [240, 6, 2, simpleArray, 246]],
+ simpleArray, dec, 0);
+
+testReduce("reduce", "SimpleReduceAccumulate", simpleArray,
+ [[[], '2', 0, simpleArray, ['2']],
+ [['2'], 4, 1, simpleArray, ['2', 4]],
+ [['2', 4], 6, 2, simpleArray, simpleArray]],
+ simpleArray, accumulate, []);
+
+var emptyArray = [];
+Object.seal(emptyArray);
+
+testReduce("reduce", "EmptyReduceSum", 0, [], emptyArray, sum, 0);
+testReduce("reduce", "EmptyReduceProd", 1, [], emptyArray, prod, 1);
+testReduce("reduce", "EmptyReduceDec", 0, [], emptyArray, dec, 0);
+testReduce("reduce", "EmptyReduceAccumulate", [], [], emptyArray, accumulate, []);
+
+testReduce("reduce", "EmptyReduceSumNoInit", 0, emptyArray, [0], sum);
+testReduce("reduce", "EmptyReduceProdNoInit", 1, emptyArray, [1], prod);
+testReduce("reduce", "EmptyReduceDecNoInit", 0, emptyArray, [0], dec);
+testReduce("reduce", "EmptyReduceAccumulateNoInit", [], emptyArray, [[]], accumulate);
+
+
+var simpleSparseArray = [,,,'2',,4,,6,,];
+Object.seal(simpleSparseArray);
+
+testReduce("reduce", "SimpleSparseReduceSum", 12,
+ [[0, '2', 3, simpleSparseArray, 2],
+ [2, 4, 5, simpleSparseArray, 6],
+ [6, 6, 7, simpleSparseArray, 12]],
+ simpleSparseArray, sum, 0);
+
+testReduce("reduce", "SimpleSparseReduceProd", 48,
+ [[1, '2', 3, simpleSparseArray, 2],
+ [2, 4, 5, simpleSparseArray, 8],
+ [8, 6, 7, simpleSparseArray, 48]],
+ simpleSparseArray, prod, 1);
+
+testReduce("reduce", "SimpleSparseReduceDec", 204060,
+ [[0, '2', 3, simpleSparseArray, 200000],
+ [200000, 4, 5, simpleSparseArray, 204000],
+ [204000, 6, 7, simpleSparseArray, 204060]],
+ simpleSparseArray, dec, 0);
+
+testReduce("reduce", "SimpleSparseReduceAccumulate", [,,,'2',,4,,6],
+ [[[], '2', 3, simpleSparseArray, [,,,'2']],
+ [[,,,'2'], 4, 5, simpleSparseArray, [,,,'2',,4]],
+ [[,,,'2',,4], 6, 7, simpleSparseArray, [,,,'2',,4,,6]]],
+ simpleSparseArray, accumulate, []);
+
+
+testReduce("reduce", "EmptySparseReduceSumNoInit", 0, [], [,,0,,], sum);
+testReduce("reduce", "EmptySparseReduceProdNoInit", 1, [], [,,1,,], prod);
+testReduce("reduce", "EmptySparseReduceDecNoInit", 0, [], [,,0,,], dec);
+testReduce("reduce", "EmptySparseReduceAccumulateNoInit",
+ [], [], [,,[],,], accumulate);
+
+
+var verySparseArray = [];
+verySparseArray.length = 10000;
+verySparseArray[2000] = '2';
+verySparseArray[5000] = 4;
+verySparseArray[9000] = 6;
+var verySparseSlice2 = verySparseArray.slice(0, 2001);
+var verySparseSlice4 = verySparseArray.slice(0, 5001);
+var verySparseSlice6 = verySparseArray.slice(0, 9001);
+Object.seal(verySparseArray);
+
+testReduce("reduce", "VerySparseReduceSum", 12,
+ [[0, '2', 2000, verySparseArray, 2],
+ [2, 4, 5000, verySparseArray, 6],
+ [6, 6, 9000, verySparseArray, 12]],
+ verySparseArray, sum, 0);
+
+testReduce("reduce", "VerySparseReduceProd", 48,
+ [[1, '2', 2000, verySparseArray, 2],
+ [2, 4, 5000, verySparseArray, 8],
+ [8, 6, 9000, verySparseArray, 48]],
+ verySparseArray, prod, 1);
+
+testReduce("reduce", "VerySparseReduceDec", Infinity,
+ [[0, '2', 2000, verySparseArray, Infinity],
+ [Infinity, 4, 5000, verySparseArray, Infinity],
+ [Infinity, 6, 9000, verySparseArray, Infinity]],
+ verySparseArray, dec, 0);
+
+testReduce("reduce", "VerySparseReduceAccumulate",
+ verySparseSlice6,
+ [[[], '2', 2000, verySparseArray, verySparseSlice2],
+ [verySparseSlice2, 4, 5000, verySparseArray, verySparseSlice4],
+ [verySparseSlice4, 6, 9000, verySparseArray, verySparseSlice6]],
+ verySparseArray, accumulate, []);
+
+
+testReduce("reduce", "VerySparseReduceSumNoInit", 12,
+ [['2', 4, 5000, verySparseArray, 6],
+ [6, 6, 9000, verySparseArray, 12]],
+ verySparseArray, sum);
+
+testReduce("reduce", "VerySparseReduceProdNoInit", 48,
+ [['2', 4, 5000, verySparseArray, 8],
+ [8, 6, 9000, verySparseArray, 48]],
+ verySparseArray, prod);
+
+testReduce("reduce", "VerySparseReduceDecNoInit", Infinity,
+ [['2', 4, 5000, verySparseArray, Infinity],
+ [Infinity, 6, 9000, verySparseArray, Infinity]],
+ verySparseArray, dec);
+
+testReduce("reduce", "SimpleSparseReduceAccumulateNoInit",
+ '2',
+ [['2', 4, 5000, verySparseArray, '2'],
+ ['2', 6, 9000, verySparseArray, '2']],
+ verySparseArray, accumulate);
+
+
+// ---- Test ReduceRight
+
+testReduce("reduceRight", "SimpleReduceRightSum", 12,
+ [[0, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 10],
+ [10, '2', 0, simpleArray, 12]],
+ simpleArray, sum, 0);
+
+testReduce("reduceRight", "SimpleReduceRightProd", 48,
+ [[1, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 24],
+ [24, '2', 0, simpleArray, 48]],
+ simpleArray, prod, 1);
+
+testReduce("reduceRight", "SimpleReduceRightDec", 246,
+ [[0, 6, 2, simpleArray, 6],
+ [6, 4, 1, simpleArray, 46],
+ [46, '2', 0, simpleArray, 246]],
+ simpleArray, dec, 0);
+
+testReduce("reduceRight", "SimpleReduceRightAccumulate", simpleArray,
+ [[[], 6, 2, simpleArray, [,,6]],
+ [[,,6], 4, 1, simpleArray, [,4,6]],
+ [[,4,6], '2', 0, simpleArray, simpleArray]],
+ simpleArray, accumulate, []);
+
+
+testReduce("reduceRight", "EmptyReduceRightSum", 0, [], [], sum, 0);
+testReduce("reduceRight", "EmptyReduceRightProd", 1, [], [], prod, 1);
+testReduce("reduceRight", "EmptyReduceRightDec", 0, [], [], dec, 0);
+testReduce("reduceRight", "EmptyReduceRightAccumulate", [],
+ [], [], accumulate, []);
+
+testReduce("reduceRight", "EmptyReduceRightSumNoInit", 0, [], [0], sum);
+testReduce("reduceRight", "EmptyReduceRightProdNoInit", 1, [], [1], prod);
+testReduce("reduceRight", "EmptyReduceRightDecNoInit", 0, [], [0], dec);
+testReduce("reduceRight", "EmptyReduceRightAccumulateNoInit",
+ [], [], [[]], accumulate);
+
+
+testReduce("reduceRight", "SimpleSparseReduceRightSum", 12,
+ [[0, 6, 7, simpleSparseArray, 6],
+ [6, 4, 5, simpleSparseArray, 10],
+ [10, '2', 3, simpleSparseArray, 12]],
+ simpleSparseArray, sum, 0);
+
+testReduce("reduceRight", "SimpleSparseReduceRightProd", 48,
+ [[1, 6, 7, simpleSparseArray, 6],
+ [6, 4, 5, simpleSparseArray, 24],
+ [24, '2', 3, simpleSparseArray, 48]],
+ simpleSparseArray, prod, 1);
+
+testReduce("reduceRight", "SimpleSparseReduceRightDec", 204060,
+ [[0, 6, 7, simpleSparseArray, 60],
+ [60, 4, 5, simpleSparseArray, 4060],
+ [4060, '2', 3, simpleSparseArray, 204060]],
+ simpleSparseArray, dec, 0);
+
+testReduce("reduceRight", "SimpleSparseReduceRightAccumulate", [,,,'2',,4,,6],
+ [[[], 6, 7, simpleSparseArray, [,,,,,,,6]],
+ [[,,,,,,,6], 4, 5, simpleSparseArray, [,,,,,4,,6]],
+ [[,,,,,4,,6], '2', 3, simpleSparseArray, [,,,'2',,4,,6]]],
+ simpleSparseArray, accumulate, []);
+
+
+testReduce("reduceRight", "EmptySparseReduceRightSumNoInit",
+ 0, [], [,,0,,], sum);
+testReduce("reduceRight", "EmptySparseReduceRightProdNoInit",
+ 1, [], [,,1,,], prod);
+testReduce("reduceRight", "EmptySparseReduceRightDecNoInit",
+ 0, [], [,,0,,], dec);
+testReduce("reduceRight", "EmptySparseReduceRightAccumulateNoInit",
+ [], [], [,,[],,], accumulate);
+
+
+var verySparseSuffix6 = [];
+verySparseSuffix6[9000] = 6;
+var verySparseSuffix4 = [];
+verySparseSuffix4[5000] = 4;
+verySparseSuffix4[9000] = 6;
+var verySparseSuffix2 = verySparseSlice6;
+
+
+testReduce("reduceRight", "VerySparseReduceRightSum", 12,
+ [[0, 6, 9000, verySparseArray, 6],
+ [6, 4, 5000, verySparseArray, 10],
+ [10, '2', 2000, verySparseArray, 12]],
+ verySparseArray, sum, 0);
+
+testReduce("reduceRight", "VerySparseReduceRightProd", 48,
+ [[1, 6, 9000, verySparseArray, 6],
+ [6, 4, 5000, verySparseArray, 24],
+ [24, '2', 2000, verySparseArray, 48]],
+ verySparseArray, prod, 1);
+
+testReduce("reduceRight", "VerySparseReduceRightDec", Infinity,
+ [[0, 6, 9000, verySparseArray, Infinity],
+ [Infinity, 4, 5000, verySparseArray, Infinity],
+ [Infinity, '2', 2000, verySparseArray, Infinity]],
+ verySparseArray, dec, 0);
+
+testReduce("reduceRight", "VerySparseReduceRightAccumulate",
+ verySparseSuffix2,
+ [[[], 6, 9000, verySparseArray, verySparseSuffix6],
+ [verySparseSuffix6, 4, 5000, verySparseArray, verySparseSuffix4],
+ [verySparseSuffix4, '2', 2000, verySparseArray, verySparseSuffix2]],
+ verySparseArray, accumulate, []);
+
+
+testReduce("reduceRight", "VerySparseReduceRightSumNoInit", 12,
+ [[6, 4, 5000, verySparseArray, 10],
+ [10, '2', 2000, verySparseArray, 12]],
+ verySparseArray, sum);
+
+testReduce("reduceRight", "VerySparseReduceRightProdNoInit", 48,
+ [[6, 4, 5000, verySparseArray, 24],
+ [24, '2', 2000, verySparseArray, 48]],
+ verySparseArray, prod);
+
+testReduce("reduceRight", "VerySparseReduceRightDecNoInit", Infinity,
+ [[6, 4, 5000, verySparseArray, Infinity],
+ [Infinity, '2', 2000, verySparseArray, Infinity]],
+ verySparseArray, dec);
+
+testReduce("reduceRight", "SimpleSparseReduceRightAccumulateNoInit",
+ 6,
+ [[6, 4, 5000, verySparseArray, 6],
+ [6, '2', 2000, verySparseArray, 6]],
+ verySparseArray, accumulate);
+
+
+// undefined is an element
+var undefArray = [,,undefined,,undefined,,];
+Object.seal(undefArray);
+
+testReduce("reduce", "SparseUndefinedReduceAdd", NaN,
+ [[0, undefined, 2, undefArray, NaN],
+ [NaN, undefined, 4, undefArray, NaN],
+ ],
+ undefArray, sum, 0);
+
+testReduce("reduceRight", "SparseUndefinedReduceRightAdd", NaN,
+ [[0, undefined, 4, undefArray, NaN],
+ [NaN, undefined, 2, undefArray, NaN],
+ ], undefArray, sum, 0);
+
+testReduce("reduce", "SparseUndefinedReduceAddNoInit", NaN,
+ [[undefined, undefined, 4, undefArray, NaN],
+ ], undefArray, sum);
+
+testReduce("reduceRight", "SparseUndefinedReduceRightAddNoInit", NaN,
+ [[undefined, undefined, 2, undefArray, NaN],
+ ], undefArray, sum);
+
+
+// Ignore non-array properties:
+
+var arrayPlus = [1,'2',,3];
+arrayPlus[-1] = NaN;
+arrayPlus[Math.pow(2,32)] = NaN;
+arrayPlus[NaN] = NaN;
+arrayPlus["00"] = NaN;
+arrayPlus["02"] = NaN;
+arrayPlus["-0"] = NaN;
+Object.seal(arrayPlus);
+
+testReduce("reduce", "ArrayWithNonElementPropertiesReduce", 6,
+ [[0, 1, 0, arrayPlus, 1],
+ [1, '2', 1, arrayPlus, 3],
+ [3, 3, 3, arrayPlus, 6],
+ ], arrayPlus, sum, 0);
+
+testReduce("reduceRight", "ArrayWithNonElementPropertiesReduceRight", 6,
+ [[0, 3, 3, arrayPlus, 3],
+ [3, '2', 1, arrayPlus, 5],
+ [5, 1, 0, arrayPlus, 6],
+ ], arrayPlus, sum, 0);
+
+// Test passing undefined as initial value (to test missing parameter
+// detection).
+Object.seal(['1']).reduce((a, b) => { assertEquals(a, undefined); assertEquals(b, '1') },
+ undefined);
+Object.seal(['1', 2]).reduce((a, b) => { assertEquals(a, '1'); assertEquals(b, 2); });
+Object.seal(['1']).reduce((a, b) => { assertTrue(false); });
+
+// Test error conditions:
+
+var exception = false;
+try {
+ Object.seal(['1']).reduce("not a function");
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce callback not a function not throwing TypeError");
+ assertTrue(e.message.indexOf(" is not a function") >= 0,
+ "reduce non function TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.seal(['1']).reduceRight("not a function");
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight callback not a function not throwing TypeError");
+ assertTrue(e.message.indexOf(" is not a function") >= 0,
+ "reduceRight non function TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.seal([]).reduce(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduce no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.seal([]).reduceRight(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduceRight no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.seal([,,,]).reduce(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduce sparse no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduce no initial TypeError type");
+}
+assertTrue(exception);
+
+exception = false;
+try {
+ Object.seal([,,,]).reduceRight(sum);
+} catch (e) {
+ exception = true;
+ assertTrue(e instanceof TypeError,
+ "reduceRight sparse no initial value not throwing TypeError");
+ assertEquals("Reduce of empty array with no initial value", e.message,
+ "reduceRight no initial TypeError type");
+}
+assertTrue(exception);
+
+
+// Array changing length
+
+function extender(a, b, i, s) {
+ s[s.length] = s.length;
+ return Number(a) + Number(b);
+}
+
+var arr = [1, '2', 3, 4];
+Object.seal(arr);
+testReduce("reduce", "ArrayManipulationExtender", 10,
+ [[0, 1, 0, [1, '2', 3, 4], 1],
+ [1, '2', 1, [1, '2', 3, 4], 3],
+ [3, 3, 2, [1, '2', 3, 4], 6],
+ [6, 4, 3, [1, '2', 3, 4], 10],
+ ], arr, extender, 0);
+
+var arr = [];
+Object.defineProperty(arr, "0", { get: function() { delete this[0] },
+ configurable: true });
+assertEquals(undefined, Object.seal(arr).reduce(function(val) { return val }));
+
+var arr = [];
+Object.defineProperty(arr, "0", { get: function() { delete this[0] },
+ configurable: true});
+assertEquals(undefined, Object.seal(arr).reduceRight(function(val) { return val }));
+
+
+(function ReduceRightMaxIndex() {
+ const kMaxIndex = 0xffffffff-1;
+ let array = [];
+ array[kMaxIndex-2] = 'value-2';
+ array[kMaxIndex-1] = 'value-1';
+ // Use the maximum array index possible.
+ array[kMaxIndex] = 'value';
+ // Add the next index which is a normal property and thus will not show up.
+ array[kMaxIndex+1] = 'normal property';
+ assertThrowsEquals( () => {
+ Object.seal(array).reduceRight((sum, value) => {
+ assertEquals('initial', sum);
+ assertEquals('value', value);
+ // Throw at this point as we would very slowly loop down from kMaxIndex.
+ throw 'do not continue';
+ }, 'initial')
+ }, 'do not continue');
+})();
+
+(function OptimizedReduce() {
+ let f = (a,current) => a + Number(current);
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.seal(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEmpty() {
+ let f = (a,current) => a + Number(current);
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.seal(a);
+ g(a); g(a); g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ assertThrows(() => g([]));
+ assertUnoptimized(g);
+})();
+
+(function OptimizedReduceLazyDeopt() {
+ let deopt = false;
+ let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + Number(current); };
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.seal(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceLazyDeoptMiddleOfIteration() {
+ let deopt = false;
+ let f = (a,current) => {
+ if (current == 6 && deopt) %DeoptimizeNow();
+ return a + Number(current);
+ };
+ let g = function(a) {
+ return a.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [11,'22',33,45,56,6,77,84,93,101];
+ Object.seal(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIteration() {
+ let deopt = false;
+ let array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.seal(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[0] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ assertOptimized(g);
+ %PrepareFunctionForOptimization(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.seal(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIterationHoley() {
+ let deopt = false;
+ let array = [, ,11,'22',,33,45,56,,6,77,84,93,101,];
+ Object.seal(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[0] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ assertOptimized(g);
+ %PrepareFunctionForOptimization(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.seal(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertUnoptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertUnoptimized(g);
+})();
+
+(function TriggerReduceRightPreLoopDeopt() {
+ function f(a) {
+ a.reduceRight((x) => { return Number(x) + 1 });
+ };
+ %PrepareFunctionForOptimization(f);
+ var arr = Object.seal([1, '2', ]);
+ f(arr);
+ f(arr);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(() => f([]), TypeError);
+ assertUnoptimized(f);
+})();
+
+(function OptimizedReduceRightEagerDeoptMiddleOfIterationHoley() {
+ let deopt = false;
+ let array = [, ,11,'22',,33,45,56,,6,77,84,93,101,];
+ Object.seal(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[array.length-1] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ g();
+ assertOptimized(g);
+ %PrepareFunctionForOptimization(g);
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.seal(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertUnoptimized(g);
+ deopt = true;
+ assertEquals(total, g());
+ assertUnoptimized(g);
+})();
+
+(function ReduceCatch() {
+ let f = (a,current) => {
+ return a + current;
+ };
+ let g = function() {
+ try {
+ return Object.seal(array).reduce(f);
+ } catch (e) {
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ %PrepareFunctionForOptimization(g);
+ %OptimizeFunctionOnNextCall(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ %PrepareFunctionForOptimization(g);
+ done = false;
+ %OptimizeFunctionOnNextCall(g);
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinally() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyNoInline() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1, '2', 3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceNonCallableOpt() {
+ let done = false;
+ let f = (a, current) => {
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ return array.reduce(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g(); g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ f = null;
+ assertThrows(() => g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatchInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,2,3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduce(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceRight() {
+ let count = 0;
+ let f = (a,current,i) => a + Number(current) * ++count;
+ let g = function(a) {
+ count = 0;
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.seal(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEmpty() {
+ let count = 0;
+ let f = (a,current,i) => a + Number(current) * ++count;
+ let g = function(a) {
+ count = 0;
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.seal(a);
+ g(a); g(a); g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ assertOptimized(g);
+ assertThrows(() => g([]));
+ assertUnoptimized(g);
+})();
+
+(function OptimizedReduceLazyDeopt() {
+ let deopt = false;
+ let f = (a,current) => { if (deopt) %DeoptimizeNow(); return a + Number(current); };
+ let g = function(a) {
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [1,'2',3,4,5,6,7,8,9,10];
+ Object.seal(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceLazyDeoptMiddleOfIteration() {
+ let deopt = false;
+ let f = (a,current) => {
+ if (current == 6 && deopt) %DeoptimizeNow();
+ return a + Number(current);
+ };
+ let g = function(a) {
+ return a.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ let a = [11,'22',33,45,56,6,77,84,93,101];
+ Object.seal(a);
+ g(a); g(a);
+ let total = g(a);
+ %OptimizeFunctionOnNextCall(g);
+ g(a);
+ deopt = true;
+ assertEquals(total, g(a));
+ assertOptimized(g);
+})();
+
+(function OptimizedReduceEagerDeoptMiddleOfIteration() {
+ let deopt = false;
+ let array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.seal(array);
+ let f = (a,current) => {
+ if (current == 6 && deopt) {array[9] = 1.5; }
+ return a + Number(current);
+ };
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertOptimized(g);
+ deopt = true;
+ %PrepareFunctionForOptimization(g);
+ g();
+ deopt = false;
+ array = [11,'22',33,45,56,6,77,84,93,101];
+ Object.seal(array);
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ deopt = true;
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatch() {
+ let f = (a,current) => {
+ return a + Number(current);
+ };
+ let g = function() {
+ try {
+ return Object.seal(array).reduceRight(f);
+ } catch (e) {
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ g();
+ assertEquals(total, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceThrow() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinally() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ let array = [1, '2', 3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyNoInline() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) throw "x";
+ return a + Number(current);
+ };
+ %NeverOptimizeFunction(f);
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ assertOptimized(g);
+ done = true;
+ assertEquals(null, g());
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceNonCallableOpt() {
+ let done = false;
+ let f = (a, current) => {
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ return array.reduceRight(f);
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g(); g();
+ assertEquals(6, g());
+ f = null;
+ assertThrows(() => g());
+ assertOptimized(g);
+})();
+
+(function ReduceCatchInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceFinallyInlineDeopt() {
+ let done = false;
+ let f = (a, current) => {
+ if (done) {
+ %DeoptimizeNow();
+ throw "x";
+ }
+ return a + Number(current);
+ };
+ let array = [1,'2',3];
+ Object.seal(array);
+ let g = function() {
+ try {
+ return array.reduceRight(f);
+ } catch (e) {
+ } finally {
+ if (done) return null;
+ }
+ };
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ let total = g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+ done = false;
+ %PrepareFunctionForOptimization(g);
+ g(); g();
+ %OptimizeFunctionOnNextCall(g);
+ g();
+ assertEquals(6, g());
+ done = true;
+ assertEquals(null, g());
+ assertOptimized(g);
+})();
+
+(function ReduceHoleyArrayWithDefaultAccumulator() {
+ var holey = new Array(10);
+ Object.seal(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return currentValue;
+ };
+ return a.reduce(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(13, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceRightHoleyArrayWithDefaultAccumulator() {
+ var holey = new Array(10);
+ Object.seal(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return currentValue;
+ };
+ return a.reduceRight(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ assertEquals(13, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(13, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceHoleyArrayOneElementWithDefaultAccumulator() {
+ var holey = new Array(10);
+ holey[1] = '5';
+ Object.seal(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return Number(currentValue) + accumulator;
+ };
+ return a.reduce(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(18, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceRightHoleyArrayOneElementWithDefaultAccumulator() {
+ var holey = new Array(10);
+ holey[1] = '5';
+ Object.seal(holey);
+ function reduce(a) {
+ let callback = function(accumulator, currentValue) {
+ return Number(currentValue) + accumulator;
+ };
+ return a.reduceRight(callback, 13);
+ };
+ %PrepareFunctionForOptimization(reduce);
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ assertEquals(18, reduce(holey));
+ %OptimizeFunctionOnNextCall(reduce);
+ assertEquals(18, reduce(holey));
+ assertOptimized(reduce);
+})();
+
+(function ReduceMixedHoleyArrays() {
+ function r(a) {
+ return a.reduce((acc, i) => {acc[0]});
+ };
+
+ // Hold on to the objects, otherwise their maps might be garbage
+ // collected and {r} will get deoptmized before the {assertOptimized}.
+ const object1 = Object.seal([[0]]);
+ const object2 = Object.seal([0,,]);
+ const object3 = Object.seal([,0,0]);
+
+ %PrepareFunctionForOptimization(r);
+ assertEquals(r(object1), [0]);
+ assertEquals(r(object1), [0]);
+ assertEquals(r(object2), 0);
+ %OptimizeFunctionOnNextCall(r);
+ assertEquals(r(object3), undefined);
+ assertOptimized(r);
+})();
diff --git a/deps/v8/test/mjsunit/setters-on-elements.js b/deps/v8/test/mjsunit/setters-on-elements.js
index 48fa33b5fe..5ea0c930f4 100644
--- a/deps/v8/test/mjsunit/setters-on-elements.js
+++ b/deps/v8/test/mjsunit/setters-on-elements.js
@@ -55,6 +55,7 @@ if (standalone) {
optimize = empty_func;
clearFunctionTypeFeedback = empty_func;
deoptimizeFunction = empty_func;
+ prepareForOptimize = empty_func;
} else {
optimize = function(name) {
%OptimizeFunctionOnNextCall(name);
@@ -65,6 +66,9 @@ if (standalone) {
deoptimizeFunction = function(name) {
%DeoptimizeFunction(name);
}
+ prepareForOptimize = function(name) {
+ %PrepareFunctionForOptimization(name);
+ }
}
function base_setter_test(create_func, index, store_value) {
@@ -76,6 +80,7 @@ function base_setter_test(create_func, index, store_value) {
var ap = [];
ap.__defineSetter__(index, function() { calls++; });
+ prepareForOptimize(foo);
foo(a);
foo(a);
foo(a);
@@ -141,6 +146,7 @@ function base_setter_test(create_func, index, store_value) {
a = create_func();
ap2 = [];
a.__proto__ = ap2;
+ prepareForOptimize(foo);
foo(a);
foo(a);
foo(a);
@@ -161,6 +167,7 @@ function base_setter_test(create_func, index, store_value) {
a = create_func();
a.__proto__ = ap2;
bar = function(a) { a[index+1] = store_value; }
+ prepareForOptimize(bar);
bar(a);
bar(a);
bar(a); // store should be generic
diff --git a/deps/v8/test/mjsunit/smi-representation.js b/deps/v8/test/mjsunit/smi-representation.js
index fdb41db1c5..95580f5b84 100644
--- a/deps/v8/test/mjsunit/smi-representation.js
+++ b/deps/v8/test/mjsunit/smi-representation.js
@@ -40,8 +40,8 @@ function check_smi_repr(o, d1, d2) {
s = s + d;
o.smi = s;
return o;
-}
-
+};
+%PrepareFunctionForOptimization(check_smi_repr);
var test = smi_field();
check_smi_repr(smi_field(), 5, 3);
check_smi_repr(smi_field(), 6, 2);
@@ -50,7 +50,7 @@ var val = check_smi_repr(smi_field(), 8, 1);
assertTrue(%HaveSameMap(val, test));
function tagged_smi_field() {
- var o = {"tag":false};
+ var o = {'tag': false};
o.tag = 10;
return o;
}
@@ -59,8 +59,8 @@ function check_smi_repr_from_tagged(o, o2) {
var t = o2.tag;
o.smi = t;
return o;
-}
-
+};
+%PrepareFunctionForOptimization(check_smi_repr_from_tagged);
check_smi_repr_from_tagged(smi_field(), tagged_smi_field());
check_smi_repr_from_tagged(smi_field(), tagged_smi_field());
%OptimizeFunctionOnNextCall(check_smi_repr_from_tagged);
diff --git a/deps/v8/test/mjsunit/strict-mode-implicit-receiver.js b/deps/v8/test/mjsunit/strict-mode-implicit-receiver.js
index 8284eddc2d..4e57be0dca 100644
--- a/deps/v8/test/mjsunit/strict-mode-implicit-receiver.js
+++ b/deps/v8/test/mjsunit/strict-mode-implicit-receiver.js
@@ -114,6 +114,7 @@ function strict_return_receiver() {
function g() {
return strict_return_receiver();
}
+%PrepareFunctionForOptimization(g);
for (var i = 0; i < 5; i++) {
assertEquals(void 0, g());
@@ -129,6 +130,7 @@ function g2() {
o.f = strict_return_receiver;
return o.f();
}
+%PrepareFunctionForOptimization(g2);
for (var i = 0; i < 5; i++) {
assertTrue(typeof g2() == "object");
diff --git a/deps/v8/test/mjsunit/strict-mode-opt.js b/deps/v8/test/mjsunit/strict-mode-opt.js
index 5ca5c279e1..d6c03d233d 100644
--- a/deps/v8/test/mjsunit/strict-mode-opt.js
+++ b/deps/v8/test/mjsunit/strict-mode-opt.js
@@ -41,6 +41,7 @@ function strictToBeInlined(n) {
function nonstrictCallStrict(n) {
strictToBeInlined(n);
}
+%PrepareFunctionForOptimization(nonstrictCallStrict);
(function testInlineStrictInNonStrict() {
for (var i = 0; i <= MAX; i ++) {
@@ -67,11 +68,12 @@ function strictCallNonStrict(n) {
"use strict";
nonstrictToBeInlined(n);
}
+%PrepareFunctionForOptimization(strictCallNonStrict);
(function testInlineNonStrictInStrict() {
for (var i = 0; i <= MAX; i ++) {
try {
- if (i == MAX - 1) %OptimizeFunctionOnNextCall(nonstrictCallStrict);
+ if (i == MAX - 1) %OptimizeFunctionOnNextCall(strictCallNonStrict);
strictCallNonStrict(i);
} catch (e) {
fail("no exception", "exception");
@@ -87,6 +89,7 @@ function strictAssignToUndefined(n) {
global = "strict";
if (n == MAX) { undefined_variable_strict_2 = "value"; }
}
+%PrepareFunctionForOptimization(strictAssignToUndefined);
(function testOptimizeStrictAssignToUndefined() {
for (var i = 0; i <= MAX; i ++) {
diff --git a/deps/v8/test/mjsunit/string-charcodeat.js b/deps/v8/test/mjsunit/string-charcodeat.js
index 6031096e0b..f71b3ce66f 100644
--- a/deps/v8/test/mjsunit/string-charcodeat.js
+++ b/deps/v8/test/mjsunit/string-charcodeat.js
@@ -94,7 +94,9 @@ function Thing() {
function NotAString() {
var n = new Thing();
- n.toString = function() { return "Test"; };
+ n.toString = function() {
+ return 'Test';
+ };
n.charCodeAt = String.prototype.charCodeAt;
return n;
}
@@ -102,7 +104,9 @@ function NotAString() {
function NotAString16() {
var n = new Thing();
- n.toString = function() { return "Te\u1234t"; };
+ n.toString = function() {
+ return 'Te\u1234t';
+ };
n.charCodeAt = String.prototype.charCodeAt;
return n;
}
@@ -111,7 +115,7 @@ function NotAString16() {
function TestStringType(generator, sixteen) {
var g = generator;
var len = g().toString().length;
- var t = sixteen ? "t" : "f"
+ var t = sixteen ? 't' : 'f';
t += generator.name;
assertTrue(isNaN(g().charCodeAt(-1e19)), 1 + t);
assertTrue(isNaN(g().charCodeAt(-0x80000001)), 2 + t);
@@ -157,8 +161,8 @@ TestStringType(NotAString16, true);
function Flat16Optimized() {
var str = Flat16();
return str.charCodeAt(2);
-}
-
+};
+%PrepareFunctionForOptimization(Flat16Optimized);
assertEquals(0x1234, Flat16Optimized());
assertEquals(0x1234, Flat16Optimized());
%OptimizeFunctionOnNextCall(Flat16Optimized);
@@ -167,8 +171,8 @@ assertEquals(0x1234, Flat16Optimized());
function ConsNotSmiIndex() {
var str = Cons();
assertTrue(isNaN(str.charCodeAt(0x7fffffff)));
-}
-
+};
+%PrepareFunctionForOptimization(ConsNotSmiIndex);
for (var i = 0; i < 5; i++) {
ConsNotSmiIndex();
}
@@ -183,7 +187,9 @@ for (var i = 0; i != 10; i++) {
function StupidThing() {
// Doesn't return a string from toString!
- this.toString = function() { return 42; }
+ this.toString = function() {
+ return 42;
+ };
this.charCodeAt = String.prototype.charCodeAt;
}
@@ -203,10 +209,10 @@ medium += medium; // 128.
medium += medium; // 256.
var long = medium;
-long += long + long + long; // 1024.
-long += long + long + long; // 4096.
-long += long + long + long; // 16384.
-long += long + long + long; // 65536.
+long += long + long + long; // 1024.
+long += long + long + long; // 4096.
+long += long + long + long; // 16384.
+long += long + long + long; // 65536.
assertTrue(isNaN(medium.charCodeAt(-1)), 31);
assertEquals(49, medium.charCodeAt(0), 32);
@@ -232,8 +238,8 @@ function directlyOnPrototype() {
assertEquals(97, "a".x(0));
assertEquals(98, "b".x(0));
assertEquals(99, "c".x(0));
-}
-
+};
+%PrepareFunctionForOptimization(directlyOnPrototype);
for (var i = 0; i < 5; i++) {
directlyOnPrototype();
}
diff --git a/deps/v8/test/mjsunit/string-fromcharcode.js b/deps/v8/test/mjsunit/string-fromcharcode.js
index ac51682b72..b9f5eaeacf 100644
--- a/deps/v8/test/mjsunit/string-fromcharcode.js
+++ b/deps/v8/test/mjsunit/string-fromcharcode.js
@@ -42,8 +42,8 @@ function testCharCodeTruncation() {
}
assertEquals(String.fromCharCode(0xFFFF), String.fromCharCode(0xFFFFFFFF));
return result;
-}
-
+};
+%PrepareFunctionForOptimization(testCharCodeTruncation);
assertEquals(expected, testCharCodeTruncation());
assertEquals(expected, testCharCodeTruncation());
%OptimizeFunctionOnNextCall(testCharCodeTruncation);
@@ -51,12 +51,18 @@ assertEquals(expected, testCharCodeTruncation());
// Test various receivers and arguments passed to String.fromCharCode.
-Object.prototype.fromCharCode = function(x) { return this; };
+Object.prototype.fromCharCode = function(x) {
+ return this;
+};
var fcc = String.fromCharCode;
var fcc2 = fcc;
-function constFun(x) { return function(y) { return x; }; }
+function constFun(x) {
+ return function(y) {
+ return x;
+ };
+}
function test(num) {
assertEquals(" ", String.fromCharCode(0x20));
@@ -85,11 +91,10 @@ function test(num) {
assertEquals(" ", fcc(0x20, 0x20));
assertEquals(" ", fcc(0x20 + 0.5, 0x20));
- var receiver = (num < 5) ? String : (num < 9) ? "dummy" : 42;
- fcc2 = (num < 5) ? fcc
- : (num < 9) ? constFun(Object("dummy"))
- : constFun(Object(42));
- var expected = (num < 5) ? " " : (num < 9) ? Object("dummy") : Object(42);
+ var receiver = num < 5 ? String : num < 9 ? 'dummy' : 42;
+ fcc2 = num < 5 ? fcc :
+ num < 9 ? constFun(Object('dummy')) : constFun(Object(42));
+ var expected = num < 5 ? ' ' : num < 9 ? Object('dummy') : Object(42);
assertEquals(expected, receiver.fromCharCode(0x20));
assertEquals(expected, receiver.fromCharCode(0x20 - 0x10000));
assertEquals(expected, receiver.fromCharCode(0x20 + 0.5));
@@ -105,7 +110,10 @@ for (var i = 0; i < 10; i++) {
// Test the custom IC works correctly when the map changes.
for (var i = 0; i < 10; i++) {
- var expected = (i < 5) ? " " : 42;
- if (i == 5) String.fromCharCode = function() { return 42; };
+ var expected = i < 5 ? ' ' : 42;
+ if (i == 5)
+ String.fromCharCode = function() {
+ return 42;
+ };
assertEquals(expected, String.fromCharCode(0x20));
}
diff --git a/deps/v8/test/mjsunit/string-indexof-1.js b/deps/v8/test/mjsunit/string-indexof-1.js
index 0267b2c625..5a2043c39b 100644
--- a/deps/v8/test/mjsunit/string-indexof-1.js
+++ b/deps/v8/test/mjsunit/string-indexof-1.js
@@ -212,6 +212,7 @@ for (var lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) {
function f() {
return 'abc'.indexOf('a');
}
+ %PrepareFunctionForOptimization(f);
assertEquals(0, f());
assertEquals(0, f());
assertEquals(0, f());
@@ -221,6 +222,7 @@ for (var lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) {
function f2() {
return 'abc'.indexOf('a', 1);
}
+ %PrepareFunctionForOptimization(f2);
assertEquals(-1, f2());
assertEquals(-1, f2());
assertEquals(-1, f2());
@@ -230,6 +232,7 @@ for (var lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) {
function f3() {
return 'abc'.indexOf('a');
}
+ %PrepareFunctionForOptimization(f3);
assertEquals(0, f3());
assertEquals(0, f3());
assertEquals(0, f3());
@@ -239,6 +242,7 @@ for (var lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) {
function f4() {
return 'abcbc'.indexOf('bc', 2);
}
+ %PrepareFunctionForOptimization(f4);
assertEquals(3, f4());
assertEquals(3, f4());
assertEquals(3, f4());
@@ -248,6 +252,7 @@ for (var lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) {
function f5() {
return 'abcbc'.indexOf('b', -1);
}
+ %PrepareFunctionForOptimization(f5);
assertEquals(1, f5());
assertEquals(1, f5());
assertEquals(1, f5());
@@ -257,6 +262,7 @@ for (var lengthIndex = 0; lengthIndex < lengths.length; lengthIndex++) {
function f6() {
return 'abcbc'.indexOf('b', -10737418);
}
+ %PrepareFunctionForOptimization(f6);
assertEquals(1, f6());
assertEquals(1, f6());
assertEquals(1, f6());
diff --git a/deps/v8/test/mjsunit/string-slices.js b/deps/v8/test/mjsunit/string-slices.js
index 52f1506180..77b5de606a 100644
--- a/deps/v8/test/mjsunit/string-slices.js
+++ b/deps/v8/test/mjsunit/string-slices.js
@@ -35,17 +35,32 @@ assertEquals(s, s.substr(void 0));
assertEquals(s, s.substr(null));
assertEquals(s, s.substr(false));
assertEquals(s, s.substr(0.9));
-assertEquals(s, s.substr({ valueOf: function() { return 0; } }));
-assertEquals(s, s.substr({ toString: function() { return '0'; } }));
+assertEquals(s, s.substr({
+ valueOf: function() {
+ return 0;
+ }
+}));
+assertEquals(s, s.substr({
+ toString: function() {
+ return '0';
+ }
+}));
var s1 = s.substring(1);
assertEquals(s1, s.substr(1));
assertEquals(s1, s.substr('1'));
assertEquals(s1, s.substr(true));
assertEquals(s1, s.substr(1.1));
-assertEquals(s1, s.substr({ valueOf: function() { return 1; } }));
-assertEquals(s1, s.substr({ toString: function() { return '1'; } }));
-
+assertEquals(s1, s.substr({
+ valueOf: function() {
+ return 1;
+ }
+}));
+assertEquals(s1, s.substr({
+ toString: function() {
+ return '1';
+ }
+}));
assertEquals(s.substring(s.length - 1), s.substr(-1));
assertEquals(s.substring(s.length - 1), s.substr(-1.2));
@@ -73,16 +88,16 @@ for (var i = 0; i < 25; i++) {
/x/.exec(x); // Try to force a flatten.
for (var i = 5; i < 25; i++) {
for (var j = 0; j < 25; j++) {
- var z = x.substring(i, i+j);
+ var z = x.substring(i, i + j);
var w = Math.random() * 42; // Allocate something new in new-space.
assertEquals(j, z.length);
for (var k = 0; k < j; k++) {
- assertEquals(x.charAt(i+k), z.charAt(k));
+ assertEquals(x.charAt(i + k), z.charAt(k));
}
}
}
// Then two-byte strings.
-x = "UC16\u2028"; // Non-ascii char forces two-byte string.
+x = 'UC16\u2028'; // Non-ascii char forces two-byte string.
for (var i = 0; i < 25; i++) {
x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
}
@@ -93,7 +108,7 @@ for (var i = 5; i < 25; i++) {
var w = Math.random() * 42; // Allocate something new in new-space.
assertEquals(j, z.length);
for (var k = 0; k < j; k++) {
- assertEquals(x.charAt(i+k), z.charAt(k));
+ assertEquals(x.charAt(i + k), z.charAt(k));
}
}
}
@@ -105,7 +120,7 @@ var xl = x.length;
var cache = [];
for (var i = 0; i < 1000; i++) {
var z = x.substring(i % xl);
- assertEquals(xl - (i % xl), z.length);
+ assertEquals(xl - i % xl, z.length);
cache.push(z);
}
@@ -117,7 +132,7 @@ var xl = x.length;
var cache = [];
for (var i = 0; i < 1000; i++) {
var z = x.substring(i % xl);
- assertEquals(xl - (i % xl), z.length);
+ assertEquals(xl - i % xl, z.length);
cache.push(z);
}
@@ -135,28 +150,28 @@ for (var i = 63; i >= 0; i--) {
var z = cache.pop();
assertTrue(/\u2028123456789ABCDEF/.test(z));
assertEquals(xl - offset, z.length);
- assertEquals(x.charAt(i*(i+1)/2), z.charAt(0));
+ assertEquals(x.charAt(i * (i + 1) / 2), z.charAt(0));
offset -= i;
}
// Test charAt for different strings.
function f(s1, s2, s3, i) {
- assertEquals(String.fromCharCode(97+i%11), s1.charAt(i%11));
- assertEquals(String.fromCharCode(97+i%11), s2.charAt(i%11));
- assertEquals(String.fromCharCode(98+i%11), s3.charAt(i%11));
+ assertEquals(String.fromCharCode(97 + i % 11), s1.charAt(i % 11));
+ assertEquals(String.fromCharCode(97 + i % 11), s2.charAt(i % 11));
+ assertEquals(String.fromCharCode(98 + i % 11), s3.charAt(i % 11));
assertEquals(String.fromCharCode(101), s3.charAt(3));
}
flat = "abcdefghijkl12345";
cons = flat + flat.toUpperCase();
slice = "abcdefghijklmn12345".slice(1, -1);
-for ( var i = 0; i < 1000; i++) {
+for (var i = 0; i < 1000; i++) {
f(flat, cons, slice, i);
}
flat = "abcdefghijkl1\u20232345";
cons = flat + flat.toUpperCase();
slice = "abcdefghijklmn1\u20232345".slice(1, -1);
-for ( var i = 0; i < 1000; i++) {
+for (var i = 0; i < 1000; i++) {
f(flat, cons, slice, i);
}
@@ -180,35 +195,38 @@ assertEquals("c\u1234def", slice.substr(1, 5));
// Concatenate substrings.
var ascii = 'abcdefghijklmnop';
var utf = '\u03B1\u03B2\u03B3\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9\u03BA\u03BB';
-assertEquals("klmno", ascii.substring(10,15) + ascii.substring(16));
-assertEquals("\u03B4\u03B7", utf.substring(3,4) + utf.substring(6,7));
-assertEquals("klp", ascii.substring(10,12) + ascii.substring(15,16));
-assertEquals("\u03B1\u03B4\u03B5", utf.substring(0,1) + utf.substring(5,3));
+assertEquals('klmno', ascii.substring(10, 15) + ascii.substring(16));
+assertEquals('\u03B4\u03B7', utf.substring(3, 4) + utf.substring(6, 7));
+assertEquals('klp', ascii.substring(10, 12) + ascii.substring(15, 16));
+assertEquals('\u03B1\u03B4\u03B5', utf.substring(0, 1) + utf.substring(5, 3));
assertEquals("", ascii.substring(16) + utf.substring(16));
-assertEquals("bcdef\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9",
- ascii.substring(1,6) + utf.substring(3,9));
-assertEquals("\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9abcdefghijklmnop",
- utf.substring(3,9) + ascii);
-assertEquals("\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7",
- utf.substring(5,1) + utf.substring(3,7));
+assertEquals(
+ 'bcdef\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9',
+ ascii.substring(1, 6) + utf.substring(3, 9));
+assertEquals(
+ '\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9abcdefghijklmnop',
+ utf.substring(3, 9) + ascii);
+assertEquals(
+ '\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7',
+ utf.substring(5, 1) + utf.substring(3, 7));
// Externalizing strings.
var a = "internalized dummy";
a = "123456789" + "qwertyuiopasdfghjklzxcvbnm";
-var b = "23456789qwertyuiopasdfghjklzxcvbn"
-assertEquals(a.slice(1,-1), b);
+var b = '23456789qwertyuiopasdfghjklzxcvbn';
+assertEquals(a.slice(1, -1), b);
assertTrue(isOneByteString(a));
externalizeString(a, true);
assertFalse(isOneByteString(a));
-assertEquals(a.slice(1,-1), b);
+assertEquals(a.slice(1, -1), b);
assertTrue(/3456789qwe/.test(a));
assertEquals(5, a.indexOf("678"));
assertEquals("12345", a.split("6")[0]);
// Create a slice with an external string as parent string.
-var c = a.slice(1,-1);
+var c = a.slice(1, -1);
function test_crankshaft() {
for (var i = 0; i < 20; i++) {
@@ -219,8 +237,8 @@ function test_crankshaft() {
assertEquals(4, c.indexOf("678"));
assertEquals("2345", c.split("6")[0]);
}
-}
-
+};
+%PrepareFunctionForOptimization(test_crankshaft);
test_crankshaft();
%OptimizeFunctionOnNextCall(test_crankshaft);
test_crankshaft();
diff --git a/deps/v8/test/mjsunit/sum-0-plus-undefined-is-NaN.js b/deps/v8/test/mjsunit/sum-0-plus-undefined-is-NaN.js
index 5d662d1dc6..c820f49dd0 100644
--- a/deps/v8/test/mjsunit/sum-0-plus-undefined-is-NaN.js
+++ b/deps/v8/test/mjsunit/sum-0-plus-undefined-is-NaN.js
@@ -31,9 +31,12 @@
* @fileoverview Test addition of 0 and undefined.
*/
-function sum(a, b) { return a + b; }
-
+function sum(a, b) {
+ return a + b;
+};
+%PrepareFunctionForOptimization(sum);
function test(x, y, expectNaN) {
+ %PrepareFunctionForOptimization(sum);
for (var i = 0; i < 5; i++) {
assertEquals(expectNaN, isNaN(sum(x, y)));
}
diff --git a/deps/v8/test/mjsunit/switch-opt.js b/deps/v8/test/mjsunit/switch-opt.js
index c1d5d38374..875a8de5ab 100644
--- a/deps/v8/test/mjsunit/switch-opt.js
+++ b/deps/v8/test/mjsunit/switch-opt.js
@@ -61,6 +61,7 @@
break;
}
}
+ %PrepareFunctionForOptimization(f);
function assertResult(r, label, b1, b2, b3) {
f(label, b1, b2, b3);
@@ -94,30 +95,36 @@
assertEquals(9, x);
// Test deopt at the beginning of the case label evaluation.
+ %PrepareFunctionForOptimization(f);
assertResult([2], "ca", "deopt", true);
%OptimizeFunctionOnNextCall(f);
assertResult([4], "ca", "deopt", false);
assertEquals(10, x);
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
// Test deopt in the middle of the case label evaluation.
assertResult([2], "ac", true, "deopt");
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertResult([4], "ac", false, "deopt");
assertEquals(11, x);
// Test deopt in the default case.
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
print("here");
assertResult([4], 10000, false, false, "deopt");
assertEquals(12, x);
// Test deopt in the default case.
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertResult([4], 10000, false, false, "deopt");
assertEquals(13, x);
// Test deopt in x++ case.
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertResult([5], 13, false, false, "deopt");
assertEquals(14, x);
@@ -158,6 +165,7 @@
break;
}
}
+ %PrepareFunctionForOptimization(f);
function assertResult(r, label, b1, b2, b3) {
f(label, b1, b2, b3);
@@ -191,30 +199,36 @@
assertEquals(9, x);
// Test deopt at the beginning of the case label evaluation.
+ %PrepareFunctionForOptimization(f);
assertResult([2,3], "ca", "deopt", true);
%OptimizeFunctionOnNextCall(f);
assertResult([4,5], "ca", "deopt", false);
assertEquals(10, x);
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
// Test deopt in the middle of the case label evaluation.
assertResult([2,3], "ac", true, "deopt");
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertResult([4,5], "ac", false, "deopt");
assertEquals(11, x);
// Test deopt in the default case.
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
print("here");
assertResult([4,5], 10000, false, false, "deopt");
assertEquals(12, x);
// Test deopt in the default case.
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertResult([4,5], 10000, false, false, "deopt");
assertEquals(13, x);
// Test deopt in x++ case.
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
assertResult([5], 13, false, false, "deopt");
assertEquals(14, x);
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index a829779872..2112b1d0dd 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -43,7 +43,6 @@ except NameError: # Python 3
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
-MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
NO_HARNESS_PATTERN = re.compile(r"^// NO HARNESS$", flags=re.MULTILINE)
@@ -99,8 +98,7 @@ class TestCase(testcase.D8TestCase):
break
files = [ os.path.normpath(os.path.join(self.suite.root, '..', '..', f))
for f in files_list ]
- testfilename = os.path.join(self.suite.root,
- self.path + self._get_suffix())
+ testfilename = self._get_source_path()
if SELF_SCRIPT_PATTERN.search(source):
files = (
["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")] +
@@ -114,15 +112,10 @@ class TestCase(testcase.D8TestCase):
if self.suite.framework_name == 'num_fuzzer':
mjsunit_files.append(os.path.join(self.suite.root, "mjsunit_numfuzz.js"))
- files_suffix = []
- if MODULE_PATTERN.search(source):
- files_suffix.append("--module")
- files_suffix.append(testfilename)
-
self._source_files = files
self._source_flags = self._parse_source_flags(source)
self._mjsunit_files = mjsunit_files
- self._files_suffix = files_suffix
+ self._files_suffix = [testfilename]
self._env = self._parse_source_env(source)
def _parse_source_env(self, source):
@@ -151,7 +144,13 @@ class TestCase(testcase.D8TestCase):
return self._env
def _get_source_path(self):
- return os.path.join(self.suite.root, self.path + self._get_suffix())
+ base_path = os.path.join(self.suite.root, self.path)
+ # Try .js first, and fall back to .mjs.
+ # TODO(v8:9406): clean this up by never separating the path from
+ # the extension in the first place.
+ if os.path.exists(base_path + self._get_suffix()):
+ return base_path + self._get_suffix()
+ return base_path + '.mjs'
class TestCombiner(testsuite.TestCombiner):
diff --git a/deps/v8/test/mjsunit/thin-strings.js b/deps/v8/test/mjsunit/thin-strings.js
index c86764259a..0c50cf6971 100644
--- a/deps/v8/test/mjsunit/thin-strings.js
+++ b/deps/v8/test/mjsunit/thin-strings.js
@@ -22,17 +22,21 @@ function CheckCS() {
assertEquals("o", str.substring(1, 2));
assertEquals("f".charCodeAt(0), str.charCodeAt(0));
assertEquals("f", str.split(/oo/)[0]);
-}
+};
+%PrepareFunctionForOptimization(CheckCS);
CheckCS();
%OptimizeFunctionOnNextCall(CheckCS);
CheckCS();
function CheckTF() {
- try {} catch(e) {} // Turbofan.
+ try {
+ } catch (e) {
+ } // Turbofan.
assertEquals("o", str.substring(1, 2));
assertEquals("f".charCodeAt(0), str.charCodeAt(0));
assertEquals("f", str.split(/oo/)[0]);
-}
+};
+%PrepareFunctionForOptimization(CheckTF);
CheckTF();
%OptimizeFunctionOnNextCall(CheckTF);
CheckTF();
@@ -57,7 +61,8 @@ assertEquals("________", s.substring(0, 8));
function cc1(s) {
assertEquals(95, s.charCodeAt(0));
assertEquals(95, s.codePointAt(0));
-}
+};
+%PrepareFunctionForOptimization(cc1);
cc1(s);
cc1(s);
%OptimizeFunctionOnNextCall(cc1);
@@ -76,14 +81,15 @@ function get_sliced_thin_string(a, b) {
return slice;
}
-var t = get_sliced_thin_string("abcdefghijklmnopqrstuvwxyz",
- "abcdefghijklmnopqrstuvwxyz");
+var t = get_sliced_thin_string(
+ 'abcdefghijklmnopqrstuvwxyz', 'abcdefghijklmnopqrstuvwxyz');
assertEquals("abcdefghijklmnopqrst", decodeURI(t));
function cc2(s) {
assertEquals(97, s.charCodeAt(0));
assertEquals(97, s.codePointAt(0));
-}
+};
+%PrepareFunctionForOptimization(cc2);
cc2(t);
cc2(t);
%OptimizeFunctionOnNextCall(cc2);
diff --git a/deps/v8/test/mjsunit/tools/compiler-trace-flags.js b/deps/v8/test/mjsunit/tools/compiler-trace-flags.js
index 4a0432bebd..e0dfb37325 100644
--- a/deps/v8/test/mjsunit/tools/compiler-trace-flags.js
+++ b/deps/v8/test/mjsunit/tools/compiler-trace-flags.js
@@ -15,8 +15,8 @@
(function testOptimizedJS() {
function add(a, b) {
return a + b;
- }
-
+ };
+ %PrepareFunctionForOptimization(add);
add(21, 21);
%OptimizeFunctionOnNextCall(add);
add(20, 22);
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index 3247ddf145..07d0646b64 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -134,6 +134,47 @@
}
assertEquals(libc_ref_syms, libc_syms);
+ // Android library with zero length duplicates.
+ UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
+ this.symbols = [[
+ '00000000013a1088 0000000000000224 t v8::internal::interpreter::BytecodeGenerator::BytecodeGenerator(v8::internal::UnoptimizedCompilationInfo*)',
+ '00000000013a1088 0000000000000224 t v8::internal::interpreter::BytecodeGenerator::BytecodeGenerator(v8::internal::UnoptimizedCompilationInfo*)',
+ '00000000013a12ac t $x.4',
+ '00000000013a12ac 00000000000000d0 t v8::internal::interpreter::BytecodeGenerator::FinalizeBytecode(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Script>)',
+ '00000000013a137c t $x.5',
+ '00000000013a137c 0000000000000528 t v8::internal::interpreter::BytecodeGenerator::AllocateDeferredConstants(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Script>)',
+ '00000000013a1578 N $d.46',
+ '00000000013a18a4 t $x.6',
+ '00000000013a18a4 0000000000000 t v8::internal::interpreter::BytecodeGenerator::GlobalDeclarationsBuilder::AllocateDeclarations(v8::internal::UnoptimizedCompilationInfo*, v8::internal::Handle<v8::internal::Script>, v8::internal::Isolate*)',
+ '00000000013a19e0 t $x.7',
+ '00000000013a19e0 0000000000000244 t v8::internal::interpreter::BytecodeGenerator::GenerateBytecode(unsigned long)',
+ '00000000013a1a88 N $d.7',
+ '00000000013a1ac8 N $d.5',
+ '00000000013a1af8 N $d.35',
+ '00000000013a1c24 t $x.8',
+ '00000000013a1c24 000000000000009c t v8::internal::interpreter::BytecodeGenerator::ContextScope::ContextScope(v8::internal::interpreter::BytecodeGenerator*, v8::internal::Scope*)\n',
+ ].join('\n'), ''];
+ };
+ var android_prov = new UnixCppEntriesProvider();
+ var android_syms = [];
+ android_prov.parseVmSymbols('libmonochrome', 0xf7c5c000, 0xf9c5c000, 0,
+ function (name, start, end) {
+ android_syms.push(Array.prototype.slice.apply(arguments, [0]));
+ });
+ var android_ref_syms = [
+ ['v8::internal::interpreter::BytecodeGenerator::BytecodeGenerator(v8::internal::UnoptimizedCompilationInfo*)', 0x013a1088, 0x013a1088 + 0x224],
+ ['v8::internal::interpreter::BytecodeGenerator::FinalizeBytecode(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Script>)', 0x013a12ac, 0x013a12ac + 0xd0],
+ ['v8::internal::interpreter::BytecodeGenerator::AllocateDeferredConstants(v8::internal::Isolate*, v8::internal::Handle<v8::internal::Script>)', 0x013a137c, 0x013a137c + 0x528],
+ ['v8::internal::interpreter::BytecodeGenerator::GlobalDeclarationsBuilder::AllocateDeclarations(v8::internal::UnoptimizedCompilationInfo*, v8::internal::Handle<v8::internal::Script>, v8::internal::Isolate*)', 0x013a18a4, 0x013a18a4 + 0x13c],
+ ['v8::internal::interpreter::BytecodeGenerator::GenerateBytecode(unsigned long)', 0x013a19e0, 0x013a19e0 + 0x244],
+ ['v8::internal::interpreter::BytecodeGenerator::ContextScope::ContextScope(v8::internal::interpreter::BytecodeGenerator*, v8::internal::Scope*)', 0x013a1c24, 0x013a1c24 + 0x9c],
+ ];
+ for (var i = 0; i < android_ref_syms.length; ++i) {
+ android_ref_syms[i][1] += 0xf7c5c000;
+ android_ref_syms[i][2] += 0xf7c5c000;
+ }
+ assertEquals(android_ref_syms, android_syms);
+
UnixCppEntriesProvider.prototype.loadSymbols = oldLoadSymbols;
})();
diff --git a/deps/v8/test/mjsunit/transition-elements-kind.js b/deps/v8/test/mjsunit/transition-elements-kind.js
index 9acf52c0d2..edb5846058 100644
--- a/deps/v8/test/mjsunit/transition-elements-kind.js
+++ b/deps/v8/test/mjsunit/transition-elements-kind.js
@@ -35,9 +35,10 @@ var a = foo();
a[0] = 1.1;
// Emit a TransitionElementsKindStub which transitions from double to object.
-function store(a,x) {
+function store(a, x) {
a[0] = x;
-}
+};
+%PrepareFunctionForOptimization(store);
store([1.1], 'a');
store([1.1], 1.1);
%OptimizeFunctionOnNextCall(store);
diff --git a/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js b/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js
index ae590b6676..41ed3689ec 100644
--- a/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js
+++ b/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js
@@ -50,6 +50,7 @@ float_array[0] = 1e51;
function __f_14159(buffer) {
try { return buffer.getUint16(Infinity, true); } catch(e) { return 0; }
}
+ %PrepareFunctionForOptimization(__f_14159);
__f_14159(buffer);
%OptimizeFunctionOnNextCall(__f_14159);
__f_14159(buffer);
@@ -67,6 +68,7 @@ float_array[0] = 1e51;
}
}
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/unbox-double-field-indexed.js b/deps/v8/test/mjsunit/unbox-double-field-indexed.js
index 29dfc79205..6ddde73583 100644
--- a/deps/v8/test/mjsunit/unbox-double-field-indexed.js
+++ b/deps/v8/test/mjsunit/unbox-double-field-indexed.js
@@ -14,8 +14,8 @@ var g = new Foo(2.25);
function add(a, b) {
var name = "x";
return a[name] + b[name];
-}
-
+};
+%PrepareFunctionForOptimization(add);
assertEquals(3.5, add(f, g));
assertEquals(3.5, add(g, f));
%OptimizeFunctionOnNextCall(add);
diff --git a/deps/v8/test/mjsunit/unbox-double-field.js b/deps/v8/test/mjsunit/unbox-double-field.js
index 9fb5479be7..d0876a83d1 100644
--- a/deps/v8/test/mjsunit/unbox-double-field.js
+++ b/deps/v8/test/mjsunit/unbox-double-field.js
@@ -13,8 +13,8 @@ var g = new Foo(2.25);
function add(a, b) {
return a.x + b.x;
-}
-
+};
+%PrepareFunctionForOptimization(add);
assertEquals(3.5, add(f, g));
assertEquals(3.5, add(g, f));
%OptimizeFunctionOnNextCall(add);
diff --git a/deps/v8/test/mjsunit/unbox-smi-field-indexed.js b/deps/v8/test/mjsunit/unbox-smi-field-indexed.js
index 9e77da03ed..038ee0a9ea 100644
--- a/deps/v8/test/mjsunit/unbox-smi-field-indexed.js
+++ b/deps/v8/test/mjsunit/unbox-smi-field-indexed.js
@@ -14,8 +14,8 @@ var g = new Foo(2);
function add(a, b) {
var name = "x";
return a[name] + b[name];
-}
-
+};
+%PrepareFunctionForOptimization(add);
assertEquals(3, add(f, g));
assertEquals(3, add(g, f));
%OptimizeFunctionOnNextCall(add);
diff --git a/deps/v8/test/mjsunit/undetectable-compare.js b/deps/v8/test/mjsunit/undetectable-compare.js
index c78593439c..0ea79e3499 100644
--- a/deps/v8/test/mjsunit/undetectable-compare.js
+++ b/deps/v8/test/mjsunit/undetectable-compare.js
@@ -99,6 +99,7 @@ assertFalse(undetectable === %GetUndetectable());
function test2(a, b) {
return a == b;
}
+%PrepareFunctionForOptimization(test2);
test2(0, 1);
test2(undetectable, {});
%OptimizeFunctionOnNextCall(test2);
diff --git a/deps/v8/test/mjsunit/wasm/anyref-table.js b/deps/v8/test/mjsunit/wasm/anyref-table.js
index f4e82d32c8..de35dc8fb8 100644
--- a/deps/v8/test/mjsunit/wasm/anyref-table.js
+++ b/deps/v8/test/mjsunit/wasm/anyref-table.js
@@ -36,7 +36,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
const table_index = builder.addImportedTable("imp", "table", 3, 10, kWasmAnyRef);
builder.addFunction('get', kSig_r_v)
- .addBody([kExprI32Const, 0, kExprGetTable, table_index]);
+ .addBody([kExprI32Const, 0, kExprTableGet, table_index]);
let table_ref = new WebAssembly.Table({element: "anyref", initial: 3, maximum: 10});
builder.instantiate({imp:{table: table_ref}});
diff --git a/deps/v8/test/mjsunit/wasm/atomics-stress.js b/deps/v8/test/mjsunit/wasm/atomics-stress.js
index 3d360373f0..8622919043 100644
--- a/deps/v8/test/mjsunit/wasm/atomics-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics-stress.js
@@ -9,7 +9,7 @@
// Note that results of this test are flaky by design. While the test is
// deterministic with a fixed seed, bugs may introduce non-determinism.
-load("test/mjsunit/wasm/wasm-module-builder.js");
+load('test/mjsunit/wasm/wasm-module-builder.js');
const kDebug = false;
@@ -22,319 +22,287 @@ const kFirstOpcodeWithoutOutput = 3;
const kLastOpcodeWithoutOutput = 5;
const opCodes = [
- kExprI32AtomicLoad,
- kExprI32AtomicLoad8U,
- kExprI32AtomicLoad16U,
- kExprI32AtomicStore,
- kExprI32AtomicStore8U,
- kExprI32AtomicStore16U,
- kExprI32AtomicAdd,
- kExprI32AtomicAdd8U,
- kExprI32AtomicAdd16U,
- kExprI32AtomicSub,
- kExprI32AtomicSub8U,
- kExprI32AtomicSub16U,
- kExprI32AtomicAnd,
- kExprI32AtomicAnd8U,
- kExprI32AtomicAnd16U,
- kExprI32AtomicOr,
- kExprI32AtomicOr8U,
- kExprI32AtomicOr16U,
- kExprI32AtomicXor,
- kExprI32AtomicXor8U,
- kExprI32AtomicXor16U,
- kExprI32AtomicExchange,
- kExprI32AtomicExchange8U,
- kExprI32AtomicExchange16U
+ kExprI32AtomicLoad, kExprI32AtomicLoad8U, kExprI32AtomicLoad16U,
+ kExprI32AtomicStore, kExprI32AtomicStore8U, kExprI32AtomicStore16U,
+ kExprI32AtomicAdd, kExprI32AtomicAdd8U, kExprI32AtomicAdd16U,
+ kExprI32AtomicSub, kExprI32AtomicSub8U, kExprI32AtomicSub16U,
+ kExprI32AtomicAnd, kExprI32AtomicAnd8U, kExprI32AtomicAnd16U,
+ kExprI32AtomicOr, kExprI32AtomicOr8U, kExprI32AtomicOr16U,
+ kExprI32AtomicXor, kExprI32AtomicXor8U, kExprI32AtomicXor16U,
+ kExprI32AtomicExchange, kExprI32AtomicExchange8U, kExprI32AtomicExchange16U
];
const opCodeNames = [
- "kExprI32AtomicLoad",
- "kExprI32AtomicLoad8U",
- "kExprI32AtomicLoad16U",
- "kExprI32AtomicStore",
- "kExprI32AtomicStore8U",
- "kExprI32AtomicStore16U",
- "kExprI32AtomicAdd",
- "kExprI32AtomicAdd8U",
- "kExprI32AtomicAdd16U",
- "kExprI32AtomicSub",
- "kExprI32AtomicSub8U",
- "kExprI32AtomicSub16U",
- "kExprI32AtomicAnd",
- "kExprI32AtomicAnd8U",
- "kExprI32AtomicAnd16U",
- "kExprI32AtomicOr",
- "kExprI32AtomicOr8U",
- "kExprI32AtomicOr16U",
- "kExprI32AtomicXor",
- "kExprI32AtomicXor8U",
- "kExprI32AtomicXor16U",
- "kExprI32AtomicExchange",
- "kExprI32AtomicExchange8U",
- "kExprI32AtomicExchange16U"
+ 'kExprI32AtomicLoad', 'kExprI32AtomicLoad8U',
+ 'kExprI32AtomicLoad16U', 'kExprI32AtomicStore',
+ 'kExprI32AtomicStore8U', 'kExprI32AtomicStore16U',
+ 'kExprI32AtomicAdd', 'kExprI32AtomicAdd8U',
+ 'kExprI32AtomicAdd16U', 'kExprI32AtomicSub',
+ 'kExprI32AtomicSub8U', 'kExprI32AtomicSub16U',
+ 'kExprI32AtomicAnd', 'kExprI32AtomicAnd8U',
+ 'kExprI32AtomicAnd16U', 'kExprI32AtomicOr',
+ 'kExprI32AtomicOr8U', 'kExprI32AtomicOr16U',
+ 'kExprI32AtomicXor', 'kExprI32AtomicXor8U',
+ 'kExprI32AtomicXor16U', 'kExprI32AtomicExchange',
+ 'kExprI32AtomicExchange8U', 'kExprI32AtomicExchange16U'
];
-class Operation {
- constructor(opcode, input, offset) {
- this.opcode = opcode != undefined ? opcode : Operation.nextOpcode();
- this.size = Operation.opcodeToSize(this.opcode);
- this.input = input != undefined ? input : Operation.inputForSize(
- this.size);
- this.offset = offset != undefined ? offset : Operation.offsetForSize(
- this.size);
- }
-
- static nextOpcode() {
- let random = Math.random();
- return Math.floor(random * opCodes.length);
- }
-
- static opcodeToSize(opcode) {
- // Instructions are ordered in 32, 8, 16 bits size
- return [32, 8, 16][opcode % 3];
- }
-
- static opcodeToAlignment(opcode) {
- // Instructions are ordered in 32, 8, 16 bits size
- return [2, 0, 1][opcode % 3];
- }
-
- static inputForSize(size) {
- let random = Math.random();
- // Avoid 32 bit overflow for integer here :(
- return Math.floor(random * (1 << (size - 1)) * 2);
- }
-
- static offsetForSize(size) {
- // Pick an offset in bytes between 0 and 7.
- let offset = Math.floor(Math.random() * 8);
- // Make sure the offset matches the required alignment by masking out the lower bits.
- let size_in_bytes = size / 8;
- let mask = ~(size_in_bytes - 1);
- return offset & mask;
- }
-
- get wasmOpcode() {
- // [opcode, alignment, offset]
- return [opCodes[this.opcode], Operation.opcodeToAlignment(this.opcode), this.offset];
- }
-
- get hasInput() {
- return this.opcode >= kFirstOpcodeWithInput;
- }
-
- get hasOutput() {
- return this.opcode < kFirstOpcodeWithoutOutput || this.opcode >
- kLastOpcodeWithoutOutput;
- }
-
- truncateResultBits(low, high) {
- // Shift the lower part. For offsets greater four it drops out of the visible window.
- let shiftedL = this.offset >= 4 ? 0 : low >>> (this.offset * 8);
- // The higher part is zero for offset 0, left shifted for [1..3] and right shifted
- // for [4..7].
- let shiftedH = this.offset == 0 ? 0 :
- this.offset >= 4 ? high >>> (this.offset - 4) * 8 : high << ((4 -
- this.offset) * 8);
- let value = shiftedL | shiftedH;
-
- switch (this.size) {
- case 8:
- return value & 0xFF;
- case 16:
- return value & 0xFFFF;
- case 32:
- return value;
- default:
- throw "Unexpected size: " + this.size;
- }
- }
-
- static get builder() {
- if (!Operation.__builder) {
- let builder = new WasmModuleBuilder();
- builder.addMemory(1, 1, 1, false);
- builder.exportMemoryAs("mem");
- Operation.__builder = builder;
- }
- return Operation.__builder;
- }
+let kMaxMemPages = 10;
+let gSharedMemory =
+ new WebAssembly.Memory({initial: 1, maximum: kMaxMemPages, shared: true});
+let gSharedMemoryView = new Int32Array(gSharedMemory.buffer);
- static get exports() {
- if (!Operation.__instance) {
- return {};
- }
- return Operation.__instance.exports;
- }
+let gPrivateMemory =
+ new WebAssembly.Memory({initial: 1, maximum: kMaxMemPages, shared: true});
+let gPrivateMemoryView = new Int32Array(gPrivateMemory.buffer);
- static get memory() {
- return Operation.exports.mem;
- }
-
- static set instance(instance) {
- Operation.__instance = instance;
- }
-
- compute(state) {
- let evalFun = Operation.exports[this.key];
- if (!evalFun) {
- let builder = Operation.builder;
- let body = [
- // Load address of low 32 bits.
- kExprI32Const, 0,
- // Load expected value.
- kExprGetLocal, 0,
- kExprI32StoreMem, 2, 0,
- // Load address of high 32 bits.
- kExprI32Const, 4,
- // Load expected value.
- kExprGetLocal, 1,
- kExprI32StoreMem, 2, 0,
- // Load address of where our window starts.
- kExprI32Const, 0,
- // Load input if there is one.
- ...(this.hasInput ? [kExprGetLocal, 2] : []),
- // Perform operation.
- kAtomicPrefix, ...this.wasmOpcode,
- // Drop output if it had any.
- ...(this.hasOutput ? [kExprDrop] : []),
- // Load resulting value.
- kExprI32Const, 0,
- kExprI32LoadMem, 2, 0,
- // Return.
- kExprReturn
- ]
- builder.addFunction(this.key, kSig_i_iii)
- .addBody(body)
- .exportAs(this.key);
- // Instantiate module, get function exports.
- let module = new WebAssembly.Module(builder.toBuffer());
- Operation.instance = new WebAssembly.Instance(module);
- evalFun = Operation.exports[this.key];
- }
- let result = evalFun(state.low, state.high, this.input);
- let ta = new Int32Array(Operation.memory.buffer);
- if (kDebug) {
- print(state.high + ":" + state.low + " " + this.toString() +
- " -> " + ta[1] + ":" + ta[0]);
- }
- if (result != ta[0]) throw "!";
- return {
- low: ta[0],
- high: ta[1]
- };
- }
-
- toString() {
- return opCodeNames[this.opcode] + "[+" + this.offset + "] " + this.input;
- }
-
- get key() {
- return this.opcode + "-" + this.offset;
- }
+class Operation {
+ constructor(opcode, input, offset) {
+ this.opcode = opcode != undefined ? opcode : Operation.nextOpcode();
+ this.size = Operation.opcodeToSize(this.opcode);
+ this.input = input != undefined ? input : Operation.inputForSize(this.size);
+ this.offset =
+ offset != undefined ? offset : Operation.offsetForSize(this.size);
+ }
+
+ static nextOpcode() {
+ let random = Math.random();
+ return Math.floor(random * opCodes.length);
+ }
+
+ static opcodeToSize(opcode) {
+ // Instructions are ordered in 32, 8, 16 bits size
+ return [32, 8, 16][opcode % 3];
+ }
+
+ static opcodeToAlignment(opcode) {
+ // Instructions are ordered in 32, 8, 16 bits size
+ return [2, 0, 1][opcode % 3];
+ }
+
+ static inputForSize(size) {
+ let random = Math.random();
+ // Avoid 32 bit overflow for integer here :(
+ return Math.floor(random * (1 << (size - 1)) * 2);
+ }
+
+ static offsetForSize(size) {
+ // Pick an offset in bytes between 0 and 7.
+ let offset = Math.floor(Math.random() * 8);
+ // Make sure the offset matches the required alignment by masking out the
+ // lower bits.
+ let size_in_bytes = size / 8;
+ let mask = ~(size_in_bytes - 1);
+ return offset & mask;
+ }
+
+ get wasmOpcode() {
+ // [opcode, alignment, offset]
+ return [
+ opCodes[this.opcode], Operation.opcodeToAlignment(this.opcode),
+ this.offset
+ ];
+ }
+
+ get hasInput() {
+ return this.opcode >= kFirstOpcodeWithInput;
+ }
+
+ get hasOutput() {
+ return this.opcode < kFirstOpcodeWithoutOutput ||
+ this.opcode > kLastOpcodeWithoutOutput;
+ }
+
+ truncateResultBits(low, high) {
+ // Shift the lower part. For offsets greater four it drops out of the
+ // visible window.
+ let shiftedL = this.offset >= 4 ? 0 : low >>> (this.offset * 8);
+ // The higher part is zero for offset 0, left shifted for [1..3] and right
+ // shifted for [4..7].
+ let shiftedH = this.offset == 0 ?
+ 0 :
+ this.offset >= 4 ? high >>> (this.offset - 4) * 8 :
+ high << ((4 - this.offset) * 8);
+ let value = shiftedL | shiftedH;
+
+ switch (this.size) {
+ case 8:
+ return value & 0xFF;
+ case 16:
+ return value & 0xFFFF;
+ case 32:
+ return value;
+ default:
+ throw 'Unexpected size: ' + this.size;
+ }
+ }
+
+ static get builder() {
+ if (!Operation.__builder) {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory('m', 'imported_mem', 0, kMaxMemPages, 'shared');
+ Operation.__builder = builder;
+ }
+ return Operation.__builder;
+ }
+
+ static get exports() {
+ if (!Operation.__instance) {
+ return {};
+ }
+ return Operation.__instance.exports;
+ }
+
+ static set instance(instance) {
+ Operation.__instance = instance;
+ }
+
+ compute(state) {
+ let evalFun = Operation.exports[this.key];
+ if (!evalFun) {
+ let builder = Operation.builder;
+ let body = [
+ // Load address of low 32 bits.
+ kExprI32Const, 0,
+ // Load expected value.
+ kExprGetLocal, 0, kExprI32StoreMem, 2, 0,
+ // Load address of high 32 bits.
+ kExprI32Const, 4,
+ // Load expected value.
+ kExprGetLocal, 1, kExprI32StoreMem, 2, 0,
+ // Load address of where our window starts.
+ kExprI32Const, 0,
+ // Load input if there is one.
+ ...(this.hasInput ? [kExprGetLocal, 2] : []),
+ // Perform operation.
+ kAtomicPrefix, ...this.wasmOpcode,
+ // Drop output if it had any.
+ ...(this.hasOutput ? [kExprDrop] : []),
+ // Load resulting value.
+ kExprI32Const, 0, kExprI32LoadMem, 2, 0,
+ // Return.
+ kExprReturn
+ ]
+ builder.addFunction(this.key, kSig_i_iii)
+ .addBody(body)
+ .exportAs(this.key);
+ // Instantiate module, get function exports.
+ let module = new WebAssembly.Module(builder.toBuffer());
+ Operation.instance =
+ new WebAssembly.Instance(module, {m: {imported_mem: gPrivateMemory}});
+ evalFun = Operation.exports[this.key];
+ }
+ let result = evalFun(state.low, state.high, this.input);
+ let ta = gPrivateMemoryView;
+ if (kDebug) {
+ print(
+ state.high + ':' + state.low + ' ' + this.toString() + ' -> ' +
+ ta[1] + ':' + ta[0]);
+ }
+ if (result != ta[0]) throw '!';
+ return {low: ta[0], high: ta[1]};
+ }
+
+ toString() {
+ return opCodeNames[this.opcode] + '[+' + this.offset + '] ' + this.input;
+ }
+
+ get key() {
+ return this.opcode + '-' + this.offset;
+ }
}
class State {
- constructor(low, high, indices, count) {
- this.low = low;
- this.high = high;
- this.indices = indices;
- this.count = count;
- }
-
- isFinal() {
- return (this.count == kNumberOfWorker * kSequenceLength);
- }
-
- toString() {
- return this.high + ":" + this.low + " @ " + this.indices;
- }
+ constructor(low, high, indices, count) {
+ this.low = low;
+ this.high = high;
+ this.indices = indices;
+ this.count = count;
+ }
+
+ isFinal() {
+ return (this.count == kNumberOfWorker * kSequenceLength);
+ }
+
+ toString() {
+ return this.high + ':' + this.low + ' @ ' + this.indices;
+ }
}
function makeSequenceOfOperations(size) {
- let result = new Array(size);
- for (let i = 0; i < size; i++) {
- result[i] = new Operation();
- }
- return result;
+ let result = new Array(size);
+ for (let i = 0; i < size; i++) {
+ result[i] = new Operation();
+ }
+ return result;
}
function toSLeb128(val) {
- let result = [];
- while (true) {
- let v = val & 0x7f;
- val = val >> 7;
- let msbIsSet = (v & 0x40) || false;
- if (((val == 0) && !msbIsSet) || ((val == -1) && msbIsSet)) {
- result.push(v);
- break;
- }
- result.push(v | 0x80);
- }
- return result;
+ let result = [];
+ while (true) {
+ let v = val & 0x7f;
+ val = val >> 7;
+ let msbIsSet = (v & 0x40) || false;
+ if (((val == 0) && !msbIsSet) || ((val == -1) && msbIsSet)) {
+ result.push(v);
+ break;
+ }
+ result.push(v | 0x80);
+ }
+ return result;
}
function generateFunctionBodyForSequence(sequence) {
- // We expect the int32* to perform ops on as arg 0 and
- // the int32* for our value log as arg1. Argument 2 gives
- // an int32* we use to count down spinning workers.
- let body = [];
- // Initially, we spin until all workers start running.
- if (!kDebug) {
- body.push(
- // Decrement the wait count.
- kExprGetLocal, 2,
- kExprI32Const, 1,
- kAtomicPrefix, kExprI32AtomicSub, 2, 0,
- // Spin until zero.
- kExprLoop, kWasmStmt,
- kExprGetLocal, 2,
- kAtomicPrefix, kExprI32AtomicLoad, 2, 0,
- kExprI32Const, 0,
- kExprI32GtU,
- kExprBrIf, 0,
- kExprEnd
- );
- }
- for (let operation of sequence) {
- body.push(
- // Pre-load address of results sequence pointer for later.
- kExprGetLocal, 1,
- // Load address where atomic pointers are stored.
- kExprGetLocal, 0,
- // Load the second argument if it had any.
- ...(operation.hasInput ? [kExprI32Const, ...toSLeb128(operation
- .input)] : []),
- // Perform operation
- kAtomicPrefix, ...operation.wasmOpcode,
- // Generate fake output in needed.
- ...(operation.hasOutput ? [] : [kExprI32Const, 0]),
- // Store read intermediate to sequence.
- kExprI32StoreMem, 2, 0,
- // Increment result sequence pointer.
- kExprGetLocal, 1,
- kExprI32Const, 4,
- kExprI32Add,
- kExprSetLocal, 1
- );
- }
- // Return end of sequence index.
+ // We expect the int32* to perform ops on as arg 0 and
+ // the int32* for our value log as arg1. Argument 2 gives
+ // an int32* we use to count down spinning workers.
+ let body = [];
+ // Initially, we spin until all workers start running.
+ if (!kDebug) {
+ body.push(
+ // Decrement the wait count.
+ kExprGetLocal, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
+ 0,
+ // Spin until zero.
+ kExprLoop, kWasmStmt, kExprGetLocal, 2, kAtomicPrefix,
+ kExprI32AtomicLoad, 2, 0, kExprI32Const, 0, kExprI32GtU, kExprBrIf, 0,
+ kExprEnd);
+ }
+ for (let operation of sequence) {
body.push(
+ // Pre-load address of results sequence pointer for later.
kExprGetLocal, 1,
- kExprReturn);
- return body;
+ // Load address where atomic pointers are stored.
+ kExprGetLocal, 0,
+ // Load the second argument if it had any.
+ ...(operation.hasInput ?
+ [kExprI32Const, ...toSLeb128(operation.input)] :
+ []),
+ // Perform operation
+ kAtomicPrefix, ...operation.wasmOpcode,
+ // Generate fake output in needed.
+ ...(operation.hasOutput ? [] : [kExprI32Const, 0]),
+ // Store read intermediate to sequence.
+ kExprI32StoreMem, 2, 0,
+ // Increment result sequence pointer.
+ kExprGetLocal, 1, kExprI32Const, 4, kExprI32Add, kExprSetLocal, 1);
+ }
+ // Return end of sequence index.
+ body.push(kExprGetLocal, 1, kExprReturn);
+ return body;
}
function getSequence(start, end) {
- return new Int32Array(memory.buffer, start, (end - start) / Int32Array.BYTES_PER_ELEMENT);
+ return new Int32Array(
+ gSharedMemory.buffer, start,
+ (end - start) / Int32Array.BYTES_PER_ELEMENT);
}
function spawnWorkers() {
- let workers = [];
- for (let i = 0; i < kNumberOfWorker; i++) {
- let worker = new Worker(
- `onmessage = function(msg) {
+ let workers = [];
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ let worker = new Worker(
+ `onmessage = function(msg) {
if (msg.module) {
let module = msg.module;
let mem = msg.mem;
@@ -348,206 +316,190 @@ function spawnWorkers() {
let result = instance.exports["worker" + index](address, sequence, spin);
postMessage({index: index, sequence: sequence, result: result});
}
- }`, {type: 'string'}
- );
- workers.push(worker);
- }
- return workers;
+ }`,
+ {type: 'string'});
+ workers.push(worker);
+ }
+ return workers;
}
function instantiateModuleInWorkers(workers) {
- for (let worker of workers) {
- worker.postMessage({
- module: module,
- mem: memory
- });
- let msg = worker.getMessage();
- if (!msg.instantiated) throw "Worker failed to instantiate";
- }
+ for (let worker of workers) {
+ worker.postMessage({module: module, mem: gSharedMemory});
+ let msg = worker.getMessage();
+ if (!msg.instantiated) throw 'Worker failed to instantiate';
+ }
}
function executeSequenceInWorkers(workers) {
- for (i = 0; i < workers.length; i++) {
- let worker = workers[i];
- worker.postMessage({
- index: i,
- address: 0,
- spin: 16,
- sequence: 32 + ((kSequenceLength * 4) + 32) * i
- });
- // In debug mode, keep execution sequential.
- if (kDebug) {
- let msg = worker.getMessage();
- results[msg.index] = getSequence(msg.sequence, msg.result);
- }
- }
+ for (i = 0; i < workers.length; i++) {
+ let worker = workers[i];
+ worker.postMessage({
+ index: i,
+ address: 0,
+ spin: 16,
+ sequence: 32 + ((kSequenceLength * 4) + 32) * i
+ });
+ // In debug mode, keep execution sequential.
+ if (kDebug) {
+ let msg = worker.getMessage();
+ results[msg.index] = getSequence(msg.sequence, msg.result);
+ }
+ }
}
function selectMatchingWorkers(state) {
- let matching = [];
- let indices = state.indices;
- for (let i = 0; i < indices.length; i++) {
- let index = indices[i];
- if (index >= kSequenceLength) continue;
- // We need to project the expected value to the number of bits this
- // operation will read at runtime.
- let expected = sequences[i][index].truncateResultBits(state.low, state.high);
- let hasOutput = sequences[i][index].hasOutput;
- if (!hasOutput || (results[i][index] == expected)) {
- matching.push(i);
- }
- }
- return matching;
+ let matching = [];
+ let indices = state.indices;
+ for (let i = 0; i < indices.length; i++) {
+ let index = indices[i];
+ if (index >= kSequenceLength) continue;
+ // We need to project the expected value to the number of bits this
+ // operation will read at runtime.
+ let expected =
+ sequences[i][index].truncateResultBits(state.low, state.high);
+ let hasOutput = sequences[i][index].hasOutput;
+ if (!hasOutput || (results[i][index] == expected)) {
+ matching.push(i);
+ }
+ }
+ return matching;
}
function computeNextState(state, advanceIdx) {
- let newIndices = state.indices.slice();
- let sequence = sequences[advanceIdx];
- let operation = sequence[state.indices[advanceIdx]];
- newIndices[advanceIdx]++;
- let {
- low,
- high
- } = operation.compute(state);
- return new State(low, high, newIndices, state.count + 1);
+ let newIndices = state.indices.slice();
+ let sequence = sequences[advanceIdx];
+ let operation = sequence[state.indices[advanceIdx]];
+ newIndices[advanceIdx]++;
+ let {low, high} = operation.compute(state);
+ return new State(low, high, newIndices, state.count + 1);
}
function findSequentialOrdering() {
- let startIndices = new Array(results.length);
- let steps = 0;
- startIndices.fill(0);
- let matchingStates = [new State(0, 0, startIndices, 0)];
- while (matchingStates.length > 0) {
- let current = matchingStates.pop();
- if (kDebug) {
- print(current);
- }
- let matchingResults = selectMatchingWorkers(current);
- if (matchingResults.length == 0) {
- continue;
- }
- for (let match of matchingResults) {
- let newState = computeNextState(current, match);
- if (newState.isFinal()) {
- return true;
- }
- matchingStates.push(newState);
- }
- if (steps++ > kNumberOfSteps) {
- print("Search timed out, aborting...");
- return true;
- }
- }
- // We have no options left.
- return false;
+ let startIndices = new Array(results.length);
+ let steps = 0;
+ startIndices.fill(0);
+ let matchingStates = [new State(0, 0, startIndices, 0)];
+ while (matchingStates.length > 0) {
+ let current = matchingStates.pop();
+ if (kDebug) {
+ print(current);
+ }
+ let matchingResults = selectMatchingWorkers(current);
+ if (matchingResults.length == 0) {
+ continue;
+ }
+ for (let match of matchingResults) {
+ let newState = computeNextState(current, match);
+ if (newState.isFinal()) {
+ return true;
+ }
+ matchingStates.push(newState);
+ }
+ if (steps++ > kNumberOfSteps) {
+ print('Search timed out, aborting...');
+ return true;
+ }
+ }
+ // We have no options left.
+ return false;
}
// Helpful for debugging failed tests.
function loadSequencesFromStrings(inputs) {
- let reverseOpcodes = {};
- for (let i = 0; i < opCodeNames.length; i++) {
- reverseOpcodes[opCodeNames[i]] = i;
- }
- let sequences = [];
- let parseRE = /([a-zA-Z0-9]*)\[\+([0-9])\] ([\-0-9]*)/;
- for (let input of inputs) {
- let parts = input.split(",");
- let sequence = [];
- for (let part of parts) {
- let parsed = parseRE.exec(part);
- sequence.push(new Operation(reverseOpcodes[parsed[1]], parsed[3],
- parsed[2] | 0));
- }
- sequences.push(sequence);
- }
- return sequences;
+ let reverseOpcodes = {};
+ for (let i = 0; i < opCodeNames.length; i++) {
+ reverseOpcodes[opCodeNames[i]] = i;
+ }
+ let sequences = [];
+ let parseRE = /([a-zA-Z0-9]*)\[\+([0-9])\] ([\-0-9]*)/;
+ for (let input of inputs) {
+ let parts = input.split(',');
+ let sequence = [];
+ for (let part of parts) {
+ let parsed = parseRE.exec(part);
+ sequence.push(
+ new Operation(reverseOpcodes[parsed[1]], parsed[3], parsed[2] | 0));
+ }
+ sequences.push(sequence);
+ }
+ return sequences;
}
// Helpful for debugging failed tests.
function loadResultsFromStrings(inputs) {
- let results = [];
- for (let input of inputs) {
- let parts = input.split(",");
- let result = [];
- for (let number of parts) {
- result.push(number | 0);
- }
- results.push(result);
+ let results = [];
+ for (let input of inputs) {
+ let parts = input.split(',');
+ let result = [];
+ for (let number of parts) {
+ result.push(number | 0);
}
- return results;
+ results.push(result);
+ }
+ return results;
}
-let maxSize = 10;
-let memory = new WebAssembly.Memory({
- initial: 1,
- maximum: maxSize,
- shared: true
-});
-let memory_view = new Int32Array(memory.buffer);
-
let sequences = [];
let results = [];
let builder = new WasmModuleBuilder();
-builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
+builder.addImportedMemory('m', 'imported_mem', 0, kMaxMemPages, 'shared');
for (let i = 0; i < kNumberOfWorker; i++) {
- sequences[i] = makeSequenceOfOperations(kSequenceLength);
- builder.addFunction("worker" + i, kSig_i_iii)
- .addBody(generateFunctionBodyForSequence(sequences[i]))
- .exportAs("worker" + i);
+ sequences[i] = makeSequenceOfOperations(kSequenceLength);
+ builder.addFunction('worker' + i, kSig_i_iii)
+ .addBody(generateFunctionBodyForSequence(sequences[i]))
+ .exportAs('worker' + i);
}
// Instantiate module, get function exports.
let module = new WebAssembly.Module(builder.toBuffer());
-let instance = new WebAssembly.Instance(module, {
- m: {
- imported_mem: memory
- }
-});
+let instance =
+ new WebAssembly.Instance(module, {m: {imported_mem: gSharedMemory}});
// Spawn off the workers and run the sequences.
let workers = spawnWorkers();
// Set spin count.
-memory_view[4] = kNumberOfWorker;
+gSharedMemoryView[4] = kNumberOfWorker;
instantiateModuleInWorkers(workers);
executeSequenceInWorkers(workers);
if (!kDebug) {
- // Collect results, d8 style.
- for (let worker of workers) {
- let msg = worker.getMessage();
- results[msg.index] = getSequence(msg.sequence, msg.result);
- }
+ // Collect results, d8 style.
+ for (let worker of workers) {
+ let msg = worker.getMessage();
+ results[msg.index] = getSequence(msg.sequence, msg.result);
+ }
}
// Terminate all workers.
for (let worker of workers) {
- worker.terminate();
+ worker.terminate();
}
// In debug mode, print sequences and results.
if (kDebug) {
- for (let result of results) {
- print(result);
- }
+ for (let result of results) {
+ print(result);
+ }
- for (let sequence of sequences) {
- print(sequence);
- }
+ for (let sequence of sequences) {
+ print(sequence);
+ }
}
// Try to reconstruct a sequential ordering.
let passed = findSequentialOrdering();
if (passed) {
- print("PASS");
+ print('PASS');
} else {
- for (let i = 0; i < kNumberOfWorker; i++) {
- print("Worker " + i);
- print(sequences[i]);
- print(results[i]);
- }
- print("FAIL");
- quit(-1);
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ print('Worker ' + i);
+ print(sequences[i]);
+ print(results[i]);
+ }
+ print('FAIL');
+ quit(-1);
}
diff --git a/deps/v8/test/mjsunit/wasm/atomics64-stress.js b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
index f85c19a970..386a3b5549 100644
--- a/deps/v8/test/mjsunit/wasm/atomics64-stress.js
+++ b/deps/v8/test/mjsunit/wasm/atomics64-stress.js
@@ -9,7 +9,7 @@
// Note that results of this test are flaky by design. While the test is
// deterministic with a fixed seed, bugs may introduce non-determinism.
-load("test/mjsunit/wasm/wasm-module-builder.js");
+load('test/mjsunit/wasm/wasm-module-builder.js');
const kDebug = false;
@@ -22,358 +22,328 @@ const kFirstOpcodeWithoutOutput = 4;
const kLastOpcodeWithoutOutput = 7;
const opCodes = [
- kExprI64AtomicLoad,
- kExprI64AtomicLoad8U,
- kExprI64AtomicLoad16U,
- kExprI64AtomicLoad32U,
- kExprI64AtomicStore,
- kExprI64AtomicStore8U,
- kExprI64AtomicStore16U,
- kExprI64AtomicStore32U,
- kExprI64AtomicAdd,
- kExprI64AtomicAdd8U,
- kExprI64AtomicAdd16U,
- kExprI64AtomicAdd32U,
- kExprI64AtomicSub,
- kExprI64AtomicSub8U,
- kExprI64AtomicSub16U,
- kExprI64AtomicSub32U,
- kExprI64AtomicAnd,
- kExprI64AtomicAnd8U,
- kExprI64AtomicAnd16U,
- kExprI64AtomicAnd32U,
- kExprI64AtomicOr,
- kExprI64AtomicOr8U,
- kExprI64AtomicOr16U,
- kExprI64AtomicOr32U,
- kExprI64AtomicXor,
- kExprI64AtomicXor8U,
- kExprI64AtomicXor16U,
- kExprI64AtomicXor32U,
- kExprI64AtomicExchange,
- kExprI64AtomicExchange8U,
- kExprI64AtomicExchange16U,
- kExprI64AtomicExchange32U
+ kExprI64AtomicLoad, kExprI64AtomicLoad8U, kExprI64AtomicLoad16U,
+ kExprI64AtomicLoad32U, kExprI64AtomicStore, kExprI64AtomicStore8U,
+ kExprI64AtomicStore16U, kExprI64AtomicStore32U, kExprI64AtomicAdd,
+ kExprI64AtomicAdd8U, kExprI64AtomicAdd16U, kExprI64AtomicAdd32U,
+ kExprI64AtomicSub, kExprI64AtomicSub8U, kExprI64AtomicSub16U,
+ kExprI64AtomicSub32U, kExprI64AtomicAnd, kExprI64AtomicAnd8U,
+ kExprI64AtomicAnd16U, kExprI64AtomicAnd32U, kExprI64AtomicOr,
+ kExprI64AtomicOr8U, kExprI64AtomicOr16U, kExprI64AtomicOr32U,
+ kExprI64AtomicXor, kExprI64AtomicXor8U, kExprI64AtomicXor16U,
+ kExprI64AtomicXor32U, kExprI64AtomicExchange, kExprI64AtomicExchange8U,
+ kExprI64AtomicExchange16U, kExprI64AtomicExchange32U
];
const opCodeNames = [
- "kExprI64AtomicLoad",
- "kExprI64AtomicLoad8U",
- "kExprI64AtomicLoad16U",
- "kExprI64AtomicLoad32U",
- "kExprI64AtomicStore",
- "kExprI64AtomicStore8U",
- "kExprI64AtomicStore16U",
- "kExprI64AtomicStore32U",
- "kExprI64AtomicAdd",
- "kExprI64AtomicAdd8U",
- "kExprI64AtomicAdd16U",
- "kExprI64AtomicAdd32U",
- "kExprI64AtomicSub",
- "kExprI64AtomicSub8U",
- "kExprI64AtomicSub16U",
- "kExprI64AtomicSub32U",
- "kExprI64AtomicAnd",
- "kExprI64AtomicAnd8U",
- "kExprI64AtomicAnd16U",
- "kExprI64AtomicAnd32U",
- "kExprI64AtomicOr",
- "kExprI64AtomicOr8U",
- "kExprI64AtomicOr16U",
- "kExprI64AtomicOr32U",
- "kExprI64AtomicXor",
- "kExprI64AtomicXor8U",
- "kExprI64AtomicXor16U",
- "kExprI64AtomicXor32U",
- "kExprI64AtomicExchange",
- "kExprI64AtomicExchange8U",
- "kExprI64AtomicExchange16U",
- "kExprI64AtomicExchange32U"
+ 'kExprI64AtomicLoad', 'kExprI64AtomicLoad8U',
+ 'kExprI64AtomicLoad16U', 'kExprI64AtomicLoad32U',
+ 'kExprI64AtomicStore', 'kExprI64AtomicStore8U',
+ 'kExprI64AtomicStore16U', 'kExprI64AtomicStore32U',
+ 'kExprI64AtomicAdd', 'kExprI64AtomicAdd8U',
+ 'kExprI64AtomicAdd16U', 'kExprI64AtomicAdd32U',
+ 'kExprI64AtomicSub', 'kExprI64AtomicSub8U',
+ 'kExprI64AtomicSub16U', 'kExprI64AtomicSub32U',
+ 'kExprI64AtomicAnd', 'kExprI64AtomicAnd8U',
+ 'kExprI64AtomicAnd16U', 'kExprI64AtomicAnd32U',
+ 'kExprI64AtomicOr', 'kExprI64AtomicOr8U',
+ 'kExprI64AtomicOr16U', 'kExprI64AtomicOr32U',
+ 'kExprI64AtomicXor', 'kExprI64AtomicXor8U',
+ 'kExprI64AtomicXor16U', 'kExprI64AtomicXor32U',
+ 'kExprI64AtomicExchange', 'kExprI64AtomicExchange8U',
+ 'kExprI64AtomicExchange16U', 'kExprI64AtomicExchange32U'
];
-const kMaxInt32 = (1 << 31) * 2;
-
-class Operation {
- constructor(opcode, low_input, high_input, offset) {
- this.opcode = opcode != undefined ? opcode : Operation.nextOpcode();
- this.size = Operation.opcodeToSize(this.opcode);
- if (low_input == undefined) {
- [low_input, high_input] = Operation.inputForSize(this.size);
- }
- this.low_input = low_input;
- this.high_input = high_input;
- this.offset = offset != undefined ? offset : Operation.offsetForSize(
- this.size);
- }
-
- static nextOpcode() {
- let random = Math.random();
- return Math.floor(random * opCodes.length);
- }
-
- static opcodeToSize(opcode) {
- // Instructions are ordered in 64, 8, 16, 32 bits size
- return [64, 8, 16, 32][opcode % 4];
- }
-
- static opcodeToAlignment(opcode) {
- // Instructions are ordered in 64, 8, 16, 32 bits size
- return [3, 0, 1, 2][opcode % 4];
- }
-
- static inputForSize(size) {
- if (size <= 32) {
- let random = Math.random();
- // Avoid 32 bit overflow for integer here :(
- return [Math.floor(random * (1 << (size - 1)) * 2), 0];
- }
- return [Math.floor(Math.random() * kMaxInt32), Math.floor(Math.random() *
- kMaxInt32)];
- }
-
- static offsetForSize(size) {
- // Pick an offset in bytes between 0 and 8.
- let offset = Math.floor(Math.random() * 8);
- // Make sure the offset matches the required alignment by masking out the lower bits.
- let size_in_bytes = size / 8;
- let mask = ~(size_in_bytes - 1);
- return offset & mask;
- }
-
- get wasmOpcode() {
- // [opcode, alignment, offset]
- return [opCodes[this.opcode], Operation.opcodeToAlignment(this.opcode), this.offset];
- }
+let kMaxMemPages = 10;
+let gSharedMemory =
+ new WebAssembly.Memory({initial: 1, maximum: kMaxMemPages, shared: true});
+let gSharedMemoryView = new Int32Array(gSharedMemory.buffer);
- get hasInput() {
- return this.opcode >= kFirstOpcodeWithInput;
- }
-
- get hasOutput() {
- return this.opcode < kFirstOpcodeWithoutOutput || this.opcode >
- kLastOpcodeWithoutOutput;
- }
-
- truncateResultBits(low, high) {
- if (this.size == 64) return [low, high]
-
- // Shift the lower part. For offsets greater four it drops out of the visible window.
- let shiftedL = this.offset >= 4 ? 0 : low >>> (this.offset * 8);
- // The higher part is zero for offset 0, left shifted for [1..3] and right shifted
- // for [4..7].
- let shiftedH = this.offset == 0 ? 0 :
- this.offset >= 4 ? high >>> (this.offset - 4) * 8 : high << ((4 -
- this.offset) * 8);
- let value = shiftedL | shiftedH;
-
- switch (this.size) {
- case 8:
- return [value & 0xFF, 0];
- case 16:
- return [value & 0xFFFF, 0];
- case 32:
- return [value, 0];
- default:
- throw "Unexpected size: " + this.size;
- }
- }
-
- static get builder() {
- if (!Operation.__builder) {
- let builder = new WasmModuleBuilder();
- builder.addMemory(1, 1, 1, false);
- builder.exportMemoryAs("mem");
- Operation.__builder = builder;
- }
- return Operation.__builder;
- }
-
- static get exports() {
- if (!Operation.__instance) {
- return {};
- }
- return Operation.__instance.exports;
- }
-
- static get memory() {
- return Operation.exports.mem;
- }
-
- static set instance(instance) {
- Operation.__instance = instance;
- }
-
- compute(state) {
- let evalFun = Operation.exports[this.key];
- if (!evalFun) {
- let builder = Operation.builder;
- let body = [
- // Load address of low 32 bits.
- kExprI32Const, 0,
- // Load expected value.
- kExprGetLocal, 0,
- kExprI32StoreMem, 2, 0,
- // Load address of high 32 bits.
- kExprI32Const, 4,
- // Load expected value.
- kExprGetLocal, 1,
- kExprI32StoreMem, 2, 0,
- // Load address of where our window starts.
- kExprI32Const, 0,
- // Load input if there is one.
- ...(this.hasInput ? [kExprGetLocal, 3,
- kExprI64UConvertI32,
- kExprI64Const, 32,
- kExprI64Shl,
- kExprGetLocal, 2,
- kExprI64UConvertI32,
- kExprI64Ior
- ] : []),
- // Perform operation.
- kAtomicPrefix, ...this.wasmOpcode,
- // Drop output if it had any.
- ...(this.hasOutput ? [kExprDrop] : []),
- // Return.
- kExprReturn
- ]
- builder.addFunction(this.key, kSig_v_iiii)
- .addBody(body)
- .exportAs(this.key);
- // Instantiate module, get function exports.
- let module = new WebAssembly.Module(builder.toBuffer());
- Operation.instance = new WebAssembly.Instance(module);
- evalFun = Operation.exports[this.key];
- }
- evalFun(state.low, state.high, this.low_input, this.high_input);
- let ta = new Int32Array(Operation.memory.buffer);
- if (kDebug) {
- print(state.high + ":" + state.low + " " + this.toString() +
- " -> " + ta[1] + ":" + ta[0]);
- }
- return {
- low: ta[0],
- high: ta[1]
- };
- }
+let gPrivateMemory =
+ new WebAssembly.Memory({initial: 1, maximum: kMaxMemPages, shared: true});
+let gPrivateMemoryView = new Int32Array(gPrivateMemory.buffer);
- toString() {
- return opCodeNames[this.opcode] + "[+" + this.offset + "] " + this.high_input +
- ":" + this.low_input;
- }
+const kMaxInt32 = (1 << 31) * 2;
- get key() {
- return this.opcode + "-" + this.offset;
- }
+class Operation {
+ constructor(opcode, low_input, high_input, offset) {
+ this.opcode = opcode != undefined ? opcode : Operation.nextOpcode();
+ this.size = Operation.opcodeToSize(this.opcode);
+ if (low_input == undefined) {
+ [low_input, high_input] = Operation.inputForSize(this.size);
+ }
+ this.low_input = low_input;
+ this.high_input = high_input;
+ this.offset =
+ offset != undefined ? offset : Operation.offsetForSize(this.size);
+ }
+
+ static nextOpcode() {
+ let random = Math.random();
+ return Math.floor(random * opCodes.length);
+ }
+
+ static opcodeToSize(opcode) {
+ // Instructions are ordered in 64, 8, 16, 32 bits size
+ return [64, 8, 16, 32][opcode % 4];
+ }
+
+ static opcodeToAlignment(opcode) {
+ // Instructions are ordered in 64, 8, 16, 32 bits size
+ return [3, 0, 1, 2][opcode % 4];
+ }
+
+ static inputForSize(size) {
+ if (size <= 32) {
+ let random = Math.random();
+ // Avoid 32 bit overflow for integer here :(
+ return [Math.floor(random * (1 << (size - 1)) * 2), 0];
+ }
+ return [
+ Math.floor(Math.random() * kMaxInt32),
+ Math.floor(Math.random() * kMaxInt32)
+ ];
+ }
+
+ static offsetForSize(size) {
+ // Pick an offset in bytes between 0 and 8.
+ let offset = Math.floor(Math.random() * 8);
+ // Make sure the offset matches the required alignment by masking out the
+ // lower bits.
+ let size_in_bytes = size / 8;
+ let mask = ~(size_in_bytes - 1);
+ return offset & mask;
+ }
+
+ get wasmOpcode() {
+ // [opcode, alignment, offset]
+ return [
+ opCodes[this.opcode], Operation.opcodeToAlignment(this.opcode),
+ this.offset
+ ];
+ }
+
+ get hasInput() {
+ return this.opcode >= kFirstOpcodeWithInput;
+ }
+
+ get hasOutput() {
+ return this.opcode < kFirstOpcodeWithoutOutput ||
+ this.opcode > kLastOpcodeWithoutOutput;
+ }
+
+ truncateResultBits(low, high) {
+ if (this.size == 64)
+ return [low, high]
+
+ // Shift the lower part. For offsets greater four it drops out of the
+ // visible window.
+ let shiftedL = this.offset >= 4 ? 0 : low >>> (this.offset * 8);
+ // The higher part is zero for offset 0, left shifted for [1..3] and right
+ // shifted for [4..7].
+ let shiftedH = this.offset == 0 ?
+ 0 :
+ this.offset >= 4 ? high >>> (this.offset - 4) * 8 :
+ high << ((4 - this.offset) * 8);
+ let value = shiftedL | shiftedH;
+
+ switch (this.size) {
+ case 8:
+ return [value & 0xFF, 0];
+ case 16:
+ return [value & 0xFFFF, 0];
+ case 32:
+ return [value, 0];
+ default:
+ throw 'Unexpected size: ' + this.size;
+ }
+ }
+
+ static get builder() {
+ if (!Operation.__builder) {
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory('m', 'imported_mem', 0, kMaxMemPages, 'shared');
+ Operation.__builder = builder;
+ }
+ return Operation.__builder;
+ }
+
+ static get exports() {
+ if (!Operation.__instance) {
+ return {};
+ }
+ return Operation.__instance.exports;
+ }
+
+ static get memory() {
+ return Operation.exports.mem;
+ }
+
+ static set instance(instance) {
+ Operation.__instance = instance;
+ }
+
+ compute(state) {
+ let evalFun = Operation.exports[this.key];
+ if (!evalFun) {
+ let builder = Operation.builder;
+ let body = [
+ // Load address of low 32 bits.
+ kExprI32Const, 0,
+ // Load expected value.
+ kExprGetLocal, 0, kExprI32StoreMem, 2, 0,
+ // Load address of high 32 bits.
+ kExprI32Const, 4,
+ // Load expected value.
+ kExprGetLocal, 1, kExprI32StoreMem, 2, 0,
+ // Load address of where our window starts.
+ kExprI32Const, 0,
+ // Load input if there is one.
+ ...(this.hasInput ?
+ [
+ kExprGetLocal, 3, kExprI64UConvertI32, kExprI64Const, 32,
+ kExprI64Shl, kExprGetLocal, 2, kExprI64UConvertI32,
+ kExprI64Ior
+ ] :
+ []),
+ // Perform operation.
+ kAtomicPrefix, ...this.wasmOpcode,
+ // Drop output if it had any.
+ ...(this.hasOutput ? [kExprDrop] : []),
+ // Return.
+ kExprReturn
+ ]
+ builder.addFunction(this.key, kSig_v_iiii)
+ .addBody(body)
+ .exportAs(this.key);
+ // Instantiate module, get function exports.
+ let module = new WebAssembly.Module(builder.toBuffer());
+ Operation.instance =
+ new WebAssembly.Instance(module, {m: {imported_mem: gPrivateMemory}});
+ evalFun = Operation.exports[this.key];
+ }
+ evalFun(state.low, state.high, this.low_input, this.high_input);
+ let ta = gPrivateMemoryView;
+ if (kDebug) {
+ print(
+ state.high + ':' + state.low + ' ' + this.toString() + ' -> ' +
+ ta[1] + ':' + ta[0]);
+ }
+ return {low: ta[0], high: ta[1]};
+ }
+
+ toString() {
+ return opCodeNames[this.opcode] + '[+' + this.offset + '] ' +
+ this.high_input + ':' + this.low_input;
+ }
+
+ get key() {
+ return this.opcode + '-' + this.offset;
+ }
}
class State {
- constructor(low, high, indices, count) {
- this.low = low;
- this.high = high;
- this.indices = indices;
- this.count = count;
- }
-
- isFinal() {
- return (this.count == kNumberOfWorker * kSequenceLength);
- }
-
- toString() {
- return this.high + ":" + this.low + " @ " + this.indices;
- }
+ constructor(low, high, indices, count) {
+ this.low = low;
+ this.high = high;
+ this.indices = indices;
+ this.count = count;
+ }
+
+ isFinal() {
+ return (this.count == kNumberOfWorker * kSequenceLength);
+ }
+
+ toString() {
+ return this.high + ':' + this.low + ' @ ' + this.indices;
+ }
}
function makeSequenceOfOperations(size) {
- let result = new Array(size);
- for (let i = 0; i < size; i++) {
- result[i] = new Operation();
- }
- return result;
+ let result = new Array(size);
+ for (let i = 0; i < size; i++) {
+ result[i] = new Operation();
+ }
+ return result;
}
function toSLeb128(low, high) {
- let result = [];
- while (true) {
- let v = low & 0x7f;
- // For low, fill up with zeros, high will add extra bits.
- low = low >>> 7;
- if (high != 0) {
- let shiftIn = high << (32 - 7);
- low = low | shiftIn;
- // For high, fill up with ones, so that we keep trailing one.
- high = high >> 7;
- }
- let msbIsSet = (v & 0x40) || false;
- if (((low == 0) && (high == 0) && !msbIsSet) || ((low == -1) && (high ==
- -1) && msbIsSet)) {
- result.push(v);
- break;
- }
- result.push(v | 0x80);
- }
- return result;
+ let result = [];
+ while (true) {
+ let v = low & 0x7f;
+ // For low, fill up with zeros, high will add extra bits.
+ low = low >>> 7;
+ if (high != 0) {
+ let shiftIn = high << (32 - 7);
+ low = low | shiftIn;
+ // For high, fill up with ones, so that we keep trailing one.
+ high = high >> 7;
+ }
+ let msbIsSet = (v & 0x40) || false;
+ if (((low == 0) && (high == 0) && !msbIsSet) ||
+ ((low == -1) && (high == -1) && msbIsSet)) {
+ result.push(v);
+ break;
+ }
+ result.push(v | 0x80);
+ }
+ return result;
}
function generateFunctionBodyForSequence(sequence) {
- // We expect the int64* to perform ops on as arg 0 and
- // the int64* for our value log as arg1. Argument 2 gives
- // an int32* we use to count down spinning workers.
- let body = [];
- // Initially, we spin until all workers start running.
- if (!kDebug) {
- body.push(
- // Decrement the wait count.
- kExprGetLocal, 2,
- kExprI32Const, 1,
- kAtomicPrefix, kExprI32AtomicSub, 2, 0,
- // Spin until zero.
- kExprLoop, kWasmStmt,
- kExprGetLocal, 2,
- kAtomicPrefix, kExprI32AtomicLoad, 2, 0,
- kExprI32Const, 0,
- kExprI32GtU,
- kExprBrIf, 0,
- kExprEnd
- );
- }
- for (let operation of sequence) {
- body.push(
- // Pre-load address of results sequence pointer for later.
- kExprGetLocal, 1,
- // Load address where atomic pointers are stored.
- kExprGetLocal, 0,
- // Load the second argument if it had any.
- ...(operation.hasInput ? [kExprI64Const, ...toSLeb128(operation
- .low_input, operation.high_input)] : []),
- // Perform operation
- kAtomicPrefix, ...operation.wasmOpcode,
- // Generate fake output in needed.
- ...(operation.hasOutput ? [] : [kExprI64Const, 0]),
- // Store read intermediate to sequence.
- kExprI64StoreMem, 3, 0,
- // Increment result sequence pointer.
- kExprGetLocal, 1,
- kExprI32Const, 8,
- kExprI32Add,
- kExprSetLocal, 1
- );
- }
- // Return end of sequence index.
+ // We expect the int64* to perform ops on as arg 0 and
+ // the int64* for our value log as arg1. Argument 2 gives
+ // an int32* we use to count down spinning workers.
+ let body = [];
+ // Initially, we spin until all workers start running.
+ if (!kDebug) {
body.push(
+ // Decrement the wait count.
+ kExprGetLocal, 2, kExprI32Const, 1, kAtomicPrefix, kExprI32AtomicSub, 2,
+ 0,
+ // Spin until zero.
+ kExprLoop, kWasmStmt, kExprGetLocal, 2, kAtomicPrefix,
+ kExprI32AtomicLoad, 2, 0, kExprI32Const, 0, kExprI32GtU, kExprBrIf, 0,
+ kExprEnd);
+ }
+ for (let operation of sequence) {
+ body.push(
+ // Pre-load address of results sequence pointer for later.
kExprGetLocal, 1,
- kExprReturn);
- return body;
+ // Load address where atomic pointers are stored.
+ kExprGetLocal, 0,
+ // Load the second argument if it had any.
+ ...(operation.hasInput ?
+ [
+ kExprI64Const,
+ ...toSLeb128(operation.low_input, operation.high_input)
+ ] :
+ []),
+ // Perform operation
+ kAtomicPrefix, ...operation.wasmOpcode,
+ // Generate fake output in needed.
+ ...(operation.hasOutput ? [] : [kExprI64Const, 0]),
+ // Store read intermediate to sequence.
+ kExprI64StoreMem, 3, 0,
+ // Increment result sequence pointer.
+ kExprGetLocal, 1, kExprI32Const, 8, kExprI32Add, kExprSetLocal, 1);
+ }
+ // Return end of sequence index.
+ body.push(kExprGetLocal, 1, kExprReturn);
+ return body;
}
function getSequence(start, end) {
- return new Int32Array(memory.buffer, start, (end - start) / Int32Array.BYTES_PER_ELEMENT);
+ return new Int32Array(
+ gSharedMemory.buffer, start,
+ (end - start) / Int32Array.BYTES_PER_ELEMENT);
}
function spawnWorkers() {
- let workers = [];
- for (let i = 0; i < kNumberOfWorker; i++) {
- let worker = new Worker(
- `onmessage = function(msg) {
+ let workers = [];
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ let worker = new Worker(
+ `onmessage = function(msg) {
if (msg.module) {
let module = msg.module;
let mem = msg.mem;
@@ -387,209 +357,193 @@ function spawnWorkers() {
let result = instance.exports["worker" + index](address, sequence, spin);
postMessage({index: index, sequence: sequence, result: result});
}
- }`, {type: 'string'}
- );
- workers.push(worker);
- }
- return workers;
+ }`,
+ {type: 'string'});
+ workers.push(worker);
+ }
+ return workers;
}
function instantiateModuleInWorkers(workers) {
- for (let worker of workers) {
- worker.postMessage({
- module: module,
- mem: memory
- });
- let msg = worker.getMessage();
- if (!msg.instantiated) throw "Worker failed to instantiate";
- }
+ for (let worker of workers) {
+ worker.postMessage({module: module, mem: gSharedMemory});
+ let msg = worker.getMessage();
+ if (!msg.instantiated) throw 'Worker failed to instantiate';
+ }
}
function executeSequenceInWorkers(workers) {
- for (i = 0; i < workers.length; i++) {
- let worker = workers[i];
- worker.postMessage({
- index: i,
- address: 0,
- spin: 16,
- sequence: 32 + ((kSequenceLength * 8) + 32) * i
- });
- // In debug mode, keep execution sequential.
- if (kDebug) {
- let msg = worker.getMessage();
- results[msg.index] = getSequence(msg.sequence, msg.result);
- }
- }
+ for (i = 0; i < workers.length; i++) {
+ let worker = workers[i];
+ worker.postMessage({
+ index: i,
+ address: 0,
+ spin: 16,
+ sequence: 32 + ((kSequenceLength * 8) + 32) * i
+ });
+ // In debug mode, keep execution sequential.
+ if (kDebug) {
+ let msg = worker.getMessage();
+ results[msg.index] = getSequence(msg.sequence, msg.result);
+ }
+ }
}
function selectMatchingWorkers(state) {
- let matching = [];
- let indices = state.indices;
- for (let i = 0; i < indices.length; i++) {
- let index = indices[i];
- if (index >= kSequenceLength) continue;
- // We need to project the expected value to the number of bits this
- // operation will read at runtime.
- let [expected_low, expected_high] = sequences[i][index].truncateResultBits(
- state.low, state.high);
- let hasOutput = sequences[i][index].hasOutput;
- if (!hasOutput || ((results[i][index * 2] == expected_low) && (results[
- i][index * 2 + 1] == expected_high))) {
- matching.push(i);
- }
- }
- return matching;
+ let matching = [];
+ let indices = state.indices;
+ for (let i = 0; i < indices.length; i++) {
+ let index = indices[i];
+ if (index >= kSequenceLength) continue;
+ // We need to project the expected value to the number of bits this
+ // operation will read at runtime.
+ let [expected_low, expected_high] =
+ sequences[i][index].truncateResultBits(state.low, state.high);
+ let hasOutput = sequences[i][index].hasOutput;
+ if (!hasOutput ||
+ ((results[i][index * 2] == expected_low) &&
+ (results[i][index * 2 + 1] == expected_high))) {
+ matching.push(i);
+ }
+ }
+ return matching;
}
function computeNextState(state, advanceIdx) {
- let newIndices = state.indices.slice();
- let sequence = sequences[advanceIdx];
- let operation = sequence[state.indices[advanceIdx]];
- newIndices[advanceIdx]++;
- let {
- low,
- high
- } = operation.compute(state);
-
- return new State(low, high, newIndices, state.count + 1);
+ let newIndices = state.indices.slice();
+ let sequence = sequences[advanceIdx];
+ let operation = sequence[state.indices[advanceIdx]];
+ newIndices[advanceIdx]++;
+ let {low, high} = operation.compute(state);
+
+ return new State(low, high, newIndices, state.count + 1);
}
function findSequentialOrdering() {
- let startIndices = new Array(results.length);
- let steps = 0;
- startIndices.fill(0);
- let matchingStates = [new State(0, 0, startIndices, 0)];
- while (matchingStates.length > 0) {
- let current = matchingStates.pop();
- if (kDebug) {
- print(current);
- }
- let matchingResults = selectMatchingWorkers(current);
- if (matchingResults.length == 0) {
- continue;
- }
- for (let match of matchingResults) {
- let newState = computeNextState(current, match);
- if (newState.isFinal()) {
- return true;
- }
- matchingStates.push(newState);
- }
- if (steps++ > kNumberOfSteps) {
- print("Search timed out, aborting...");
- return true;
- }
- }
- // We have no options left.
- return false;
+ let startIndices = new Array(results.length);
+ let steps = 0;
+ startIndices.fill(0);
+ let matchingStates = [new State(0, 0, startIndices, 0)];
+ while (matchingStates.length > 0) {
+ let current = matchingStates.pop();
+ if (kDebug) {
+ print(current);
+ }
+ let matchingResults = selectMatchingWorkers(current);
+ if (matchingResults.length == 0) {
+ continue;
+ }
+ for (let match of matchingResults) {
+ let newState = computeNextState(current, match);
+ if (newState.isFinal()) {
+ return true;
+ }
+ matchingStates.push(newState);
+ }
+ if (steps++ > kNumberOfSteps) {
+ print('Search timed out, aborting...');
+ return true;
+ }
+ }
+ // We have no options left.
+ return false;
}
// Helpful for debugging failed tests.
function loadSequencesFromStrings(inputs) {
- let reverseOpcodes = {};
- for (let i = 0; i < opCodeNames.length; i++) {
- reverseOpcodes[opCodeNames[i]] = i;
- }
- let sequences = [];
- let parseRE = /([a-zA-Z0-9]*)\[\+([0-9])\] ([\-0-9]*)/;
- for (let input of inputs) {
- let parts = input.split(",");
- let sequence = [];
- for (let part of parts) {
- let parsed = parseRE.exec(part);
- sequence.push(new Operation(reverseOpcodes[parsed[1]], parsed[3],
- parsed[2] | 0));
- }
- sequences.push(sequence);
- }
- return sequences;
+ let reverseOpcodes = {};
+ for (let i = 0; i < opCodeNames.length; i++) {
+ reverseOpcodes[opCodeNames[i]] = i;
+ }
+ let sequences = [];
+ let parseRE = /([a-zA-Z0-9]*)\[\+([0-9])\] ([\-0-9]*)/;
+ for (let input of inputs) {
+ let parts = input.split(',');
+ let sequence = [];
+ for (let part of parts) {
+ let parsed = parseRE.exec(part);
+ sequence.push(
+ new Operation(reverseOpcodes[parsed[1]], parsed[3], parsed[2] | 0));
+ }
+ sequences.push(sequence);
+ }
+ return sequences;
}
// Helpful for debugging failed tests.
function loadResultsFromStrings(inputs) {
- let results = [];
- for (let input of inputs) {
- let parts = input.split(",");
- let result = [];
- for (let number of parts) {
- result.push(number | 0);
- }
- results.push(result);
+ let results = [];
+ for (let input of inputs) {
+ let parts = input.split(',');
+ let result = [];
+ for (let number of parts) {
+ result.push(number | 0);
}
- return results;
+ results.push(result);
+ }
+ return results;
}
-let maxSize = 10;
-let memory = new WebAssembly.Memory({
- initial: 1,
- maximum: maxSize,
- shared: true
-});
-let memory_view = new Int32Array(memory.buffer);
-
let sequences = [];
let results = [];
let builder = new WasmModuleBuilder();
-builder.addImportedMemory("m", "imported_mem", 0, maxSize, "shared");
+builder.addImportedMemory('m', 'imported_mem', 0, kMaxMemPages, 'shared');
for (let i = 0; i < kNumberOfWorker; i++) {
- sequences[i] = makeSequenceOfOperations(kSequenceLength);
- builder.addFunction("worker" + i, kSig_i_iii)
- .addBody(generateFunctionBodyForSequence(sequences[i]))
- .exportAs("worker" + i);
+ sequences[i] = makeSequenceOfOperations(kSequenceLength);
+ builder.addFunction('worker' + i, kSig_i_iii)
+ .addBody(generateFunctionBodyForSequence(sequences[i]))
+ .exportAs('worker' + i);
}
// Instantiate module, get function exports.
let module = new WebAssembly.Module(builder.toBuffer());
-let instance = new WebAssembly.Instance(module, {
- m: {
- imported_mem: memory
- }
-});
+let instance =
+ new WebAssembly.Instance(module, {m: {imported_mem: gSharedMemory}});
// Spawn off the workers and run the sequences.
let workers = spawnWorkers();
// Set spin count.
-memory_view[4] = kNumberOfWorker;
+gSharedMemoryView[4] = kNumberOfWorker;
instantiateModuleInWorkers(workers);
executeSequenceInWorkers(workers);
if (!kDebug) {
- // Collect results, d8 style.
- for (let worker of workers) {
- let msg = worker.getMessage();
- results[msg.index] = getSequence(msg.sequence, msg.result);
- }
+ // Collect results, d8 style.
+ for (let worker of workers) {
+ let msg = worker.getMessage();
+ results[msg.index] = getSequence(msg.sequence, msg.result);
+ }
}
// Terminate all workers.
for (let worker of workers) {
- worker.terminate();
+ worker.terminate();
}
// In debug mode, print sequences and results.
if (kDebug) {
- for (let result of results) {
- print(result);
- }
+ for (let result of results) {
+ print(result);
+ }
- for (let sequence of sequences) {
- print(sequence);
- }
+ for (let sequence of sequences) {
+ print(sequence);
+ }
}
// Try to reconstruct a sequential ordering.
let passed = findSequentialOrdering();
if (passed) {
- print("PASS");
+ print('PASS');
} else {
- for (let i = 0; i < kNumberOfWorker; i++) {
- print("Worker " + i);
- print(sequences[i]);
- print(results[i]);
- }
- print("FAIL");
- quit(-1);
+ for (let i = 0; i < kNumberOfWorker; i++) {
+ print('Worker ' + i);
+ print(sequences[i]);
+ print(results[i]);
+ }
+ print('FAIL');
+ quit(-1);
}
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/README b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/README
deleted file mode 100644
index 8fd8d3498b..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/README
+++ /dev/null
@@ -1,19 +0,0 @@
-This directory contains the bulk-memory proposal tests, converted to JS using
-the reference interpreter using the following shell command:
-
-```
-for f in *.wast; do wasm $f -o $f.js; done
-```
-
-Where `wasm` is the reference interpreter compiled from the bulk memory
-proposal (https://github.com/WebAssembly/bulk-memory-operations).
-
-This only includes the tests that are different than the spec repo. The
-testsuite repo (https://github.com/WebAssembly/testsuite) has a tool which
-calculates this, see
-https://github.com/WebAssembly/testsuite/tree/master/proposals/bulk-memory-operations
-
-The contents are copied from the following revisions:
-
-WebAssembly/testsuite: 2a2099d52103215962707fbe9f44cd51fd146636
-WebAssembly/bulk-memory-operations: 47b4ae718b42081a220ac7f405bed1391661a635
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast
deleted file mode 100644
index e88c72ca7f..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast
+++ /dev/null
@@ -1,1047 +0,0 @@
-(module binary "\00asm\01\00\00\00")
-(module binary "\00asm" "\01\00\00\00")
-(module $M1 binary "\00asm\01\00\00\00")
-(module $M2 binary "\00asm" "\01\00\00\00")
-
-(assert_malformed (module binary "") "unexpected end")
-(assert_malformed (module binary "\01") "unexpected end")
-(assert_malformed (module binary "\00as") "unexpected end")
-(assert_malformed (module binary "asm\00") "magic header not detected")
-(assert_malformed (module binary "msa\00") "magic header not detected")
-(assert_malformed (module binary "msa\00\01\00\00\00") "magic header not detected")
-(assert_malformed (module binary "msa\00\00\00\00\01") "magic header not detected")
-(assert_malformed (module binary "asm\01\00\00\00\00") "magic header not detected")
-(assert_malformed (module binary "wasm\01\00\00\00") "magic header not detected")
-(assert_malformed (module binary "\7fasm\01\00\00\00") "magic header not detected")
-(assert_malformed (module binary "\80asm\01\00\00\00") "magic header not detected")
-(assert_malformed (module binary "\82asm\01\00\00\00") "magic header not detected")
-(assert_malformed (module binary "\ffasm\01\00\00\00") "magic header not detected")
-
-;; 8-byte endian-reversed.
-(assert_malformed (module binary "\00\00\00\01msa\00") "magic header not detected")
-
-;; Middle-endian byte orderings.
-(assert_malformed (module binary "a\00ms\00\01\00\00") "magic header not detected")
-(assert_malformed (module binary "sm\00a\00\00\01\00") "magic header not detected")
-
-;; Upper-cased.
-(assert_malformed (module binary "\00ASM\01\00\00\00") "magic header not detected")
-
-;; EBCDIC-encoded magic.
-(assert_malformed (module binary "\00\81\a2\94\01\00\00\00") "magic header not detected")
-
-;; Leading UTF-8 BOM.
-(assert_malformed (module binary "\ef\bb\bf\00asm\01\00\00\00") "magic header not detected")
-
-(assert_malformed (module binary "\00asm") "unexpected end")
-(assert_malformed (module binary "\00asm\01") "unexpected end")
-(assert_malformed (module binary "\00asm\01\00\00") "unexpected end")
-(assert_malformed (module binary "\00asm\00\00\00\00") "unknown binary version")
-(assert_malformed (module binary "\00asm\0d\00\00\00") "unknown binary version")
-(assert_malformed (module binary "\00asm\0e\00\00\00") "unknown binary version")
-(assert_malformed (module binary "\00asm\00\01\00\00") "unknown binary version")
-(assert_malformed (module binary "\00asm\00\00\01\00") "unknown binary version")
-(assert_malformed (module binary "\00asm\00\00\00\01") "unknown binary version")
-
-;; Unsigned LEB128 can have non-minimal length
-(module binary
- "\00asm" "\01\00\00\00"
- "\05\04\01" ;; Memory section with 1 entry
- "\00\82\00" ;; no max, minimum 2
-)
-(module binary
- "\00asm" "\01\00\00\00"
- "\05\07\01" ;; Memory section with 1 entry
- "\00\82\80\80\80\00" ;; no max, minimum 2
-)
-
-;; Signed LEB128 can have non-minimal length
-(module binary
- "\00asm" "\01\00\00\00"
- "\06\07\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\80\00" ;; i32.const 0
- "\0b" ;; end
-)
-(module binary
- "\00asm" "\01\00\00\00"
- "\06\07\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\ff\7f" ;; i32.const -1
- "\0b" ;; end
-)
-(module binary
- "\00asm" "\01\00\00\00"
- "\06\0a\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\80\80\80\80\00" ;; i32.const 0
- "\0b" ;; end
-)
-(module binary
- "\00asm" "\01\00\00\00"
- "\06\0a\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\ff\ff\ff\ff\7f" ;; i32.const -1
- "\0b" ;; end
-)
-
-(module binary
- "\00asm" "\01\00\00\00"
- "\06\07\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\80\00" ;; i64.const 0 with unused bits set
- "\0b" ;; end
-)
-(module binary
- "\00asm" "\01\00\00\00"
- "\06\07\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\ff\7f" ;; i64.const -1 with unused bits unset
- "\0b" ;; end
-)
-(module binary
- "\00asm" "\01\00\00\00"
- "\06\0f\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\80\80\80\80\80\80\80\80\80\00" ;; i64.const 0 with unused bits set
- "\0b" ;; end
-)
-(module binary
- "\00asm" "\01\00\00\00"
- "\06\0f\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\ff\ff\ff\ff\ff\ff\ff\ff\ff\7f" ;; i64.const -1 with unused bits unset
- "\0b" ;; end
-)
-
-;; Data segment memory index can have non-minimal length
-(module binary
- "\00asm" "\01\00\00\00"
- "\05\03\01" ;; Memory section with 1 entry
- "\00\00" ;; no max, minimum 0
- "\0b\07\01" ;; Data section with 1 entry
- "\80\00" ;; Memory index 0, encoded with 2 bytes
- "\41\00\0b\00" ;; (i32.const 0) with contents ""
-)
-
-;; Element segment table index can have non-minimal length
-(module binary
- "\00asm" "\01\00\00\00"
- "\04\04\01" ;; Table section with 1 entry
- "\70\00\00" ;; no max, minimum 0, funcref
- "\09\07\01" ;; Element section with 1 entry
- "\80\00" ;; Table index 0, encoded with 2 bytes
- "\41\00\0b\00" ;; (i32.const 0) with no elements
-)
-
-;; Unsigned LEB128 must not be overlong
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\05\08\01" ;; Memory section with 1 entry
- "\00\82\80\80\80\80\00" ;; no max, minimum 2 with one byte too many
- )
- "integer representation too long"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\11\01" ;; Code section
- ;; function 0
- "\0f\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\28" ;; i32.load
- "\02" ;; alignment 2
- "\82\80\80\80\80\00" ;; offset 2 with one byte too many
- "\1a" ;; drop
- "\0b" ;; end
- )
- "integer representation too long"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\11\01" ;; Code section
- ;; function 0
- "\0f\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\28" ;; i32.load
- "\82\80\80\80\80\00" ;; alignment 2 with one byte too many
- "\00" ;; offset 0
- "\1a" ;; drop
- "\0b" ;; end
- )
- "integer representation too long"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\12\01" ;; Code section
- ;; function 0
- "\10\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\41\03" ;; i32.const 3
- "\36" ;; i32.store
- "\82\80\80\80\80\00" ;; alignment 2 with one byte too many
- "\03" ;; offset 3
- "\0b" ;; end
- )
- "integer representation too long"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\12\01" ;; Code section
- ;; function 0
- "\10\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\41\03" ;; i32.const 3
- "\36" ;; i32.store
- "\02" ;; alignment 2
- "\82\80\80\80\80\00" ;; offset 2 with one byte too many
- "\0b" ;; end
- )
- "integer representation too long"
-)
-
-;; Signed LEB128 must not be overlong
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0b\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\80\80\80\80\80\00" ;; i32.const 0 with one byte too many
- "\0b" ;; end
- )
- "integer representation too long"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0b\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\ff\ff\ff\ff\ff\7f" ;; i32.const -1 with one byte too many
- "\0b" ;; end
- )
- "integer representation too long"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\10\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\80\80\80\80\80\80\80\80\80\80\00" ;; i64.const 0 with one byte too many
- "\0b" ;; end
- )
- "integer representation too long"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\10\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\7f" ;; i64.const -1 with one byte too many
- "\0b" ;; end
- )
- "integer representation too long"
-)
-
-;; Unsigned LEB128s zero-extend
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\05\07\01" ;; Memory section with 1 entry
- "\00\82\80\80\80\70" ;; no max, minimum 2 with unused bits set
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\05\07\01" ;; Memory section with 1 entry
- "\00\82\80\80\80\40" ;; no max, minimum 2 with some unused bits set
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\10\01" ;; Code section
- ;; function 0
- "\0e\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\28" ;; i32.load
- "\02" ;; alignment 2
- "\82\80\80\80\10" ;; offset 2 with unused bits set
- "\1a" ;; drop
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\10\01" ;; Code section
- ;; function 0
- "\0e\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\28" ;; i32.load
- "\02" ;; alignment 2
- "\82\80\80\80\40" ;; offset 2 with some unused bits set
- "\1a" ;; drop
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\10\01" ;; Code section
- "\0e\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\28" ;; i32.load
- "\82\80\80\80\10" ;; alignment 2 with unused bits set
- "\00" ;; offset 0
- "\1a" ;; drop
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\10\01" ;; Code section
- ;; function 0
- "\0e\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\28" ;; i32.load
- "\82\80\80\80\40" ;; alignment 2 with some unused bits set
- "\00" ;; offset 0
- "\1a" ;; drop
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\11\01" ;; Code section
- ;; function 0
- "\0f\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\41\03" ;; i32.const 3
- "\36" ;; i32.store
- "\82\80\80\80\10" ;; alignment 2 with unused bits set
- "\03" ;; offset 3
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\11\01" ;; Code section
- ;; function 0
- "\0f\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\41\03" ;; i32.const 3
- "\36" ;; i32.store
- "\82\80\80\80\40" ;; alignment 2 with some unused bits set
- "\03" ;; offset 3
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\11\01" ;; Code section
- ;; function 0
- "\0f\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\41\03" ;; i32.const 3
- "\36" ;; i32.store
- "\03" ;; alignment 2
- "\82\80\80\80\10" ;; offset 2 with unused bits set
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\01" ;; Memory section
- "\0a\11\01" ;; Code section
-
- ;; function 0
- "\0f\01\01" ;; local type count
- "\7f" ;; i32
- "\41\00" ;; i32.const 0
- "\41\03" ;; i32.const 3
- "\36" ;; i32.store
- "\02" ;; alignment 2
- "\82\80\80\80\40" ;; offset 2 with some unused bits set
- "\0b" ;; end
- )
- "integer too large"
-)
-
-;; Signed LEB128s sign-extend
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0a\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\80\80\80\80\70" ;; i32.const 0 with unused bits set
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0a\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\ff\ff\ff\ff\0f" ;; i32.const -1 with unused bits unset
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0a\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\80\80\80\80\1f" ;; i32.const 0 with some unused bits set
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0a\01" ;; Global section with 1 entry
- "\7f\00" ;; i32, immutable
- "\41\ff\ff\ff\ff\4f" ;; i32.const -1 with some unused bits unset
- "\0b" ;; end
- )
- "integer too large"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0f\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\80\80\80\80\80\80\80\80\80\7e" ;; i64.const 0 with unused bits set
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0f\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\ff\ff\ff\ff\ff\ff\ff\ff\ff\01" ;; i64.const -1 with unused bits unset
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0f\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\80\80\80\80\80\80\80\80\80\02" ;; i64.const 0 with some unused bits set
- "\0b" ;; end
- )
- "integer too large"
-)
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\06\0f\01" ;; Global section with 1 entry
- "\7e\00" ;; i64, immutable
- "\42\ff\ff\ff\ff\ff\ff\ff\ff\ff\41" ;; i64.const -1 with some unused bits unset
- "\0b" ;; end
- )
- "integer too large"
-)
-
-;; call_indirect reserved byte equal to zero.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\04\04\01\70\00\00" ;; Table section
- "\0a\09\01" ;; Code section
-
- ;; function 0
- "\07\00"
- "\41\00" ;; i32.const 0
- "\11\00" ;; call_indirect (type 0)
- "\01" ;; call_indirect reserved byte is not equal to zero!
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; call_indirect reserved byte should not be a "long" LEB128 zero.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\04\04\01\70\00\00" ;; Table section
- "\0a\0a\01" ;; Code section
-
- ;; function 0
- "\07\00"
- "\41\00" ;; i32.const 0
- "\11\00" ;; call_indirect (type 0)
- "\80\00" ;; call_indirect reserved byte
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; Same as above for 3, 4, and 5-byte zero encodings.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\04\04\01\70\00\00" ;; Table section
- "\0a\0b\01" ;; Code section
-
- ;; function 0
- "\08\00"
- "\41\00" ;; i32.const 0
- "\11\00" ;; call_indirect (type 0)
- "\80\80\00" ;; call_indirect reserved byte
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\04\04\01\70\00\00" ;; Table section
- "\0a\0c\01" ;; Code section
-
- ;; function 0
- "\09\00"
- "\41\00" ;; i32.const 0
- "\11\00" ;; call_indirect (type 0)
- "\80\80\80\00" ;; call_indirect reserved byte
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\04\04\01\70\00\00" ;; Table section
- "\0a\0d\01" ;; Code section
-
- ;; function 0
- "\0a\00"
- "\41\00" ;; i32.const 0
- "\11\00" ;; call_indirect (type 0)
- "\80\80\80\80\00" ;; call_indirect reserved byte
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; memory.grow reserved byte equal to zero.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\09\01" ;; Code section
-
- ;; function 0
- "\07\00"
- "\41\00" ;; i32.const 0
- "\40" ;; memory.grow
- "\01" ;; memory.grow reserved byte is not equal to zero!
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; memory.grow reserved byte should not be a "long" LEB128 zero.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\0a\01" ;; Code section
-
- ;; function 0
- "\08\00"
- "\41\00" ;; i32.const 0
- "\40" ;; memory.grow
- "\80\00" ;; memory.grow reserved byte
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; Same as above for 3, 4, and 5-byte zero encodings.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\0b\01" ;; Code section
-
- ;; function 0
- "\09\00"
- "\41\00" ;; i32.const 0
- "\40" ;; memory.grow
- "\80\80\00" ;; memory.grow reserved byte
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\0c\01" ;; Code section
-
- ;; function 0
- "\0a\00"
- "\41\00" ;; i32.const 0
- "\40" ;; memory.grow
- "\80\80\80\00" ;; memory.grow reserved byte
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\0d\01" ;; Code section
-
- ;; function 0
- "\0b\00"
- "\41\00" ;; i32.const 0
- "\40" ;; memory.grow
- "\80\80\80\80\00" ;; memory.grow reserved byte
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; memory.size reserved byte equal to zero.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\07\01" ;; Code section
-
- ;; function 0
- "\05\00"
- "\3f" ;; memory.size
- "\01" ;; memory.size reserved byte is not equal to zero!
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; memory.size reserved byte should not be a "long" LEB128 zero.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\08\01" ;; Code section
-
- ;; function 0
- "\06\00"
- "\3f" ;; memory.size
- "\80\00" ;; memory.size reserved byte
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; Same as above for 3, 4, and 5-byte zero encodings.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\09\01" ;; Code section
-
- ;; function 0
- "\07\00"
- "\3f" ;; memory.size
- "\80\80\00" ;; memory.size reserved byte
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\0a\01" ;; Code section
-
- ;; function 0
- "\08\00"
- "\3f" ;; memory.size
- "\80\80\80\00" ;; memory.size reserved byte
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\0b\01" ;; Code section
-
- ;; function 0
- "\09\00"
- "\3f" ;; memory.size
- "\80\80\80\80\00" ;; memory.size reserved byte
- "\1a" ;; drop
- "\0b" ;; end
- )
- "zero flag expected"
-)
-
-;; No more than 2^32 locals.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\0a\0c\01" ;; Code section
-
- ;; function 0
- "\0a\02"
- "\ff\ff\ff\ff\0f\7f" ;; 0xFFFFFFFF i32
- "\02\7e" ;; 0x00000002 i64
- "\0b" ;; end
- )
- "too many locals"
-)
-
-;; Local count can be 0.
-(module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\0a\0a\01" ;; Code section
-
- ;; function 0
- "\08\03"
- "\00\7f" ;; 0 i32
- "\00\7e" ;; 0 i64
- "\02\7d" ;; 2 f32
- "\0b" ;; end
-)
-
-;; Function section has non-zero count, but code section is absent.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\03\02\00\00" ;; Function section with 2 functions
- )
- "function and code section have inconsistent lengths"
-)
-
-;; Code section has non-zero count, but function section is absent.
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\0a\04\01\02\00\0b" ;; Code section with 1 empty function
- )
- "function and code section have inconsistent lengths"
-)
-
-;; Function section count > code section count
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\03\02\00\00" ;; Function section with 2 functions
- "\0a\04\01\02\00\0b" ;; Code section with 1 empty function
- )
- "function and code section have inconsistent lengths"
-)
-
-;; Function section count < code section count
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section with 1 function
- "\0a\07\02\02\00\0b\02\00\0b" ;; Code section with 2 empty functions
- )
- "function and code section have inconsistent lengths"
-)
-
-;; Function section has zero count, and code section is absent.
-(module binary
- "\00asm" "\01\00\00\00"
- "\03\01\00" ;; Function section with 0 functions
-)
-
-;; Code section has zero count, and function section is absent.
-(module binary
- "\00asm" "\01\00\00\00"
- "\0a\01\00" ;; Code section with 0 functions
-)
-
-;; Fewer passive segments than datacount
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\0c\01\03" ;; Datacount section with value "3"
- "\0b\05\02" ;; Data section with two entries
- "\01\00" ;; Passive data section
- "\01\00") ;; Passive data section
- "data count and data section have inconsistent lengths")
-
-;; More passive segments than datacount
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\0c\01\01" ;; Datacount section with value "1"
- "\0b\05\02" ;; Data section with two entries
- "\01\00" ;; Passive data section
- "\01\00") ;; Passive data section
- "data count and data section have inconsistent lengths")
-
-;; memory.init requires a datacount section
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
-
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\0e\01" ;; Code section
-
- ;; function 0
- "\0c\00"
- "\41\00" ;; zero args
- "\41\00"
- "\41\00"
- "\fc\08\00\00" ;; memory.init
- "\0b"
-
- "\0b\03\01\01\00" ;; Data section
- ) ;; end
- "data count section required")
-
-;; data.drop requires a datacount section
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
-
- "\01\04\01\60\00\00" ;; Type section
- "\03\02\01\00" ;; Function section
- "\05\03\01\00\00" ;; Memory section
- "\0a\07\01" ;; Code section
-
- ;; function 0
- "\05\00"
- "\fc\09\00" ;; data.drop
- "\0b"
-
- "\0b\03\01\01\00" ;; Data section
- ) ;; end
- "data count section required")
-
-;; passive element segment containing opcode other than ref.func or ref.null
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
-
- "\01\04\01\60\00\00" ;; Type section
-
- "\03\02\01\00" ;; Function section
-
- "\04\04\01" ;; Table section with 1 entry
- "\70\00\00" ;; no max, minimum 0, funcref
-
- "\05\03\01\00\00" ;; Memory section
-
- "\09\07\01" ;; Element section with one segment
- "\01\70" ;; Passive, funcref
- "\01" ;; 1 element
- "\d3\00\0b" ;; bad opcode, index 0, end
-
- "\0a\04\01" ;; Code section
-
- ;; function 0
- "\02\00"
- "\0b") ;; end
- "invalid elem")
-
-;; passive element segment containing type other than funcref
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
-
- "\01\04\01\60\00\00" ;; Type section
-
- "\03\02\01\00" ;; Function section
-
- "\04\04\01" ;; Table section with 1 entry
- "\70\00\00" ;; no max, minimum 0, funcref
-
- "\05\03\01\00\00" ;; Memory section
-
- "\09\07\01" ;; Element section with one segment
- "\01\7f" ;; Passive, i32
- "\01" ;; 1 element
- "\d2\00\0b" ;; ref.func, index 0, end
-
- "\0a\04\01" ;; Code section
-
- ;; function 0
- "\02\00"
- "\0b") ;; end
- "invalid element type")
-
-;; passive element segment containing opcode ref.func
-(module binary
- "\00asm" "\01\00\00\00"
-
- "\01\04\01\60\00\00" ;; Type section
-
- "\03\02\01\00" ;; Function section
-
- "\04\04\01" ;; Table section with 1 entry
- "\70\00\00" ;; no max, minimum 0, funcref
-
- "\05\03\01\00\00" ;; Memory section
-
- "\09\07\01" ;; Element section with one segment
- "\01\70" ;; Passive, funcref
- "\01" ;; 1 element
- "\d2\00\0b" ;; ref.func, index 0, end
-
- "\0a\04\01" ;; Code section
-
- ;; function 0
- "\02\00"
- "\0b") ;; end
-
-;; passive element segment containing opcode ref.null
-(module binary
- "\00asm" "\01\00\00\00"
-
- "\01\04\01\60\00\00" ;; Type section
-
- "\03\02\01\00" ;; Function section
-
- "\04\04\01" ;; Table section with 1 entry
- "\70\00\00" ;; no max, minimum 0, funcref
-
- "\05\03\01\00\00" ;; Memory section
-
- "\09\06\01" ;; Element section with one segment
- "\01\70" ;; Passive, funcref
- "\01" ;; 1 element
- "\d0\0b" ;; ref.null, end
-
- "\0a\04\01" ;; Code section
-
- ;; function 0
- "\02\00"
- "\0b") ;; end
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js
deleted file mode 100644
index 134a2a339b..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js
+++ /dev/null
@@ -1,445 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// binary.wast:1
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00");
-
-// binary.wast:2
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00");
-
-// binary.wast:3
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00");
-let $M1 = $3;
-
-// binary.wast:4
-let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00");
-let $M2 = $4;
-
-// binary.wast:6
-assert_malformed("");
-
-// binary.wast:7
-assert_malformed("\x01");
-
-// binary.wast:8
-assert_malformed("\x00\x61\x73");
-
-// binary.wast:9
-assert_malformed("\x61\x73\x6d\x00");
-
-// binary.wast:10
-assert_malformed("\x6d\x73\x61\x00");
-
-// binary.wast:11
-assert_malformed("\x6d\x73\x61\x00\x01\x00\x00\x00");
-
-// binary.wast:12
-assert_malformed("\x6d\x73\x61\x00\x00\x00\x00\x01");
-
-// binary.wast:13
-assert_malformed("\x61\x73\x6d\x01\x00\x00\x00\x00");
-
-// binary.wast:14
-assert_malformed("\x77\x61\x73\x6d\x01\x00\x00\x00");
-
-// binary.wast:15
-assert_malformed("\x7f\x61\x73\x6d\x01\x00\x00\x00");
-
-// binary.wast:16
-assert_malformed("\x80\x61\x73\x6d\x01\x00\x00\x00");
-
-// binary.wast:17
-assert_malformed("\x82\x61\x73\x6d\x01\x00\x00\x00");
-
-// binary.wast:18
-assert_malformed("\xff\x61\x73\x6d\x01\x00\x00\x00");
-
-// binary.wast:21
-assert_malformed("\x00\x00\x00\x01\x6d\x73\x61\x00");
-
-// binary.wast:24
-assert_malformed("\x61\x00\x6d\x73\x00\x01\x00\x00");
-
-// binary.wast:25
-assert_malformed("\x73\x6d\x00\x61\x00\x00\x01\x00");
-
-// binary.wast:28
-assert_malformed("\x00\x41\x53\x4d\x01\x00\x00\x00");
-
-// binary.wast:31
-assert_malformed("\x00\x81\xa2\x94\x01\x00\x00\x00");
-
-// binary.wast:34
-assert_malformed("\xef\xbb\xbf\x00\x61\x73\x6d\x01\x00\x00\x00");
-
-// binary.wast:36
-assert_malformed("\x00\x61\x73\x6d");
-
-// binary.wast:37
-assert_malformed("\x00\x61\x73\x6d\x01");
-
-// binary.wast:38
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00");
-
-// binary.wast:39
-assert_malformed("\x00\x61\x73\x6d\x00\x00\x00\x00");
-
-// binary.wast:40
-assert_malformed("\x00\x61\x73\x6d\x0d\x00\x00\x00");
-
-// binary.wast:41
-assert_malformed("\x00\x61\x73\x6d\x0e\x00\x00\x00");
-
-// binary.wast:42
-assert_malformed("\x00\x61\x73\x6d\x00\x01\x00\x00");
-
-// binary.wast:43
-assert_malformed("\x00\x61\x73\x6d\x00\x00\x01\x00");
-
-// binary.wast:44
-assert_malformed("\x00\x61\x73\x6d\x00\x00\x00\x01");
-
-// binary.wast:47
-let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x04\x01\x00\x82\x00");
-
-// binary.wast:52
-let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x07\x01\x00\x82\x80\x80\x80\x00");
-
-// binary.wast:59
-let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x07\x01\x7f\x00\x41\x80\x00\x0b");
-
-// binary.wast:66
-let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x07\x01\x7f\x00\x41\xff\x7f\x0b");
-
-// binary.wast:73
-let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\x80\x80\x80\x80\x00\x0b");
-
-// binary.wast:80
-let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\xff\xff\xff\xff\x7f\x0b");
-
-// binary.wast:88
-let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x07\x01\x7e\x00\x42\x80\x00\x0b");
-
-// binary.wast:95
-let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x07\x01\x7e\x00\x42\xff\x7f\x0b");
-
-// binary.wast:102
-let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\x80\x80\x80\x80\x80\x80\x80\x80\x80\x00\x0b");
-
-// binary.wast:109
-let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\xff\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x0b");
-
-// binary.wast:118
-let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x03\x01\x00\x00\x0b\x07\x01\x80\x00\x41\x00\x0b\x00");
-
-// binary.wast:128
-let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x04\x04\x01\x70\x00\x00\x09\x07\x01\x80\x00\x41\x00\x0b\x00");
-
-// binary.wast:138
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x08\x01\x00\x82\x80\x80\x80\x80\x00");
-
-// binary.wast:146
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x28\x02\x82\x80\x80\x80\x80\x00\x1a\x0b");
-
-// binary.wast:165
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x28\x82\x80\x80\x80\x80\x00\x00\x1a\x0b");
-
-// binary.wast:184
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x12\x01\x10\x01\x01\x7f\x41\x00\x41\x03\x36\x82\x80\x80\x80\x80\x00\x03\x0b");
-
-// binary.wast:203
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x12\x01\x10\x01\x01\x7f\x41\x00\x41\x03\x36\x02\x82\x80\x80\x80\x80\x00\x0b");
-
-// binary.wast:224
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0b\x01\x7f\x00\x41\x80\x80\x80\x80\x80\x00\x0b");
-
-// binary.wast:234
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0b\x01\x7f\x00\x41\xff\xff\xff\xff\xff\x7f\x0b");
-
-// binary.wast:245
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x10\x01\x7e\x00\x42\x80\x80\x80\x80\x80\x80\x80\x80\x80\x80\x00\x0b");
-
-// binary.wast:255
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x10\x01\x7e\x00\x42\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x0b");
-
-// binary.wast:267
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x07\x01\x00\x82\x80\x80\x80\x70");
-
-// binary.wast:275
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x07\x01\x00\x82\x80\x80\x80\x40");
-
-// binary.wast:283
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x10\x01\x0e\x01\x01\x7f\x41\x00\x28\x02\x82\x80\x80\x80\x10\x1a\x0b");
-
-// binary.wast:302
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x10\x01\x0e\x01\x01\x7f\x41\x00\x28\x02\x82\x80\x80\x80\x40\x1a\x0b");
-
-// binary.wast:321
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x10\x01\x0e\x01\x01\x7f\x41\x00\x28\x82\x80\x80\x80\x10\x00\x1a\x0b");
-
-// binary.wast:339
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x10\x01\x0e\x01\x01\x7f\x41\x00\x28\x82\x80\x80\x80\x40\x00\x1a\x0b");
-
-// binary.wast:358
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x41\x03\x36\x82\x80\x80\x80\x10\x03\x0b");
-
-// binary.wast:377
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x41\x03\x36\x82\x80\x80\x80\x40\x03\x0b");
-
-// binary.wast:396
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x41\x03\x36\x03\x82\x80\x80\x80\x10\x0b");
-
-// binary.wast:415
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x41\x03\x36\x02\x82\x80\x80\x80\x40\x0b");
-
-// binary.wast:437
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\x80\x80\x80\x80\x70\x0b");
-
-// binary.wast:447
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\xff\xff\xff\xff\x0f\x0b");
-
-// binary.wast:457
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\x80\x80\x80\x80\x1f\x0b");
-
-// binary.wast:467
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\xff\xff\xff\xff\x4f\x0b");
-
-// binary.wast:478
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7e\x0b");
-
-// binary.wast:488
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x0b");
-
-// binary.wast:498
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\x80\x80\x80\x80\x80\x80\x80\x80\x80\x02\x0b");
-
-// binary.wast:508
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\xff\xff\xff\xff\xff\xff\xff\xff\xff\x41\x0b");
-
-// binary.wast:520
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x09\x01\x07\x00\x41\x00\x11\x00\x01\x0b");
-
-// binary.wast:539
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x0a\x01\x07\x00\x41\x00\x11\x00\x80\x00\x0b");
-
-// binary.wast:558
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x0b\x01\x08\x00\x41\x00\x11\x00\x80\x80\x00\x0b");
-
-// binary.wast:576
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x0c\x01\x09\x00\x41\x00\x11\x00\x80\x80\x80\x00\x0b");
-
-// binary.wast:594
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x0d\x01\x0a\x00\x41\x00\x11\x00\x80\x80\x80\x80\x00\x0b");
-
-// binary.wast:613
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x09\x01\x07\x00\x41\x00\x40\x01\x1a\x0b");
-
-// binary.wast:633
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0a\x01\x08\x00\x41\x00\x40\x80\x00\x1a\x0b");
-
-// binary.wast:653
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0b\x01\x09\x00\x41\x00\x40\x80\x80\x00\x1a\x0b");
-
-// binary.wast:672
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0c\x01\x0a\x00\x41\x00\x40\x80\x80\x80\x00\x1a\x0b");
-
-// binary.wast:691
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0d\x01\x0b\x00\x41\x00\x40\x80\x80\x80\x80\x00\x1a\x0b");
-
-// binary.wast:711
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x07\x01\x05\x00\x3f\x01\x1a\x0b");
-
-// binary.wast:730
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x08\x01\x06\x00\x3f\x80\x00\x1a\x0b");
-
-// binary.wast:749
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x09\x01\x07\x00\x3f\x80\x80\x00\x1a\x0b");
-
-// binary.wast:767
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0a\x01\x08\x00\x3f\x80\x80\x80\x00\x1a\x0b");
-
-// binary.wast:785
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0b\x01\x09\x00\x3f\x80\x80\x80\x80\x00\x1a\x0b");
-
-// binary.wast:804
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x0a\x0c\x01\x0a\x02\xff\xff\xff\xff\x0f\x7f\x02\x7e\x0b");
-
-// binary.wast:821
-let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x0a\x0a\x01\x08\x03\x00\x7f\x00\x7e\x02\x7d\x0b");
-
-// binary.wast:836
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x03\x02\x00\x00");
-
-// binary.wast:846
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x0a\x04\x01\x02\x00\x0b");
-
-// binary.wast:855
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x03\x02\x00\x00\x0a\x04\x01\x02\x00\x0b");
-
-// binary.wast:866
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x0a\x07\x02\x02\x00\x0b\x02\x00\x0b");
-
-// binary.wast:877
-let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x03\x01\x00");
-
-// binary.wast:883
-let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x0a\x01\x00");
-
-// binary.wast:889
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x0c\x01\x03\x0b\x05\x02\x01\x00\x01\x00");
-
-// binary.wast:899
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x0c\x01\x01\x0b\x05\x02\x01\x00\x01\x00");
-
-// binary.wast:909
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0e\x01\x0c\x00\x41\x00\x41\x00\x41\x00\xfc\x08\x00\x00\x0b\x0b\x03\x01\x01\x00");
-
-// binary.wast:931
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x07\x01\x05\x00\xfc\x09\x00\x0b\x0b\x03\x01\x01\x00");
-
-// binary.wast:950
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x05\x03\x01\x00\x00\x09\x07\x01\x01\x70\x01\xd3\x00\x0b\x0a\x04\x01\x02\x00\x0b");
-
-// binary.wast:976
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x05\x03\x01\x00\x00\x09\x07\x01\x01\x7f\x01\xd2\x00\x0b\x0a\x04\x01\x02\x00\x0b");
-
-// binary.wast:1002
-let $20 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x05\x03\x01\x00\x00\x09\x07\x01\x01\x70\x01\xd2\x00\x0b\x0a\x04\x01\x02\x00\x0b");
-
-// binary.wast:1026
-let $21 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x05\x03\x01\x00\x00\x09\x06\x01\x01\x70\x01\xd0\x0b\x0a\x04\x01\x02\x00\x0b");
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast
deleted file mode 100644
index bb71f493d6..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast
+++ /dev/null
@@ -1,308 +0,0 @@
-;; Passive segment syntax
-(module
- (memory 1)
- (data passive "foo"))
-
-(module
- (table 3 funcref)
- (elem passive funcref (ref.func 0) (ref.null) (ref.func 1))
- (func)
- (func))
-
-;; memory.fill
-(module
- (memory 1)
-
- (func (export "fill") (param i32 i32 i32)
- (memory.fill
- (local.get 0)
- (local.get 1)
- (local.get 2)))
-
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0)))
-)
-
-;; Basic fill test.
-(invoke "fill" (i32.const 1) (i32.const 0xff) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xff))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 0xff))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 0xff))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 0))
-
-;; Fill value is stored as a byte.
-(invoke "fill" (i32.const 0) (i32.const 0xbbaa) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0xaa))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xaa))
-
-;; Fill all of memory
-(invoke "fill" (i32.const 0) (i32.const 0) (i32.const 0x10000))
-
-;; Out-of-bounds writes trap, but all previous writes succeed.
-(assert_trap (invoke "fill" (i32.const 0xff00) (i32.const 1) (i32.const 0x101))
- "out of bounds memory access")
-(assert_return (invoke "load8_u" (i32.const 0xff00)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 1))
-
-;; Succeed when writing 0 bytes at the end of the region.
-(invoke "fill" (i32.const 0x10000) (i32.const 0) (i32.const 0))
-
-;; Fail on out-of-bounds when writing 0 bytes outside of memory.
-(assert_trap (invoke "fill" (i32.const 0x10001) (i32.const 0) (i32.const 0))
- "out of bounds memory access")
-
-
-;; memory.copy
-(module
- (memory (data "\aa\bb\cc\dd"))
-
- (func (export "copy") (param i32 i32 i32)
- (memory.copy
- (local.get 0)
- (local.get 1)
- (local.get 2)))
-
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0)))
-)
-
-;; Non-overlapping copy.
-(invoke "copy" (i32.const 10) (i32.const 0) (i32.const 4))
-
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0xaa))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xbb))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xcc))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xdd))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0))
-
-;; Overlap, source > dest
-(invoke "copy" (i32.const 8) (i32.const 10) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0xaa))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0xbb))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0xcc))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xdd))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xcc))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xdd))
-
-;; Overlap, source < dest
-(invoke "copy" (i32.const 10) (i32.const 7) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xaa))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xbb))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xcc))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0xdd))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 0xcc))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 0))
-
-;; Copy ending at memory limit is ok.
-(invoke "copy" (i32.const 0xff00) (i32.const 0) (i32.const 0x100))
-(invoke "copy" (i32.const 0xfe00) (i32.const 0xff00) (i32.const 0x100))
-
-;; Out-of-bounds writes trap, but all previous writes succeed.
-(assert_trap (invoke "copy" (i32.const 0xfffe) (i32.const 0) (i32.const 3))
- "out of bounds memory access")
-(assert_return (invoke "load8_u" (i32.const 0xfffe)) (i32.const 0xaa))
-(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 0xbb))
-
-;; Succeed when copying 0 bytes at the end of the region.
-(invoke "copy" (i32.const 0x10000) (i32.const 0) (i32.const 0))
-(invoke "copy" (i32.const 0) (i32.const 0x10000) (i32.const 0))
-
-;; Fail on out-of-bounds when copying 0 bytes outside of memory.
-(assert_trap (invoke "copy" (i32.const 0x10001) (i32.const 0) (i32.const 0))
- "out of bounds memory access")
-(assert_trap (invoke "copy" (i32.const 0) (i32.const 0x10001) (i32.const 0))
- "out of bounds memory access")
-
-
-;; memory.init
-(module
- (memory 1)
- (data passive "\aa\bb\cc\dd")
-
- (func (export "init") (param i32 i32 i32)
- (memory.init 0
- (local.get 0)
- (local.get 1)
- (local.get 2)))
-
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0)))
-)
-
-(invoke "init" (i32.const 0) (i32.const 1) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0xbb))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xcc))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 0))
-
-;; Init ending at memory limit and segment limit is ok.
-(invoke "init" (i32.const 0xfffc) (i32.const 0) (i32.const 4))
-
-;; Out-of-bounds writes trap, but all previous writes succeed.
-(assert_trap (invoke "init" (i32.const 0xfffe) (i32.const 0) (i32.const 3))
- "out of bounds memory access")
-(assert_return (invoke "load8_u" (i32.const 0xfffe)) (i32.const 0xaa))
-(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 0xbb))
-
-;; Succeed when writing 0 bytes at the end of either region.
-(invoke "init" (i32.const 0x10000) (i32.const 0) (i32.const 0))
-(invoke "init" (i32.const 0) (i32.const 4) (i32.const 0))
-
-;; Fail on out-of-bounds when writing 0 bytes outside of memory or segment.
-(assert_trap (invoke "init" (i32.const 0x10001) (i32.const 0) (i32.const 0))
- "out of bounds memory access")
-(assert_trap (invoke "init" (i32.const 0) (i32.const 5) (i32.const 0))
- "out of bounds memory access")
-
-;; data.drop
-(module
- (memory 1)
- (data $p passive "")
- (data $a 0 (i32.const 0) "")
-
- (func (export "drop_passive") (data.drop $p))
- (func (export "init_passive")
- (memory.init $p (i32.const 0) (i32.const 0) (i32.const 0)))
-
- (func (export "drop_active") (data.drop $a))
- (func (export "init_active")
- (memory.init $a (i32.const 0) (i32.const 0) (i32.const 0)))
-)
-
-(invoke "init_passive")
-(invoke "drop_passive")
-(assert_trap (invoke "drop_passive") "data segment dropped")
-(assert_trap (invoke "init_passive") "data segment dropped")
-(assert_trap (invoke "drop_active") "data segment dropped")
-(assert_trap (invoke "init_active") "data segment dropped")
-
-
-;; table.init
-(module
- (table 3 funcref)
- (elem passive funcref
- (ref.func $zero) (ref.func $one) (ref.func $zero) (ref.func $one))
-
- (func $zero (result i32) (i32.const 0))
- (func $one (result i32) (i32.const 1))
-
- (func (export "init") (param i32 i32 i32)
- (table.init 0
- (local.get 0)
- (local.get 1)
- (local.get 2)))
-
- (func (export "call") (param i32) (result i32)
- (call_indirect (result i32)
- (local.get 0)))
-)
-
-(invoke "init" (i32.const 0) (i32.const 1) (i32.const 2))
-(assert_return (invoke "call" (i32.const 0)) (i32.const 1))
-(assert_return (invoke "call" (i32.const 1)) (i32.const 0))
-(assert_trap (invoke "call" (i32.const 2)) "uninitialized element")
-
-;; Init ending at table limit and segment limit is ok.
-(invoke "init" (i32.const 1) (i32.const 2) (i32.const 2))
-
-;; Out-of-bounds stores trap, but all previous stores succeed.
-(assert_trap (invoke "init" (i32.const 2) (i32.const 0) (i32.const 2))
- "out of bounds table access")
-(assert_return (invoke "call" (i32.const 2)) (i32.const 0))
-
-;; Succeed when storing 0 elements at the end of either region.
-(invoke "init" (i32.const 3) (i32.const 0) (i32.const 0))
-(invoke "init" (i32.const 0) (i32.const 4) (i32.const 0))
-
-;; Fail on out-of-bounds when storing 0 elements outside of table or segment.
-(assert_trap (invoke "init" (i32.const 4) (i32.const 0) (i32.const 0))
- "out of bounds table access")
-(assert_trap (invoke "init" (i32.const 0) (i32.const 5) (i32.const 0))
- "out of bounds table access")
-
-
-;; elem.drop
-(module
- (table 1 funcref)
- (func $f)
- (elem $p passive funcref (ref.func $f))
- (elem $a 0 (i32.const 0) $f)
-
- (func (export "drop_passive") (elem.drop $p))
- (func (export "init_passive")
- (table.init $p (i32.const 0) (i32.const 0) (i32.const 0)))
-
- (func (export "drop_active") (elem.drop $a))
- (func (export "init_active")
- (table.init $a (i32.const 0) (i32.const 0) (i32.const 0)))
-)
-
-(invoke "init_passive")
-(invoke "drop_passive")
-(assert_trap (invoke "drop_passive") "element segment dropped")
-(assert_trap (invoke "init_passive") "element segment dropped")
-(assert_trap (invoke "drop_active") "element segment dropped")
-(assert_trap (invoke "init_active") "element segment dropped")
-
-
-;; table.copy
-(module
- (table 10 funcref)
- (elem (i32.const 0) $zero $one $two)
- (func $zero (result i32) (i32.const 0))
- (func $one (result i32) (i32.const 1))
- (func $two (result i32) (i32.const 2))
-
- (func (export "copy") (param i32 i32 i32)
- (table.copy
- (local.get 0)
- (local.get 1)
- (local.get 2)))
-
- (func (export "call") (param i32) (result i32)
- (call_indirect (result i32)
- (local.get 0)))
-)
-
-;; Non-overlapping copy.
-(invoke "copy" (i32.const 3) (i32.const 0) (i32.const 3))
-;; Now [$zero, $one, $two, $zero, $one, $two, ...]
-(assert_return (invoke "call" (i32.const 3)) (i32.const 0))
-(assert_return (invoke "call" (i32.const 4)) (i32.const 1))
-(assert_return (invoke "call" (i32.const 5)) (i32.const 2))
-
-;; Overlap, source > dest
-(invoke "copy" (i32.const 0) (i32.const 1) (i32.const 3))
-;; Now [$one, $two, $zero, $zero, $one, $two, ...]
-(assert_return (invoke "call" (i32.const 0)) (i32.const 1))
-(assert_return (invoke "call" (i32.const 1)) (i32.const 2))
-(assert_return (invoke "call" (i32.const 2)) (i32.const 0))
-
-;; Overlap, source < dest
-(invoke "copy" (i32.const 2) (i32.const 0) (i32.const 3))
-;; Now [$one, $two, $one, $two, $zero, $two, ...]
-(assert_return (invoke "call" (i32.const 2)) (i32.const 1))
-(assert_return (invoke "call" (i32.const 3)) (i32.const 2))
-(assert_return (invoke "call" (i32.const 4)) (i32.const 0))
-
-;; Copy ending at table limit is ok.
-(invoke "copy" (i32.const 6) (i32.const 8) (i32.const 2))
-(invoke "copy" (i32.const 8) (i32.const 6) (i32.const 2))
-
-;; Out-of-bounds writes trap, but all previous writes succeed.
-(assert_trap (invoke "call" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "copy" (i32.const 9) (i32.const 0) (i32.const 2))
- "out of bounds table access")
-(assert_return (invoke "call" (i32.const 9)) (i32.const 1))
-
-;; Succeed when copying 0 elements at the end of the region.
-(invoke "copy" (i32.const 10) (i32.const 0) (i32.const 0))
-(invoke "copy" (i32.const 0) (i32.const 10) (i32.const 0))
-
-;; Fail on out-of-bounds when copying 0 elements outside of table.
-(assert_trap (invoke "copy" (i32.const 11) (i32.const 0) (i32.const 0))
- "out of bounds table access")
-(assert_trap (invoke "copy" (i32.const 0) (i32.const 11) (i32.const 0))
- "out of bounds table access")
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js
deleted file mode 100644
index 294aca0c5c..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js
+++ /dev/null
@@ -1,470 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// bulk.wast:2
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x0b\x86\x80\x80\x80\x00\x01\x01\x03\x66\x6f\x6f");
-
-// bulk.wast:6
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x03\x09\x8c\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd0\x0b\xd2\x01\x0b\x0a\x8f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x82\x80\x80\x80\x00\x00\x0b");
-
-// bulk.wast:13
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x92\x80\x80\x80\x00\x02\x04\x66\x69\x6c\x6c\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9d\x80\x80\x80\x00\x02\x8b\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0b\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b");
-
-// bulk.wast:27
-run(() => call($3, "fill", [1, 255, 3]));
-
-// bulk.wast:28
-assert_return(() => call($3, "load8_u", [0]), 0);
-
-// bulk.wast:29
-assert_return(() => call($3, "load8_u", [1]), 255);
-
-// bulk.wast:30
-assert_return(() => call($3, "load8_u", [2]), 255);
-
-// bulk.wast:31
-assert_return(() => call($3, "load8_u", [3]), 255);
-
-// bulk.wast:32
-assert_return(() => call($3, "load8_u", [4]), 0);
-
-// bulk.wast:35
-run(() => call($3, "fill", [0, 48042, 2]));
-
-// bulk.wast:36
-assert_return(() => call($3, "load8_u", [0]), 170);
-
-// bulk.wast:37
-assert_return(() => call($3, "load8_u", [1]), 170);
-
-// bulk.wast:40
-run(() => call($3, "fill", [0, 0, 65536]));
-
-// bulk.wast:43
-assert_trap(() => call($3, "fill", [65280, 1, 257]));
-
-// bulk.wast:45
-assert_return(() => call($3, "load8_u", [65280]), 1);
-
-// bulk.wast:46
-assert_return(() => call($3, "load8_u", [65535]), 1);
-
-// bulk.wast:49
-run(() => call($3, "fill", [65536, 0, 0]));
-
-// bulk.wast:52
-assert_trap(() => call($3, "fill", [65537, 0, 0]));
-
-// bulk.wast:57
-let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x92\x80\x80\x80\x00\x02\x04\x63\x6f\x70\x79\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x8a\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x04\xaa\xbb\xcc\xdd");
-
-// bulk.wast:71
-run(() => call($4, "copy", [10, 0, 4]));
-
-// bulk.wast:73
-assert_return(() => call($4, "load8_u", [9]), 0);
-
-// bulk.wast:74
-assert_return(() => call($4, "load8_u", [10]), 170);
-
-// bulk.wast:75
-assert_return(() => call($4, "load8_u", [11]), 187);
-
-// bulk.wast:76
-assert_return(() => call($4, "load8_u", [12]), 204);
-
-// bulk.wast:77
-assert_return(() => call($4, "load8_u", [13]), 221);
-
-// bulk.wast:78
-assert_return(() => call($4, "load8_u", [14]), 0);
-
-// bulk.wast:81
-run(() => call($4, "copy", [8, 10, 4]));
-
-// bulk.wast:82
-assert_return(() => call($4, "load8_u", [8]), 170);
-
-// bulk.wast:83
-assert_return(() => call($4, "load8_u", [9]), 187);
-
-// bulk.wast:84
-assert_return(() => call($4, "load8_u", [10]), 204);
-
-// bulk.wast:85
-assert_return(() => call($4, "load8_u", [11]), 221);
-
-// bulk.wast:86
-assert_return(() => call($4, "load8_u", [12]), 204);
-
-// bulk.wast:87
-assert_return(() => call($4, "load8_u", [13]), 221);
-
-// bulk.wast:90
-run(() => call($4, "copy", [10, 7, 6]));
-
-// bulk.wast:91
-assert_return(() => call($4, "load8_u", [10]), 0);
-
-// bulk.wast:92
-assert_return(() => call($4, "load8_u", [11]), 170);
-
-// bulk.wast:93
-assert_return(() => call($4, "load8_u", [12]), 187);
-
-// bulk.wast:94
-assert_return(() => call($4, "load8_u", [13]), 204);
-
-// bulk.wast:95
-assert_return(() => call($4, "load8_u", [14]), 221);
-
-// bulk.wast:96
-assert_return(() => call($4, "load8_u", [15]), 204);
-
-// bulk.wast:97
-assert_return(() => call($4, "load8_u", [16]), 0);
-
-// bulk.wast:100
-run(() => call($4, "copy", [65280, 0, 256]));
-
-// bulk.wast:101
-run(() => call($4, "copy", [65024, 65280, 256]));
-
-// bulk.wast:104
-assert_trap(() => call($4, "copy", [65534, 0, 3]));
-
-// bulk.wast:106
-assert_return(() => call($4, "load8_u", [65534]), 170);
-
-// bulk.wast:107
-assert_return(() => call($4, "load8_u", [65535]), 187);
-
-// bulk.wast:110
-run(() => call($4, "copy", [65536, 0, 0]));
-
-// bulk.wast:111
-run(() => call($4, "copy", [0, 65536, 0]));
-
-// bulk.wast:114
-assert_trap(() => call($4, "copy", [65537, 0, 0]));
-
-// bulk.wast:116
-assert_trap(() => call($4, "copy", [0, 65537, 0]));
-
-// bulk.wast:121
-let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x92\x80\x80\x80\x00\x02\x04\x69\x6e\x69\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x08\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x87\x80\x80\x80\x00\x01\x01\x04\xaa\xbb\xcc\xdd");
-
-// bulk.wast:135
-run(() => call($5, "init", [0, 1, 2]));
-
-// bulk.wast:136
-assert_return(() => call($5, "load8_u", [0]), 187);
-
-// bulk.wast:137
-assert_return(() => call($5, "load8_u", [1]), 204);
-
-// bulk.wast:138
-assert_return(() => call($5, "load8_u", [2]), 0);
-
-// bulk.wast:141
-run(() => call($5, "init", [65532, 0, 4]));
-
-// bulk.wast:144
-assert_trap(() => call($5, "init", [65534, 0, 3]));
-
-// bulk.wast:146
-assert_return(() => call($5, "load8_u", [65534]), 170);
-
-// bulk.wast:147
-assert_return(() => call($5, "load8_u", [65535]), 187);
-
-// bulk.wast:150
-run(() => call($5, "init", [65536, 0, 0]));
-
-// bulk.wast:151
-run(() => call($5, "init", [0, 4, 0]));
-
-// bulk.wast:154
-assert_trap(() => call($5, "init", [65537, 0, 0]));
-
-// bulk.wast:156
-assert_trap(() => call($5, "init", [0, 5, 0]));
-
-// bulk.wast:160
-let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x85\x80\x80\x80\x00\x04\x00\x00\x00\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\xbb\x80\x80\x80\x00\x04\x0c\x64\x72\x6f\x70\x5f\x70\x61\x73\x73\x69\x76\x65\x00\x00\x0c\x69\x6e\x69\x74\x5f\x70\x61\x73\x73\x69\x76\x65\x00\x01\x0b\x64\x72\x6f\x70\x5f\x61\x63\x74\x69\x76\x65\x00\x02\x0b\x69\x6e\x69\x74\x5f\x61\x63\x74\x69\x76\x65\x00\x03\x0c\x81\x80\x80\x80\x00\x02\x0a\xb7\x80\x80\x80\x00\x04\x85\x80\x80\x80\x00\x00\xfc\x09\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x00\x41\x00\x41\x00\xfc\x08\x00\x00\x0b\x85\x80\x80\x80\x00\x00\xfc\x09\x01\x0b\x8c\x80\x80\x80\x00\x00\x41\x00\x41\x00\x41\x00\xfc\x08\x01\x00\x0b\x0b\x88\x80\x80\x80\x00\x02\x01\x00\x00\x41\x00\x0b\x00");
-
-// bulk.wast:174
-run(() => call($6, "init_passive", []));
-
-// bulk.wast:175
-run(() => call($6, "drop_passive", []));
-
-// bulk.wast:176
-assert_trap(() => call($6, "drop_passive", []));
-
-// bulk.wast:177
-assert_trap(() => call($6, "init_passive", []));
-
-// bulk.wast:178
-assert_trap(() => call($6, "drop_active", []));
-
-// bulk.wast:179
-assert_trap(() => call($6, "init_active", []));
-
-// bulk.wast:183
-let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x85\x80\x80\x80\x00\x04\x00\x00\x01\x02\x04\x84\x80\x80\x80\x00\x01\x70\x00\x03\x07\x8f\x80\x80\x80\x00\x02\x04\x69\x6e\x69\x74\x00\x02\x04\x63\x61\x6c\x6c\x00\x03\x09\x90\x80\x80\x80\x00\x01\x01\x70\x04\xd2\x00\x0b\xd2\x01\x0b\xd2\x00\x0b\xd2\x01\x0b\x0a\xb0\x80\x80\x80\x00\x04\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0c\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// bulk.wast:202
-run(() => call($7, "init", [0, 1, 2]));
-
-// bulk.wast:203
-assert_return(() => call($7, "call", [0]), 1);
-
-// bulk.wast:204
-assert_return(() => call($7, "call", [1]), 0);
-
-// bulk.wast:205
-assert_trap(() => call($7, "call", [2]));
-
-// bulk.wast:208
-run(() => call($7, "init", [1, 2, 2]));
-
-// bulk.wast:211
-assert_trap(() => call($7, "init", [2, 0, 2]));
-
-// bulk.wast:213
-assert_return(() => call($7, "call", [2]), 0);
-
-// bulk.wast:216
-run(() => call($7, "init", [3, 0, 0]));
-
-// bulk.wast:217
-run(() => call($7, "init", [0, 4, 0]));
-
-// bulk.wast:220
-assert_trap(() => call($7, "init", [4, 0, 0]));
-
-// bulk.wast:222
-assert_trap(() => call($7, "init", [0, 5, 0]));
-
-// bulk.wast:227
-let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x86\x80\x80\x80\x00\x05\x00\x00\x00\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x01\x07\xbb\x80\x80\x80\x00\x04\x0c\x64\x72\x6f\x70\x5f\x70\x61\x73\x73\x69\x76\x65\x00\x01\x0c\x69\x6e\x69\x74\x5f\x70\x61\x73\x73\x69\x76\x65\x00\x02\x0b\x64\x72\x6f\x70\x5f\x61\x63\x74\x69\x76\x65\x00\x03\x0b\x69\x6e\x69\x74\x5f\x61\x63\x74\x69\x76\x65\x00\x04\x09\x8d\x80\x80\x80\x00\x02\x01\x70\x01\xd2\x00\x0b\x00\x41\x00\x0b\x01\x00\x0a\xbe\x80\x80\x80\x00\x05\x82\x80\x80\x80\x00\x00\x0b\x85\x80\x80\x80\x00\x00\xfc\x0d\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x00\x41\x00\x41\x00\xfc\x0c\x00\x00\x0b\x85\x80\x80\x80\x00\x00\xfc\x0d\x01\x0b\x8c\x80\x80\x80\x00\x00\x41\x00\x41\x00\x41\x00\xfc\x0c\x01\x00\x0b");
-
-// bulk.wast:242
-run(() => call($8, "init_passive", []));
-
-// bulk.wast:243
-run(() => call($8, "drop_passive", []));
-
-// bulk.wast:244
-assert_trap(() => call($8, "drop_passive", []));
-
-// bulk.wast:245
-assert_trap(() => call($8, "init_passive", []));
-
-// bulk.wast:246
-assert_trap(() => call($8, "drop_active", []));
-
-// bulk.wast:247
-assert_trap(() => call($8, "init_active", []));
-
-// bulk.wast:251
-let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x86\x80\x80\x80\x00\x05\x00\x00\x00\x01\x02\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x8f\x80\x80\x80\x00\x02\x04\x63\x6f\x70\x79\x00\x03\x04\x63\x61\x6c\x6c\x00\x04\x09\x89\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x03\x00\x01\x02\x0a\xb9\x80\x80\x80\x00\x05\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// bulk.wast:270
-run(() => call($9, "copy", [3, 0, 3]));
-
-// bulk.wast:272
-assert_return(() => call($9, "call", [3]), 0);
-
-// bulk.wast:273
-assert_return(() => call($9, "call", [4]), 1);
-
-// bulk.wast:274
-assert_return(() => call($9, "call", [5]), 2);
-
-// bulk.wast:277
-run(() => call($9, "copy", [0, 1, 3]));
-
-// bulk.wast:279
-assert_return(() => call($9, "call", [0]), 1);
-
-// bulk.wast:280
-assert_return(() => call($9, "call", [1]), 2);
-
-// bulk.wast:281
-assert_return(() => call($9, "call", [2]), 0);
-
-// bulk.wast:284
-run(() => call($9, "copy", [2, 0, 3]));
-
-// bulk.wast:286
-assert_return(() => call($9, "call", [2]), 1);
-
-// bulk.wast:287
-assert_return(() => call($9, "call", [3]), 2);
-
-// bulk.wast:288
-assert_return(() => call($9, "call", [4]), 0);
-
-// bulk.wast:291
-run(() => call($9, "copy", [6, 8, 2]));
-
-// bulk.wast:292
-run(() => call($9, "copy", [8, 6, 2]));
-
-// bulk.wast:295
-assert_trap(() => call($9, "call", [9]));
-
-// bulk.wast:296
-assert_trap(() => call($9, "copy", [9, 0, 2]));
-
-// bulk.wast:298
-assert_return(() => call($9, "call", [9]), 1);
-
-// bulk.wast:301
-run(() => call($9, "copy", [10, 0, 0]));
-
-// bulk.wast:302
-run(() => call($9, "copy", [0, 10, 0]));
-
-// bulk.wast:305
-assert_trap(() => call($9, "copy", [11, 0, 0]));
-
-// bulk.wast:307
-assert_trap(() => call($9, "copy", [0, 11, 0]));
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast
deleted file mode 100644
index 0310f76b54..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast
+++ /dev/null
@@ -1,130 +0,0 @@
-(module binary
- "\00asm" "\01\00\00\00"
- "\00\24\10" "a custom section" "this is the payload"
- "\00\20\10" "a custom section" "this is payload"
- "\00\11\10" "a custom section" ""
- "\00\10\00" "" "this is payload"
- "\00\01\00" "" ""
- "\00\24\10" "\00\00custom sectio\00" "this is the payload"
- "\00\24\10" "\ef\bb\bfa custom sect" "this is the payload"
- "\00\24\10" "a custom sect\e2\8c\a3" "this is the payload"
- "\00\1f\16" "module within a module" "\00asm" "\01\00\00\00"
-)
-
-(module binary
- "\00asm" "\01\00\00\00"
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\01\01\00" ;; type section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\02\01\00" ;; import section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\03\01\00" ;; function section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\04\01\00" ;; table section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\05\01\00" ;; memory section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\06\01\00" ;; global section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\07\01\00" ;; export section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\09\01\00" ;; element section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\0a\01\00" ;; code section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
- "\0b\01\00" ;; data section
- "\00\0e\06" "custom" "payload"
- "\00\0e\06" "custom" "payload"
-)
-
-(module binary
- "\00asm" "\01\00\00\00"
- "\01\07\01\60\02\7f\7f\01\7f" ;; type section
- "\00\1a\06" "custom" "this is the payload" ;; custom section
- "\03\02\01\00" ;; function section
- "\07\0a\01\06\61\64\64\54\77\6f\00\00" ;; export section
- "\0a\09\01\07\00\20\00\20\01\6a\0b" ;; code section
- "\00\1b\07" "custom2" "this is the payload" ;; custom section
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\00"
- )
- "unexpected end"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\00\00"
- )
- "unexpected end"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\00\00\00\05\01\00\07\00\00"
- )
- "unexpected end"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\00\26\10" "a custom section" "this is the payload"
- )
- "unexpected end"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\00\25\10" "a custom section" "this is the payload"
- "\00\24\10" "a custom section" "this is the payload"
- )
- "invalid section id"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\01\07\01\60\02\7f\7f\01\7f" ;; type section
- "\00\25\10" "a custom section" "this is the payload" ;; invalid length!
- "\03\02\01\00" ;; function section
- "\0a\09\01\07\00\20\00\20\01\6a\0b" ;; code section
- "\00\1b\07" "custom2" "this is the payload" ;; custom section
- )
- "function and code section have inconsistent lengths"
-)
-
-;; Test concatenated modules.
-(assert_malformed
- (module binary
- "\00asm\01\00\00\00"
- "\00asm\01\00\00\00"
- )
- "length out of bounds"
-)
-
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\05\03\01\00\01" ;; memory section
- "\0c\01\02" ;; data count section (2 segments)
- "\0b\06\01\00\41\00\0b\00" ;; data section (1 segment)
- )
- "data count and data section have inconsistent lengths"
-)
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js
deleted file mode 100644
index d078b10fee..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js
+++ /dev/null
@@ -1,170 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// custom.wast:1
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x24\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x20\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x11\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x00\x10\x00\x74\x68\x69\x73\x20\x69\x73\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x01\x00\x00\x24\x10\x00\x00\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x00\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x24\x10\xef\xbb\xbf\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x24\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\xe2\x8c\xa3\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x1f\x16\x6d\x6f\x64\x75\x6c\x65\x20\x77\x69\x74\x68\x69\x6e\x20\x61\x20\x6d\x6f\x64\x75\x6c\x65\x00\x61\x73\x6d\x01\x00\x00\x00");
-
-// custom.wast:14
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x01\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x02\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x03\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x04\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x05\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x06\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x07\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x09\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x0a\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x0b\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64");
-
-// custom.wast:50
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x07\x01\x60\x02\x7f\x7f\x01\x7f\x00\x1a\x06\x63\x75\x73\x74\x6f\x6d\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x03\x02\x01\x00\x07\x0a\x01\x06\x61\x64\x64\x54\x77\x6f\x00\x00\x0a\x09\x01\x07\x00\x20\x00\x20\x01\x6a\x0b\x00\x1b\x07\x63\x75\x73\x74\x6f\x6d\x32\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64");
-
-// custom.wast:60
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00");
-
-// custom.wast:68
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x00");
-
-// custom.wast:76
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x00\x00\x05\x01\x00\x07\x00\x00");
-
-// custom.wast:84
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x26\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64");
-
-// custom.wast:92
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x25\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x24\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64");
-
-// custom.wast:101
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x07\x01\x60\x02\x7f\x7f\x01\x7f\x00\x25\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x03\x02\x01\x00\x0a\x09\x01\x07\x00\x20\x00\x20\x01\x6a\x0b\x00\x1b\x07\x63\x75\x73\x74\x6f\x6d\x32\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64");
-
-// custom.wast:114
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x61\x73\x6d\x01\x00\x00\x00");
-
-// custom.wast:122
-assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x03\x01\x00\x01\x0c\x01\x02\x0b\x06\x01\x00\x41\x00\x0b\x00");
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast
deleted file mode 100644
index 5edb6eb87d..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast
+++ /dev/null
@@ -1,392 +0,0 @@
-;; Functions
-
-(module $Mf
- (func (export "call") (result i32) (call $g))
- (func $g (result i32) (i32.const 2))
-)
-(register "Mf" $Mf)
-
-(module $Nf
- (func $f (import "Mf" "call") (result i32))
- (export "Mf.call" (func $f))
- (func (export "call Mf.call") (result i32) (call $f))
- (func (export "call") (result i32) (call $g))
- (func $g (result i32) (i32.const 3))
-)
-
-(assert_return (invoke $Mf "call") (i32.const 2))
-(assert_return (invoke $Nf "Mf.call") (i32.const 2))
-(assert_return (invoke $Nf "call") (i32.const 3))
-(assert_return (invoke $Nf "call Mf.call") (i32.const 2))
-
-(module
- (import "spectest" "print_i32" (func $f (param i32)))
- (export "print" (func $f))
-)
-(register "reexport_f")
-(assert_unlinkable
- (module (import "reexport_f" "print" (func (param i64))))
- "incompatible import type"
-)
-(assert_unlinkable
- (module (import "reexport_f" "print" (func (param i32) (result i32))))
- "incompatible import type"
-)
-
-
-;; Globals
-
-(module $Mg
- (global $glob (export "glob") i32 (i32.const 42))
- (func (export "get") (result i32) (global.get $glob))
-
- ;; export mutable globals
- (global $mut_glob (export "mut_glob") (mut i32) (i32.const 142))
- (func (export "get_mut") (result i32) (global.get $mut_glob))
- (func (export "set_mut") (param i32) (global.set $mut_glob (local.get 0)))
-)
-(register "Mg" $Mg)
-
-(module $Ng
- (global $x (import "Mg" "glob") i32)
- (global $mut_glob (import "Mg" "mut_glob") (mut i32))
- (func $f (import "Mg" "get") (result i32))
- (func $get_mut (import "Mg" "get_mut") (result i32))
- (func $set_mut (import "Mg" "set_mut") (param i32))
-
- (export "Mg.glob" (global $x))
- (export "Mg.get" (func $f))
- (global $glob (export "glob") i32 (i32.const 43))
- (func (export "get") (result i32) (global.get $glob))
-
- (export "Mg.mut_glob" (global $mut_glob))
- (export "Mg.get_mut" (func $get_mut))
- (export "Mg.set_mut" (func $set_mut))
-)
-
-(assert_return (get $Mg "glob") (i32.const 42))
-(assert_return (get $Ng "Mg.glob") (i32.const 42))
-(assert_return (get $Ng "glob") (i32.const 43))
-(assert_return (invoke $Mg "get") (i32.const 42))
-(assert_return (invoke $Ng "Mg.get") (i32.const 42))
-(assert_return (invoke $Ng "get") (i32.const 43))
-
-(assert_return (get $Mg "mut_glob") (i32.const 142))
-(assert_return (get $Ng "Mg.mut_glob") (i32.const 142))
-(assert_return (invoke $Mg "get_mut") (i32.const 142))
-(assert_return (invoke $Ng "Mg.get_mut") (i32.const 142))
-
-(assert_return (invoke $Mg "set_mut" (i32.const 241)))
-(assert_return (get $Mg "mut_glob") (i32.const 241))
-(assert_return (get $Ng "Mg.mut_glob") (i32.const 241))
-(assert_return (invoke $Mg "get_mut") (i32.const 241))
-(assert_return (invoke $Ng "Mg.get_mut") (i32.const 241))
-
-
-(assert_unlinkable
- (module (import "Mg" "mut_glob" (global i32)))
- "incompatible import type"
-)
-(assert_unlinkable
- (module (import "Mg" "glob" (global (mut i32))))
- "incompatible import type"
-)
-
-;; Tables
-
-(module $Mt
- (type (func (result i32)))
- (type (func))
-
- (table (export "tab") 10 funcref)
- (elem (i32.const 2) $g $g $g $g)
- (func $g (result i32) (i32.const 4))
- (func (export "h") (result i32) (i32.const -4))
-
- (func (export "call") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0))
- )
-)
-(register "Mt" $Mt)
-
-(module $Nt
- (type (func))
- (type (func (result i32)))
-
- (func $f (import "Mt" "call") (param i32) (result i32))
- (func $h (import "Mt" "h") (result i32))
-
- (table funcref (elem $g $g $g $h $f))
- (func $g (result i32) (i32.const 5))
-
- (export "Mt.call" (func $f))
- (func (export "call Mt.call") (param i32) (result i32)
- (call $f (local.get 0))
- )
- (func (export "call") (param i32) (result i32)
- (call_indirect (type 1) (local.get 0))
- )
-)
-
-(assert_return (invoke $Mt "call" (i32.const 2)) (i32.const 4))
-(assert_return (invoke $Nt "Mt.call" (i32.const 2)) (i32.const 4))
-(assert_return (invoke $Nt "call" (i32.const 2)) (i32.const 5))
-(assert_return (invoke $Nt "call Mt.call" (i32.const 2)) (i32.const 4))
-
-(assert_trap (invoke $Mt "call" (i32.const 1)) "uninitialized")
-(assert_trap (invoke $Nt "Mt.call" (i32.const 1)) "uninitialized")
-(assert_return (invoke $Nt "call" (i32.const 1)) (i32.const 5))
-(assert_trap (invoke $Nt "call Mt.call" (i32.const 1)) "uninitialized")
-
-(assert_trap (invoke $Mt "call" (i32.const 0)) "uninitialized")
-(assert_trap (invoke $Nt "Mt.call" (i32.const 0)) "uninitialized")
-(assert_return (invoke $Nt "call" (i32.const 0)) (i32.const 5))
-(assert_trap (invoke $Nt "call Mt.call" (i32.const 0)) "uninitialized")
-
-(assert_trap (invoke $Mt "call" (i32.const 20)) "undefined")
-(assert_trap (invoke $Nt "Mt.call" (i32.const 20)) "undefined")
-(assert_trap (invoke $Nt "call" (i32.const 7)) "undefined")
-(assert_trap (invoke $Nt "call Mt.call" (i32.const 20)) "undefined")
-
-(assert_return (invoke $Nt "call" (i32.const 3)) (i32.const -4))
-(assert_trap (invoke $Nt "call" (i32.const 4)) "indirect call")
-
-(module $Ot
- (type (func (result i32)))
-
- (func $h (import "Mt" "h") (result i32))
- (table (import "Mt" "tab") 5 funcref)
- (elem (i32.const 1) $i $h)
- (func $i (result i32) (i32.const 6))
-
- (func (export "call") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0))
- )
-)
-
-(assert_return (invoke $Mt "call" (i32.const 3)) (i32.const 4))
-(assert_return (invoke $Nt "Mt.call" (i32.const 3)) (i32.const 4))
-(assert_return (invoke $Nt "call Mt.call" (i32.const 3)) (i32.const 4))
-(assert_return (invoke $Ot "call" (i32.const 3)) (i32.const 4))
-
-(assert_return (invoke $Mt "call" (i32.const 2)) (i32.const -4))
-(assert_return (invoke $Nt "Mt.call" (i32.const 2)) (i32.const -4))
-(assert_return (invoke $Nt "call" (i32.const 2)) (i32.const 5))
-(assert_return (invoke $Nt "call Mt.call" (i32.const 2)) (i32.const -4))
-(assert_return (invoke $Ot "call" (i32.const 2)) (i32.const -4))
-
-(assert_return (invoke $Mt "call" (i32.const 1)) (i32.const 6))
-(assert_return (invoke $Nt "Mt.call" (i32.const 1)) (i32.const 6))
-(assert_return (invoke $Nt "call" (i32.const 1)) (i32.const 5))
-(assert_return (invoke $Nt "call Mt.call" (i32.const 1)) (i32.const 6))
-(assert_return (invoke $Ot "call" (i32.const 1)) (i32.const 6))
-
-(assert_trap (invoke $Mt "call" (i32.const 0)) "uninitialized")
-(assert_trap (invoke $Nt "Mt.call" (i32.const 0)) "uninitialized")
-(assert_return (invoke $Nt "call" (i32.const 0)) (i32.const 5))
-(assert_trap (invoke $Nt "call Mt.call" (i32.const 0)) "uninitialized")
-(assert_trap (invoke $Ot "call" (i32.const 0)) "uninitialized")
-
-(assert_trap (invoke $Ot "call" (i32.const 20)) "undefined")
-
-(module
- (table (import "Mt" "tab") 0 funcref)
- (elem (i32.const 9) $f)
- (func $f)
-)
-
-(module $G1 (global (export "g") i32 (i32.const 5)))
-(register "G1" $G1)
-(module $G2
- (global (import "G1" "g") i32)
- (global (export "g") i32 (global.get 0))
-)
-(assert_return (get $G2 "g") (i32.const 5))
-
-(assert_unlinkable
- (module
- (table (import "Mt" "tab") 0 funcref)
- (elem (i32.const 10) $f)
- (func $f)
- )
- "elements segment does not fit"
-)
-
-(assert_unlinkable
- (module
- (table (import "Mt" "tab") 10 funcref)
- (memory (import "Mt" "mem") 1) ;; does not exist
- (func $f (result i32) (i32.const 0))
- (elem (i32.const 7) $f)
- (elem (i32.const 9) $f)
- )
- "unknown import"
-)
-(assert_trap (invoke $Mt "call" (i32.const 7)) "uninitialized")
-
-;; Unlike in the v1 spec, the elements stored before an out-of-bounds access
-;; persist after the instantiation failure.
-(assert_unlinkable
- (module
- (table (import "Mt" "tab") 10 funcref)
- (func $f (result i32) (i32.const 0))
- (elem (i32.const 7) $f)
- (elem (i32.const 12) $f) ;; out of bounds
- )
- "elements segment does not fit"
-)
-(assert_return (invoke $Mt "call" (i32.const 7)) (i32.const 0))
-
-(assert_unlinkable
- (module
- (table (import "Mt" "tab") 10 funcref)
- (func $f (result i32) (i32.const 0))
- (elem (i32.const 7) $f)
- (memory 1)
- (data (i32.const 0x10000) "d") ;; out of bounds
- )
- "data segment does not fit"
-)
-(assert_return (invoke $Mt "call" (i32.const 7)) (i32.const 0))
-
-
-;; Memories
-
-(module $Mm
- (memory (export "mem") 1 5)
- (data (i32.const 10) "\00\01\02\03\04\05\06\07\08\09")
-
- (func (export "load") (param $a i32) (result i32)
- (i32.load8_u (local.get 0))
- )
-)
-(register "Mm" $Mm)
-
-(module $Nm
- (func $loadM (import "Mm" "load") (param i32) (result i32))
-
- (memory 1)
- (data (i32.const 10) "\f0\f1\f2\f3\f4\f5")
-
- (export "Mm.load" (func $loadM))
- (func (export "load") (param $a i32) (result i32)
- (i32.load8_u (local.get 0))
- )
-)
-
-(assert_return (invoke $Mm "load" (i32.const 12)) (i32.const 2))
-(assert_return (invoke $Nm "Mm.load" (i32.const 12)) (i32.const 2))
-(assert_return (invoke $Nm "load" (i32.const 12)) (i32.const 0xf2))
-
-(module $Om
- (memory (import "Mm" "mem") 1)
- (data (i32.const 5) "\a0\a1\a2\a3\a4\a5\a6\a7")
-
- (func (export "load") (param $a i32) (result i32)
- (i32.load8_u (local.get 0))
- )
-)
-
-(assert_return (invoke $Mm "load" (i32.const 12)) (i32.const 0xa7))
-(assert_return (invoke $Nm "Mm.load" (i32.const 12)) (i32.const 0xa7))
-(assert_return (invoke $Nm "load" (i32.const 12)) (i32.const 0xf2))
-(assert_return (invoke $Om "load" (i32.const 12)) (i32.const 0xa7))
-
-(module
- (memory (import "Mm" "mem") 0)
- (data (i32.const 0xffff) "a")
-)
-
-(assert_unlinkable
- (module
- (memory (import "Mm" "mem") 0)
- (data (i32.const 0x10000) "a")
- )
- "data segment does not fit"
-)
-
-(module $Pm
- (memory (import "Mm" "mem") 1 8)
-
- (func (export "grow") (param $a i32) (result i32)
- (memory.grow (local.get 0))
- )
-)
-
-(assert_return (invoke $Pm "grow" (i32.const 0)) (i32.const 1))
-(assert_return (invoke $Pm "grow" (i32.const 2)) (i32.const 1))
-(assert_return (invoke $Pm "grow" (i32.const 0)) (i32.const 3))
-(assert_return (invoke $Pm "grow" (i32.const 1)) (i32.const 3))
-(assert_return (invoke $Pm "grow" (i32.const 1)) (i32.const 4))
-(assert_return (invoke $Pm "grow" (i32.const 0)) (i32.const 5))
-(assert_return (invoke $Pm "grow" (i32.const 1)) (i32.const -1))
-(assert_return (invoke $Pm "grow" (i32.const 0)) (i32.const 5))
-
-(assert_unlinkable
- (module
- (func $host (import "spectest" "print"))
- (memory (import "Mm" "mem") 1)
- (table (import "Mm" "tab") 0 funcref) ;; does not exist
- (data (i32.const 0) "abc")
- )
- "unknown import"
-)
-(assert_return (invoke $Mm "load" (i32.const 0)) (i32.const 0))
-
-;; Unlike in v1 spec, bytes written before an out-of-bounds access persist
-;; after the instantiation failure.
-(assert_unlinkable
- (module
- (memory (import "Mm" "mem") 1)
- (data (i32.const 0) "abc")
- (data (i32.const 0x50000) "d") ;; out of bounds
- )
- "data segment does not fit"
-)
-(assert_return (invoke $Mm "load" (i32.const 0)) (i32.const 97))
-
-(assert_unlinkable
- (module
- (memory (import "Mm" "mem") 1)
- (data (i32.const 0) "abc")
- (table 0 funcref)
- (func)
- (elem (i32.const 0) 0) ;; out of bounds
- )
- "elements segment does not fit"
-)
-(assert_return (invoke $Mm "load" (i32.const 0)) (i32.const 97))
-
-;; Store is modified if the start function traps.
-(module $Ms
- (type $t (func (result i32)))
- (memory (export "memory") 1)
- (table (export "table") 1 funcref)
- (func (export "get memory[0]") (type $t)
- (i32.load8_u (i32.const 0))
- )
- (func (export "get table[0]") (type $t)
- (call_indirect (type $t) (i32.const 0))
- )
-)
-(register "Ms" $Ms)
-
-(assert_trap
- (module
- (import "Ms" "memory" (memory 1))
- (import "Ms" "table" (table 1 funcref))
- (data (i32.const 0) "hello")
- (elem (i32.const 0) $f)
- (func $f (result i32)
- (i32.const 0xdead)
- )
- (func $main
- (unreachable)
- )
- (start $main)
- )
- "unreachable"
-)
-
-(assert_return (invoke $Ms "get memory[0]") (i32.const 104)) ;; 'h'
-(assert_return (invoke $Ms "get table[0]") (i32.const 0xdead))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js
deleted file mode 100644
index 729b41d5ca..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js
+++ /dev/null
@@ -1,505 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// linking.wast:3
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x00\x07\x88\x80\x80\x80\x00\x01\x04\x63\x61\x6c\x6c\x00\x00\x0a\x93\x80\x80\x80\x00\x02\x84\x80\x80\x80\x00\x00\x10\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b");
-let $Mf = $1;
-
-// linking.wast:7
-register("Mf", $Mf)
-
-// linking.wast:9
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x66\x04\x63\x61\x6c\x6c\x00\x00\x03\x84\x80\x80\x80\x00\x03\x00\x00\x00\x07\xa1\x80\x80\x80\x00\x03\x07\x4d\x66\x2e\x63\x61\x6c\x6c\x00\x00\x0c\x63\x61\x6c\x6c\x20\x4d\x66\x2e\x63\x61\x6c\x6c\x00\x01\x04\x63\x61\x6c\x6c\x00\x02\x0a\x9c\x80\x80\x80\x00\x03\x84\x80\x80\x80\x00\x00\x10\x00\x0b\x84\x80\x80\x80\x00\x00\x10\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b");
-let $Nf = $2;
-
-// linking.wast:17
-assert_return(() => call($Mf, "call", []), 2);
-
-// linking.wast:18
-assert_return(() => call($Nf, "Mf.call", []), 2);
-
-// linking.wast:19
-assert_return(() => call($Nf, "call", []), 3);
-
-// linking.wast:20
-assert_return(() => call($Nf, "call Mf.call", []), 2);
-
-// linking.wast:22
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x01\x7f\x00\x02\x96\x80\x80\x80\x00\x01\x08\x73\x70\x65\x63\x74\x65\x73\x74\x09\x70\x72\x69\x6e\x74\x5f\x69\x33\x32\x00\x00\x07\x89\x80\x80\x80\x00\x01\x05\x70\x72\x69\x6e\x74\x00\x00");
-
-// linking.wast:26
-register("reexport_f", $3)
-
-// linking.wast:27
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x01\x7e\x00\x02\x94\x80\x80\x80\x00\x01\x0a\x72\x65\x65\x78\x70\x6f\x72\x74\x5f\x66\x05\x70\x72\x69\x6e\x74\x00\x00");
-
-// linking.wast:31
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x02\x94\x80\x80\x80\x00\x01\x0a\x72\x65\x65\x78\x70\x6f\x72\x74\x5f\x66\x05\x70\x72\x69\x6e\x74\x00\x00");
-
-// linking.wast:39
-let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x01\x7f\x00\x03\x84\x80\x80\x80\x00\x03\x00\x00\x01\x06\x8c\x80\x80\x80\x00\x02\x7f\x00\x41\x2a\x0b\x7f\x01\x41\x8e\x01\x0b\x07\xad\x80\x80\x80\x00\x05\x04\x67\x6c\x6f\x62\x03\x00\x03\x67\x65\x74\x00\x00\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x01\x07\x67\x65\x74\x5f\x6d\x75\x74\x00\x01\x07\x73\x65\x74\x5f\x6d\x75\x74\x00\x02\x0a\x9e\x80\x80\x80\x00\x03\x84\x80\x80\x80\x00\x00\x23\x00\x0b\x84\x80\x80\x80\x00\x00\x23\x01\x0b\x86\x80\x80\x80\x00\x00\x20\x00\x24\x01\x0b");
-let $Mg = $4;
-
-// linking.wast:48
-register("Mg", $Mg)
-
-// linking.wast:50
-let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x01\x7f\x00\x02\xbe\x80\x80\x80\x00\x05\x02\x4d\x67\x04\x67\x6c\x6f\x62\x03\x7f\x00\x02\x4d\x67\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x02\x4d\x67\x03\x67\x65\x74\x00\x00\x02\x4d\x67\x07\x67\x65\x74\x5f\x6d\x75\x74\x00\x00\x02\x4d\x67\x07\x73\x65\x74\x5f\x6d\x75\x74\x00\x01\x03\x82\x80\x80\x80\x00\x01\x00\x06\x86\x80\x80\x80\x00\x01\x7f\x00\x41\x2b\x0b\x07\xc9\x80\x80\x80\x00\x07\x07\x4d\x67\x2e\x67\x6c\x6f\x62\x03\x00\x06\x4d\x67\x2e\x67\x65\x74\x00\x00\x04\x67\x6c\x6f\x62\x03\x02\x03\x67\x65\x74\x00\x03\x0b\x4d\x67\x2e\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x01\x0a\x4d\x67\x2e\x67\x65\x74\x5f\x6d\x75\x74\x00\x01\x0a\x4d\x67\x2e\x73\x65\x74\x5f\x6d\x75\x74\x00\x02\x0a\x8a\x80\x80\x80\x00\x01\x84\x80\x80\x80\x00\x00\x23\x02\x0b");
-let $Ng = $5;
-
-// linking.wast:67
-assert_return(() => get($Mg, "glob"), 42);
-
-// linking.wast:68
-assert_return(() => get($Ng, "Mg.glob"), 42);
-
-// linking.wast:69
-assert_return(() => get($Ng, "glob"), 43);
-
-// linking.wast:70
-assert_return(() => call($Mg, "get", []), 42);
-
-// linking.wast:71
-assert_return(() => call($Ng, "Mg.get", []), 42);
-
-// linking.wast:72
-assert_return(() => call($Ng, "get", []), 43);
-
-// linking.wast:74
-run(() => call(instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x91\x80\x80\x80\x00\x01\x03\x24\x4d\x67\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x87\x80\x80\x80\x00\x01\x03\x72\x75\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x02\x40\x23\x00\x01\x41\x8e\x01\x01\x46\x45\x0d\x00\x0f\x0b\x00\x0b", exports("$Mg", $Mg)), "run", [])); // assert_return(() => get($Mg, "mut_glob"), 142)
-
-// linking.wast:75
-run(() => call(instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x94\x80\x80\x80\x00\x01\x03\x24\x4e\x67\x0b\x4d\x67\x2e\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x87\x80\x80\x80\x00\x01\x03\x72\x75\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x02\x40\x23\x00\x01\x41\x8e\x01\x01\x46\x45\x0d\x00\x0f\x0b\x00\x0b", exports("$Ng", $Ng)), "run", [])); // assert_return(() => get($Ng, "Mg.mut_glob"), 142)
-
-// linking.wast:76
-assert_return(() => call($Mg, "get_mut", []), 142);
-
-// linking.wast:77
-assert_return(() => call($Ng, "Mg.get_mut", []), 142);
-
-// linking.wast:79
-assert_return(() => call($Mg, "set_mut", [241]));
-
-// linking.wast:80
-run(() => call(instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x91\x80\x80\x80\x00\x01\x03\x24\x4d\x67\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x87\x80\x80\x80\x00\x01\x03\x72\x75\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x02\x40\x23\x00\x01\x41\xf1\x01\x01\x46\x45\x0d\x00\x0f\x0b\x00\x0b", exports("$Mg", $Mg)), "run", [])); // assert_return(() => get($Mg, "mut_glob"), 241)
-
-// linking.wast:81
-run(() => call(instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x94\x80\x80\x80\x00\x01\x03\x24\x4e\x67\x0b\x4d\x67\x2e\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x87\x80\x80\x80\x00\x01\x03\x72\x75\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x02\x40\x23\x00\x01\x41\xf1\x01\x01\x46\x45\x0d\x00\x0f\x0b\x00\x0b", exports("$Ng", $Ng)), "run", [])); // assert_return(() => get($Ng, "Mg.mut_glob"), 241)
-
-// linking.wast:82
-assert_return(() => call($Mg, "get_mut", []), 241);
-
-// linking.wast:83
-assert_return(() => call($Ng, "Mg.get_mut", []), 241);
-
-// linking.wast:86
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x90\x80\x80\x80\x00\x01\x02\x4d\x67\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x00");
-
-// linking.wast:90
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x67\x04\x67\x6c\x6f\x62\x03\x7f\x01");
-
-// linking.wast:97
-let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x84\x80\x80\x80\x00\x03\x00\x00\x02\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x92\x80\x80\x80\x00\x03\x03\x74\x61\x62\x01\x00\x01\x68\x00\x01\x04\x63\x61\x6c\x6c\x00\x02\x09\x8a\x80\x80\x80\x00\x01\x00\x41\x02\x0b\x04\x00\x00\x00\x00\x0a\x9f\x80\x80\x80\x00\x03\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x7c\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-let $Mt = $6;
-
-// linking.wast:110
-register("Mt", $Mt)
-
-// linking.wast:112
-let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x00\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x02\x92\x80\x80\x80\x00\x02\x02\x4d\x74\x04\x63\x61\x6c\x6c\x00\x02\x02\x4d\x74\x01\x68\x00\x01\x03\x84\x80\x80\x80\x00\x03\x01\x02\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x05\x05\x07\xa1\x80\x80\x80\x00\x03\x07\x4d\x74\x2e\x63\x61\x6c\x6c\x00\x00\x0c\x63\x61\x6c\x6c\x20\x4d\x74\x2e\x63\x61\x6c\x6c\x00\x03\x04\x63\x61\x6c\x6c\x00\x04\x09\x8b\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x05\x02\x02\x02\x01\x00\x0a\xa1\x80\x80\x80\x00\x03\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x86\x80\x80\x80\x00\x00\x20\x00\x10\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x01\x00\x0b");
-let $Nt = $7;
-
-// linking.wast:131
-assert_return(() => call($Mt, "call", [2]), 4);
-
-// linking.wast:132
-assert_return(() => call($Nt, "Mt.call", [2]), 4);
-
-// linking.wast:133
-assert_return(() => call($Nt, "call", [2]), 5);
-
-// linking.wast:134
-assert_return(() => call($Nt, "call Mt.call", [2]), 4);
-
-// linking.wast:136
-assert_trap(() => call($Mt, "call", [1]));
-
-// linking.wast:137
-assert_trap(() => call($Nt, "Mt.call", [1]));
-
-// linking.wast:138
-assert_return(() => call($Nt, "call", [1]), 5);
-
-// linking.wast:139
-assert_trap(() => call($Nt, "call Mt.call", [1]));
-
-// linking.wast:141
-assert_trap(() => call($Mt, "call", [0]));
-
-// linking.wast:142
-assert_trap(() => call($Nt, "Mt.call", [0]));
-
-// linking.wast:143
-assert_return(() => call($Nt, "call", [0]), 5);
-
-// linking.wast:144
-assert_trap(() => call($Nt, "call Mt.call", [0]));
-
-// linking.wast:146
-assert_trap(() => call($Mt, "call", [20]));
-
-// linking.wast:147
-assert_trap(() => call($Nt, "Mt.call", [20]));
-
-// linking.wast:148
-assert_trap(() => call($Nt, "call", [7]));
-
-// linking.wast:149
-assert_trap(() => call($Nt, "call Mt.call", [20]));
-
-// linking.wast:151
-assert_return(() => call($Nt, "call", [3]), -4);
-
-// linking.wast:152
-assert_trap(() => call($Nt, "call", [4]));
-
-// linking.wast:154
-let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8a\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x02\x93\x80\x80\x80\x00\x02\x02\x4d\x74\x01\x68\x00\x00\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x05\x03\x83\x80\x80\x80\x00\x02\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x63\x61\x6c\x6c\x00\x02\x09\x88\x80\x80\x80\x00\x01\x00\x41\x01\x0b\x02\x01\x00\x0a\x96\x80\x80\x80\x00\x02\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-let $Ot = $8;
-
-// linking.wast:167
-assert_return(() => call($Mt, "call", [3]), 4);
-
-// linking.wast:168
-assert_return(() => call($Nt, "Mt.call", [3]), 4);
-
-// linking.wast:169
-assert_return(() => call($Nt, "call Mt.call", [3]), 4);
-
-// linking.wast:170
-assert_return(() => call($Ot, "call", [3]), 4);
-
-// linking.wast:172
-assert_return(() => call($Mt, "call", [2]), -4);
-
-// linking.wast:173
-assert_return(() => call($Nt, "Mt.call", [2]), -4);
-
-// linking.wast:174
-assert_return(() => call($Nt, "call", [2]), 5);
-
-// linking.wast:175
-assert_return(() => call($Nt, "call Mt.call", [2]), -4);
-
-// linking.wast:176
-assert_return(() => call($Ot, "call", [2]), -4);
-
-// linking.wast:178
-assert_return(() => call($Mt, "call", [1]), 6);
-
-// linking.wast:179
-assert_return(() => call($Nt, "Mt.call", [1]), 6);
-
-// linking.wast:180
-assert_return(() => call($Nt, "call", [1]), 5);
-
-// linking.wast:181
-assert_return(() => call($Nt, "call Mt.call", [1]), 6);
-
-// linking.wast:182
-assert_return(() => call($Ot, "call", [1]), 6);
-
-// linking.wast:184
-assert_trap(() => call($Mt, "call", [0]));
-
-// linking.wast:185
-assert_trap(() => call($Nt, "Mt.call", [0]));
-
-// linking.wast:186
-assert_return(() => call($Nt, "call", [0]), 5);
-
-// linking.wast:187
-assert_trap(() => call($Nt, "call Mt.call", [0]));
-
-// linking.wast:188
-assert_trap(() => call($Ot, "call", [0]));
-
-// linking.wast:190
-assert_trap(() => call($Ot, "call", [20]));
-
-// linking.wast:192
-let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x09\x87\x80\x80\x80\x00\x01\x00\x41\x09\x0b\x01\x00\x0a\x88\x80\x80\x80\x00\x01\x82\x80\x80\x80\x00\x00\x0b");
-
-// linking.wast:198
-let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x86\x80\x80\x80\x00\x01\x7f\x00\x41\x05\x0b\x07\x85\x80\x80\x80\x00\x01\x01\x67\x03\x00");
-let $G1 = $10;
-
-// linking.wast:199
-register("G1", $G1)
-
-// linking.wast:200
-let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x89\x80\x80\x80\x00\x01\x02\x47\x31\x01\x67\x03\x7f\x00\x06\x86\x80\x80\x80\x00\x01\x7f\x00\x23\x00\x0b\x07\x85\x80\x80\x80\x00\x01\x01\x67\x03\x01");
-let $G2 = $11;
-
-// linking.wast:204
-assert_return(() => get($G2, "g"), 5);
-
-// linking.wast:206
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x09\x87\x80\x80\x80\x00\x01\x00\x41\x0a\x0b\x01\x00\x0a\x88\x80\x80\x80\x00\x01\x82\x80\x80\x80\x00\x00\x0b");
-
-// linking.wast:215
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x02\x96\x80\x80\x80\x00\x02\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x0a\x02\x4d\x74\x03\x6d\x65\x6d\x02\x00\x01\x03\x82\x80\x80\x80\x00\x01\x00\x09\x8d\x80\x80\x80\x00\x02\x00\x41\x07\x0b\x01\x00\x00\x41\x09\x0b\x01\x00\x0a\x8a\x80\x80\x80\x00\x01\x84\x80\x80\x80\x00\x00\x41\x00\x0b");
-
-// linking.wast:225
-assert_trap(() => call($Mt, "call", [7]));
-
-// linking.wast:229
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x0a\x03\x82\x80\x80\x80\x00\x01\x00\x09\x8d\x80\x80\x80\x00\x02\x00\x41\x07\x0b\x01\x00\x00\x41\x0c\x0b\x01\x00\x0a\x8a\x80\x80\x80\x00\x01\x84\x80\x80\x80\x00\x00\x41\x00\x0b");
-
-// linking.wast:238
-assert_return(() => call($Mt, "call", [7]), 0);
-
-// linking.wast:240
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x0a\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x09\x87\x80\x80\x80\x00\x01\x00\x41\x07\x0b\x01\x00\x0a\x8a\x80\x80\x80\x00\x01\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x0b\x89\x80\x80\x80\x00\x01\x00\x41\x80\x80\x04\x0b\x01\x64");
-
-// linking.wast:250
-assert_return(() => call($Mt, "call", [7]), 0);
-
-// linking.wast:255
-let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x05\x07\x8e\x80\x80\x80\x00\x02\x03\x6d\x65\x6d\x02\x00\x04\x6c\x6f\x61\x64\x00\x00\x0a\x8d\x80\x80\x80\x00\x01\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x90\x80\x80\x80\x00\x01\x00\x41\x0a\x0b\x0a\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09");
-let $Mm = $12;
-
-// linking.wast:263
-register("Mm", $Mm)
-
-// linking.wast:265
-let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x04\x6c\x6f\x61\x64\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x92\x80\x80\x80\x00\x02\x07\x4d\x6d\x2e\x6c\x6f\x61\x64\x00\x00\x04\x6c\x6f\x61\x64\x00\x01\x0a\x8d\x80\x80\x80\x00\x01\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x8c\x80\x80\x80\x00\x01\x00\x41\x0a\x0b\x06\xf0\xf1\xf2\xf3\xf4\xf5");
-let $Nm = $13;
-
-// linking.wast:277
-assert_return(() => call($Mm, "load", [12]), 2);
-
-// linking.wast:278
-assert_return(() => call($Nm, "Mm.load", [12]), 2);
-
-// linking.wast:279
-assert_return(() => call($Nm, "load", [12]), 242);
-
-// linking.wast:281
-let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x6c\x6f\x61\x64\x00\x00\x0a\x8d\x80\x80\x80\x00\x01\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x8e\x80\x80\x80\x00\x01\x00\x41\x05\x0b\x08\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7");
-let $Om = $14;
-
-// linking.wast:290
-assert_return(() => call($Mm, "load", [12]), 167);
-
-// linking.wast:291
-assert_return(() => call($Nm, "Mm.load", [12]), 167);
-
-// linking.wast:292
-assert_return(() => call($Nm, "load", [12]), 242);
-
-// linking.wast:293
-assert_return(() => call($Om, "load", [12]), 167);
-
-// linking.wast:295
-let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x00\x0b\x89\x80\x80\x80\x00\x01\x00\x41\xff\xff\x03\x0b\x01\x61");
-
-// linking.wast:300
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x00\x0b\x89\x80\x80\x80\x00\x01\x00\x41\x80\x80\x04\x0b\x01\x61");
-
-// linking.wast:308
-let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x01\x01\x08\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x67\x72\x6f\x77\x00\x00\x0a\x8c\x80\x80\x80\x00\x01\x86\x80\x80\x80\x00\x00\x20\x00\x40\x00\x0b");
-let $Pm = $16;
-
-// linking.wast:316
-assert_return(() => call($Pm, "grow", [0]), 1);
-
-// linking.wast:317
-assert_return(() => call($Pm, "grow", [2]), 1);
-
-// linking.wast:318
-assert_return(() => call($Pm, "grow", [0]), 3);
-
-// linking.wast:319
-assert_return(() => call($Pm, "grow", [1]), 3);
-
-// linking.wast:320
-assert_return(() => call($Pm, "grow", [1]), 4);
-
-// linking.wast:321
-assert_return(() => call($Pm, "grow", [0]), 5);
-
-// linking.wast:322
-assert_return(() => call($Pm, "grow", [1]), -1);
-
-// linking.wast:323
-assert_return(() => call($Pm, "grow", [0]), 5);
-
-// linking.wast:325
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\xa7\x80\x80\x80\x00\x03\x08\x73\x70\x65\x63\x74\x65\x73\x74\x05\x70\x72\x69\x6e\x74\x00\x00\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x01\x02\x4d\x6d\x03\x74\x61\x62\x01\x70\x00\x00\x0b\x89\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x03\x61\x62\x63");
-
-// linking.wast:334
-assert_return(() => call($Mm, "load", [0]), 0);
-
-// linking.wast:338
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x01\x0b\x91\x80\x80\x80\x00\x02\x00\x41\x00\x0b\x03\x61\x62\x63\x00\x41\x80\x80\x14\x0b\x01\x64");
-
-// linking.wast:346
-assert_return(() => call($Mm, "load", [0]), 97);
-
-// linking.wast:348
-assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x01\x03\x82\x80\x80\x80\x00\x01\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x00\x09\x87\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x01\x00\x0a\x88\x80\x80\x80\x00\x01\x82\x80\x80\x80\x00\x00\x0b\x0b\x89\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x03\x61\x62\x63");
-
-// linking.wast:358
-assert_return(() => call($Mm, "load", [0]), 97);
-
-// linking.wast:361
-let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\xb1\x80\x80\x80\x00\x04\x06\x6d\x65\x6d\x6f\x72\x79\x02\x00\x05\x74\x61\x62\x6c\x65\x01\x00\x0d\x67\x65\x74\x20\x6d\x65\x6d\x6f\x72\x79\x5b\x30\x5d\x00\x00\x0c\x67\x65\x74\x20\x74\x61\x62\x6c\x65\x5b\x30\x5d\x00\x01\x0a\x99\x80\x80\x80\x00\x02\x87\x80\x80\x80\x00\x00\x41\x00\x2d\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x41\x00\x11\x00\x00\x0b");
-let $Ms = $17;
-
-// linking.wast:372
-register("Ms", $Ms)
-
-// linking.wast:374
-assert_uninstantiable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x02\x9b\x80\x80\x80\x00\x02\x02\x4d\x73\x06\x6d\x65\x6d\x6f\x72\x79\x02\x00\x01\x02\x4d\x73\x05\x74\x61\x62\x6c\x65\x01\x70\x00\x01\x03\x83\x80\x80\x80\x00\x02\x00\x01\x08\x81\x80\x80\x80\x00\x01\x09\x87\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x01\x00\x0a\x94\x80\x80\x80\x00\x02\x86\x80\x80\x80\x00\x00\x41\xad\xbd\x03\x0b\x83\x80\x80\x80\x00\x00\x00\x0b\x0b\x8b\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x05\x68\x65\x6c\x6c\x6f");
-
-// linking.wast:391
-assert_return(() => call($Ms, "get memory[0]", []), 104);
-
-// linking.wast:392
-assert_return(() => call($Ms, "get table[0]", []), 57005);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast
deleted file mode 100644
index b5f25c009b..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast
+++ /dev/null
@@ -1,5685 +0,0 @@
-;;
-;; Generated by ../meta/generate_memory_copy.js
-;;
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data (i32.const 12) "\07\05\02\03\06")
- (func (export "test")
- (nop))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data (i32.const 12) "\07\05\02\03\06")
- (func (export "test")
- (memory.copy (i32.const 13) (i32.const 2) (i32.const 3)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data (i32.const 12) "\07\05\02\03\06")
- (func (export "test")
- (memory.copy (i32.const 25) (i32.const 15) (i32.const 2)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data (i32.const 12) "\07\05\02\03\06")
- (func (export "test")
- (memory.copy (i32.const 13) (i32.const 25) (i32.const 3)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data (i32.const 12) "\07\05\02\03\06")
- (func (export "test")
- (memory.copy (i32.const 20) (i32.const 22) (i32.const 4)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data (i32.const 12) "\07\05\02\03\06")
- (func (export "test")
- (memory.copy (i32.const 25) (i32.const 1) (i32.const 3)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data (i32.const 12) "\07\05\02\03\06")
- (func (export "test")
- (memory.copy (i32.const 10) (i32.const 12) (i32.const 7)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data (i32.const 12) "\07\05\02\03\06")
- (func (export "test")
- (memory.copy (i32.const 12) (i32.const 10) (i32.const 7)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 0) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 65516) (i32.const 0) (i32.const 40))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 218)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 417)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 616)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 815)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1014)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1213)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1412)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1611)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1810)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2009)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2208)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2407)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2606)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2805)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3004)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3203)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3402)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3601)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3800)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3999)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 0) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13\14")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 65515) (i32.const 0) (i32.const 39))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 20))
-(assert_return (invoke "load8_u" (i32.const 219)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 418)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 617)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 816)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1015)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1214)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1413)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1612)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1811)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2010)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2209)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2408)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2607)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2806)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3005)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3204)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3403)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3602)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3801)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4000)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4199)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4398)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4597)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4796)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4995)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5194)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5393)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5592)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5791)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5990)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6189)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6388)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6587)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6786)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6985)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7184)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7383)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7582)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7781)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7980)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8179)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8378)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8577)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8776)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8975)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9174)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9373)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9572)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9771)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9970)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10169)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10368)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10567)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10766)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10965)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11164)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11363)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11562)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11761)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11960)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12159)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12358)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12557)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12756)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12955)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13154)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13353)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13552)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13751)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13950)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14149)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14348)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14547)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14746)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14945)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15144)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15343)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15542)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15741)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15940)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16139)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16338)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16537)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16736)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16935)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17134)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17333)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17532)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17731)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17930)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18129)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18328)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18527)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18726)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18925)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19124)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19323)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19522)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19721)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19920)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20119)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20318)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20517)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20716)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20915)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21114)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21313)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21512)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21711)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21910)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22109)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22308)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22507)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22706)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22905)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23104)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23303)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23502)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23701)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23900)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24099)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24298)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24497)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24696)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24895)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25094)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25293)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25492)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25691)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25890)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26089)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26288)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26487)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26686)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26885)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27084)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27283)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27482)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27681)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27880)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28079)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28278)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28477)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28676)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28875)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29074)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29273)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29472)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29671)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29870)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30069)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30268)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30467)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30666)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30865)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31064)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31263)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31462)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31661)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31860)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32059)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32258)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32457)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32656)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32855)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33054)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33253)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33452)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33651)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33850)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34049)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34248)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34447)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34646)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34845)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35044)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35243)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35442)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35641)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35840)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36039)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36238)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36437)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36636)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36835)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37034)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37233)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37432)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37631)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37830)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38029)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38228)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38427)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38626)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38825)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39024)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39223)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39422)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39621)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39820)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40019)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40218)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40417)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40616)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40815)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41014)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41213)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41412)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41611)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41810)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42009)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42208)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42407)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42606)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42805)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43004)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43203)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43402)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43601)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43800)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43999)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 20))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 0) (i32.const 65516) (i32.const 40))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 218)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 417)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 616)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 815)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1014)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1213)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1412)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1611)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1810)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2009)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2208)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2407)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2606)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2805)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3004)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3203)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3402)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3601)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3800)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3999)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 65515) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13\14")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 0) (i32.const 65515) (i32.const 39))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 20))
-(assert_return (invoke "load8_u" (i32.const 219)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 418)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 617)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 816)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1015)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1214)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1413)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1612)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1811)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2010)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2209)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2408)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2607)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2806)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3005)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3204)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3403)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3602)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3801)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4000)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4199)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4398)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4597)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4796)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4995)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5194)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5393)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5592)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5791)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5990)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6189)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6388)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6587)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6786)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6985)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7184)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7383)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7582)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7781)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7980)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8179)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8378)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8577)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8776)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8975)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9174)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9373)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9572)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9771)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9970)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10169)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10368)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10567)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10766)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10965)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11164)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11363)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11562)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11761)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11960)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12159)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12358)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12557)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12756)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12955)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13154)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13353)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13552)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13751)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13950)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14149)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14348)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14547)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14746)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14945)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15144)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15343)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15542)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15741)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15940)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16139)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16338)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16537)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16736)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16935)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17134)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17333)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17532)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17731)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17930)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18129)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18328)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18527)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18726)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18925)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19124)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19323)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19522)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19721)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19920)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20119)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20318)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20517)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20716)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20915)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21114)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21313)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21512)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21711)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21910)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22109)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22308)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22507)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22706)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22905)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23104)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23303)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23502)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23701)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23900)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24099)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24298)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24497)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24696)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24895)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25094)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25293)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25492)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25691)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25890)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26089)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26288)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26487)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26686)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26885)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27084)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27283)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27482)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27681)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27880)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28079)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28278)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28477)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28676)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28875)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29074)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29273)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29472)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29671)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29870)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30069)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30268)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30467)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30666)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30865)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31064)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31263)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31462)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31661)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31860)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32059)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32258)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32457)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32656)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32855)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33054)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33253)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33452)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33651)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33850)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34049)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34248)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34447)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34646)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34845)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35044)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35243)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35442)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35641)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35840)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36039)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36238)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36437)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36636)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36835)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37034)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37233)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37432)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37631)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37830)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38029)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38228)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38427)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38626)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38825)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39024)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39223)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39422)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39621)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39820)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40019)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40218)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40417)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40616)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40815)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41014)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41213)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41412)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41611)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41810)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42009)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42208)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42407)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42606)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42805)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43004)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43203)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43402)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43601)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43800)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43999)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 20))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 65486) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 65516) (i32.const 65486) (i32.const 40))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65487)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65488)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65489)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65491)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65492)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65493)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65494)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65495)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65496)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65497)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65498)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65499)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65500)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65501)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65502)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65503)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65504)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65505)) (i32.const 19))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 65486) (i32.const 65516) (i32.const 40))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65487)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65488)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65489)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65491)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65492)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65493)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65494)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65495)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65496)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65497)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65498)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65499)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65500)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65501)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65502)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65503)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65504)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65505)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 65506) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 65516) (i32.const 65506) (i32.const 40))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65507)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65508)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65509)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65510)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65511)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65512)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65513)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65514)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65515)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 19))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 65506) (i32.const 65516) (i32.const 40))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65507)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65508)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65509)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65510)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65511)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65512)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65513)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65514)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65515)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 65516) (i32.const 65516) (i32.const 40))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
-
-(module
- (memory (export "mem") 1 )
- (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 0) (i32.const 65516) (i32.const 4294963200))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 218)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 417)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 616)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 815)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1014)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1213)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1412)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1611)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1810)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2009)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2208)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2407)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2606)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2805)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3004)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3203)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3402)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3601)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3800)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3999)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
-
-(module
- (memory (export "mem") 1 1 )
- (data (i32.const 61440) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(assert_trap (invoke "run" (i32.const 65516) (i32.const 61440) (i32.const 4294967040))
- "out of bounds")
-
-(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61440)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61441)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 61442)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 61443)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 61444)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 61445)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 61446)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 61447)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 61448)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 61449)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 61450)) (i32.const 10))
-(assert_return (invoke "load8_u" (i32.const 61451)) (i32.const 11))
-(assert_return (invoke "load8_u" (i32.const 61452)) (i32.const 12))
-(assert_return (invoke "load8_u" (i32.const 61453)) (i32.const 13))
-(assert_return (invoke "load8_u" (i32.const 61454)) (i32.const 14))
-(assert_return (invoke "load8_u" (i32.const 61455)) (i32.const 15))
-(assert_return (invoke "load8_u" (i32.const 61456)) (i32.const 16))
-(assert_return (invoke "load8_u" (i32.const 61457)) (i32.const 17))
-(assert_return (invoke "load8_u" (i32.const 61458)) (i32.const 18))
-(assert_return (invoke "load8_u" (i32.const 61459)) (i32.const 19))
-(assert_return (invoke "load8_u" (i32.const 61510)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61709)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 61908)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62107)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62306)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62505)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62704)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 62903)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63102)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63301)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63500)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63699)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 63898)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64097)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64296)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64495)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64694)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 64893)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65092)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65291)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 0))
-
-(assert_invalid
- (module
- (func (export "testfn")
- (memory.copy (i32.const 10) (i32.const 20) (i32.const 30))))
- "unknown memory 0")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (i32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (i32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (i32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (f32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (f32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (f32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (f32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (i64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (i64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (i64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (i64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (f64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (f64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (f64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i32.const 10) (f64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (i32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (i32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (i32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (i32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (f32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (f32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (f32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (f32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (i64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (i64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (i64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (i64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (f64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (f64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (f64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f32.const 10) (f64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (i32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (i32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (i32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (i32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (f32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (f32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (f32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (f32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (i64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (i64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (i64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (i64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (f64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (f64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (f64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (i64.const 10) (f64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (i32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (i32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (i32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (i32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (f32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (f32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (f32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (f32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (i64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (i64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (i64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (i64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (f64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (f64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (f64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.copy (f64.const 10) (f64.const 20) (f64.const 30))))
- "type mismatch")
-
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.fill (i32.const 10) (i32.const 0x55) (i32.const 10))
- (memory.copy (i32.const 9) (i32.const 10) (i32.const 5)))
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-)
-(invoke "test")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 9) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 9) (i32.const 20) (i32.const 85))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 20) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.fill (i32.const 10) (i32.const 0x55) (i32.const 10))
- (memory.copy (i32.const 16) (i32.const 15) (i32.const 5)))
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-)
-(invoke "test")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 10) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 10) (i32.const 21) (i32.const 85))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 21) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.copy (i32.const 0xFF00) (i32.const 0x8000) (i32.const 257))))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.copy (i32.const 0xFFFFFF00) (i32.const 0x4000) (i32.const 257))))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.copy (i32.const 0x8000) (i32.const 0xFF00) (i32.const 257))))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.copy (i32.const 0x4000) (i32.const 0xFFFFFF00) (i32.const 257))))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.fill (i32.const 0x0000) (i32.const 0x55) (i32.const 0x8000))
- (memory.fill (i32.const 0x8000) (i32.const 0xAA) (i32.const 0x8000))
- (memory.copy (i32.const 0x9000) (i32.const 0x7000) (i32.const 0)))
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-)
-(invoke "test")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 32768) (i32.const 85))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 32768) (i32.const 65536) (i32.const 170))
- (i32.const -1))
-(module
- (memory 1 1)
- (func (export "test")
- (memory.copy (i32.const 0x10000) (i32.const 0x7000) (i32.const 0))))
-(invoke "test")
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.copy (i32.const 0x9000) (i32.const 0x10000) (i32.const 0))))
-(invoke "test")
-
-(module
- (memory 1 1)
- (func (export "test")
- (memory.fill (i32.const 17767) (i32.const 1) (i32.const 1344))
- (memory.fill (i32.const 39017) (i32.const 2) (i32.const 1055))
- (memory.fill (i32.const 56401) (i32.const 3) (i32.const 988))
- (memory.fill (i32.const 37962) (i32.const 4) (i32.const 322))
- (memory.fill (i32.const 7977) (i32.const 5) (i32.const 1994))
- (memory.fill (i32.const 22714) (i32.const 6) (i32.const 3036))
- (memory.fill (i32.const 16882) (i32.const 7) (i32.const 2372))
- (memory.fill (i32.const 43491) (i32.const 8) (i32.const 835))
- (memory.fill (i32.const 124) (i32.const 9) (i32.const 1393))
- (memory.fill (i32.const 2132) (i32.const 10) (i32.const 2758))
- (memory.fill (i32.const 8987) (i32.const 11) (i32.const 3098))
- (memory.fill (i32.const 52711) (i32.const 12) (i32.const 741))
- (memory.fill (i32.const 3958) (i32.const 13) (i32.const 2823))
- (memory.fill (i32.const 49715) (i32.const 14) (i32.const 1280))
- (memory.fill (i32.const 50377) (i32.const 15) (i32.const 1466))
- (memory.fill (i32.const 20493) (i32.const 16) (i32.const 3158))
- (memory.fill (i32.const 47665) (i32.const 17) (i32.const 544))
- (memory.fill (i32.const 12451) (i32.const 18) (i32.const 2669))
- (memory.fill (i32.const 24869) (i32.const 19) (i32.const 2651))
- (memory.fill (i32.const 45317) (i32.const 20) (i32.const 1570))
- (memory.fill (i32.const 43096) (i32.const 21) (i32.const 1691))
- (memory.fill (i32.const 33886) (i32.const 22) (i32.const 646))
- (memory.fill (i32.const 48555) (i32.const 23) (i32.const 1858))
- (memory.fill (i32.const 53453) (i32.const 24) (i32.const 2657))
- (memory.fill (i32.const 30363) (i32.const 25) (i32.const 981))
- (memory.fill (i32.const 9300) (i32.const 26) (i32.const 1807))
- (memory.fill (i32.const 50190) (i32.const 27) (i32.const 487))
- (memory.fill (i32.const 62753) (i32.const 28) (i32.const 530))
- (memory.fill (i32.const 36316) (i32.const 29) (i32.const 943))
- (memory.fill (i32.const 6768) (i32.const 30) (i32.const 381))
- (memory.fill (i32.const 51262) (i32.const 31) (i32.const 3089))
- (memory.fill (i32.const 49729) (i32.const 32) (i32.const 658))
- (memory.fill (i32.const 44540) (i32.const 33) (i32.const 1702))
- (memory.fill (i32.const 33342) (i32.const 34) (i32.const 1092))
- (memory.fill (i32.const 50814) (i32.const 35) (i32.const 1410))
- (memory.fill (i32.const 47594) (i32.const 36) (i32.const 2204))
- (memory.fill (i32.const 54123) (i32.const 37) (i32.const 2394))
- (memory.fill (i32.const 55183) (i32.const 38) (i32.const 250))
- (memory.fill (i32.const 22620) (i32.const 39) (i32.const 2097))
- (memory.fill (i32.const 17132) (i32.const 40) (i32.const 3264))
- (memory.fill (i32.const 54331) (i32.const 41) (i32.const 3299))
- (memory.fill (i32.const 39474) (i32.const 42) (i32.const 2796))
- (memory.fill (i32.const 36156) (i32.const 43) (i32.const 2070))
- (memory.fill (i32.const 35308) (i32.const 44) (i32.const 2763))
- (memory.fill (i32.const 32731) (i32.const 45) (i32.const 312))
- (memory.fill (i32.const 63746) (i32.const 46) (i32.const 192))
- (memory.fill (i32.const 30974) (i32.const 47) (i32.const 596))
- (memory.fill (i32.const 16635) (i32.const 48) (i32.const 501))
- (memory.fill (i32.const 57002) (i32.const 49) (i32.const 686))
- (memory.fill (i32.const 34299) (i32.const 50) (i32.const 385))
- (memory.fill (i32.const 60881) (i32.const 51) (i32.const 903))
- (memory.fill (i32.const 61445) (i32.const 52) (i32.const 2390))
- (memory.fill (i32.const 46972) (i32.const 53) (i32.const 1441))
- (memory.fill (i32.const 25973) (i32.const 54) (i32.const 3162))
- (memory.fill (i32.const 5566) (i32.const 55) (i32.const 2135))
- (memory.fill (i32.const 35977) (i32.const 56) (i32.const 519))
- (memory.fill (i32.const 44892) (i32.const 57) (i32.const 3280))
- (memory.fill (i32.const 46760) (i32.const 58) (i32.const 1678))
- (memory.fill (i32.const 46607) (i32.const 59) (i32.const 3168))
- (memory.fill (i32.const 22449) (i32.const 60) (i32.const 1441))
- (memory.fill (i32.const 58609) (i32.const 61) (i32.const 663))
- (memory.fill (i32.const 32261) (i32.const 62) (i32.const 1671))
- (memory.fill (i32.const 3063) (i32.const 63) (i32.const 721))
- (memory.fill (i32.const 34025) (i32.const 64) (i32.const 84))
- (memory.fill (i32.const 33338) (i32.const 65) (i32.const 2029))
- (memory.fill (i32.const 36810) (i32.const 66) (i32.const 29))
- (memory.fill (i32.const 19147) (i32.const 67) (i32.const 3034))
- (memory.fill (i32.const 12616) (i32.const 68) (i32.const 1043))
- (memory.fill (i32.const 18276) (i32.const 69) (i32.const 3324))
- (memory.fill (i32.const 4639) (i32.const 70) (i32.const 1091))
- (memory.fill (i32.const 16158) (i32.const 71) (i32.const 1997))
- (memory.fill (i32.const 18204) (i32.const 72) (i32.const 2259))
- (memory.fill (i32.const 50532) (i32.const 73) (i32.const 3189))
- (memory.fill (i32.const 11028) (i32.const 74) (i32.const 1968))
- (memory.fill (i32.const 15962) (i32.const 75) (i32.const 1455))
- (memory.fill (i32.const 45406) (i32.const 76) (i32.const 1177))
- (memory.fill (i32.const 54137) (i32.const 77) (i32.const 1568))
- (memory.fill (i32.const 33083) (i32.const 78) (i32.const 1642))
- (memory.fill (i32.const 61028) (i32.const 79) (i32.const 3284))
- (memory.fill (i32.const 51729) (i32.const 80) (i32.const 223))
- (memory.fill (i32.const 4361) (i32.const 81) (i32.const 2171))
- (memory.fill (i32.const 57514) (i32.const 82) (i32.const 1322))
- (memory.fill (i32.const 55724) (i32.const 83) (i32.const 2648))
- (memory.fill (i32.const 24091) (i32.const 84) (i32.const 1045))
- (memory.fill (i32.const 43183) (i32.const 85) (i32.const 3097))
- (memory.fill (i32.const 32307) (i32.const 86) (i32.const 2796))
- (memory.fill (i32.const 3811) (i32.const 87) (i32.const 2010))
- (memory.fill (i32.const 54856) (i32.const 88) (i32.const 0))
- (memory.fill (i32.const 49941) (i32.const 89) (i32.const 2069))
- (memory.fill (i32.const 20411) (i32.const 90) (i32.const 2896))
- (memory.fill (i32.const 33826) (i32.const 91) (i32.const 192))
- (memory.fill (i32.const 9402) (i32.const 92) (i32.const 2195))
- (memory.fill (i32.const 12413) (i32.const 93) (i32.const 24))
- (memory.fill (i32.const 14091) (i32.const 94) (i32.const 577))
- (memory.fill (i32.const 44058) (i32.const 95) (i32.const 2089))
- (memory.fill (i32.const 36735) (i32.const 96) (i32.const 3436))
- (memory.fill (i32.const 23288) (i32.const 97) (i32.const 2765))
- (memory.fill (i32.const 6392) (i32.const 98) (i32.const 830))
- (memory.fill (i32.const 33307) (i32.const 99) (i32.const 1938))
- (memory.fill (i32.const 21941) (i32.const 100) (i32.const 2750))
- (memory.copy (i32.const 59214) (i32.const 54248) (i32.const 2098))
- (memory.copy (i32.const 63026) (i32.const 39224) (i32.const 230))
- (memory.copy (i32.const 51833) (i32.const 23629) (i32.const 2300))
- (memory.copy (i32.const 6708) (i32.const 23996) (i32.const 639))
- (memory.copy (i32.const 6990) (i32.const 33399) (i32.const 1097))
- (memory.copy (i32.const 19403) (i32.const 10348) (i32.const 3197))
- (memory.copy (i32.const 27308) (i32.const 54406) (i32.const 100))
- (memory.copy (i32.const 27221) (i32.const 43682) (i32.const 1717))
- (memory.copy (i32.const 60528) (i32.const 8629) (i32.const 119))
- (memory.copy (i32.const 5947) (i32.const 2308) (i32.const 658))
- (memory.copy (i32.const 4787) (i32.const 51631) (i32.const 2269))
- (memory.copy (i32.const 12617) (i32.const 19197) (i32.const 833))
- (memory.copy (i32.const 11854) (i32.const 46505) (i32.const 3300))
- (memory.copy (i32.const 11376) (i32.const 45012) (i32.const 2281))
- (memory.copy (i32.const 34186) (i32.const 6697) (i32.const 2572))
- (memory.copy (i32.const 4936) (i32.const 1690) (i32.const 1328))
- (memory.copy (i32.const 63164) (i32.const 7637) (i32.const 1670))
- (memory.copy (i32.const 44568) (i32.const 18344) (i32.const 33))
- (memory.copy (i32.const 43918) (i32.const 22348) (i32.const 1427))
- (memory.copy (i32.const 46637) (i32.const 49819) (i32.const 1434))
- (memory.copy (i32.const 63684) (i32.const 8755) (i32.const 834))
- (memory.copy (i32.const 33485) (i32.const 20131) (i32.const 3317))
- (memory.copy (i32.const 40575) (i32.const 54317) (i32.const 3201))
- (memory.copy (i32.const 25812) (i32.const 59254) (i32.const 2452))
- (memory.copy (i32.const 19678) (i32.const 56882) (i32.const 346))
- (memory.copy (i32.const 15852) (i32.const 35914) (i32.const 2430))
- (memory.copy (i32.const 11824) (i32.const 35574) (i32.const 300))
- (memory.copy (i32.const 59427) (i32.const 13957) (i32.const 3153))
- (memory.copy (i32.const 34299) (i32.const 60594) (i32.const 1281))
- (memory.copy (i32.const 8964) (i32.const 12276) (i32.const 943))
- (memory.copy (i32.const 2827) (i32.const 10425) (i32.const 1887))
- (memory.copy (i32.const 43194) (i32.const 43910) (i32.const 738))
- (memory.copy (i32.const 63038) (i32.const 18949) (i32.const 122))
- (memory.copy (i32.const 24044) (i32.const 44761) (i32.const 1755))
- (memory.copy (i32.const 22608) (i32.const 14755) (i32.const 702))
- (memory.copy (i32.const 11284) (i32.const 26579) (i32.const 1830))
- (memory.copy (i32.const 23092) (i32.const 20471) (i32.const 1064))
- (memory.copy (i32.const 57248) (i32.const 54770) (i32.const 2631))
- (memory.copy (i32.const 25492) (i32.const 1025) (i32.const 3113))
- (memory.copy (i32.const 49588) (i32.const 44220) (i32.const 975))
- (memory.copy (i32.const 28280) (i32.const 41722) (i32.const 2336))
- (memory.copy (i32.const 61289) (i32.const 230) (i32.const 2872))
- (memory.copy (i32.const 22480) (i32.const 52506) (i32.const 2197))
- (memory.copy (i32.const 40553) (i32.const 9578) (i32.const 1958))
- (memory.copy (i32.const 29004) (i32.const 20862) (i32.const 2186))
- (memory.copy (i32.const 53029) (i32.const 43955) (i32.const 1037))
- (memory.copy (i32.const 25476) (i32.const 35667) (i32.const 1650))
- (memory.copy (i32.const 58516) (i32.const 45819) (i32.const 1986))
- (memory.copy (i32.const 38297) (i32.const 5776) (i32.const 1955))
- (memory.copy (i32.const 28503) (i32.const 55364) (i32.const 2368))
- (memory.copy (i32.const 62619) (i32.const 18108) (i32.const 1356))
- (memory.copy (i32.const 50149) (i32.const 13861) (i32.const 382))
- (memory.copy (i32.const 16904) (i32.const 36341) (i32.const 1900))
- (memory.copy (i32.const 48098) (i32.const 11358) (i32.const 2807))
- (memory.copy (i32.const 28512) (i32.const 40362) (i32.const 323))
- (memory.copy (i32.const 35506) (i32.const 27856) (i32.const 1670))
- (memory.copy (i32.const 62970) (i32.const 53332) (i32.const 1341))
- (memory.copy (i32.const 14133) (i32.const 46312) (i32.const 644))
- (memory.copy (i32.const 29030) (i32.const 19074) (i32.const 496))
- (memory.copy (i32.const 44952) (i32.const 47577) (i32.const 2784))
- (memory.copy (i32.const 39559) (i32.const 44661) (i32.const 1350))
- (memory.copy (i32.const 10352) (i32.const 29274) (i32.const 1475))
- (memory.copy (i32.const 46911) (i32.const 46178) (i32.const 1467))
- (memory.copy (i32.const 4905) (i32.const 28740) (i32.const 1895))
- (memory.copy (i32.const 38012) (i32.const 57253) (i32.const 1751))
- (memory.copy (i32.const 26446) (i32.const 27223) (i32.const 1127))
- (memory.copy (i32.const 58835) (i32.const 24657) (i32.const 1063))
- (memory.copy (i32.const 61356) (i32.const 38790) (i32.const 766))
- (memory.copy (i32.const 44160) (i32.const 2284) (i32.const 1520))
- (memory.copy (i32.const 32740) (i32.const 47237) (i32.const 3014))
- (memory.copy (i32.const 11148) (i32.const 21260) (i32.const 1011))
- (memory.copy (i32.const 7665) (i32.const 31612) (i32.const 3034))
- (memory.copy (i32.const 18044) (i32.const 12987) (i32.const 3320))
- (memory.copy (i32.const 57306) (i32.const 55905) (i32.const 308))
- (memory.copy (i32.const 24675) (i32.const 16815) (i32.const 1155))
- (memory.copy (i32.const 19900) (i32.const 10115) (i32.const 722))
- (memory.copy (i32.const 2921) (i32.const 5935) (i32.const 2370))
- (memory.copy (i32.const 32255) (i32.const 50095) (i32.const 2926))
- (memory.copy (i32.const 15126) (i32.const 17299) (i32.const 2607))
- (memory.copy (i32.const 45575) (i32.const 28447) (i32.const 2045))
- (memory.copy (i32.const 55149) (i32.const 36113) (i32.const 2596))
- (memory.copy (i32.const 28461) (i32.const 54157) (i32.const 1168))
- (memory.copy (i32.const 47951) (i32.const 53385) (i32.const 3137))
- (memory.copy (i32.const 30646) (i32.const 45155) (i32.const 2649))
- (memory.copy (i32.const 5057) (i32.const 4295) (i32.const 52))
- (memory.copy (i32.const 6692) (i32.const 24195) (i32.const 441))
- (memory.copy (i32.const 32984) (i32.const 27117) (i32.const 3445))
- (memory.copy (i32.const 32530) (i32.const 59372) (i32.const 2785))
- (memory.copy (i32.const 34361) (i32.const 8962) (i32.const 2406))
- (memory.copy (i32.const 17893) (i32.const 54538) (i32.const 3381))
- (memory.copy (i32.const 22685) (i32.const 44151) (i32.const 136))
- (memory.copy (i32.const 59089) (i32.const 7077) (i32.const 1045))
- (memory.copy (i32.const 42945) (i32.const 55028) (i32.const 2389))
- (memory.copy (i32.const 44693) (i32.const 20138) (i32.const 877))
- (memory.copy (i32.const 36810) (i32.const 25196) (i32.const 3447))
- (memory.copy (i32.const 45742) (i32.const 31888) (i32.const 854))
- (memory.copy (i32.const 24236) (i32.const 31866) (i32.const 1377))
- (memory.copy (i32.const 33778) (i32.const 692) (i32.const 1594))
- (memory.copy (i32.const 60618) (i32.const 18585) (i32.const 2987))
- (memory.copy (i32.const 50370) (i32.const 41271) (i32.const 1406))
- )
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-)
-(invoke "test")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 124) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 124) (i32.const 1517) (i32.const 9))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 1517) (i32.const 2132) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 2132) (i32.const 2827) (i32.const 10))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 2827) (i32.const 2921) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 2921) (i32.const 3538) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 3538) (i32.const 3786) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 3786) (i32.const 4042) (i32.const 97))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 4042) (i32.const 4651) (i32.const 99))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 4651) (i32.const 5057) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 5057) (i32.const 5109) (i32.const 99))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 5109) (i32.const 5291) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 5291) (i32.const 5524) (i32.const 72))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 5524) (i32.const 5691) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 5691) (i32.const 6552) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 6552) (i32.const 7133) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 7133) (i32.const 7665) (i32.const 99))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 7665) (i32.const 8314) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 8314) (i32.const 8360) (i32.const 62))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 8360) (i32.const 8793) (i32.const 86))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 8793) (i32.const 8979) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 8979) (i32.const 9373) (i32.const 79))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 9373) (i32.const 9518) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 9518) (i32.const 9934) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 9934) (i32.const 10087) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 10087) (i32.const 10206) (i32.const 5))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 10206) (i32.const 10230) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 10230) (i32.const 10249) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 10249) (i32.const 11148) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 11148) (i32.const 11356) (i32.const 74))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 11356) (i32.const 11380) (i32.const 93))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 11380) (i32.const 11939) (i32.const 74))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 11939) (i32.const 12159) (i32.const 68))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 12159) (i32.const 12575) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 12575) (i32.const 12969) (i32.const 79))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 12969) (i32.const 13114) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 13114) (i32.const 14133) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 14133) (i32.const 14404) (i32.const 76))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 14404) (i32.const 14428) (i32.const 57))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 14428) (i32.const 14458) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 14458) (i32.const 14580) (i32.const 32))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 14580) (i32.const 14777) (i32.const 89))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 14777) (i32.const 15124) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 15124) (i32.const 15126) (i32.const 36))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 15126) (i32.const 15192) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 15192) (i32.const 15871) (i32.const 96))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 15871) (i32.const 15998) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 15998) (i32.const 17017) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17017) (i32.const 17288) (i32.const 76))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17288) (i32.const 17312) (i32.const 57))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17312) (i32.const 17342) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17342) (i32.const 17464) (i32.const 32))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17464) (i32.const 17661) (i32.const 89))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17661) (i32.const 17727) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17727) (i32.const 17733) (i32.const 5))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17733) (i32.const 17893) (i32.const 96))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 17893) (i32.const 18553) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 18553) (i32.const 18744) (i32.const 42))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 18744) (i32.const 18801) (i32.const 76))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 18801) (i32.const 18825) (i32.const 57))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 18825) (i32.const 18876) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 18876) (i32.const 18885) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 18885) (i32.const 18904) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 18904) (i32.const 19567) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 19567) (i32.const 20403) (i32.const 96))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 20403) (i32.const 21274) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 21274) (i32.const 21364) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 21364) (i32.const 21468) (i32.const 74))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 21468) (i32.const 21492) (i32.const 93))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 21492) (i32.const 22051) (i32.const 74))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 22051) (i32.const 22480) (i32.const 68))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 22480) (i32.const 22685) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 22685) (i32.const 22694) (i32.const 68))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 22694) (i32.const 22821) (i32.const 10))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 22821) (i32.const 22869) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 22869) (i32.const 24107) (i32.const 97))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 24107) (i32.const 24111) (i32.const 37))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 24111) (i32.const 24236) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 24236) (i32.const 24348) (i32.const 72))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 24348) (i32.const 24515) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 24515) (i32.const 24900) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 24900) (i32.const 25136) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 25136) (i32.const 25182) (i32.const 85))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 25182) (i32.const 25426) (i32.const 68))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 25426) (i32.const 25613) (i32.const 89))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 25613) (i32.const 25830) (i32.const 96))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 25830) (i32.const 26446) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 26446) (i32.const 26517) (i32.const 10))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 26517) (i32.const 27468) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 27468) (i32.const 27503) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 27503) (i32.const 27573) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 27573) (i32.const 28245) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 28245) (i32.const 28280) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 28280) (i32.const 29502) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 29502) (i32.const 29629) (i32.const 42))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 29629) (i32.const 30387) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 30387) (i32.const 30646) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 30646) (i32.const 31066) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31066) (i32.const 31131) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31131) (i32.const 31322) (i32.const 42))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31322) (i32.const 31379) (i32.const 76))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31379) (i32.const 31403) (i32.const 57))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31403) (i32.const 31454) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31454) (i32.const 31463) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31463) (i32.const 31482) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31482) (i32.const 31649) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31649) (i32.const 31978) (i32.const 72))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 31978) (i32.const 32145) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 32145) (i32.const 32530) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 32530) (i32.const 32766) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 32766) (i32.const 32812) (i32.const 85))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 32812) (i32.const 33056) (i32.const 68))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 33056) (i32.const 33660) (i32.const 89))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 33660) (i32.const 33752) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 33752) (i32.const 33775) (i32.const 36))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 33775) (i32.const 33778) (i32.const 32))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 33778) (i32.const 34603) (i32.const 9))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 34603) (i32.const 35218) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 35218) (i32.const 35372) (i32.const 10))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 35372) (i32.const 35486) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 35486) (i32.const 35605) (i32.const 5))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 35605) (i32.const 35629) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 35629) (i32.const 35648) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 35648) (i32.const 36547) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 36547) (i32.const 36755) (i32.const 74))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 36755) (i32.const 36767) (i32.const 93))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 36767) (i32.const 36810) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 36810) (i32.const 36839) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 36839) (i32.const 37444) (i32.const 96))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 37444) (i32.const 38060) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 38060) (i32.const 38131) (i32.const 10))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 38131) (i32.const 39082) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 39082) (i32.const 39117) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 39117) (i32.const 39187) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 39187) (i32.const 39859) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 39859) (i32.const 39894) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 39894) (i32.const 40257) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 40257) (i32.const 40344) (i32.const 89))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 40344) (i32.const 40371) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 40371) (i32.const 40804) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 40804) (i32.const 40909) (i32.const 5))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 40909) (i32.const 42259) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 42259) (i32.const 42511) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 42511) (i32.const 42945) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 42945) (i32.const 43115) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 43115) (i32.const 43306) (i32.const 42))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 43306) (i32.const 43363) (i32.const 76))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 43363) (i32.const 43387) (i32.const 57))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 43387) (i32.const 43438) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 43438) (i32.const 43447) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 43447) (i32.const 43466) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 43466) (i32.const 44129) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 44129) (i32.const 44958) (i32.const 96))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 44958) (i32.const 45570) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 45570) (i32.const 45575) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 45575) (i32.const 45640) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 45640) (i32.const 45742) (i32.const 42))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 45742) (i32.const 45832) (i32.const 72))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 45832) (i32.const 45999) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 45999) (i32.const 46384) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 46384) (i32.const 46596) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 46596) (i32.const 46654) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 46654) (i32.const 47515) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 47515) (i32.const 47620) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 47620) (i32.const 47817) (i32.const 79))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 47817) (i32.const 47951) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 47951) (i32.const 48632) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 48632) (i32.const 48699) (i32.const 97))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 48699) (i32.const 48703) (i32.const 37))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 48703) (i32.const 49764) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 49764) (i32.const 49955) (i32.const 42))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 49955) (i32.const 50012) (i32.const 76))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 50012) (i32.const 50036) (i32.const 57))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 50036) (i32.const 50087) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 50087) (i32.const 50096) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 50096) (i32.const 50115) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 50115) (i32.const 50370) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 50370) (i32.const 51358) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 51358) (i32.const 51610) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 51610) (i32.const 51776) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 51776) (i32.const 51833) (i32.const 89))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 51833) (i32.const 52895) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 52895) (i32.const 53029) (i32.const 97))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 53029) (i32.const 53244) (i32.const 68))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 53244) (i32.const 54066) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 54066) (i32.const 54133) (i32.const 97))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 54133) (i32.const 54137) (i32.const 37))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 54137) (i32.const 55198) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 55198) (i32.const 55389) (i32.const 42))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 55389) (i32.const 55446) (i32.const 76))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 55446) (i32.const 55470) (i32.const 57))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 55470) (i32.const 55521) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 55521) (i32.const 55530) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 55530) (i32.const 55549) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 55549) (i32.const 56212) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 56212) (i32.const 57048) (i32.const 96))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 57048) (i32.const 58183) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 58183) (i32.const 58202) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 58202) (i32.const 58516) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 58516) (i32.const 58835) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 58835) (i32.const 58855) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 58855) (i32.const 59089) (i32.const 95))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 59089) (i32.const 59145) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 59145) (i32.const 59677) (i32.const 99))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 59677) (i32.const 60134) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60134) (i32.const 60502) (i32.const 89))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60502) (i32.const 60594) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60594) (i32.const 60617) (i32.const 36))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60617) (i32.const 60618) (i32.const 32))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60618) (i32.const 60777) (i32.const 42))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60777) (i32.const 60834) (i32.const 76))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60834) (i32.const 60858) (i32.const 57))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60858) (i32.const 60909) (i32.const 59))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60909) (i32.const 60918) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60918) (i32.const 60937) (i32.const 41))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 60937) (i32.const 61600) (i32.const 83))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 61600) (i32.const 62436) (i32.const 96))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 62436) (i32.const 63307) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 63307) (i32.const 63397) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 63397) (i32.const 63501) (i32.const 74))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 63501) (i32.const 63525) (i32.const 93))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 63525) (i32.const 63605) (i32.const 74))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 63605) (i32.const 63704) (i32.const 100))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 63704) (i32.const 63771) (i32.const 97))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 63771) (i32.const 63775) (i32.const 37))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 63775) (i32.const 64311) (i32.const 77))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 64311) (i32.const 64331) (i32.const 26))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 64331) (i32.const 64518) (i32.const 92))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 64518) (i32.const 64827) (i32.const 11))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 64827) (i32.const 64834) (i32.const 26))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 64834) (i32.const 65536) (i32.const 0))
- (i32.const -1))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js
deleted file mode 100644
index 3db49b5b24..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js
+++ /dev/null
@@ -1,13859 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// memory_copy.wast:5
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x95\x80\x80\x80\x00\x02\x83\x80\x80\x80\x00\x00\x01\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
-
-// memory_copy.wast:14
-run(() => call($1, "test", []));
-
-// memory_copy.wast:16
-assert_return(() => call($1, "load8_u", [0]), 0);
-
-// memory_copy.wast:17
-assert_return(() => call($1, "load8_u", [1]), 0);
-
-// memory_copy.wast:18
-assert_return(() => call($1, "load8_u", [2]), 3);
-
-// memory_copy.wast:19
-assert_return(() => call($1, "load8_u", [3]), 1);
-
-// memory_copy.wast:20
-assert_return(() => call($1, "load8_u", [4]), 4);
-
-// memory_copy.wast:21
-assert_return(() => call($1, "load8_u", [5]), 1);
-
-// memory_copy.wast:22
-assert_return(() => call($1, "load8_u", [6]), 0);
-
-// memory_copy.wast:23
-assert_return(() => call($1, "load8_u", [7]), 0);
-
-// memory_copy.wast:24
-assert_return(() => call($1, "load8_u", [8]), 0);
-
-// memory_copy.wast:25
-assert_return(() => call($1, "load8_u", [9]), 0);
-
-// memory_copy.wast:26
-assert_return(() => call($1, "load8_u", [10]), 0);
-
-// memory_copy.wast:27
-assert_return(() => call($1, "load8_u", [11]), 0);
-
-// memory_copy.wast:28
-assert_return(() => call($1, "load8_u", [12]), 7);
-
-// memory_copy.wast:29
-assert_return(() => call($1, "load8_u", [13]), 5);
-
-// memory_copy.wast:30
-assert_return(() => call($1, "load8_u", [14]), 2);
-
-// memory_copy.wast:31
-assert_return(() => call($1, "load8_u", [15]), 3);
-
-// memory_copy.wast:32
-assert_return(() => call($1, "load8_u", [16]), 6);
-
-// memory_copy.wast:33
-assert_return(() => call($1, "load8_u", [17]), 0);
-
-// memory_copy.wast:34
-assert_return(() => call($1, "load8_u", [18]), 0);
-
-// memory_copy.wast:35
-assert_return(() => call($1, "load8_u", [19]), 0);
-
-// memory_copy.wast:36
-assert_return(() => call($1, "load8_u", [20]), 0);
-
-// memory_copy.wast:37
-assert_return(() => call($1, "load8_u", [21]), 0);
-
-// memory_copy.wast:38
-assert_return(() => call($1, "load8_u", [22]), 0);
-
-// memory_copy.wast:39
-assert_return(() => call($1, "load8_u", [23]), 0);
-
-// memory_copy.wast:40
-assert_return(() => call($1, "load8_u", [24]), 0);
-
-// memory_copy.wast:41
-assert_return(() => call($1, "load8_u", [25]), 0);
-
-// memory_copy.wast:42
-assert_return(() => call($1, "load8_u", [26]), 0);
-
-// memory_copy.wast:43
-assert_return(() => call($1, "load8_u", [27]), 0);
-
-// memory_copy.wast:44
-assert_return(() => call($1, "load8_u", [28]), 0);
-
-// memory_copy.wast:45
-assert_return(() => call($1, "load8_u", [29]), 0);
-
-// memory_copy.wast:47
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0d\x41\x02\x41\x03\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
-
-// memory_copy.wast:56
-run(() => call($2, "test", []));
-
-// memory_copy.wast:58
-assert_return(() => call($2, "load8_u", [0]), 0);
-
-// memory_copy.wast:59
-assert_return(() => call($2, "load8_u", [1]), 0);
-
-// memory_copy.wast:60
-assert_return(() => call($2, "load8_u", [2]), 3);
-
-// memory_copy.wast:61
-assert_return(() => call($2, "load8_u", [3]), 1);
-
-// memory_copy.wast:62
-assert_return(() => call($2, "load8_u", [4]), 4);
-
-// memory_copy.wast:63
-assert_return(() => call($2, "load8_u", [5]), 1);
-
-// memory_copy.wast:64
-assert_return(() => call($2, "load8_u", [6]), 0);
-
-// memory_copy.wast:65
-assert_return(() => call($2, "load8_u", [7]), 0);
-
-// memory_copy.wast:66
-assert_return(() => call($2, "load8_u", [8]), 0);
-
-// memory_copy.wast:67
-assert_return(() => call($2, "load8_u", [9]), 0);
-
-// memory_copy.wast:68
-assert_return(() => call($2, "load8_u", [10]), 0);
-
-// memory_copy.wast:69
-assert_return(() => call($2, "load8_u", [11]), 0);
-
-// memory_copy.wast:70
-assert_return(() => call($2, "load8_u", [12]), 7);
-
-// memory_copy.wast:71
-assert_return(() => call($2, "load8_u", [13]), 3);
-
-// memory_copy.wast:72
-assert_return(() => call($2, "load8_u", [14]), 1);
-
-// memory_copy.wast:73
-assert_return(() => call($2, "load8_u", [15]), 4);
-
-// memory_copy.wast:74
-assert_return(() => call($2, "load8_u", [16]), 6);
-
-// memory_copy.wast:75
-assert_return(() => call($2, "load8_u", [17]), 0);
-
-// memory_copy.wast:76
-assert_return(() => call($2, "load8_u", [18]), 0);
-
-// memory_copy.wast:77
-assert_return(() => call($2, "load8_u", [19]), 0);
-
-// memory_copy.wast:78
-assert_return(() => call($2, "load8_u", [20]), 0);
-
-// memory_copy.wast:79
-assert_return(() => call($2, "load8_u", [21]), 0);
-
-// memory_copy.wast:80
-assert_return(() => call($2, "load8_u", [22]), 0);
-
-// memory_copy.wast:81
-assert_return(() => call($2, "load8_u", [23]), 0);
-
-// memory_copy.wast:82
-assert_return(() => call($2, "load8_u", [24]), 0);
-
-// memory_copy.wast:83
-assert_return(() => call($2, "load8_u", [25]), 0);
-
-// memory_copy.wast:84
-assert_return(() => call($2, "load8_u", [26]), 0);
-
-// memory_copy.wast:85
-assert_return(() => call($2, "load8_u", [27]), 0);
-
-// memory_copy.wast:86
-assert_return(() => call($2, "load8_u", [28]), 0);
-
-// memory_copy.wast:87
-assert_return(() => call($2, "load8_u", [29]), 0);
-
-// memory_copy.wast:89
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x19\x41\x0f\x41\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
-
-// memory_copy.wast:98
-run(() => call($3, "test", []));
-
-// memory_copy.wast:100
-assert_return(() => call($3, "load8_u", [0]), 0);
-
-// memory_copy.wast:101
-assert_return(() => call($3, "load8_u", [1]), 0);
-
-// memory_copy.wast:102
-assert_return(() => call($3, "load8_u", [2]), 3);
-
-// memory_copy.wast:103
-assert_return(() => call($3, "load8_u", [3]), 1);
-
-// memory_copy.wast:104
-assert_return(() => call($3, "load8_u", [4]), 4);
-
-// memory_copy.wast:105
-assert_return(() => call($3, "load8_u", [5]), 1);
-
-// memory_copy.wast:106
-assert_return(() => call($3, "load8_u", [6]), 0);
-
-// memory_copy.wast:107
-assert_return(() => call($3, "load8_u", [7]), 0);
-
-// memory_copy.wast:108
-assert_return(() => call($3, "load8_u", [8]), 0);
-
-// memory_copy.wast:109
-assert_return(() => call($3, "load8_u", [9]), 0);
-
-// memory_copy.wast:110
-assert_return(() => call($3, "load8_u", [10]), 0);
-
-// memory_copy.wast:111
-assert_return(() => call($3, "load8_u", [11]), 0);
-
-// memory_copy.wast:112
-assert_return(() => call($3, "load8_u", [12]), 7);
-
-// memory_copy.wast:113
-assert_return(() => call($3, "load8_u", [13]), 5);
-
-// memory_copy.wast:114
-assert_return(() => call($3, "load8_u", [14]), 2);
-
-// memory_copy.wast:115
-assert_return(() => call($3, "load8_u", [15]), 3);
-
-// memory_copy.wast:116
-assert_return(() => call($3, "load8_u", [16]), 6);
-
-// memory_copy.wast:117
-assert_return(() => call($3, "load8_u", [17]), 0);
-
-// memory_copy.wast:118
-assert_return(() => call($3, "load8_u", [18]), 0);
-
-// memory_copy.wast:119
-assert_return(() => call($3, "load8_u", [19]), 0);
-
-// memory_copy.wast:120
-assert_return(() => call($3, "load8_u", [20]), 0);
-
-// memory_copy.wast:121
-assert_return(() => call($3, "load8_u", [21]), 0);
-
-// memory_copy.wast:122
-assert_return(() => call($3, "load8_u", [22]), 0);
-
-// memory_copy.wast:123
-assert_return(() => call($3, "load8_u", [23]), 0);
-
-// memory_copy.wast:124
-assert_return(() => call($3, "load8_u", [24]), 0);
-
-// memory_copy.wast:125
-assert_return(() => call($3, "load8_u", [25]), 3);
-
-// memory_copy.wast:126
-assert_return(() => call($3, "load8_u", [26]), 6);
-
-// memory_copy.wast:127
-assert_return(() => call($3, "load8_u", [27]), 0);
-
-// memory_copy.wast:128
-assert_return(() => call($3, "load8_u", [28]), 0);
-
-// memory_copy.wast:129
-assert_return(() => call($3, "load8_u", [29]), 0);
-
-// memory_copy.wast:131
-let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0d\x41\x19\x41\x03\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
-
-// memory_copy.wast:140
-run(() => call($4, "test", []));
-
-// memory_copy.wast:142
-assert_return(() => call($4, "load8_u", [0]), 0);
-
-// memory_copy.wast:143
-assert_return(() => call($4, "load8_u", [1]), 0);
-
-// memory_copy.wast:144
-assert_return(() => call($4, "load8_u", [2]), 3);
-
-// memory_copy.wast:145
-assert_return(() => call($4, "load8_u", [3]), 1);
-
-// memory_copy.wast:146
-assert_return(() => call($4, "load8_u", [4]), 4);
-
-// memory_copy.wast:147
-assert_return(() => call($4, "load8_u", [5]), 1);
-
-// memory_copy.wast:148
-assert_return(() => call($4, "load8_u", [6]), 0);
-
-// memory_copy.wast:149
-assert_return(() => call($4, "load8_u", [7]), 0);
-
-// memory_copy.wast:150
-assert_return(() => call($4, "load8_u", [8]), 0);
-
-// memory_copy.wast:151
-assert_return(() => call($4, "load8_u", [9]), 0);
-
-// memory_copy.wast:152
-assert_return(() => call($4, "load8_u", [10]), 0);
-
-// memory_copy.wast:153
-assert_return(() => call($4, "load8_u", [11]), 0);
-
-// memory_copy.wast:154
-assert_return(() => call($4, "load8_u", [12]), 7);
-
-// memory_copy.wast:155
-assert_return(() => call($4, "load8_u", [13]), 0);
-
-// memory_copy.wast:156
-assert_return(() => call($4, "load8_u", [14]), 0);
-
-// memory_copy.wast:157
-assert_return(() => call($4, "load8_u", [15]), 0);
-
-// memory_copy.wast:158
-assert_return(() => call($4, "load8_u", [16]), 6);
-
-// memory_copy.wast:159
-assert_return(() => call($4, "load8_u", [17]), 0);
-
-// memory_copy.wast:160
-assert_return(() => call($4, "load8_u", [18]), 0);
-
-// memory_copy.wast:161
-assert_return(() => call($4, "load8_u", [19]), 0);
-
-// memory_copy.wast:162
-assert_return(() => call($4, "load8_u", [20]), 0);
-
-// memory_copy.wast:163
-assert_return(() => call($4, "load8_u", [21]), 0);
-
-// memory_copy.wast:164
-assert_return(() => call($4, "load8_u", [22]), 0);
-
-// memory_copy.wast:165
-assert_return(() => call($4, "load8_u", [23]), 0);
-
-// memory_copy.wast:166
-assert_return(() => call($4, "load8_u", [24]), 0);
-
-// memory_copy.wast:167
-assert_return(() => call($4, "load8_u", [25]), 0);
-
-// memory_copy.wast:168
-assert_return(() => call($4, "load8_u", [26]), 0);
-
-// memory_copy.wast:169
-assert_return(() => call($4, "load8_u", [27]), 0);
-
-// memory_copy.wast:170
-assert_return(() => call($4, "load8_u", [28]), 0);
-
-// memory_copy.wast:171
-assert_return(() => call($4, "load8_u", [29]), 0);
-
-// memory_copy.wast:173
-let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x14\x41\x16\x41\x04\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
-
-// memory_copy.wast:182
-run(() => call($5, "test", []));
-
-// memory_copy.wast:184
-assert_return(() => call($5, "load8_u", [0]), 0);
-
-// memory_copy.wast:185
-assert_return(() => call($5, "load8_u", [1]), 0);
-
-// memory_copy.wast:186
-assert_return(() => call($5, "load8_u", [2]), 3);
-
-// memory_copy.wast:187
-assert_return(() => call($5, "load8_u", [3]), 1);
-
-// memory_copy.wast:188
-assert_return(() => call($5, "load8_u", [4]), 4);
-
-// memory_copy.wast:189
-assert_return(() => call($5, "load8_u", [5]), 1);
-
-// memory_copy.wast:190
-assert_return(() => call($5, "load8_u", [6]), 0);
-
-// memory_copy.wast:191
-assert_return(() => call($5, "load8_u", [7]), 0);
-
-// memory_copy.wast:192
-assert_return(() => call($5, "load8_u", [8]), 0);
-
-// memory_copy.wast:193
-assert_return(() => call($5, "load8_u", [9]), 0);
-
-// memory_copy.wast:194
-assert_return(() => call($5, "load8_u", [10]), 0);
-
-// memory_copy.wast:195
-assert_return(() => call($5, "load8_u", [11]), 0);
-
-// memory_copy.wast:196
-assert_return(() => call($5, "load8_u", [12]), 7);
-
-// memory_copy.wast:197
-assert_return(() => call($5, "load8_u", [13]), 5);
-
-// memory_copy.wast:198
-assert_return(() => call($5, "load8_u", [14]), 2);
-
-// memory_copy.wast:199
-assert_return(() => call($5, "load8_u", [15]), 3);
-
-// memory_copy.wast:200
-assert_return(() => call($5, "load8_u", [16]), 6);
-
-// memory_copy.wast:201
-assert_return(() => call($5, "load8_u", [17]), 0);
-
-// memory_copy.wast:202
-assert_return(() => call($5, "load8_u", [18]), 0);
-
-// memory_copy.wast:203
-assert_return(() => call($5, "load8_u", [19]), 0);
-
-// memory_copy.wast:204
-assert_return(() => call($5, "load8_u", [20]), 0);
-
-// memory_copy.wast:205
-assert_return(() => call($5, "load8_u", [21]), 0);
-
-// memory_copy.wast:206
-assert_return(() => call($5, "load8_u", [22]), 0);
-
-// memory_copy.wast:207
-assert_return(() => call($5, "load8_u", [23]), 0);
-
-// memory_copy.wast:208
-assert_return(() => call($5, "load8_u", [24]), 0);
-
-// memory_copy.wast:209
-assert_return(() => call($5, "load8_u", [25]), 0);
-
-// memory_copy.wast:210
-assert_return(() => call($5, "load8_u", [26]), 0);
-
-// memory_copy.wast:211
-assert_return(() => call($5, "load8_u", [27]), 0);
-
-// memory_copy.wast:212
-assert_return(() => call($5, "load8_u", [28]), 0);
-
-// memory_copy.wast:213
-assert_return(() => call($5, "load8_u", [29]), 0);
-
-// memory_copy.wast:215
-let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x19\x41\x01\x41\x03\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
-
-// memory_copy.wast:224
-run(() => call($6, "test", []));
-
-// memory_copy.wast:226
-assert_return(() => call($6, "load8_u", [0]), 0);
-
-// memory_copy.wast:227
-assert_return(() => call($6, "load8_u", [1]), 0);
-
-// memory_copy.wast:228
-assert_return(() => call($6, "load8_u", [2]), 3);
-
-// memory_copy.wast:229
-assert_return(() => call($6, "load8_u", [3]), 1);
-
-// memory_copy.wast:230
-assert_return(() => call($6, "load8_u", [4]), 4);
-
-// memory_copy.wast:231
-assert_return(() => call($6, "load8_u", [5]), 1);
-
-// memory_copy.wast:232
-assert_return(() => call($6, "load8_u", [6]), 0);
-
-// memory_copy.wast:233
-assert_return(() => call($6, "load8_u", [7]), 0);
-
-// memory_copy.wast:234
-assert_return(() => call($6, "load8_u", [8]), 0);
-
-// memory_copy.wast:235
-assert_return(() => call($6, "load8_u", [9]), 0);
-
-// memory_copy.wast:236
-assert_return(() => call($6, "load8_u", [10]), 0);
-
-// memory_copy.wast:237
-assert_return(() => call($6, "load8_u", [11]), 0);
-
-// memory_copy.wast:238
-assert_return(() => call($6, "load8_u", [12]), 7);
-
-// memory_copy.wast:239
-assert_return(() => call($6, "load8_u", [13]), 5);
-
-// memory_copy.wast:240
-assert_return(() => call($6, "load8_u", [14]), 2);
-
-// memory_copy.wast:241
-assert_return(() => call($6, "load8_u", [15]), 3);
-
-// memory_copy.wast:242
-assert_return(() => call($6, "load8_u", [16]), 6);
-
-// memory_copy.wast:243
-assert_return(() => call($6, "load8_u", [17]), 0);
-
-// memory_copy.wast:244
-assert_return(() => call($6, "load8_u", [18]), 0);
-
-// memory_copy.wast:245
-assert_return(() => call($6, "load8_u", [19]), 0);
-
-// memory_copy.wast:246
-assert_return(() => call($6, "load8_u", [20]), 0);
-
-// memory_copy.wast:247
-assert_return(() => call($6, "load8_u", [21]), 0);
-
-// memory_copy.wast:248
-assert_return(() => call($6, "load8_u", [22]), 0);
-
-// memory_copy.wast:249
-assert_return(() => call($6, "load8_u", [23]), 0);
-
-// memory_copy.wast:250
-assert_return(() => call($6, "load8_u", [24]), 0);
-
-// memory_copy.wast:251
-assert_return(() => call($6, "load8_u", [25]), 0);
-
-// memory_copy.wast:252
-assert_return(() => call($6, "load8_u", [26]), 3);
-
-// memory_copy.wast:253
-assert_return(() => call($6, "load8_u", [27]), 1);
-
-// memory_copy.wast:254
-assert_return(() => call($6, "load8_u", [28]), 0);
-
-// memory_copy.wast:255
-assert_return(() => call($6, "load8_u", [29]), 0);
-
-// memory_copy.wast:257
-let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0a\x41\x0c\x41\x07\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
-
-// memory_copy.wast:266
-run(() => call($7, "test", []));
-
-// memory_copy.wast:268
-assert_return(() => call($7, "load8_u", [0]), 0);
-
-// memory_copy.wast:269
-assert_return(() => call($7, "load8_u", [1]), 0);
-
-// memory_copy.wast:270
-assert_return(() => call($7, "load8_u", [2]), 3);
-
-// memory_copy.wast:271
-assert_return(() => call($7, "load8_u", [3]), 1);
-
-// memory_copy.wast:272
-assert_return(() => call($7, "load8_u", [4]), 4);
-
-// memory_copy.wast:273
-assert_return(() => call($7, "load8_u", [5]), 1);
-
-// memory_copy.wast:274
-assert_return(() => call($7, "load8_u", [6]), 0);
-
-// memory_copy.wast:275
-assert_return(() => call($7, "load8_u", [7]), 0);
-
-// memory_copy.wast:276
-assert_return(() => call($7, "load8_u", [8]), 0);
-
-// memory_copy.wast:277
-assert_return(() => call($7, "load8_u", [9]), 0);
-
-// memory_copy.wast:278
-assert_return(() => call($7, "load8_u", [10]), 7);
-
-// memory_copy.wast:279
-assert_return(() => call($7, "load8_u", [11]), 5);
-
-// memory_copy.wast:280
-assert_return(() => call($7, "load8_u", [12]), 2);
-
-// memory_copy.wast:281
-assert_return(() => call($7, "load8_u", [13]), 3);
-
-// memory_copy.wast:282
-assert_return(() => call($7, "load8_u", [14]), 6);
-
-// memory_copy.wast:283
-assert_return(() => call($7, "load8_u", [15]), 0);
-
-// memory_copy.wast:284
-assert_return(() => call($7, "load8_u", [16]), 0);
-
-// memory_copy.wast:285
-assert_return(() => call($7, "load8_u", [17]), 0);
-
-// memory_copy.wast:286
-assert_return(() => call($7, "load8_u", [18]), 0);
-
-// memory_copy.wast:287
-assert_return(() => call($7, "load8_u", [19]), 0);
-
-// memory_copy.wast:288
-assert_return(() => call($7, "load8_u", [20]), 0);
-
-// memory_copy.wast:289
-assert_return(() => call($7, "load8_u", [21]), 0);
-
-// memory_copy.wast:290
-assert_return(() => call($7, "load8_u", [22]), 0);
-
-// memory_copy.wast:291
-assert_return(() => call($7, "load8_u", [23]), 0);
-
-// memory_copy.wast:292
-assert_return(() => call($7, "load8_u", [24]), 0);
-
-// memory_copy.wast:293
-assert_return(() => call($7, "load8_u", [25]), 0);
-
-// memory_copy.wast:294
-assert_return(() => call($7, "load8_u", [26]), 0);
-
-// memory_copy.wast:295
-assert_return(() => call($7, "load8_u", [27]), 0);
-
-// memory_copy.wast:296
-assert_return(() => call($7, "load8_u", [28]), 0);
-
-// memory_copy.wast:297
-assert_return(() => call($7, "load8_u", [29]), 0);
-
-// memory_copy.wast:299
-let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x0a\x41\x07\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
-
-// memory_copy.wast:308
-run(() => call($8, "test", []));
-
-// memory_copy.wast:310
-assert_return(() => call($8, "load8_u", [0]), 0);
-
-// memory_copy.wast:311
-assert_return(() => call($8, "load8_u", [1]), 0);
-
-// memory_copy.wast:312
-assert_return(() => call($8, "load8_u", [2]), 3);
-
-// memory_copy.wast:313
-assert_return(() => call($8, "load8_u", [3]), 1);
-
-// memory_copy.wast:314
-assert_return(() => call($8, "load8_u", [4]), 4);
-
-// memory_copy.wast:315
-assert_return(() => call($8, "load8_u", [5]), 1);
-
-// memory_copy.wast:316
-assert_return(() => call($8, "load8_u", [6]), 0);
-
-// memory_copy.wast:317
-assert_return(() => call($8, "load8_u", [7]), 0);
-
-// memory_copy.wast:318
-assert_return(() => call($8, "load8_u", [8]), 0);
-
-// memory_copy.wast:319
-assert_return(() => call($8, "load8_u", [9]), 0);
-
-// memory_copy.wast:320
-assert_return(() => call($8, "load8_u", [10]), 0);
-
-// memory_copy.wast:321
-assert_return(() => call($8, "load8_u", [11]), 0);
-
-// memory_copy.wast:322
-assert_return(() => call($8, "load8_u", [12]), 0);
-
-// memory_copy.wast:323
-assert_return(() => call($8, "load8_u", [13]), 0);
-
-// memory_copy.wast:324
-assert_return(() => call($8, "load8_u", [14]), 7);
-
-// memory_copy.wast:325
-assert_return(() => call($8, "load8_u", [15]), 5);
-
-// memory_copy.wast:326
-assert_return(() => call($8, "load8_u", [16]), 2);
-
-// memory_copy.wast:327
-assert_return(() => call($8, "load8_u", [17]), 3);
-
-// memory_copy.wast:328
-assert_return(() => call($8, "load8_u", [18]), 6);
-
-// memory_copy.wast:329
-assert_return(() => call($8, "load8_u", [19]), 0);
-
-// memory_copy.wast:330
-assert_return(() => call($8, "load8_u", [20]), 0);
-
-// memory_copy.wast:331
-assert_return(() => call($8, "load8_u", [21]), 0);
-
-// memory_copy.wast:332
-assert_return(() => call($8, "load8_u", [22]), 0);
-
-// memory_copy.wast:333
-assert_return(() => call($8, "load8_u", [23]), 0);
-
-// memory_copy.wast:334
-assert_return(() => call($8, "load8_u", [24]), 0);
-
-// memory_copy.wast:335
-assert_return(() => call($8, "load8_u", [25]), 0);
-
-// memory_copy.wast:336
-assert_return(() => call($8, "load8_u", [26]), 0);
-
-// memory_copy.wast:337
-assert_return(() => call($8, "load8_u", [27]), 0);
-
-// memory_copy.wast:338
-assert_return(() => call($8, "load8_u", [28]), 0);
-
-// memory_copy.wast:339
-assert_return(() => call($8, "load8_u", [29]), 0);
-
-// memory_copy.wast:341
-let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9a\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:349
-assert_trap(() => call($9, "run", [65516, 0, 40]));
-
-// memory_copy.wast:352
-assert_return(() => call($9, "load8_u", [0]), 0);
-
-// memory_copy.wast:353
-assert_return(() => call($9, "load8_u", [1]), 1);
-
-// memory_copy.wast:354
-assert_return(() => call($9, "load8_u", [2]), 2);
-
-// memory_copy.wast:355
-assert_return(() => call($9, "load8_u", [3]), 3);
-
-// memory_copy.wast:356
-assert_return(() => call($9, "load8_u", [4]), 4);
-
-// memory_copy.wast:357
-assert_return(() => call($9, "load8_u", [5]), 5);
-
-// memory_copy.wast:358
-assert_return(() => call($9, "load8_u", [6]), 6);
-
-// memory_copy.wast:359
-assert_return(() => call($9, "load8_u", [7]), 7);
-
-// memory_copy.wast:360
-assert_return(() => call($9, "load8_u", [8]), 8);
-
-// memory_copy.wast:361
-assert_return(() => call($9, "load8_u", [9]), 9);
-
-// memory_copy.wast:362
-assert_return(() => call($9, "load8_u", [10]), 10);
-
-// memory_copy.wast:363
-assert_return(() => call($9, "load8_u", [11]), 11);
-
-// memory_copy.wast:364
-assert_return(() => call($9, "load8_u", [12]), 12);
-
-// memory_copy.wast:365
-assert_return(() => call($9, "load8_u", [13]), 13);
-
-// memory_copy.wast:366
-assert_return(() => call($9, "load8_u", [14]), 14);
-
-// memory_copy.wast:367
-assert_return(() => call($9, "load8_u", [15]), 15);
-
-// memory_copy.wast:368
-assert_return(() => call($9, "load8_u", [16]), 16);
-
-// memory_copy.wast:369
-assert_return(() => call($9, "load8_u", [17]), 17);
-
-// memory_copy.wast:370
-assert_return(() => call($9, "load8_u", [18]), 18);
-
-// memory_copy.wast:371
-assert_return(() => call($9, "load8_u", [19]), 19);
-
-// memory_copy.wast:372
-assert_return(() => call($9, "load8_u", [218]), 0);
-
-// memory_copy.wast:373
-assert_return(() => call($9, "load8_u", [417]), 0);
-
-// memory_copy.wast:374
-assert_return(() => call($9, "load8_u", [616]), 0);
-
-// memory_copy.wast:375
-assert_return(() => call($9, "load8_u", [815]), 0);
-
-// memory_copy.wast:376
-assert_return(() => call($9, "load8_u", [1014]), 0);
-
-// memory_copy.wast:377
-assert_return(() => call($9, "load8_u", [1213]), 0);
-
-// memory_copy.wast:378
-assert_return(() => call($9, "load8_u", [1412]), 0);
-
-// memory_copy.wast:379
-assert_return(() => call($9, "load8_u", [1611]), 0);
-
-// memory_copy.wast:380
-assert_return(() => call($9, "load8_u", [1810]), 0);
-
-// memory_copy.wast:381
-assert_return(() => call($9, "load8_u", [2009]), 0);
-
-// memory_copy.wast:382
-assert_return(() => call($9, "load8_u", [2208]), 0);
-
-// memory_copy.wast:383
-assert_return(() => call($9, "load8_u", [2407]), 0);
-
-// memory_copy.wast:384
-assert_return(() => call($9, "load8_u", [2606]), 0);
-
-// memory_copy.wast:385
-assert_return(() => call($9, "load8_u", [2805]), 0);
-
-// memory_copy.wast:386
-assert_return(() => call($9, "load8_u", [3004]), 0);
-
-// memory_copy.wast:387
-assert_return(() => call($9, "load8_u", [3203]), 0);
-
-// memory_copy.wast:388
-assert_return(() => call($9, "load8_u", [3402]), 0);
-
-// memory_copy.wast:389
-assert_return(() => call($9, "load8_u", [3601]), 0);
-
-// memory_copy.wast:390
-assert_return(() => call($9, "load8_u", [3800]), 0);
-
-// memory_copy.wast:391
-assert_return(() => call($9, "load8_u", [3999]), 0);
-
-// memory_copy.wast:392
-assert_return(() => call($9, "load8_u", [4198]), 0);
-
-// memory_copy.wast:393
-assert_return(() => call($9, "load8_u", [4397]), 0);
-
-// memory_copy.wast:394
-assert_return(() => call($9, "load8_u", [4596]), 0);
-
-// memory_copy.wast:395
-assert_return(() => call($9, "load8_u", [4795]), 0);
-
-// memory_copy.wast:396
-assert_return(() => call($9, "load8_u", [4994]), 0);
-
-// memory_copy.wast:397
-assert_return(() => call($9, "load8_u", [5193]), 0);
-
-// memory_copy.wast:398
-assert_return(() => call($9, "load8_u", [5392]), 0);
-
-// memory_copy.wast:399
-assert_return(() => call($9, "load8_u", [5591]), 0);
-
-// memory_copy.wast:400
-assert_return(() => call($9, "load8_u", [5790]), 0);
-
-// memory_copy.wast:401
-assert_return(() => call($9, "load8_u", [5989]), 0);
-
-// memory_copy.wast:402
-assert_return(() => call($9, "load8_u", [6188]), 0);
-
-// memory_copy.wast:403
-assert_return(() => call($9, "load8_u", [6387]), 0);
-
-// memory_copy.wast:404
-assert_return(() => call($9, "load8_u", [6586]), 0);
-
-// memory_copy.wast:405
-assert_return(() => call($9, "load8_u", [6785]), 0);
-
-// memory_copy.wast:406
-assert_return(() => call($9, "load8_u", [6984]), 0);
-
-// memory_copy.wast:407
-assert_return(() => call($9, "load8_u", [7183]), 0);
-
-// memory_copy.wast:408
-assert_return(() => call($9, "load8_u", [7382]), 0);
-
-// memory_copy.wast:409
-assert_return(() => call($9, "load8_u", [7581]), 0);
-
-// memory_copy.wast:410
-assert_return(() => call($9, "load8_u", [7780]), 0);
-
-// memory_copy.wast:411
-assert_return(() => call($9, "load8_u", [7979]), 0);
-
-// memory_copy.wast:412
-assert_return(() => call($9, "load8_u", [8178]), 0);
-
-// memory_copy.wast:413
-assert_return(() => call($9, "load8_u", [8377]), 0);
-
-// memory_copy.wast:414
-assert_return(() => call($9, "load8_u", [8576]), 0);
-
-// memory_copy.wast:415
-assert_return(() => call($9, "load8_u", [8775]), 0);
-
-// memory_copy.wast:416
-assert_return(() => call($9, "load8_u", [8974]), 0);
-
-// memory_copy.wast:417
-assert_return(() => call($9, "load8_u", [9173]), 0);
-
-// memory_copy.wast:418
-assert_return(() => call($9, "load8_u", [9372]), 0);
-
-// memory_copy.wast:419
-assert_return(() => call($9, "load8_u", [9571]), 0);
-
-// memory_copy.wast:420
-assert_return(() => call($9, "load8_u", [9770]), 0);
-
-// memory_copy.wast:421
-assert_return(() => call($9, "load8_u", [9969]), 0);
-
-// memory_copy.wast:422
-assert_return(() => call($9, "load8_u", [10168]), 0);
-
-// memory_copy.wast:423
-assert_return(() => call($9, "load8_u", [10367]), 0);
-
-// memory_copy.wast:424
-assert_return(() => call($9, "load8_u", [10566]), 0);
-
-// memory_copy.wast:425
-assert_return(() => call($9, "load8_u", [10765]), 0);
-
-// memory_copy.wast:426
-assert_return(() => call($9, "load8_u", [10964]), 0);
-
-// memory_copy.wast:427
-assert_return(() => call($9, "load8_u", [11163]), 0);
-
-// memory_copy.wast:428
-assert_return(() => call($9, "load8_u", [11362]), 0);
-
-// memory_copy.wast:429
-assert_return(() => call($9, "load8_u", [11561]), 0);
-
-// memory_copy.wast:430
-assert_return(() => call($9, "load8_u", [11760]), 0);
-
-// memory_copy.wast:431
-assert_return(() => call($9, "load8_u", [11959]), 0);
-
-// memory_copy.wast:432
-assert_return(() => call($9, "load8_u", [12158]), 0);
-
-// memory_copy.wast:433
-assert_return(() => call($9, "load8_u", [12357]), 0);
-
-// memory_copy.wast:434
-assert_return(() => call($9, "load8_u", [12556]), 0);
-
-// memory_copy.wast:435
-assert_return(() => call($9, "load8_u", [12755]), 0);
-
-// memory_copy.wast:436
-assert_return(() => call($9, "load8_u", [12954]), 0);
-
-// memory_copy.wast:437
-assert_return(() => call($9, "load8_u", [13153]), 0);
-
-// memory_copy.wast:438
-assert_return(() => call($9, "load8_u", [13352]), 0);
-
-// memory_copy.wast:439
-assert_return(() => call($9, "load8_u", [13551]), 0);
-
-// memory_copy.wast:440
-assert_return(() => call($9, "load8_u", [13750]), 0);
-
-// memory_copy.wast:441
-assert_return(() => call($9, "load8_u", [13949]), 0);
-
-// memory_copy.wast:442
-assert_return(() => call($9, "load8_u", [14148]), 0);
-
-// memory_copy.wast:443
-assert_return(() => call($9, "load8_u", [14347]), 0);
-
-// memory_copy.wast:444
-assert_return(() => call($9, "load8_u", [14546]), 0);
-
-// memory_copy.wast:445
-assert_return(() => call($9, "load8_u", [14745]), 0);
-
-// memory_copy.wast:446
-assert_return(() => call($9, "load8_u", [14944]), 0);
-
-// memory_copy.wast:447
-assert_return(() => call($9, "load8_u", [15143]), 0);
-
-// memory_copy.wast:448
-assert_return(() => call($9, "load8_u", [15342]), 0);
-
-// memory_copy.wast:449
-assert_return(() => call($9, "load8_u", [15541]), 0);
-
-// memory_copy.wast:450
-assert_return(() => call($9, "load8_u", [15740]), 0);
-
-// memory_copy.wast:451
-assert_return(() => call($9, "load8_u", [15939]), 0);
-
-// memory_copy.wast:452
-assert_return(() => call($9, "load8_u", [16138]), 0);
-
-// memory_copy.wast:453
-assert_return(() => call($9, "load8_u", [16337]), 0);
-
-// memory_copy.wast:454
-assert_return(() => call($9, "load8_u", [16536]), 0);
-
-// memory_copy.wast:455
-assert_return(() => call($9, "load8_u", [16735]), 0);
-
-// memory_copy.wast:456
-assert_return(() => call($9, "load8_u", [16934]), 0);
-
-// memory_copy.wast:457
-assert_return(() => call($9, "load8_u", [17133]), 0);
-
-// memory_copy.wast:458
-assert_return(() => call($9, "load8_u", [17332]), 0);
-
-// memory_copy.wast:459
-assert_return(() => call($9, "load8_u", [17531]), 0);
-
-// memory_copy.wast:460
-assert_return(() => call($9, "load8_u", [17730]), 0);
-
-// memory_copy.wast:461
-assert_return(() => call($9, "load8_u", [17929]), 0);
-
-// memory_copy.wast:462
-assert_return(() => call($9, "load8_u", [18128]), 0);
-
-// memory_copy.wast:463
-assert_return(() => call($9, "load8_u", [18327]), 0);
-
-// memory_copy.wast:464
-assert_return(() => call($9, "load8_u", [18526]), 0);
-
-// memory_copy.wast:465
-assert_return(() => call($9, "load8_u", [18725]), 0);
-
-// memory_copy.wast:466
-assert_return(() => call($9, "load8_u", [18924]), 0);
-
-// memory_copy.wast:467
-assert_return(() => call($9, "load8_u", [19123]), 0);
-
-// memory_copy.wast:468
-assert_return(() => call($9, "load8_u", [19322]), 0);
-
-// memory_copy.wast:469
-assert_return(() => call($9, "load8_u", [19521]), 0);
-
-// memory_copy.wast:470
-assert_return(() => call($9, "load8_u", [19720]), 0);
-
-// memory_copy.wast:471
-assert_return(() => call($9, "load8_u", [19919]), 0);
-
-// memory_copy.wast:472
-assert_return(() => call($9, "load8_u", [20118]), 0);
-
-// memory_copy.wast:473
-assert_return(() => call($9, "load8_u", [20317]), 0);
-
-// memory_copy.wast:474
-assert_return(() => call($9, "load8_u", [20516]), 0);
-
-// memory_copy.wast:475
-assert_return(() => call($9, "load8_u", [20715]), 0);
-
-// memory_copy.wast:476
-assert_return(() => call($9, "load8_u", [20914]), 0);
-
-// memory_copy.wast:477
-assert_return(() => call($9, "load8_u", [21113]), 0);
-
-// memory_copy.wast:478
-assert_return(() => call($9, "load8_u", [21312]), 0);
-
-// memory_copy.wast:479
-assert_return(() => call($9, "load8_u", [21511]), 0);
-
-// memory_copy.wast:480
-assert_return(() => call($9, "load8_u", [21710]), 0);
-
-// memory_copy.wast:481
-assert_return(() => call($9, "load8_u", [21909]), 0);
-
-// memory_copy.wast:482
-assert_return(() => call($9, "load8_u", [22108]), 0);
-
-// memory_copy.wast:483
-assert_return(() => call($9, "load8_u", [22307]), 0);
-
-// memory_copy.wast:484
-assert_return(() => call($9, "load8_u", [22506]), 0);
-
-// memory_copy.wast:485
-assert_return(() => call($9, "load8_u", [22705]), 0);
-
-// memory_copy.wast:486
-assert_return(() => call($9, "load8_u", [22904]), 0);
-
-// memory_copy.wast:487
-assert_return(() => call($9, "load8_u", [23103]), 0);
-
-// memory_copy.wast:488
-assert_return(() => call($9, "load8_u", [23302]), 0);
-
-// memory_copy.wast:489
-assert_return(() => call($9, "load8_u", [23501]), 0);
-
-// memory_copy.wast:490
-assert_return(() => call($9, "load8_u", [23700]), 0);
-
-// memory_copy.wast:491
-assert_return(() => call($9, "load8_u", [23899]), 0);
-
-// memory_copy.wast:492
-assert_return(() => call($9, "load8_u", [24098]), 0);
-
-// memory_copy.wast:493
-assert_return(() => call($9, "load8_u", [24297]), 0);
-
-// memory_copy.wast:494
-assert_return(() => call($9, "load8_u", [24496]), 0);
-
-// memory_copy.wast:495
-assert_return(() => call($9, "load8_u", [24695]), 0);
-
-// memory_copy.wast:496
-assert_return(() => call($9, "load8_u", [24894]), 0);
-
-// memory_copy.wast:497
-assert_return(() => call($9, "load8_u", [25093]), 0);
-
-// memory_copy.wast:498
-assert_return(() => call($9, "load8_u", [25292]), 0);
-
-// memory_copy.wast:499
-assert_return(() => call($9, "load8_u", [25491]), 0);
-
-// memory_copy.wast:500
-assert_return(() => call($9, "load8_u", [25690]), 0);
-
-// memory_copy.wast:501
-assert_return(() => call($9, "load8_u", [25889]), 0);
-
-// memory_copy.wast:502
-assert_return(() => call($9, "load8_u", [26088]), 0);
-
-// memory_copy.wast:503
-assert_return(() => call($9, "load8_u", [26287]), 0);
-
-// memory_copy.wast:504
-assert_return(() => call($9, "load8_u", [26486]), 0);
-
-// memory_copy.wast:505
-assert_return(() => call($9, "load8_u", [26685]), 0);
-
-// memory_copy.wast:506
-assert_return(() => call($9, "load8_u", [26884]), 0);
-
-// memory_copy.wast:507
-assert_return(() => call($9, "load8_u", [27083]), 0);
-
-// memory_copy.wast:508
-assert_return(() => call($9, "load8_u", [27282]), 0);
-
-// memory_copy.wast:509
-assert_return(() => call($9, "load8_u", [27481]), 0);
-
-// memory_copy.wast:510
-assert_return(() => call($9, "load8_u", [27680]), 0);
-
-// memory_copy.wast:511
-assert_return(() => call($9, "load8_u", [27879]), 0);
-
-// memory_copy.wast:512
-assert_return(() => call($9, "load8_u", [28078]), 0);
-
-// memory_copy.wast:513
-assert_return(() => call($9, "load8_u", [28277]), 0);
-
-// memory_copy.wast:514
-assert_return(() => call($9, "load8_u", [28476]), 0);
-
-// memory_copy.wast:515
-assert_return(() => call($9, "load8_u", [28675]), 0);
-
-// memory_copy.wast:516
-assert_return(() => call($9, "load8_u", [28874]), 0);
-
-// memory_copy.wast:517
-assert_return(() => call($9, "load8_u", [29073]), 0);
-
-// memory_copy.wast:518
-assert_return(() => call($9, "load8_u", [29272]), 0);
-
-// memory_copy.wast:519
-assert_return(() => call($9, "load8_u", [29471]), 0);
-
-// memory_copy.wast:520
-assert_return(() => call($9, "load8_u", [29670]), 0);
-
-// memory_copy.wast:521
-assert_return(() => call($9, "load8_u", [29869]), 0);
-
-// memory_copy.wast:522
-assert_return(() => call($9, "load8_u", [30068]), 0);
-
-// memory_copy.wast:523
-assert_return(() => call($9, "load8_u", [30267]), 0);
-
-// memory_copy.wast:524
-assert_return(() => call($9, "load8_u", [30466]), 0);
-
-// memory_copy.wast:525
-assert_return(() => call($9, "load8_u", [30665]), 0);
-
-// memory_copy.wast:526
-assert_return(() => call($9, "load8_u", [30864]), 0);
-
-// memory_copy.wast:527
-assert_return(() => call($9, "load8_u", [31063]), 0);
-
-// memory_copy.wast:528
-assert_return(() => call($9, "load8_u", [31262]), 0);
-
-// memory_copy.wast:529
-assert_return(() => call($9, "load8_u", [31461]), 0);
-
-// memory_copy.wast:530
-assert_return(() => call($9, "load8_u", [31660]), 0);
-
-// memory_copy.wast:531
-assert_return(() => call($9, "load8_u", [31859]), 0);
-
-// memory_copy.wast:532
-assert_return(() => call($9, "load8_u", [32058]), 0);
-
-// memory_copy.wast:533
-assert_return(() => call($9, "load8_u", [32257]), 0);
-
-// memory_copy.wast:534
-assert_return(() => call($9, "load8_u", [32456]), 0);
-
-// memory_copy.wast:535
-assert_return(() => call($9, "load8_u", [32655]), 0);
-
-// memory_copy.wast:536
-assert_return(() => call($9, "load8_u", [32854]), 0);
-
-// memory_copy.wast:537
-assert_return(() => call($9, "load8_u", [33053]), 0);
-
-// memory_copy.wast:538
-assert_return(() => call($9, "load8_u", [33252]), 0);
-
-// memory_copy.wast:539
-assert_return(() => call($9, "load8_u", [33451]), 0);
-
-// memory_copy.wast:540
-assert_return(() => call($9, "load8_u", [33650]), 0);
-
-// memory_copy.wast:541
-assert_return(() => call($9, "load8_u", [33849]), 0);
-
-// memory_copy.wast:542
-assert_return(() => call($9, "load8_u", [34048]), 0);
-
-// memory_copy.wast:543
-assert_return(() => call($9, "load8_u", [34247]), 0);
-
-// memory_copy.wast:544
-assert_return(() => call($9, "load8_u", [34446]), 0);
-
-// memory_copy.wast:545
-assert_return(() => call($9, "load8_u", [34645]), 0);
-
-// memory_copy.wast:546
-assert_return(() => call($9, "load8_u", [34844]), 0);
-
-// memory_copy.wast:547
-assert_return(() => call($9, "load8_u", [35043]), 0);
-
-// memory_copy.wast:548
-assert_return(() => call($9, "load8_u", [35242]), 0);
-
-// memory_copy.wast:549
-assert_return(() => call($9, "load8_u", [35441]), 0);
-
-// memory_copy.wast:550
-assert_return(() => call($9, "load8_u", [35640]), 0);
-
-// memory_copy.wast:551
-assert_return(() => call($9, "load8_u", [35839]), 0);
-
-// memory_copy.wast:552
-assert_return(() => call($9, "load8_u", [36038]), 0);
-
-// memory_copy.wast:553
-assert_return(() => call($9, "load8_u", [36237]), 0);
-
-// memory_copy.wast:554
-assert_return(() => call($9, "load8_u", [36436]), 0);
-
-// memory_copy.wast:555
-assert_return(() => call($9, "load8_u", [36635]), 0);
-
-// memory_copy.wast:556
-assert_return(() => call($9, "load8_u", [36834]), 0);
-
-// memory_copy.wast:557
-assert_return(() => call($9, "load8_u", [37033]), 0);
-
-// memory_copy.wast:558
-assert_return(() => call($9, "load8_u", [37232]), 0);
-
-// memory_copy.wast:559
-assert_return(() => call($9, "load8_u", [37431]), 0);
-
-// memory_copy.wast:560
-assert_return(() => call($9, "load8_u", [37630]), 0);
-
-// memory_copy.wast:561
-assert_return(() => call($9, "load8_u", [37829]), 0);
-
-// memory_copy.wast:562
-assert_return(() => call($9, "load8_u", [38028]), 0);
-
-// memory_copy.wast:563
-assert_return(() => call($9, "load8_u", [38227]), 0);
-
-// memory_copy.wast:564
-assert_return(() => call($9, "load8_u", [38426]), 0);
-
-// memory_copy.wast:565
-assert_return(() => call($9, "load8_u", [38625]), 0);
-
-// memory_copy.wast:566
-assert_return(() => call($9, "load8_u", [38824]), 0);
-
-// memory_copy.wast:567
-assert_return(() => call($9, "load8_u", [39023]), 0);
-
-// memory_copy.wast:568
-assert_return(() => call($9, "load8_u", [39222]), 0);
-
-// memory_copy.wast:569
-assert_return(() => call($9, "load8_u", [39421]), 0);
-
-// memory_copy.wast:570
-assert_return(() => call($9, "load8_u", [39620]), 0);
-
-// memory_copy.wast:571
-assert_return(() => call($9, "load8_u", [39819]), 0);
-
-// memory_copy.wast:572
-assert_return(() => call($9, "load8_u", [40018]), 0);
-
-// memory_copy.wast:573
-assert_return(() => call($9, "load8_u", [40217]), 0);
-
-// memory_copy.wast:574
-assert_return(() => call($9, "load8_u", [40416]), 0);
-
-// memory_copy.wast:575
-assert_return(() => call($9, "load8_u", [40615]), 0);
-
-// memory_copy.wast:576
-assert_return(() => call($9, "load8_u", [40814]), 0);
-
-// memory_copy.wast:577
-assert_return(() => call($9, "load8_u", [41013]), 0);
-
-// memory_copy.wast:578
-assert_return(() => call($9, "load8_u", [41212]), 0);
-
-// memory_copy.wast:579
-assert_return(() => call($9, "load8_u", [41411]), 0);
-
-// memory_copy.wast:580
-assert_return(() => call($9, "load8_u", [41610]), 0);
-
-// memory_copy.wast:581
-assert_return(() => call($9, "load8_u", [41809]), 0);
-
-// memory_copy.wast:582
-assert_return(() => call($9, "load8_u", [42008]), 0);
-
-// memory_copy.wast:583
-assert_return(() => call($9, "load8_u", [42207]), 0);
-
-// memory_copy.wast:584
-assert_return(() => call($9, "load8_u", [42406]), 0);
-
-// memory_copy.wast:585
-assert_return(() => call($9, "load8_u", [42605]), 0);
-
-// memory_copy.wast:586
-assert_return(() => call($9, "load8_u", [42804]), 0);
-
-// memory_copy.wast:587
-assert_return(() => call($9, "load8_u", [43003]), 0);
-
-// memory_copy.wast:588
-assert_return(() => call($9, "load8_u", [43202]), 0);
-
-// memory_copy.wast:589
-assert_return(() => call($9, "load8_u", [43401]), 0);
-
-// memory_copy.wast:590
-assert_return(() => call($9, "load8_u", [43600]), 0);
-
-// memory_copy.wast:591
-assert_return(() => call($9, "load8_u", [43799]), 0);
-
-// memory_copy.wast:592
-assert_return(() => call($9, "load8_u", [43998]), 0);
-
-// memory_copy.wast:593
-assert_return(() => call($9, "load8_u", [44197]), 0);
-
-// memory_copy.wast:594
-assert_return(() => call($9, "load8_u", [44396]), 0);
-
-// memory_copy.wast:595
-assert_return(() => call($9, "load8_u", [44595]), 0);
-
-// memory_copy.wast:596
-assert_return(() => call($9, "load8_u", [44794]), 0);
-
-// memory_copy.wast:597
-assert_return(() => call($9, "load8_u", [44993]), 0);
-
-// memory_copy.wast:598
-assert_return(() => call($9, "load8_u", [45192]), 0);
-
-// memory_copy.wast:599
-assert_return(() => call($9, "load8_u", [45391]), 0);
-
-// memory_copy.wast:600
-assert_return(() => call($9, "load8_u", [45590]), 0);
-
-// memory_copy.wast:601
-assert_return(() => call($9, "load8_u", [45789]), 0);
-
-// memory_copy.wast:602
-assert_return(() => call($9, "load8_u", [45988]), 0);
-
-// memory_copy.wast:603
-assert_return(() => call($9, "load8_u", [46187]), 0);
-
-// memory_copy.wast:604
-assert_return(() => call($9, "load8_u", [46386]), 0);
-
-// memory_copy.wast:605
-assert_return(() => call($9, "load8_u", [46585]), 0);
-
-// memory_copy.wast:606
-assert_return(() => call($9, "load8_u", [46784]), 0);
-
-// memory_copy.wast:607
-assert_return(() => call($9, "load8_u", [46983]), 0);
-
-// memory_copy.wast:608
-assert_return(() => call($9, "load8_u", [47182]), 0);
-
-// memory_copy.wast:609
-assert_return(() => call($9, "load8_u", [47381]), 0);
-
-// memory_copy.wast:610
-assert_return(() => call($9, "load8_u", [47580]), 0);
-
-// memory_copy.wast:611
-assert_return(() => call($9, "load8_u", [47779]), 0);
-
-// memory_copy.wast:612
-assert_return(() => call($9, "load8_u", [47978]), 0);
-
-// memory_copy.wast:613
-assert_return(() => call($9, "load8_u", [48177]), 0);
-
-// memory_copy.wast:614
-assert_return(() => call($9, "load8_u", [48376]), 0);
-
-// memory_copy.wast:615
-assert_return(() => call($9, "load8_u", [48575]), 0);
-
-// memory_copy.wast:616
-assert_return(() => call($9, "load8_u", [48774]), 0);
-
-// memory_copy.wast:617
-assert_return(() => call($9, "load8_u", [48973]), 0);
-
-// memory_copy.wast:618
-assert_return(() => call($9, "load8_u", [49172]), 0);
-
-// memory_copy.wast:619
-assert_return(() => call($9, "load8_u", [49371]), 0);
-
-// memory_copy.wast:620
-assert_return(() => call($9, "load8_u", [49570]), 0);
-
-// memory_copy.wast:621
-assert_return(() => call($9, "load8_u", [49769]), 0);
-
-// memory_copy.wast:622
-assert_return(() => call($9, "load8_u", [49968]), 0);
-
-// memory_copy.wast:623
-assert_return(() => call($9, "load8_u", [50167]), 0);
-
-// memory_copy.wast:624
-assert_return(() => call($9, "load8_u", [50366]), 0);
-
-// memory_copy.wast:625
-assert_return(() => call($9, "load8_u", [50565]), 0);
-
-// memory_copy.wast:626
-assert_return(() => call($9, "load8_u", [50764]), 0);
-
-// memory_copy.wast:627
-assert_return(() => call($9, "load8_u", [50963]), 0);
-
-// memory_copy.wast:628
-assert_return(() => call($9, "load8_u", [51162]), 0);
-
-// memory_copy.wast:629
-assert_return(() => call($9, "load8_u", [51361]), 0);
-
-// memory_copy.wast:630
-assert_return(() => call($9, "load8_u", [51560]), 0);
-
-// memory_copy.wast:631
-assert_return(() => call($9, "load8_u", [51759]), 0);
-
-// memory_copy.wast:632
-assert_return(() => call($9, "load8_u", [51958]), 0);
-
-// memory_copy.wast:633
-assert_return(() => call($9, "load8_u", [52157]), 0);
-
-// memory_copy.wast:634
-assert_return(() => call($9, "load8_u", [52356]), 0);
-
-// memory_copy.wast:635
-assert_return(() => call($9, "load8_u", [52555]), 0);
-
-// memory_copy.wast:636
-assert_return(() => call($9, "load8_u", [52754]), 0);
-
-// memory_copy.wast:637
-assert_return(() => call($9, "load8_u", [52953]), 0);
-
-// memory_copy.wast:638
-assert_return(() => call($9, "load8_u", [53152]), 0);
-
-// memory_copy.wast:639
-assert_return(() => call($9, "load8_u", [53351]), 0);
-
-// memory_copy.wast:640
-assert_return(() => call($9, "load8_u", [53550]), 0);
-
-// memory_copy.wast:641
-assert_return(() => call($9, "load8_u", [53749]), 0);
-
-// memory_copy.wast:642
-assert_return(() => call($9, "load8_u", [53948]), 0);
-
-// memory_copy.wast:643
-assert_return(() => call($9, "load8_u", [54147]), 0);
-
-// memory_copy.wast:644
-assert_return(() => call($9, "load8_u", [54346]), 0);
-
-// memory_copy.wast:645
-assert_return(() => call($9, "load8_u", [54545]), 0);
-
-// memory_copy.wast:646
-assert_return(() => call($9, "load8_u", [54744]), 0);
-
-// memory_copy.wast:647
-assert_return(() => call($9, "load8_u", [54943]), 0);
-
-// memory_copy.wast:648
-assert_return(() => call($9, "load8_u", [55142]), 0);
-
-// memory_copy.wast:649
-assert_return(() => call($9, "load8_u", [55341]), 0);
-
-// memory_copy.wast:650
-assert_return(() => call($9, "load8_u", [55540]), 0);
-
-// memory_copy.wast:651
-assert_return(() => call($9, "load8_u", [55739]), 0);
-
-// memory_copy.wast:652
-assert_return(() => call($9, "load8_u", [55938]), 0);
-
-// memory_copy.wast:653
-assert_return(() => call($9, "load8_u", [56137]), 0);
-
-// memory_copy.wast:654
-assert_return(() => call($9, "load8_u", [56336]), 0);
-
-// memory_copy.wast:655
-assert_return(() => call($9, "load8_u", [56535]), 0);
-
-// memory_copy.wast:656
-assert_return(() => call($9, "load8_u", [56734]), 0);
-
-// memory_copy.wast:657
-assert_return(() => call($9, "load8_u", [56933]), 0);
-
-// memory_copy.wast:658
-assert_return(() => call($9, "load8_u", [57132]), 0);
-
-// memory_copy.wast:659
-assert_return(() => call($9, "load8_u", [57331]), 0);
-
-// memory_copy.wast:660
-assert_return(() => call($9, "load8_u", [57530]), 0);
-
-// memory_copy.wast:661
-assert_return(() => call($9, "load8_u", [57729]), 0);
-
-// memory_copy.wast:662
-assert_return(() => call($9, "load8_u", [57928]), 0);
-
-// memory_copy.wast:663
-assert_return(() => call($9, "load8_u", [58127]), 0);
-
-// memory_copy.wast:664
-assert_return(() => call($9, "load8_u", [58326]), 0);
-
-// memory_copy.wast:665
-assert_return(() => call($9, "load8_u", [58525]), 0);
-
-// memory_copy.wast:666
-assert_return(() => call($9, "load8_u", [58724]), 0);
-
-// memory_copy.wast:667
-assert_return(() => call($9, "load8_u", [58923]), 0);
-
-// memory_copy.wast:668
-assert_return(() => call($9, "load8_u", [59122]), 0);
-
-// memory_copy.wast:669
-assert_return(() => call($9, "load8_u", [59321]), 0);
-
-// memory_copy.wast:670
-assert_return(() => call($9, "load8_u", [59520]), 0);
-
-// memory_copy.wast:671
-assert_return(() => call($9, "load8_u", [59719]), 0);
-
-// memory_copy.wast:672
-assert_return(() => call($9, "load8_u", [59918]), 0);
-
-// memory_copy.wast:673
-assert_return(() => call($9, "load8_u", [60117]), 0);
-
-// memory_copy.wast:674
-assert_return(() => call($9, "load8_u", [60316]), 0);
-
-// memory_copy.wast:675
-assert_return(() => call($9, "load8_u", [60515]), 0);
-
-// memory_copy.wast:676
-assert_return(() => call($9, "load8_u", [60714]), 0);
-
-// memory_copy.wast:677
-assert_return(() => call($9, "load8_u", [60913]), 0);
-
-// memory_copy.wast:678
-assert_return(() => call($9, "load8_u", [61112]), 0);
-
-// memory_copy.wast:679
-assert_return(() => call($9, "load8_u", [61311]), 0);
-
-// memory_copy.wast:680
-assert_return(() => call($9, "load8_u", [61510]), 0);
-
-// memory_copy.wast:681
-assert_return(() => call($9, "load8_u", [61709]), 0);
-
-// memory_copy.wast:682
-assert_return(() => call($9, "load8_u", [61908]), 0);
-
-// memory_copy.wast:683
-assert_return(() => call($9, "load8_u", [62107]), 0);
-
-// memory_copy.wast:684
-assert_return(() => call($9, "load8_u", [62306]), 0);
-
-// memory_copy.wast:685
-assert_return(() => call($9, "load8_u", [62505]), 0);
-
-// memory_copy.wast:686
-assert_return(() => call($9, "load8_u", [62704]), 0);
-
-// memory_copy.wast:687
-assert_return(() => call($9, "load8_u", [62903]), 0);
-
-// memory_copy.wast:688
-assert_return(() => call($9, "load8_u", [63102]), 0);
-
-// memory_copy.wast:689
-assert_return(() => call($9, "load8_u", [63301]), 0);
-
-// memory_copy.wast:690
-assert_return(() => call($9, "load8_u", [63500]), 0);
-
-// memory_copy.wast:691
-assert_return(() => call($9, "load8_u", [63699]), 0);
-
-// memory_copy.wast:692
-assert_return(() => call($9, "load8_u", [63898]), 0);
-
-// memory_copy.wast:693
-assert_return(() => call($9, "load8_u", [64097]), 0);
-
-// memory_copy.wast:694
-assert_return(() => call($9, "load8_u", [64296]), 0);
-
-// memory_copy.wast:695
-assert_return(() => call($9, "load8_u", [64495]), 0);
-
-// memory_copy.wast:696
-assert_return(() => call($9, "load8_u", [64694]), 0);
-
-// memory_copy.wast:697
-assert_return(() => call($9, "load8_u", [64893]), 0);
-
-// memory_copy.wast:698
-assert_return(() => call($9, "load8_u", [65092]), 0);
-
-// memory_copy.wast:699
-assert_return(() => call($9, "load8_u", [65291]), 0);
-
-// memory_copy.wast:700
-assert_return(() => call($9, "load8_u", [65490]), 0);
-
-// memory_copy.wast:701
-assert_return(() => call($9, "load8_u", [65516]), 0);
-
-// memory_copy.wast:702
-assert_return(() => call($9, "load8_u", [65517]), 1);
-
-// memory_copy.wast:703
-assert_return(() => call($9, "load8_u", [65518]), 2);
-
-// memory_copy.wast:704
-assert_return(() => call($9, "load8_u", [65519]), 3);
-
-// memory_copy.wast:705
-assert_return(() => call($9, "load8_u", [65520]), 4);
-
-// memory_copy.wast:706
-assert_return(() => call($9, "load8_u", [65521]), 5);
-
-// memory_copy.wast:707
-assert_return(() => call($9, "load8_u", [65522]), 6);
-
-// memory_copy.wast:708
-assert_return(() => call($9, "load8_u", [65523]), 7);
-
-// memory_copy.wast:709
-assert_return(() => call($9, "load8_u", [65524]), 8);
-
-// memory_copy.wast:710
-assert_return(() => call($9, "load8_u", [65525]), 9);
-
-// memory_copy.wast:711
-assert_return(() => call($9, "load8_u", [65526]), 10);
-
-// memory_copy.wast:712
-assert_return(() => call($9, "load8_u", [65527]), 11);
-
-// memory_copy.wast:713
-assert_return(() => call($9, "load8_u", [65528]), 12);
-
-// memory_copy.wast:714
-assert_return(() => call($9, "load8_u", [65529]), 13);
-
-// memory_copy.wast:715
-assert_return(() => call($9, "load8_u", [65530]), 14);
-
-// memory_copy.wast:716
-assert_return(() => call($9, "load8_u", [65531]), 15);
-
-// memory_copy.wast:717
-assert_return(() => call($9, "load8_u", [65532]), 16);
-
-// memory_copy.wast:718
-assert_return(() => call($9, "load8_u", [65533]), 17);
-
-// memory_copy.wast:719
-assert_return(() => call($9, "load8_u", [65534]), 18);
-
-// memory_copy.wast:720
-assert_return(() => call($9, "load8_u", [65535]), 19);
-
-// memory_copy.wast:722
-let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9b\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x15\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14");
-
-// memory_copy.wast:730
-assert_trap(() => call($10, "run", [65515, 0, 39]));
-
-// memory_copy.wast:733
-assert_return(() => call($10, "load8_u", [0]), 0);
-
-// memory_copy.wast:734
-assert_return(() => call($10, "load8_u", [1]), 1);
-
-// memory_copy.wast:735
-assert_return(() => call($10, "load8_u", [2]), 2);
-
-// memory_copy.wast:736
-assert_return(() => call($10, "load8_u", [3]), 3);
-
-// memory_copy.wast:737
-assert_return(() => call($10, "load8_u", [4]), 4);
-
-// memory_copy.wast:738
-assert_return(() => call($10, "load8_u", [5]), 5);
-
-// memory_copy.wast:739
-assert_return(() => call($10, "load8_u", [6]), 6);
-
-// memory_copy.wast:740
-assert_return(() => call($10, "load8_u", [7]), 7);
-
-// memory_copy.wast:741
-assert_return(() => call($10, "load8_u", [8]), 8);
-
-// memory_copy.wast:742
-assert_return(() => call($10, "load8_u", [9]), 9);
-
-// memory_copy.wast:743
-assert_return(() => call($10, "load8_u", [10]), 10);
-
-// memory_copy.wast:744
-assert_return(() => call($10, "load8_u", [11]), 11);
-
-// memory_copy.wast:745
-assert_return(() => call($10, "load8_u", [12]), 12);
-
-// memory_copy.wast:746
-assert_return(() => call($10, "load8_u", [13]), 13);
-
-// memory_copy.wast:747
-assert_return(() => call($10, "load8_u", [14]), 14);
-
-// memory_copy.wast:748
-assert_return(() => call($10, "load8_u", [15]), 15);
-
-// memory_copy.wast:749
-assert_return(() => call($10, "load8_u", [16]), 16);
-
-// memory_copy.wast:750
-assert_return(() => call($10, "load8_u", [17]), 17);
-
-// memory_copy.wast:751
-assert_return(() => call($10, "load8_u", [18]), 18);
-
-// memory_copy.wast:752
-assert_return(() => call($10, "load8_u", [19]), 19);
-
-// memory_copy.wast:753
-assert_return(() => call($10, "load8_u", [20]), 20);
-
-// memory_copy.wast:754
-assert_return(() => call($10, "load8_u", [219]), 0);
-
-// memory_copy.wast:755
-assert_return(() => call($10, "load8_u", [418]), 0);
-
-// memory_copy.wast:756
-assert_return(() => call($10, "load8_u", [617]), 0);
-
-// memory_copy.wast:757
-assert_return(() => call($10, "load8_u", [816]), 0);
-
-// memory_copy.wast:758
-assert_return(() => call($10, "load8_u", [1015]), 0);
-
-// memory_copy.wast:759
-assert_return(() => call($10, "load8_u", [1214]), 0);
-
-// memory_copy.wast:760
-assert_return(() => call($10, "load8_u", [1413]), 0);
-
-// memory_copy.wast:761
-assert_return(() => call($10, "load8_u", [1612]), 0);
-
-// memory_copy.wast:762
-assert_return(() => call($10, "load8_u", [1811]), 0);
-
-// memory_copy.wast:763
-assert_return(() => call($10, "load8_u", [2010]), 0);
-
-// memory_copy.wast:764
-assert_return(() => call($10, "load8_u", [2209]), 0);
-
-// memory_copy.wast:765
-assert_return(() => call($10, "load8_u", [2408]), 0);
-
-// memory_copy.wast:766
-assert_return(() => call($10, "load8_u", [2607]), 0);
-
-// memory_copy.wast:767
-assert_return(() => call($10, "load8_u", [2806]), 0);
-
-// memory_copy.wast:768
-assert_return(() => call($10, "load8_u", [3005]), 0);
-
-// memory_copy.wast:769
-assert_return(() => call($10, "load8_u", [3204]), 0);
-
-// memory_copy.wast:770
-assert_return(() => call($10, "load8_u", [3403]), 0);
-
-// memory_copy.wast:771
-assert_return(() => call($10, "load8_u", [3602]), 0);
-
-// memory_copy.wast:772
-assert_return(() => call($10, "load8_u", [3801]), 0);
-
-// memory_copy.wast:773
-assert_return(() => call($10, "load8_u", [4000]), 0);
-
-// memory_copy.wast:774
-assert_return(() => call($10, "load8_u", [4199]), 0);
-
-// memory_copy.wast:775
-assert_return(() => call($10, "load8_u", [4398]), 0);
-
-// memory_copy.wast:776
-assert_return(() => call($10, "load8_u", [4597]), 0);
-
-// memory_copy.wast:777
-assert_return(() => call($10, "load8_u", [4796]), 0);
-
-// memory_copy.wast:778
-assert_return(() => call($10, "load8_u", [4995]), 0);
-
-// memory_copy.wast:779
-assert_return(() => call($10, "load8_u", [5194]), 0);
-
-// memory_copy.wast:780
-assert_return(() => call($10, "load8_u", [5393]), 0);
-
-// memory_copy.wast:781
-assert_return(() => call($10, "load8_u", [5592]), 0);
-
-// memory_copy.wast:782
-assert_return(() => call($10, "load8_u", [5791]), 0);
-
-// memory_copy.wast:783
-assert_return(() => call($10, "load8_u", [5990]), 0);
-
-// memory_copy.wast:784
-assert_return(() => call($10, "load8_u", [6189]), 0);
-
-// memory_copy.wast:785
-assert_return(() => call($10, "load8_u", [6388]), 0);
-
-// memory_copy.wast:786
-assert_return(() => call($10, "load8_u", [6587]), 0);
-
-// memory_copy.wast:787
-assert_return(() => call($10, "load8_u", [6786]), 0);
-
-// memory_copy.wast:788
-assert_return(() => call($10, "load8_u", [6985]), 0);
-
-// memory_copy.wast:789
-assert_return(() => call($10, "load8_u", [7184]), 0);
-
-// memory_copy.wast:790
-assert_return(() => call($10, "load8_u", [7383]), 0);
-
-// memory_copy.wast:791
-assert_return(() => call($10, "load8_u", [7582]), 0);
-
-// memory_copy.wast:792
-assert_return(() => call($10, "load8_u", [7781]), 0);
-
-// memory_copy.wast:793
-assert_return(() => call($10, "load8_u", [7980]), 0);
-
-// memory_copy.wast:794
-assert_return(() => call($10, "load8_u", [8179]), 0);
-
-// memory_copy.wast:795
-assert_return(() => call($10, "load8_u", [8378]), 0);
-
-// memory_copy.wast:796
-assert_return(() => call($10, "load8_u", [8577]), 0);
-
-// memory_copy.wast:797
-assert_return(() => call($10, "load8_u", [8776]), 0);
-
-// memory_copy.wast:798
-assert_return(() => call($10, "load8_u", [8975]), 0);
-
-// memory_copy.wast:799
-assert_return(() => call($10, "load8_u", [9174]), 0);
-
-// memory_copy.wast:800
-assert_return(() => call($10, "load8_u", [9373]), 0);
-
-// memory_copy.wast:801
-assert_return(() => call($10, "load8_u", [9572]), 0);
-
-// memory_copy.wast:802
-assert_return(() => call($10, "load8_u", [9771]), 0);
-
-// memory_copy.wast:803
-assert_return(() => call($10, "load8_u", [9970]), 0);
-
-// memory_copy.wast:804
-assert_return(() => call($10, "load8_u", [10169]), 0);
-
-// memory_copy.wast:805
-assert_return(() => call($10, "load8_u", [10368]), 0);
-
-// memory_copy.wast:806
-assert_return(() => call($10, "load8_u", [10567]), 0);
-
-// memory_copy.wast:807
-assert_return(() => call($10, "load8_u", [10766]), 0);
-
-// memory_copy.wast:808
-assert_return(() => call($10, "load8_u", [10965]), 0);
-
-// memory_copy.wast:809
-assert_return(() => call($10, "load8_u", [11164]), 0);
-
-// memory_copy.wast:810
-assert_return(() => call($10, "load8_u", [11363]), 0);
-
-// memory_copy.wast:811
-assert_return(() => call($10, "load8_u", [11562]), 0);
-
-// memory_copy.wast:812
-assert_return(() => call($10, "load8_u", [11761]), 0);
-
-// memory_copy.wast:813
-assert_return(() => call($10, "load8_u", [11960]), 0);
-
-// memory_copy.wast:814
-assert_return(() => call($10, "load8_u", [12159]), 0);
-
-// memory_copy.wast:815
-assert_return(() => call($10, "load8_u", [12358]), 0);
-
-// memory_copy.wast:816
-assert_return(() => call($10, "load8_u", [12557]), 0);
-
-// memory_copy.wast:817
-assert_return(() => call($10, "load8_u", [12756]), 0);
-
-// memory_copy.wast:818
-assert_return(() => call($10, "load8_u", [12955]), 0);
-
-// memory_copy.wast:819
-assert_return(() => call($10, "load8_u", [13154]), 0);
-
-// memory_copy.wast:820
-assert_return(() => call($10, "load8_u", [13353]), 0);
-
-// memory_copy.wast:821
-assert_return(() => call($10, "load8_u", [13552]), 0);
-
-// memory_copy.wast:822
-assert_return(() => call($10, "load8_u", [13751]), 0);
-
-// memory_copy.wast:823
-assert_return(() => call($10, "load8_u", [13950]), 0);
-
-// memory_copy.wast:824
-assert_return(() => call($10, "load8_u", [14149]), 0);
-
-// memory_copy.wast:825
-assert_return(() => call($10, "load8_u", [14348]), 0);
-
-// memory_copy.wast:826
-assert_return(() => call($10, "load8_u", [14547]), 0);
-
-// memory_copy.wast:827
-assert_return(() => call($10, "load8_u", [14746]), 0);
-
-// memory_copy.wast:828
-assert_return(() => call($10, "load8_u", [14945]), 0);
-
-// memory_copy.wast:829
-assert_return(() => call($10, "load8_u", [15144]), 0);
-
-// memory_copy.wast:830
-assert_return(() => call($10, "load8_u", [15343]), 0);
-
-// memory_copy.wast:831
-assert_return(() => call($10, "load8_u", [15542]), 0);
-
-// memory_copy.wast:832
-assert_return(() => call($10, "load8_u", [15741]), 0);
-
-// memory_copy.wast:833
-assert_return(() => call($10, "load8_u", [15940]), 0);
-
-// memory_copy.wast:834
-assert_return(() => call($10, "load8_u", [16139]), 0);
-
-// memory_copy.wast:835
-assert_return(() => call($10, "load8_u", [16338]), 0);
-
-// memory_copy.wast:836
-assert_return(() => call($10, "load8_u", [16537]), 0);
-
-// memory_copy.wast:837
-assert_return(() => call($10, "load8_u", [16736]), 0);
-
-// memory_copy.wast:838
-assert_return(() => call($10, "load8_u", [16935]), 0);
-
-// memory_copy.wast:839
-assert_return(() => call($10, "load8_u", [17134]), 0);
-
-// memory_copy.wast:840
-assert_return(() => call($10, "load8_u", [17333]), 0);
-
-// memory_copy.wast:841
-assert_return(() => call($10, "load8_u", [17532]), 0);
-
-// memory_copy.wast:842
-assert_return(() => call($10, "load8_u", [17731]), 0);
-
-// memory_copy.wast:843
-assert_return(() => call($10, "load8_u", [17930]), 0);
-
-// memory_copy.wast:844
-assert_return(() => call($10, "load8_u", [18129]), 0);
-
-// memory_copy.wast:845
-assert_return(() => call($10, "load8_u", [18328]), 0);
-
-// memory_copy.wast:846
-assert_return(() => call($10, "load8_u", [18527]), 0);
-
-// memory_copy.wast:847
-assert_return(() => call($10, "load8_u", [18726]), 0);
-
-// memory_copy.wast:848
-assert_return(() => call($10, "load8_u", [18925]), 0);
-
-// memory_copy.wast:849
-assert_return(() => call($10, "load8_u", [19124]), 0);
-
-// memory_copy.wast:850
-assert_return(() => call($10, "load8_u", [19323]), 0);
-
-// memory_copy.wast:851
-assert_return(() => call($10, "load8_u", [19522]), 0);
-
-// memory_copy.wast:852
-assert_return(() => call($10, "load8_u", [19721]), 0);
-
-// memory_copy.wast:853
-assert_return(() => call($10, "load8_u", [19920]), 0);
-
-// memory_copy.wast:854
-assert_return(() => call($10, "load8_u", [20119]), 0);
-
-// memory_copy.wast:855
-assert_return(() => call($10, "load8_u", [20318]), 0);
-
-// memory_copy.wast:856
-assert_return(() => call($10, "load8_u", [20517]), 0);
-
-// memory_copy.wast:857
-assert_return(() => call($10, "load8_u", [20716]), 0);
-
-// memory_copy.wast:858
-assert_return(() => call($10, "load8_u", [20915]), 0);
-
-// memory_copy.wast:859
-assert_return(() => call($10, "load8_u", [21114]), 0);
-
-// memory_copy.wast:860
-assert_return(() => call($10, "load8_u", [21313]), 0);
-
-// memory_copy.wast:861
-assert_return(() => call($10, "load8_u", [21512]), 0);
-
-// memory_copy.wast:862
-assert_return(() => call($10, "load8_u", [21711]), 0);
-
-// memory_copy.wast:863
-assert_return(() => call($10, "load8_u", [21910]), 0);
-
-// memory_copy.wast:864
-assert_return(() => call($10, "load8_u", [22109]), 0);
-
-// memory_copy.wast:865
-assert_return(() => call($10, "load8_u", [22308]), 0);
-
-// memory_copy.wast:866
-assert_return(() => call($10, "load8_u", [22507]), 0);
-
-// memory_copy.wast:867
-assert_return(() => call($10, "load8_u", [22706]), 0);
-
-// memory_copy.wast:868
-assert_return(() => call($10, "load8_u", [22905]), 0);
-
-// memory_copy.wast:869
-assert_return(() => call($10, "load8_u", [23104]), 0);
-
-// memory_copy.wast:870
-assert_return(() => call($10, "load8_u", [23303]), 0);
-
-// memory_copy.wast:871
-assert_return(() => call($10, "load8_u", [23502]), 0);
-
-// memory_copy.wast:872
-assert_return(() => call($10, "load8_u", [23701]), 0);
-
-// memory_copy.wast:873
-assert_return(() => call($10, "load8_u", [23900]), 0);
-
-// memory_copy.wast:874
-assert_return(() => call($10, "load8_u", [24099]), 0);
-
-// memory_copy.wast:875
-assert_return(() => call($10, "load8_u", [24298]), 0);
-
-// memory_copy.wast:876
-assert_return(() => call($10, "load8_u", [24497]), 0);
-
-// memory_copy.wast:877
-assert_return(() => call($10, "load8_u", [24696]), 0);
-
-// memory_copy.wast:878
-assert_return(() => call($10, "load8_u", [24895]), 0);
-
-// memory_copy.wast:879
-assert_return(() => call($10, "load8_u", [25094]), 0);
-
-// memory_copy.wast:880
-assert_return(() => call($10, "load8_u", [25293]), 0);
-
-// memory_copy.wast:881
-assert_return(() => call($10, "load8_u", [25492]), 0);
-
-// memory_copy.wast:882
-assert_return(() => call($10, "load8_u", [25691]), 0);
-
-// memory_copy.wast:883
-assert_return(() => call($10, "load8_u", [25890]), 0);
-
-// memory_copy.wast:884
-assert_return(() => call($10, "load8_u", [26089]), 0);
-
-// memory_copy.wast:885
-assert_return(() => call($10, "load8_u", [26288]), 0);
-
-// memory_copy.wast:886
-assert_return(() => call($10, "load8_u", [26487]), 0);
-
-// memory_copy.wast:887
-assert_return(() => call($10, "load8_u", [26686]), 0);
-
-// memory_copy.wast:888
-assert_return(() => call($10, "load8_u", [26885]), 0);
-
-// memory_copy.wast:889
-assert_return(() => call($10, "load8_u", [27084]), 0);
-
-// memory_copy.wast:890
-assert_return(() => call($10, "load8_u", [27283]), 0);
-
-// memory_copy.wast:891
-assert_return(() => call($10, "load8_u", [27482]), 0);
-
-// memory_copy.wast:892
-assert_return(() => call($10, "load8_u", [27681]), 0);
-
-// memory_copy.wast:893
-assert_return(() => call($10, "load8_u", [27880]), 0);
-
-// memory_copy.wast:894
-assert_return(() => call($10, "load8_u", [28079]), 0);
-
-// memory_copy.wast:895
-assert_return(() => call($10, "load8_u", [28278]), 0);
-
-// memory_copy.wast:896
-assert_return(() => call($10, "load8_u", [28477]), 0);
-
-// memory_copy.wast:897
-assert_return(() => call($10, "load8_u", [28676]), 0);
-
-// memory_copy.wast:898
-assert_return(() => call($10, "load8_u", [28875]), 0);
-
-// memory_copy.wast:899
-assert_return(() => call($10, "load8_u", [29074]), 0);
-
-// memory_copy.wast:900
-assert_return(() => call($10, "load8_u", [29273]), 0);
-
-// memory_copy.wast:901
-assert_return(() => call($10, "load8_u", [29472]), 0);
-
-// memory_copy.wast:902
-assert_return(() => call($10, "load8_u", [29671]), 0);
-
-// memory_copy.wast:903
-assert_return(() => call($10, "load8_u", [29870]), 0);
-
-// memory_copy.wast:904
-assert_return(() => call($10, "load8_u", [30069]), 0);
-
-// memory_copy.wast:905
-assert_return(() => call($10, "load8_u", [30268]), 0);
-
-// memory_copy.wast:906
-assert_return(() => call($10, "load8_u", [30467]), 0);
-
-// memory_copy.wast:907
-assert_return(() => call($10, "load8_u", [30666]), 0);
-
-// memory_copy.wast:908
-assert_return(() => call($10, "load8_u", [30865]), 0);
-
-// memory_copy.wast:909
-assert_return(() => call($10, "load8_u", [31064]), 0);
-
-// memory_copy.wast:910
-assert_return(() => call($10, "load8_u", [31263]), 0);
-
-// memory_copy.wast:911
-assert_return(() => call($10, "load8_u", [31462]), 0);
-
-// memory_copy.wast:912
-assert_return(() => call($10, "load8_u", [31661]), 0);
-
-// memory_copy.wast:913
-assert_return(() => call($10, "load8_u", [31860]), 0);
-
-// memory_copy.wast:914
-assert_return(() => call($10, "load8_u", [32059]), 0);
-
-// memory_copy.wast:915
-assert_return(() => call($10, "load8_u", [32258]), 0);
-
-// memory_copy.wast:916
-assert_return(() => call($10, "load8_u", [32457]), 0);
-
-// memory_copy.wast:917
-assert_return(() => call($10, "load8_u", [32656]), 0);
-
-// memory_copy.wast:918
-assert_return(() => call($10, "load8_u", [32855]), 0);
-
-// memory_copy.wast:919
-assert_return(() => call($10, "load8_u", [33054]), 0);
-
-// memory_copy.wast:920
-assert_return(() => call($10, "load8_u", [33253]), 0);
-
-// memory_copy.wast:921
-assert_return(() => call($10, "load8_u", [33452]), 0);
-
-// memory_copy.wast:922
-assert_return(() => call($10, "load8_u", [33651]), 0);
-
-// memory_copy.wast:923
-assert_return(() => call($10, "load8_u", [33850]), 0);
-
-// memory_copy.wast:924
-assert_return(() => call($10, "load8_u", [34049]), 0);
-
-// memory_copy.wast:925
-assert_return(() => call($10, "load8_u", [34248]), 0);
-
-// memory_copy.wast:926
-assert_return(() => call($10, "load8_u", [34447]), 0);
-
-// memory_copy.wast:927
-assert_return(() => call($10, "load8_u", [34646]), 0);
-
-// memory_copy.wast:928
-assert_return(() => call($10, "load8_u", [34845]), 0);
-
-// memory_copy.wast:929
-assert_return(() => call($10, "load8_u", [35044]), 0);
-
-// memory_copy.wast:930
-assert_return(() => call($10, "load8_u", [35243]), 0);
-
-// memory_copy.wast:931
-assert_return(() => call($10, "load8_u", [35442]), 0);
-
-// memory_copy.wast:932
-assert_return(() => call($10, "load8_u", [35641]), 0);
-
-// memory_copy.wast:933
-assert_return(() => call($10, "load8_u", [35840]), 0);
-
-// memory_copy.wast:934
-assert_return(() => call($10, "load8_u", [36039]), 0);
-
-// memory_copy.wast:935
-assert_return(() => call($10, "load8_u", [36238]), 0);
-
-// memory_copy.wast:936
-assert_return(() => call($10, "load8_u", [36437]), 0);
-
-// memory_copy.wast:937
-assert_return(() => call($10, "load8_u", [36636]), 0);
-
-// memory_copy.wast:938
-assert_return(() => call($10, "load8_u", [36835]), 0);
-
-// memory_copy.wast:939
-assert_return(() => call($10, "load8_u", [37034]), 0);
-
-// memory_copy.wast:940
-assert_return(() => call($10, "load8_u", [37233]), 0);
-
-// memory_copy.wast:941
-assert_return(() => call($10, "load8_u", [37432]), 0);
-
-// memory_copy.wast:942
-assert_return(() => call($10, "load8_u", [37631]), 0);
-
-// memory_copy.wast:943
-assert_return(() => call($10, "load8_u", [37830]), 0);
-
-// memory_copy.wast:944
-assert_return(() => call($10, "load8_u", [38029]), 0);
-
-// memory_copy.wast:945
-assert_return(() => call($10, "load8_u", [38228]), 0);
-
-// memory_copy.wast:946
-assert_return(() => call($10, "load8_u", [38427]), 0);
-
-// memory_copy.wast:947
-assert_return(() => call($10, "load8_u", [38626]), 0);
-
-// memory_copy.wast:948
-assert_return(() => call($10, "load8_u", [38825]), 0);
-
-// memory_copy.wast:949
-assert_return(() => call($10, "load8_u", [39024]), 0);
-
-// memory_copy.wast:950
-assert_return(() => call($10, "load8_u", [39223]), 0);
-
-// memory_copy.wast:951
-assert_return(() => call($10, "load8_u", [39422]), 0);
-
-// memory_copy.wast:952
-assert_return(() => call($10, "load8_u", [39621]), 0);
-
-// memory_copy.wast:953
-assert_return(() => call($10, "load8_u", [39820]), 0);
-
-// memory_copy.wast:954
-assert_return(() => call($10, "load8_u", [40019]), 0);
-
-// memory_copy.wast:955
-assert_return(() => call($10, "load8_u", [40218]), 0);
-
-// memory_copy.wast:956
-assert_return(() => call($10, "load8_u", [40417]), 0);
-
-// memory_copy.wast:957
-assert_return(() => call($10, "load8_u", [40616]), 0);
-
-// memory_copy.wast:958
-assert_return(() => call($10, "load8_u", [40815]), 0);
-
-// memory_copy.wast:959
-assert_return(() => call($10, "load8_u", [41014]), 0);
-
-// memory_copy.wast:960
-assert_return(() => call($10, "load8_u", [41213]), 0);
-
-// memory_copy.wast:961
-assert_return(() => call($10, "load8_u", [41412]), 0);
-
-// memory_copy.wast:962
-assert_return(() => call($10, "load8_u", [41611]), 0);
-
-// memory_copy.wast:963
-assert_return(() => call($10, "load8_u", [41810]), 0);
-
-// memory_copy.wast:964
-assert_return(() => call($10, "load8_u", [42009]), 0);
-
-// memory_copy.wast:965
-assert_return(() => call($10, "load8_u", [42208]), 0);
-
-// memory_copy.wast:966
-assert_return(() => call($10, "load8_u", [42407]), 0);
-
-// memory_copy.wast:967
-assert_return(() => call($10, "load8_u", [42606]), 0);
-
-// memory_copy.wast:968
-assert_return(() => call($10, "load8_u", [42805]), 0);
-
-// memory_copy.wast:969
-assert_return(() => call($10, "load8_u", [43004]), 0);
-
-// memory_copy.wast:970
-assert_return(() => call($10, "load8_u", [43203]), 0);
-
-// memory_copy.wast:971
-assert_return(() => call($10, "load8_u", [43402]), 0);
-
-// memory_copy.wast:972
-assert_return(() => call($10, "load8_u", [43601]), 0);
-
-// memory_copy.wast:973
-assert_return(() => call($10, "load8_u", [43800]), 0);
-
-// memory_copy.wast:974
-assert_return(() => call($10, "load8_u", [43999]), 0);
-
-// memory_copy.wast:975
-assert_return(() => call($10, "load8_u", [44198]), 0);
-
-// memory_copy.wast:976
-assert_return(() => call($10, "load8_u", [44397]), 0);
-
-// memory_copy.wast:977
-assert_return(() => call($10, "load8_u", [44596]), 0);
-
-// memory_copy.wast:978
-assert_return(() => call($10, "load8_u", [44795]), 0);
-
-// memory_copy.wast:979
-assert_return(() => call($10, "load8_u", [44994]), 0);
-
-// memory_copy.wast:980
-assert_return(() => call($10, "load8_u", [45193]), 0);
-
-// memory_copy.wast:981
-assert_return(() => call($10, "load8_u", [45392]), 0);
-
-// memory_copy.wast:982
-assert_return(() => call($10, "load8_u", [45591]), 0);
-
-// memory_copy.wast:983
-assert_return(() => call($10, "load8_u", [45790]), 0);
-
-// memory_copy.wast:984
-assert_return(() => call($10, "load8_u", [45989]), 0);
-
-// memory_copy.wast:985
-assert_return(() => call($10, "load8_u", [46188]), 0);
-
-// memory_copy.wast:986
-assert_return(() => call($10, "load8_u", [46387]), 0);
-
-// memory_copy.wast:987
-assert_return(() => call($10, "load8_u", [46586]), 0);
-
-// memory_copy.wast:988
-assert_return(() => call($10, "load8_u", [46785]), 0);
-
-// memory_copy.wast:989
-assert_return(() => call($10, "load8_u", [46984]), 0);
-
-// memory_copy.wast:990
-assert_return(() => call($10, "load8_u", [47183]), 0);
-
-// memory_copy.wast:991
-assert_return(() => call($10, "load8_u", [47382]), 0);
-
-// memory_copy.wast:992
-assert_return(() => call($10, "load8_u", [47581]), 0);
-
-// memory_copy.wast:993
-assert_return(() => call($10, "load8_u", [47780]), 0);
-
-// memory_copy.wast:994
-assert_return(() => call($10, "load8_u", [47979]), 0);
-
-// memory_copy.wast:995
-assert_return(() => call($10, "load8_u", [48178]), 0);
-
-// memory_copy.wast:996
-assert_return(() => call($10, "load8_u", [48377]), 0);
-
-// memory_copy.wast:997
-assert_return(() => call($10, "load8_u", [48576]), 0);
-
-// memory_copy.wast:998
-assert_return(() => call($10, "load8_u", [48775]), 0);
-
-// memory_copy.wast:999
-assert_return(() => call($10, "load8_u", [48974]), 0);
-
-// memory_copy.wast:1000
-assert_return(() => call($10, "load8_u", [49173]), 0);
-
-// memory_copy.wast:1001
-assert_return(() => call($10, "load8_u", [49372]), 0);
-
-// memory_copy.wast:1002
-assert_return(() => call($10, "load8_u", [49571]), 0);
-
-// memory_copy.wast:1003
-assert_return(() => call($10, "load8_u", [49770]), 0);
-
-// memory_copy.wast:1004
-assert_return(() => call($10, "load8_u", [49969]), 0);
-
-// memory_copy.wast:1005
-assert_return(() => call($10, "load8_u", [50168]), 0);
-
-// memory_copy.wast:1006
-assert_return(() => call($10, "load8_u", [50367]), 0);
-
-// memory_copy.wast:1007
-assert_return(() => call($10, "load8_u", [50566]), 0);
-
-// memory_copy.wast:1008
-assert_return(() => call($10, "load8_u", [50765]), 0);
-
-// memory_copy.wast:1009
-assert_return(() => call($10, "load8_u", [50964]), 0);
-
-// memory_copy.wast:1010
-assert_return(() => call($10, "load8_u", [51163]), 0);
-
-// memory_copy.wast:1011
-assert_return(() => call($10, "load8_u", [51362]), 0);
-
-// memory_copy.wast:1012
-assert_return(() => call($10, "load8_u", [51561]), 0);
-
-// memory_copy.wast:1013
-assert_return(() => call($10, "load8_u", [51760]), 0);
-
-// memory_copy.wast:1014
-assert_return(() => call($10, "load8_u", [51959]), 0);
-
-// memory_copy.wast:1015
-assert_return(() => call($10, "load8_u", [52158]), 0);
-
-// memory_copy.wast:1016
-assert_return(() => call($10, "load8_u", [52357]), 0);
-
-// memory_copy.wast:1017
-assert_return(() => call($10, "load8_u", [52556]), 0);
-
-// memory_copy.wast:1018
-assert_return(() => call($10, "load8_u", [52755]), 0);
-
-// memory_copy.wast:1019
-assert_return(() => call($10, "load8_u", [52954]), 0);
-
-// memory_copy.wast:1020
-assert_return(() => call($10, "load8_u", [53153]), 0);
-
-// memory_copy.wast:1021
-assert_return(() => call($10, "load8_u", [53352]), 0);
-
-// memory_copy.wast:1022
-assert_return(() => call($10, "load8_u", [53551]), 0);
-
-// memory_copy.wast:1023
-assert_return(() => call($10, "load8_u", [53750]), 0);
-
-// memory_copy.wast:1024
-assert_return(() => call($10, "load8_u", [53949]), 0);
-
-// memory_copy.wast:1025
-assert_return(() => call($10, "load8_u", [54148]), 0);
-
-// memory_copy.wast:1026
-assert_return(() => call($10, "load8_u", [54347]), 0);
-
-// memory_copy.wast:1027
-assert_return(() => call($10, "load8_u", [54546]), 0);
-
-// memory_copy.wast:1028
-assert_return(() => call($10, "load8_u", [54745]), 0);
-
-// memory_copy.wast:1029
-assert_return(() => call($10, "load8_u", [54944]), 0);
-
-// memory_copy.wast:1030
-assert_return(() => call($10, "load8_u", [55143]), 0);
-
-// memory_copy.wast:1031
-assert_return(() => call($10, "load8_u", [55342]), 0);
-
-// memory_copy.wast:1032
-assert_return(() => call($10, "load8_u", [55541]), 0);
-
-// memory_copy.wast:1033
-assert_return(() => call($10, "load8_u", [55740]), 0);
-
-// memory_copy.wast:1034
-assert_return(() => call($10, "load8_u", [55939]), 0);
-
-// memory_copy.wast:1035
-assert_return(() => call($10, "load8_u", [56138]), 0);
-
-// memory_copy.wast:1036
-assert_return(() => call($10, "load8_u", [56337]), 0);
-
-// memory_copy.wast:1037
-assert_return(() => call($10, "load8_u", [56536]), 0);
-
-// memory_copy.wast:1038
-assert_return(() => call($10, "load8_u", [56735]), 0);
-
-// memory_copy.wast:1039
-assert_return(() => call($10, "load8_u", [56934]), 0);
-
-// memory_copy.wast:1040
-assert_return(() => call($10, "load8_u", [57133]), 0);
-
-// memory_copy.wast:1041
-assert_return(() => call($10, "load8_u", [57332]), 0);
-
-// memory_copy.wast:1042
-assert_return(() => call($10, "load8_u", [57531]), 0);
-
-// memory_copy.wast:1043
-assert_return(() => call($10, "load8_u", [57730]), 0);
-
-// memory_copy.wast:1044
-assert_return(() => call($10, "load8_u", [57929]), 0);
-
-// memory_copy.wast:1045
-assert_return(() => call($10, "load8_u", [58128]), 0);
-
-// memory_copy.wast:1046
-assert_return(() => call($10, "load8_u", [58327]), 0);
-
-// memory_copy.wast:1047
-assert_return(() => call($10, "load8_u", [58526]), 0);
-
-// memory_copy.wast:1048
-assert_return(() => call($10, "load8_u", [58725]), 0);
-
-// memory_copy.wast:1049
-assert_return(() => call($10, "load8_u", [58924]), 0);
-
-// memory_copy.wast:1050
-assert_return(() => call($10, "load8_u", [59123]), 0);
-
-// memory_copy.wast:1051
-assert_return(() => call($10, "load8_u", [59322]), 0);
-
-// memory_copy.wast:1052
-assert_return(() => call($10, "load8_u", [59521]), 0);
-
-// memory_copy.wast:1053
-assert_return(() => call($10, "load8_u", [59720]), 0);
-
-// memory_copy.wast:1054
-assert_return(() => call($10, "load8_u", [59919]), 0);
-
-// memory_copy.wast:1055
-assert_return(() => call($10, "load8_u", [60118]), 0);
-
-// memory_copy.wast:1056
-assert_return(() => call($10, "load8_u", [60317]), 0);
-
-// memory_copy.wast:1057
-assert_return(() => call($10, "load8_u", [60516]), 0);
-
-// memory_copy.wast:1058
-assert_return(() => call($10, "load8_u", [60715]), 0);
-
-// memory_copy.wast:1059
-assert_return(() => call($10, "load8_u", [60914]), 0);
-
-// memory_copy.wast:1060
-assert_return(() => call($10, "load8_u", [61113]), 0);
-
-// memory_copy.wast:1061
-assert_return(() => call($10, "load8_u", [61312]), 0);
-
-// memory_copy.wast:1062
-assert_return(() => call($10, "load8_u", [61511]), 0);
-
-// memory_copy.wast:1063
-assert_return(() => call($10, "load8_u", [61710]), 0);
-
-// memory_copy.wast:1064
-assert_return(() => call($10, "load8_u", [61909]), 0);
-
-// memory_copy.wast:1065
-assert_return(() => call($10, "load8_u", [62108]), 0);
-
-// memory_copy.wast:1066
-assert_return(() => call($10, "load8_u", [62307]), 0);
-
-// memory_copy.wast:1067
-assert_return(() => call($10, "load8_u", [62506]), 0);
-
-// memory_copy.wast:1068
-assert_return(() => call($10, "load8_u", [62705]), 0);
-
-// memory_copy.wast:1069
-assert_return(() => call($10, "load8_u", [62904]), 0);
-
-// memory_copy.wast:1070
-assert_return(() => call($10, "load8_u", [63103]), 0);
-
-// memory_copy.wast:1071
-assert_return(() => call($10, "load8_u", [63302]), 0);
-
-// memory_copy.wast:1072
-assert_return(() => call($10, "load8_u", [63501]), 0);
-
-// memory_copy.wast:1073
-assert_return(() => call($10, "load8_u", [63700]), 0);
-
-// memory_copy.wast:1074
-assert_return(() => call($10, "load8_u", [63899]), 0);
-
-// memory_copy.wast:1075
-assert_return(() => call($10, "load8_u", [64098]), 0);
-
-// memory_copy.wast:1076
-assert_return(() => call($10, "load8_u", [64297]), 0);
-
-// memory_copy.wast:1077
-assert_return(() => call($10, "load8_u", [64496]), 0);
-
-// memory_copy.wast:1078
-assert_return(() => call($10, "load8_u", [64695]), 0);
-
-// memory_copy.wast:1079
-assert_return(() => call($10, "load8_u", [64894]), 0);
-
-// memory_copy.wast:1080
-assert_return(() => call($10, "load8_u", [65093]), 0);
-
-// memory_copy.wast:1081
-assert_return(() => call($10, "load8_u", [65292]), 0);
-
-// memory_copy.wast:1082
-assert_return(() => call($10, "load8_u", [65491]), 0);
-
-// memory_copy.wast:1083
-assert_return(() => call($10, "load8_u", [65515]), 0);
-
-// memory_copy.wast:1084
-assert_return(() => call($10, "load8_u", [65516]), 1);
-
-// memory_copy.wast:1085
-assert_return(() => call($10, "load8_u", [65517]), 2);
-
-// memory_copy.wast:1086
-assert_return(() => call($10, "load8_u", [65518]), 3);
-
-// memory_copy.wast:1087
-assert_return(() => call($10, "load8_u", [65519]), 4);
-
-// memory_copy.wast:1088
-assert_return(() => call($10, "load8_u", [65520]), 5);
-
-// memory_copy.wast:1089
-assert_return(() => call($10, "load8_u", [65521]), 6);
-
-// memory_copy.wast:1090
-assert_return(() => call($10, "load8_u", [65522]), 7);
-
-// memory_copy.wast:1091
-assert_return(() => call($10, "load8_u", [65523]), 8);
-
-// memory_copy.wast:1092
-assert_return(() => call($10, "load8_u", [65524]), 9);
-
-// memory_copy.wast:1093
-assert_return(() => call($10, "load8_u", [65525]), 10);
-
-// memory_copy.wast:1094
-assert_return(() => call($10, "load8_u", [65526]), 11);
-
-// memory_copy.wast:1095
-assert_return(() => call($10, "load8_u", [65527]), 12);
-
-// memory_copy.wast:1096
-assert_return(() => call($10, "load8_u", [65528]), 13);
-
-// memory_copy.wast:1097
-assert_return(() => call($10, "load8_u", [65529]), 14);
-
-// memory_copy.wast:1098
-assert_return(() => call($10, "load8_u", [65530]), 15);
-
-// memory_copy.wast:1099
-assert_return(() => call($10, "load8_u", [65531]), 16);
-
-// memory_copy.wast:1100
-assert_return(() => call($10, "load8_u", [65532]), 17);
-
-// memory_copy.wast:1101
-assert_return(() => call($10, "load8_u", [65533]), 18);
-
-// memory_copy.wast:1102
-assert_return(() => call($10, "load8_u", [65534]), 19);
-
-// memory_copy.wast:1103
-assert_return(() => call($10, "load8_u", [65535]), 20);
-
-// memory_copy.wast:1105
-let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:1113
-assert_trap(() => call($11, "run", [0, 65516, 40]));
-
-// memory_copy.wast:1116
-assert_return(() => call($11, "load8_u", [0]), 0);
-
-// memory_copy.wast:1117
-assert_return(() => call($11, "load8_u", [1]), 1);
-
-// memory_copy.wast:1118
-assert_return(() => call($11, "load8_u", [2]), 2);
-
-// memory_copy.wast:1119
-assert_return(() => call($11, "load8_u", [3]), 3);
-
-// memory_copy.wast:1120
-assert_return(() => call($11, "load8_u", [4]), 4);
-
-// memory_copy.wast:1121
-assert_return(() => call($11, "load8_u", [5]), 5);
-
-// memory_copy.wast:1122
-assert_return(() => call($11, "load8_u", [6]), 6);
-
-// memory_copy.wast:1123
-assert_return(() => call($11, "load8_u", [7]), 7);
-
-// memory_copy.wast:1124
-assert_return(() => call($11, "load8_u", [8]), 8);
-
-// memory_copy.wast:1125
-assert_return(() => call($11, "load8_u", [9]), 9);
-
-// memory_copy.wast:1126
-assert_return(() => call($11, "load8_u", [10]), 10);
-
-// memory_copy.wast:1127
-assert_return(() => call($11, "load8_u", [11]), 11);
-
-// memory_copy.wast:1128
-assert_return(() => call($11, "load8_u", [12]), 12);
-
-// memory_copy.wast:1129
-assert_return(() => call($11, "load8_u", [13]), 13);
-
-// memory_copy.wast:1130
-assert_return(() => call($11, "load8_u", [14]), 14);
-
-// memory_copy.wast:1131
-assert_return(() => call($11, "load8_u", [15]), 15);
-
-// memory_copy.wast:1132
-assert_return(() => call($11, "load8_u", [16]), 16);
-
-// memory_copy.wast:1133
-assert_return(() => call($11, "load8_u", [17]), 17);
-
-// memory_copy.wast:1134
-assert_return(() => call($11, "load8_u", [18]), 18);
-
-// memory_copy.wast:1135
-assert_return(() => call($11, "load8_u", [19]), 19);
-
-// memory_copy.wast:1136
-assert_return(() => call($11, "load8_u", [218]), 0);
-
-// memory_copy.wast:1137
-assert_return(() => call($11, "load8_u", [417]), 0);
-
-// memory_copy.wast:1138
-assert_return(() => call($11, "load8_u", [616]), 0);
-
-// memory_copy.wast:1139
-assert_return(() => call($11, "load8_u", [815]), 0);
-
-// memory_copy.wast:1140
-assert_return(() => call($11, "load8_u", [1014]), 0);
-
-// memory_copy.wast:1141
-assert_return(() => call($11, "load8_u", [1213]), 0);
-
-// memory_copy.wast:1142
-assert_return(() => call($11, "load8_u", [1412]), 0);
-
-// memory_copy.wast:1143
-assert_return(() => call($11, "load8_u", [1611]), 0);
-
-// memory_copy.wast:1144
-assert_return(() => call($11, "load8_u", [1810]), 0);
-
-// memory_copy.wast:1145
-assert_return(() => call($11, "load8_u", [2009]), 0);
-
-// memory_copy.wast:1146
-assert_return(() => call($11, "load8_u", [2208]), 0);
-
-// memory_copy.wast:1147
-assert_return(() => call($11, "load8_u", [2407]), 0);
-
-// memory_copy.wast:1148
-assert_return(() => call($11, "load8_u", [2606]), 0);
-
-// memory_copy.wast:1149
-assert_return(() => call($11, "load8_u", [2805]), 0);
-
-// memory_copy.wast:1150
-assert_return(() => call($11, "load8_u", [3004]), 0);
-
-// memory_copy.wast:1151
-assert_return(() => call($11, "load8_u", [3203]), 0);
-
-// memory_copy.wast:1152
-assert_return(() => call($11, "load8_u", [3402]), 0);
-
-// memory_copy.wast:1153
-assert_return(() => call($11, "load8_u", [3601]), 0);
-
-// memory_copy.wast:1154
-assert_return(() => call($11, "load8_u", [3800]), 0);
-
-// memory_copy.wast:1155
-assert_return(() => call($11, "load8_u", [3999]), 0);
-
-// memory_copy.wast:1156
-assert_return(() => call($11, "load8_u", [4198]), 0);
-
-// memory_copy.wast:1157
-assert_return(() => call($11, "load8_u", [4397]), 0);
-
-// memory_copy.wast:1158
-assert_return(() => call($11, "load8_u", [4596]), 0);
-
-// memory_copy.wast:1159
-assert_return(() => call($11, "load8_u", [4795]), 0);
-
-// memory_copy.wast:1160
-assert_return(() => call($11, "load8_u", [4994]), 0);
-
-// memory_copy.wast:1161
-assert_return(() => call($11, "load8_u", [5193]), 0);
-
-// memory_copy.wast:1162
-assert_return(() => call($11, "load8_u", [5392]), 0);
-
-// memory_copy.wast:1163
-assert_return(() => call($11, "load8_u", [5591]), 0);
-
-// memory_copy.wast:1164
-assert_return(() => call($11, "load8_u", [5790]), 0);
-
-// memory_copy.wast:1165
-assert_return(() => call($11, "load8_u", [5989]), 0);
-
-// memory_copy.wast:1166
-assert_return(() => call($11, "load8_u", [6188]), 0);
-
-// memory_copy.wast:1167
-assert_return(() => call($11, "load8_u", [6387]), 0);
-
-// memory_copy.wast:1168
-assert_return(() => call($11, "load8_u", [6586]), 0);
-
-// memory_copy.wast:1169
-assert_return(() => call($11, "load8_u", [6785]), 0);
-
-// memory_copy.wast:1170
-assert_return(() => call($11, "load8_u", [6984]), 0);
-
-// memory_copy.wast:1171
-assert_return(() => call($11, "load8_u", [7183]), 0);
-
-// memory_copy.wast:1172
-assert_return(() => call($11, "load8_u", [7382]), 0);
-
-// memory_copy.wast:1173
-assert_return(() => call($11, "load8_u", [7581]), 0);
-
-// memory_copy.wast:1174
-assert_return(() => call($11, "load8_u", [7780]), 0);
-
-// memory_copy.wast:1175
-assert_return(() => call($11, "load8_u", [7979]), 0);
-
-// memory_copy.wast:1176
-assert_return(() => call($11, "load8_u", [8178]), 0);
-
-// memory_copy.wast:1177
-assert_return(() => call($11, "load8_u", [8377]), 0);
-
-// memory_copy.wast:1178
-assert_return(() => call($11, "load8_u", [8576]), 0);
-
-// memory_copy.wast:1179
-assert_return(() => call($11, "load8_u", [8775]), 0);
-
-// memory_copy.wast:1180
-assert_return(() => call($11, "load8_u", [8974]), 0);
-
-// memory_copy.wast:1181
-assert_return(() => call($11, "load8_u", [9173]), 0);
-
-// memory_copy.wast:1182
-assert_return(() => call($11, "load8_u", [9372]), 0);
-
-// memory_copy.wast:1183
-assert_return(() => call($11, "load8_u", [9571]), 0);
-
-// memory_copy.wast:1184
-assert_return(() => call($11, "load8_u", [9770]), 0);
-
-// memory_copy.wast:1185
-assert_return(() => call($11, "load8_u", [9969]), 0);
-
-// memory_copy.wast:1186
-assert_return(() => call($11, "load8_u", [10168]), 0);
-
-// memory_copy.wast:1187
-assert_return(() => call($11, "load8_u", [10367]), 0);
-
-// memory_copy.wast:1188
-assert_return(() => call($11, "load8_u", [10566]), 0);
-
-// memory_copy.wast:1189
-assert_return(() => call($11, "load8_u", [10765]), 0);
-
-// memory_copy.wast:1190
-assert_return(() => call($11, "load8_u", [10964]), 0);
-
-// memory_copy.wast:1191
-assert_return(() => call($11, "load8_u", [11163]), 0);
-
-// memory_copy.wast:1192
-assert_return(() => call($11, "load8_u", [11362]), 0);
-
-// memory_copy.wast:1193
-assert_return(() => call($11, "load8_u", [11561]), 0);
-
-// memory_copy.wast:1194
-assert_return(() => call($11, "load8_u", [11760]), 0);
-
-// memory_copy.wast:1195
-assert_return(() => call($11, "load8_u", [11959]), 0);
-
-// memory_copy.wast:1196
-assert_return(() => call($11, "load8_u", [12158]), 0);
-
-// memory_copy.wast:1197
-assert_return(() => call($11, "load8_u", [12357]), 0);
-
-// memory_copy.wast:1198
-assert_return(() => call($11, "load8_u", [12556]), 0);
-
-// memory_copy.wast:1199
-assert_return(() => call($11, "load8_u", [12755]), 0);
-
-// memory_copy.wast:1200
-assert_return(() => call($11, "load8_u", [12954]), 0);
-
-// memory_copy.wast:1201
-assert_return(() => call($11, "load8_u", [13153]), 0);
-
-// memory_copy.wast:1202
-assert_return(() => call($11, "load8_u", [13352]), 0);
-
-// memory_copy.wast:1203
-assert_return(() => call($11, "load8_u", [13551]), 0);
-
-// memory_copy.wast:1204
-assert_return(() => call($11, "load8_u", [13750]), 0);
-
-// memory_copy.wast:1205
-assert_return(() => call($11, "load8_u", [13949]), 0);
-
-// memory_copy.wast:1206
-assert_return(() => call($11, "load8_u", [14148]), 0);
-
-// memory_copy.wast:1207
-assert_return(() => call($11, "load8_u", [14347]), 0);
-
-// memory_copy.wast:1208
-assert_return(() => call($11, "load8_u", [14546]), 0);
-
-// memory_copy.wast:1209
-assert_return(() => call($11, "load8_u", [14745]), 0);
-
-// memory_copy.wast:1210
-assert_return(() => call($11, "load8_u", [14944]), 0);
-
-// memory_copy.wast:1211
-assert_return(() => call($11, "load8_u", [15143]), 0);
-
-// memory_copy.wast:1212
-assert_return(() => call($11, "load8_u", [15342]), 0);
-
-// memory_copy.wast:1213
-assert_return(() => call($11, "load8_u", [15541]), 0);
-
-// memory_copy.wast:1214
-assert_return(() => call($11, "load8_u", [15740]), 0);
-
-// memory_copy.wast:1215
-assert_return(() => call($11, "load8_u", [15939]), 0);
-
-// memory_copy.wast:1216
-assert_return(() => call($11, "load8_u", [16138]), 0);
-
-// memory_copy.wast:1217
-assert_return(() => call($11, "load8_u", [16337]), 0);
-
-// memory_copy.wast:1218
-assert_return(() => call($11, "load8_u", [16536]), 0);
-
-// memory_copy.wast:1219
-assert_return(() => call($11, "load8_u", [16735]), 0);
-
-// memory_copy.wast:1220
-assert_return(() => call($11, "load8_u", [16934]), 0);
-
-// memory_copy.wast:1221
-assert_return(() => call($11, "load8_u", [17133]), 0);
-
-// memory_copy.wast:1222
-assert_return(() => call($11, "load8_u", [17332]), 0);
-
-// memory_copy.wast:1223
-assert_return(() => call($11, "load8_u", [17531]), 0);
-
-// memory_copy.wast:1224
-assert_return(() => call($11, "load8_u", [17730]), 0);
-
-// memory_copy.wast:1225
-assert_return(() => call($11, "load8_u", [17929]), 0);
-
-// memory_copy.wast:1226
-assert_return(() => call($11, "load8_u", [18128]), 0);
-
-// memory_copy.wast:1227
-assert_return(() => call($11, "load8_u", [18327]), 0);
-
-// memory_copy.wast:1228
-assert_return(() => call($11, "load8_u", [18526]), 0);
-
-// memory_copy.wast:1229
-assert_return(() => call($11, "load8_u", [18725]), 0);
-
-// memory_copy.wast:1230
-assert_return(() => call($11, "load8_u", [18924]), 0);
-
-// memory_copy.wast:1231
-assert_return(() => call($11, "load8_u", [19123]), 0);
-
-// memory_copy.wast:1232
-assert_return(() => call($11, "load8_u", [19322]), 0);
-
-// memory_copy.wast:1233
-assert_return(() => call($11, "load8_u", [19521]), 0);
-
-// memory_copy.wast:1234
-assert_return(() => call($11, "load8_u", [19720]), 0);
-
-// memory_copy.wast:1235
-assert_return(() => call($11, "load8_u", [19919]), 0);
-
-// memory_copy.wast:1236
-assert_return(() => call($11, "load8_u", [20118]), 0);
-
-// memory_copy.wast:1237
-assert_return(() => call($11, "load8_u", [20317]), 0);
-
-// memory_copy.wast:1238
-assert_return(() => call($11, "load8_u", [20516]), 0);
-
-// memory_copy.wast:1239
-assert_return(() => call($11, "load8_u", [20715]), 0);
-
-// memory_copy.wast:1240
-assert_return(() => call($11, "load8_u", [20914]), 0);
-
-// memory_copy.wast:1241
-assert_return(() => call($11, "load8_u", [21113]), 0);
-
-// memory_copy.wast:1242
-assert_return(() => call($11, "load8_u", [21312]), 0);
-
-// memory_copy.wast:1243
-assert_return(() => call($11, "load8_u", [21511]), 0);
-
-// memory_copy.wast:1244
-assert_return(() => call($11, "load8_u", [21710]), 0);
-
-// memory_copy.wast:1245
-assert_return(() => call($11, "load8_u", [21909]), 0);
-
-// memory_copy.wast:1246
-assert_return(() => call($11, "load8_u", [22108]), 0);
-
-// memory_copy.wast:1247
-assert_return(() => call($11, "load8_u", [22307]), 0);
-
-// memory_copy.wast:1248
-assert_return(() => call($11, "load8_u", [22506]), 0);
-
-// memory_copy.wast:1249
-assert_return(() => call($11, "load8_u", [22705]), 0);
-
-// memory_copy.wast:1250
-assert_return(() => call($11, "load8_u", [22904]), 0);
-
-// memory_copy.wast:1251
-assert_return(() => call($11, "load8_u", [23103]), 0);
-
-// memory_copy.wast:1252
-assert_return(() => call($11, "load8_u", [23302]), 0);
-
-// memory_copy.wast:1253
-assert_return(() => call($11, "load8_u", [23501]), 0);
-
-// memory_copy.wast:1254
-assert_return(() => call($11, "load8_u", [23700]), 0);
-
-// memory_copy.wast:1255
-assert_return(() => call($11, "load8_u", [23899]), 0);
-
-// memory_copy.wast:1256
-assert_return(() => call($11, "load8_u", [24098]), 0);
-
-// memory_copy.wast:1257
-assert_return(() => call($11, "load8_u", [24297]), 0);
-
-// memory_copy.wast:1258
-assert_return(() => call($11, "load8_u", [24496]), 0);
-
-// memory_copy.wast:1259
-assert_return(() => call($11, "load8_u", [24695]), 0);
-
-// memory_copy.wast:1260
-assert_return(() => call($11, "load8_u", [24894]), 0);
-
-// memory_copy.wast:1261
-assert_return(() => call($11, "load8_u", [25093]), 0);
-
-// memory_copy.wast:1262
-assert_return(() => call($11, "load8_u", [25292]), 0);
-
-// memory_copy.wast:1263
-assert_return(() => call($11, "load8_u", [25491]), 0);
-
-// memory_copy.wast:1264
-assert_return(() => call($11, "load8_u", [25690]), 0);
-
-// memory_copy.wast:1265
-assert_return(() => call($11, "load8_u", [25889]), 0);
-
-// memory_copy.wast:1266
-assert_return(() => call($11, "load8_u", [26088]), 0);
-
-// memory_copy.wast:1267
-assert_return(() => call($11, "load8_u", [26287]), 0);
-
-// memory_copy.wast:1268
-assert_return(() => call($11, "load8_u", [26486]), 0);
-
-// memory_copy.wast:1269
-assert_return(() => call($11, "load8_u", [26685]), 0);
-
-// memory_copy.wast:1270
-assert_return(() => call($11, "load8_u", [26884]), 0);
-
-// memory_copy.wast:1271
-assert_return(() => call($11, "load8_u", [27083]), 0);
-
-// memory_copy.wast:1272
-assert_return(() => call($11, "load8_u", [27282]), 0);
-
-// memory_copy.wast:1273
-assert_return(() => call($11, "load8_u", [27481]), 0);
-
-// memory_copy.wast:1274
-assert_return(() => call($11, "load8_u", [27680]), 0);
-
-// memory_copy.wast:1275
-assert_return(() => call($11, "load8_u", [27879]), 0);
-
-// memory_copy.wast:1276
-assert_return(() => call($11, "load8_u", [28078]), 0);
-
-// memory_copy.wast:1277
-assert_return(() => call($11, "load8_u", [28277]), 0);
-
-// memory_copy.wast:1278
-assert_return(() => call($11, "load8_u", [28476]), 0);
-
-// memory_copy.wast:1279
-assert_return(() => call($11, "load8_u", [28675]), 0);
-
-// memory_copy.wast:1280
-assert_return(() => call($11, "load8_u", [28874]), 0);
-
-// memory_copy.wast:1281
-assert_return(() => call($11, "load8_u", [29073]), 0);
-
-// memory_copy.wast:1282
-assert_return(() => call($11, "load8_u", [29272]), 0);
-
-// memory_copy.wast:1283
-assert_return(() => call($11, "load8_u", [29471]), 0);
-
-// memory_copy.wast:1284
-assert_return(() => call($11, "load8_u", [29670]), 0);
-
-// memory_copy.wast:1285
-assert_return(() => call($11, "load8_u", [29869]), 0);
-
-// memory_copy.wast:1286
-assert_return(() => call($11, "load8_u", [30068]), 0);
-
-// memory_copy.wast:1287
-assert_return(() => call($11, "load8_u", [30267]), 0);
-
-// memory_copy.wast:1288
-assert_return(() => call($11, "load8_u", [30466]), 0);
-
-// memory_copy.wast:1289
-assert_return(() => call($11, "load8_u", [30665]), 0);
-
-// memory_copy.wast:1290
-assert_return(() => call($11, "load8_u", [30864]), 0);
-
-// memory_copy.wast:1291
-assert_return(() => call($11, "load8_u", [31063]), 0);
-
-// memory_copy.wast:1292
-assert_return(() => call($11, "load8_u", [31262]), 0);
-
-// memory_copy.wast:1293
-assert_return(() => call($11, "load8_u", [31461]), 0);
-
-// memory_copy.wast:1294
-assert_return(() => call($11, "load8_u", [31660]), 0);
-
-// memory_copy.wast:1295
-assert_return(() => call($11, "load8_u", [31859]), 0);
-
-// memory_copy.wast:1296
-assert_return(() => call($11, "load8_u", [32058]), 0);
-
-// memory_copy.wast:1297
-assert_return(() => call($11, "load8_u", [32257]), 0);
-
-// memory_copy.wast:1298
-assert_return(() => call($11, "load8_u", [32456]), 0);
-
-// memory_copy.wast:1299
-assert_return(() => call($11, "load8_u", [32655]), 0);
-
-// memory_copy.wast:1300
-assert_return(() => call($11, "load8_u", [32854]), 0);
-
-// memory_copy.wast:1301
-assert_return(() => call($11, "load8_u", [33053]), 0);
-
-// memory_copy.wast:1302
-assert_return(() => call($11, "load8_u", [33252]), 0);
-
-// memory_copy.wast:1303
-assert_return(() => call($11, "load8_u", [33451]), 0);
-
-// memory_copy.wast:1304
-assert_return(() => call($11, "load8_u", [33650]), 0);
-
-// memory_copy.wast:1305
-assert_return(() => call($11, "load8_u", [33849]), 0);
-
-// memory_copy.wast:1306
-assert_return(() => call($11, "load8_u", [34048]), 0);
-
-// memory_copy.wast:1307
-assert_return(() => call($11, "load8_u", [34247]), 0);
-
-// memory_copy.wast:1308
-assert_return(() => call($11, "load8_u", [34446]), 0);
-
-// memory_copy.wast:1309
-assert_return(() => call($11, "load8_u", [34645]), 0);
-
-// memory_copy.wast:1310
-assert_return(() => call($11, "load8_u", [34844]), 0);
-
-// memory_copy.wast:1311
-assert_return(() => call($11, "load8_u", [35043]), 0);
-
-// memory_copy.wast:1312
-assert_return(() => call($11, "load8_u", [35242]), 0);
-
-// memory_copy.wast:1313
-assert_return(() => call($11, "load8_u", [35441]), 0);
-
-// memory_copy.wast:1314
-assert_return(() => call($11, "load8_u", [35640]), 0);
-
-// memory_copy.wast:1315
-assert_return(() => call($11, "load8_u", [35839]), 0);
-
-// memory_copy.wast:1316
-assert_return(() => call($11, "load8_u", [36038]), 0);
-
-// memory_copy.wast:1317
-assert_return(() => call($11, "load8_u", [36237]), 0);
-
-// memory_copy.wast:1318
-assert_return(() => call($11, "load8_u", [36436]), 0);
-
-// memory_copy.wast:1319
-assert_return(() => call($11, "load8_u", [36635]), 0);
-
-// memory_copy.wast:1320
-assert_return(() => call($11, "load8_u", [36834]), 0);
-
-// memory_copy.wast:1321
-assert_return(() => call($11, "load8_u", [37033]), 0);
-
-// memory_copy.wast:1322
-assert_return(() => call($11, "load8_u", [37232]), 0);
-
-// memory_copy.wast:1323
-assert_return(() => call($11, "load8_u", [37431]), 0);
-
-// memory_copy.wast:1324
-assert_return(() => call($11, "load8_u", [37630]), 0);
-
-// memory_copy.wast:1325
-assert_return(() => call($11, "load8_u", [37829]), 0);
-
-// memory_copy.wast:1326
-assert_return(() => call($11, "load8_u", [38028]), 0);
-
-// memory_copy.wast:1327
-assert_return(() => call($11, "load8_u", [38227]), 0);
-
-// memory_copy.wast:1328
-assert_return(() => call($11, "load8_u", [38426]), 0);
-
-// memory_copy.wast:1329
-assert_return(() => call($11, "load8_u", [38625]), 0);
-
-// memory_copy.wast:1330
-assert_return(() => call($11, "load8_u", [38824]), 0);
-
-// memory_copy.wast:1331
-assert_return(() => call($11, "load8_u", [39023]), 0);
-
-// memory_copy.wast:1332
-assert_return(() => call($11, "load8_u", [39222]), 0);
-
-// memory_copy.wast:1333
-assert_return(() => call($11, "load8_u", [39421]), 0);
-
-// memory_copy.wast:1334
-assert_return(() => call($11, "load8_u", [39620]), 0);
-
-// memory_copy.wast:1335
-assert_return(() => call($11, "load8_u", [39819]), 0);
-
-// memory_copy.wast:1336
-assert_return(() => call($11, "load8_u", [40018]), 0);
-
-// memory_copy.wast:1337
-assert_return(() => call($11, "load8_u", [40217]), 0);
-
-// memory_copy.wast:1338
-assert_return(() => call($11, "load8_u", [40416]), 0);
-
-// memory_copy.wast:1339
-assert_return(() => call($11, "load8_u", [40615]), 0);
-
-// memory_copy.wast:1340
-assert_return(() => call($11, "load8_u", [40814]), 0);
-
-// memory_copy.wast:1341
-assert_return(() => call($11, "load8_u", [41013]), 0);
-
-// memory_copy.wast:1342
-assert_return(() => call($11, "load8_u", [41212]), 0);
-
-// memory_copy.wast:1343
-assert_return(() => call($11, "load8_u", [41411]), 0);
-
-// memory_copy.wast:1344
-assert_return(() => call($11, "load8_u", [41610]), 0);
-
-// memory_copy.wast:1345
-assert_return(() => call($11, "load8_u", [41809]), 0);
-
-// memory_copy.wast:1346
-assert_return(() => call($11, "load8_u", [42008]), 0);
-
-// memory_copy.wast:1347
-assert_return(() => call($11, "load8_u", [42207]), 0);
-
-// memory_copy.wast:1348
-assert_return(() => call($11, "load8_u", [42406]), 0);
-
-// memory_copy.wast:1349
-assert_return(() => call($11, "load8_u", [42605]), 0);
-
-// memory_copy.wast:1350
-assert_return(() => call($11, "load8_u", [42804]), 0);
-
-// memory_copy.wast:1351
-assert_return(() => call($11, "load8_u", [43003]), 0);
-
-// memory_copy.wast:1352
-assert_return(() => call($11, "load8_u", [43202]), 0);
-
-// memory_copy.wast:1353
-assert_return(() => call($11, "load8_u", [43401]), 0);
-
-// memory_copy.wast:1354
-assert_return(() => call($11, "load8_u", [43600]), 0);
-
-// memory_copy.wast:1355
-assert_return(() => call($11, "load8_u", [43799]), 0);
-
-// memory_copy.wast:1356
-assert_return(() => call($11, "load8_u", [43998]), 0);
-
-// memory_copy.wast:1357
-assert_return(() => call($11, "load8_u", [44197]), 0);
-
-// memory_copy.wast:1358
-assert_return(() => call($11, "load8_u", [44396]), 0);
-
-// memory_copy.wast:1359
-assert_return(() => call($11, "load8_u", [44595]), 0);
-
-// memory_copy.wast:1360
-assert_return(() => call($11, "load8_u", [44794]), 0);
-
-// memory_copy.wast:1361
-assert_return(() => call($11, "load8_u", [44993]), 0);
-
-// memory_copy.wast:1362
-assert_return(() => call($11, "load8_u", [45192]), 0);
-
-// memory_copy.wast:1363
-assert_return(() => call($11, "load8_u", [45391]), 0);
-
-// memory_copy.wast:1364
-assert_return(() => call($11, "load8_u", [45590]), 0);
-
-// memory_copy.wast:1365
-assert_return(() => call($11, "load8_u", [45789]), 0);
-
-// memory_copy.wast:1366
-assert_return(() => call($11, "load8_u", [45988]), 0);
-
-// memory_copy.wast:1367
-assert_return(() => call($11, "load8_u", [46187]), 0);
-
-// memory_copy.wast:1368
-assert_return(() => call($11, "load8_u", [46386]), 0);
-
-// memory_copy.wast:1369
-assert_return(() => call($11, "load8_u", [46585]), 0);
-
-// memory_copy.wast:1370
-assert_return(() => call($11, "load8_u", [46784]), 0);
-
-// memory_copy.wast:1371
-assert_return(() => call($11, "load8_u", [46983]), 0);
-
-// memory_copy.wast:1372
-assert_return(() => call($11, "load8_u", [47182]), 0);
-
-// memory_copy.wast:1373
-assert_return(() => call($11, "load8_u", [47381]), 0);
-
-// memory_copy.wast:1374
-assert_return(() => call($11, "load8_u", [47580]), 0);
-
-// memory_copy.wast:1375
-assert_return(() => call($11, "load8_u", [47779]), 0);
-
-// memory_copy.wast:1376
-assert_return(() => call($11, "load8_u", [47978]), 0);
-
-// memory_copy.wast:1377
-assert_return(() => call($11, "load8_u", [48177]), 0);
-
-// memory_copy.wast:1378
-assert_return(() => call($11, "load8_u", [48376]), 0);
-
-// memory_copy.wast:1379
-assert_return(() => call($11, "load8_u", [48575]), 0);
-
-// memory_copy.wast:1380
-assert_return(() => call($11, "load8_u", [48774]), 0);
-
-// memory_copy.wast:1381
-assert_return(() => call($11, "load8_u", [48973]), 0);
-
-// memory_copy.wast:1382
-assert_return(() => call($11, "load8_u", [49172]), 0);
-
-// memory_copy.wast:1383
-assert_return(() => call($11, "load8_u", [49371]), 0);
-
-// memory_copy.wast:1384
-assert_return(() => call($11, "load8_u", [49570]), 0);
-
-// memory_copy.wast:1385
-assert_return(() => call($11, "load8_u", [49769]), 0);
-
-// memory_copy.wast:1386
-assert_return(() => call($11, "load8_u", [49968]), 0);
-
-// memory_copy.wast:1387
-assert_return(() => call($11, "load8_u", [50167]), 0);
-
-// memory_copy.wast:1388
-assert_return(() => call($11, "load8_u", [50366]), 0);
-
-// memory_copy.wast:1389
-assert_return(() => call($11, "load8_u", [50565]), 0);
-
-// memory_copy.wast:1390
-assert_return(() => call($11, "load8_u", [50764]), 0);
-
-// memory_copy.wast:1391
-assert_return(() => call($11, "load8_u", [50963]), 0);
-
-// memory_copy.wast:1392
-assert_return(() => call($11, "load8_u", [51162]), 0);
-
-// memory_copy.wast:1393
-assert_return(() => call($11, "load8_u", [51361]), 0);
-
-// memory_copy.wast:1394
-assert_return(() => call($11, "load8_u", [51560]), 0);
-
-// memory_copy.wast:1395
-assert_return(() => call($11, "load8_u", [51759]), 0);
-
-// memory_copy.wast:1396
-assert_return(() => call($11, "load8_u", [51958]), 0);
-
-// memory_copy.wast:1397
-assert_return(() => call($11, "load8_u", [52157]), 0);
-
-// memory_copy.wast:1398
-assert_return(() => call($11, "load8_u", [52356]), 0);
-
-// memory_copy.wast:1399
-assert_return(() => call($11, "load8_u", [52555]), 0);
-
-// memory_copy.wast:1400
-assert_return(() => call($11, "load8_u", [52754]), 0);
-
-// memory_copy.wast:1401
-assert_return(() => call($11, "load8_u", [52953]), 0);
-
-// memory_copy.wast:1402
-assert_return(() => call($11, "load8_u", [53152]), 0);
-
-// memory_copy.wast:1403
-assert_return(() => call($11, "load8_u", [53351]), 0);
-
-// memory_copy.wast:1404
-assert_return(() => call($11, "load8_u", [53550]), 0);
-
-// memory_copy.wast:1405
-assert_return(() => call($11, "load8_u", [53749]), 0);
-
-// memory_copy.wast:1406
-assert_return(() => call($11, "load8_u", [53948]), 0);
-
-// memory_copy.wast:1407
-assert_return(() => call($11, "load8_u", [54147]), 0);
-
-// memory_copy.wast:1408
-assert_return(() => call($11, "load8_u", [54346]), 0);
-
-// memory_copy.wast:1409
-assert_return(() => call($11, "load8_u", [54545]), 0);
-
-// memory_copy.wast:1410
-assert_return(() => call($11, "load8_u", [54744]), 0);
-
-// memory_copy.wast:1411
-assert_return(() => call($11, "load8_u", [54943]), 0);
-
-// memory_copy.wast:1412
-assert_return(() => call($11, "load8_u", [55142]), 0);
-
-// memory_copy.wast:1413
-assert_return(() => call($11, "load8_u", [55341]), 0);
-
-// memory_copy.wast:1414
-assert_return(() => call($11, "load8_u", [55540]), 0);
-
-// memory_copy.wast:1415
-assert_return(() => call($11, "load8_u", [55739]), 0);
-
-// memory_copy.wast:1416
-assert_return(() => call($11, "load8_u", [55938]), 0);
-
-// memory_copy.wast:1417
-assert_return(() => call($11, "load8_u", [56137]), 0);
-
-// memory_copy.wast:1418
-assert_return(() => call($11, "load8_u", [56336]), 0);
-
-// memory_copy.wast:1419
-assert_return(() => call($11, "load8_u", [56535]), 0);
-
-// memory_copy.wast:1420
-assert_return(() => call($11, "load8_u", [56734]), 0);
-
-// memory_copy.wast:1421
-assert_return(() => call($11, "load8_u", [56933]), 0);
-
-// memory_copy.wast:1422
-assert_return(() => call($11, "load8_u", [57132]), 0);
-
-// memory_copy.wast:1423
-assert_return(() => call($11, "load8_u", [57331]), 0);
-
-// memory_copy.wast:1424
-assert_return(() => call($11, "load8_u", [57530]), 0);
-
-// memory_copy.wast:1425
-assert_return(() => call($11, "load8_u", [57729]), 0);
-
-// memory_copy.wast:1426
-assert_return(() => call($11, "load8_u", [57928]), 0);
-
-// memory_copy.wast:1427
-assert_return(() => call($11, "load8_u", [58127]), 0);
-
-// memory_copy.wast:1428
-assert_return(() => call($11, "load8_u", [58326]), 0);
-
-// memory_copy.wast:1429
-assert_return(() => call($11, "load8_u", [58525]), 0);
-
-// memory_copy.wast:1430
-assert_return(() => call($11, "load8_u", [58724]), 0);
-
-// memory_copy.wast:1431
-assert_return(() => call($11, "load8_u", [58923]), 0);
-
-// memory_copy.wast:1432
-assert_return(() => call($11, "load8_u", [59122]), 0);
-
-// memory_copy.wast:1433
-assert_return(() => call($11, "load8_u", [59321]), 0);
-
-// memory_copy.wast:1434
-assert_return(() => call($11, "load8_u", [59520]), 0);
-
-// memory_copy.wast:1435
-assert_return(() => call($11, "load8_u", [59719]), 0);
-
-// memory_copy.wast:1436
-assert_return(() => call($11, "load8_u", [59918]), 0);
-
-// memory_copy.wast:1437
-assert_return(() => call($11, "load8_u", [60117]), 0);
-
-// memory_copy.wast:1438
-assert_return(() => call($11, "load8_u", [60316]), 0);
-
-// memory_copy.wast:1439
-assert_return(() => call($11, "load8_u", [60515]), 0);
-
-// memory_copy.wast:1440
-assert_return(() => call($11, "load8_u", [60714]), 0);
-
-// memory_copy.wast:1441
-assert_return(() => call($11, "load8_u", [60913]), 0);
-
-// memory_copy.wast:1442
-assert_return(() => call($11, "load8_u", [61112]), 0);
-
-// memory_copy.wast:1443
-assert_return(() => call($11, "load8_u", [61311]), 0);
-
-// memory_copy.wast:1444
-assert_return(() => call($11, "load8_u", [61510]), 0);
-
-// memory_copy.wast:1445
-assert_return(() => call($11, "load8_u", [61709]), 0);
-
-// memory_copy.wast:1446
-assert_return(() => call($11, "load8_u", [61908]), 0);
-
-// memory_copy.wast:1447
-assert_return(() => call($11, "load8_u", [62107]), 0);
-
-// memory_copy.wast:1448
-assert_return(() => call($11, "load8_u", [62306]), 0);
-
-// memory_copy.wast:1449
-assert_return(() => call($11, "load8_u", [62505]), 0);
-
-// memory_copy.wast:1450
-assert_return(() => call($11, "load8_u", [62704]), 0);
-
-// memory_copy.wast:1451
-assert_return(() => call($11, "load8_u", [62903]), 0);
-
-// memory_copy.wast:1452
-assert_return(() => call($11, "load8_u", [63102]), 0);
-
-// memory_copy.wast:1453
-assert_return(() => call($11, "load8_u", [63301]), 0);
-
-// memory_copy.wast:1454
-assert_return(() => call($11, "load8_u", [63500]), 0);
-
-// memory_copy.wast:1455
-assert_return(() => call($11, "load8_u", [63699]), 0);
-
-// memory_copy.wast:1456
-assert_return(() => call($11, "load8_u", [63898]), 0);
-
-// memory_copy.wast:1457
-assert_return(() => call($11, "load8_u", [64097]), 0);
-
-// memory_copy.wast:1458
-assert_return(() => call($11, "load8_u", [64296]), 0);
-
-// memory_copy.wast:1459
-assert_return(() => call($11, "load8_u", [64495]), 0);
-
-// memory_copy.wast:1460
-assert_return(() => call($11, "load8_u", [64694]), 0);
-
-// memory_copy.wast:1461
-assert_return(() => call($11, "load8_u", [64893]), 0);
-
-// memory_copy.wast:1462
-assert_return(() => call($11, "load8_u", [65092]), 0);
-
-// memory_copy.wast:1463
-assert_return(() => call($11, "load8_u", [65291]), 0);
-
-// memory_copy.wast:1464
-assert_return(() => call($11, "load8_u", [65490]), 0);
-
-// memory_copy.wast:1465
-assert_return(() => call($11, "load8_u", [65516]), 0);
-
-// memory_copy.wast:1466
-assert_return(() => call($11, "load8_u", [65517]), 1);
-
-// memory_copy.wast:1467
-assert_return(() => call($11, "load8_u", [65518]), 2);
-
-// memory_copy.wast:1468
-assert_return(() => call($11, "load8_u", [65519]), 3);
-
-// memory_copy.wast:1469
-assert_return(() => call($11, "load8_u", [65520]), 4);
-
-// memory_copy.wast:1470
-assert_return(() => call($11, "load8_u", [65521]), 5);
-
-// memory_copy.wast:1471
-assert_return(() => call($11, "load8_u", [65522]), 6);
-
-// memory_copy.wast:1472
-assert_return(() => call($11, "load8_u", [65523]), 7);
-
-// memory_copy.wast:1473
-assert_return(() => call($11, "load8_u", [65524]), 8);
-
-// memory_copy.wast:1474
-assert_return(() => call($11, "load8_u", [65525]), 9);
-
-// memory_copy.wast:1475
-assert_return(() => call($11, "load8_u", [65526]), 10);
-
-// memory_copy.wast:1476
-assert_return(() => call($11, "load8_u", [65527]), 11);
-
-// memory_copy.wast:1477
-assert_return(() => call($11, "load8_u", [65528]), 12);
-
-// memory_copy.wast:1478
-assert_return(() => call($11, "load8_u", [65529]), 13);
-
-// memory_copy.wast:1479
-assert_return(() => call($11, "load8_u", [65530]), 14);
-
-// memory_copy.wast:1480
-assert_return(() => call($11, "load8_u", [65531]), 15);
-
-// memory_copy.wast:1481
-assert_return(() => call($11, "load8_u", [65532]), 16);
-
-// memory_copy.wast:1482
-assert_return(() => call($11, "load8_u", [65533]), 17);
-
-// memory_copy.wast:1483
-assert_return(() => call($11, "load8_u", [65534]), 18);
-
-// memory_copy.wast:1484
-assert_return(() => call($11, "load8_u", [65535]), 19);
-
-// memory_copy.wast:1486
-let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9d\x80\x80\x80\x00\x01\x00\x41\xeb\xff\x03\x0b\x15\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14");
-
-// memory_copy.wast:1494
-assert_trap(() => call($12, "run", [0, 65515, 39]));
-
-// memory_copy.wast:1497
-assert_return(() => call($12, "load8_u", [0]), 0);
-
-// memory_copy.wast:1498
-assert_return(() => call($12, "load8_u", [1]), 1);
-
-// memory_copy.wast:1499
-assert_return(() => call($12, "load8_u", [2]), 2);
-
-// memory_copy.wast:1500
-assert_return(() => call($12, "load8_u", [3]), 3);
-
-// memory_copy.wast:1501
-assert_return(() => call($12, "load8_u", [4]), 4);
-
-// memory_copy.wast:1502
-assert_return(() => call($12, "load8_u", [5]), 5);
-
-// memory_copy.wast:1503
-assert_return(() => call($12, "load8_u", [6]), 6);
-
-// memory_copy.wast:1504
-assert_return(() => call($12, "load8_u", [7]), 7);
-
-// memory_copy.wast:1505
-assert_return(() => call($12, "load8_u", [8]), 8);
-
-// memory_copy.wast:1506
-assert_return(() => call($12, "load8_u", [9]), 9);
-
-// memory_copy.wast:1507
-assert_return(() => call($12, "load8_u", [10]), 10);
-
-// memory_copy.wast:1508
-assert_return(() => call($12, "load8_u", [11]), 11);
-
-// memory_copy.wast:1509
-assert_return(() => call($12, "load8_u", [12]), 12);
-
-// memory_copy.wast:1510
-assert_return(() => call($12, "load8_u", [13]), 13);
-
-// memory_copy.wast:1511
-assert_return(() => call($12, "load8_u", [14]), 14);
-
-// memory_copy.wast:1512
-assert_return(() => call($12, "load8_u", [15]), 15);
-
-// memory_copy.wast:1513
-assert_return(() => call($12, "load8_u", [16]), 16);
-
-// memory_copy.wast:1514
-assert_return(() => call($12, "load8_u", [17]), 17);
-
-// memory_copy.wast:1515
-assert_return(() => call($12, "load8_u", [18]), 18);
-
-// memory_copy.wast:1516
-assert_return(() => call($12, "load8_u", [19]), 19);
-
-// memory_copy.wast:1517
-assert_return(() => call($12, "load8_u", [20]), 20);
-
-// memory_copy.wast:1518
-assert_return(() => call($12, "load8_u", [219]), 0);
-
-// memory_copy.wast:1519
-assert_return(() => call($12, "load8_u", [418]), 0);
-
-// memory_copy.wast:1520
-assert_return(() => call($12, "load8_u", [617]), 0);
-
-// memory_copy.wast:1521
-assert_return(() => call($12, "load8_u", [816]), 0);
-
-// memory_copy.wast:1522
-assert_return(() => call($12, "load8_u", [1015]), 0);
-
-// memory_copy.wast:1523
-assert_return(() => call($12, "load8_u", [1214]), 0);
-
-// memory_copy.wast:1524
-assert_return(() => call($12, "load8_u", [1413]), 0);
-
-// memory_copy.wast:1525
-assert_return(() => call($12, "load8_u", [1612]), 0);
-
-// memory_copy.wast:1526
-assert_return(() => call($12, "load8_u", [1811]), 0);
-
-// memory_copy.wast:1527
-assert_return(() => call($12, "load8_u", [2010]), 0);
-
-// memory_copy.wast:1528
-assert_return(() => call($12, "load8_u", [2209]), 0);
-
-// memory_copy.wast:1529
-assert_return(() => call($12, "load8_u", [2408]), 0);
-
-// memory_copy.wast:1530
-assert_return(() => call($12, "load8_u", [2607]), 0);
-
-// memory_copy.wast:1531
-assert_return(() => call($12, "load8_u", [2806]), 0);
-
-// memory_copy.wast:1532
-assert_return(() => call($12, "load8_u", [3005]), 0);
-
-// memory_copy.wast:1533
-assert_return(() => call($12, "load8_u", [3204]), 0);
-
-// memory_copy.wast:1534
-assert_return(() => call($12, "load8_u", [3403]), 0);
-
-// memory_copy.wast:1535
-assert_return(() => call($12, "load8_u", [3602]), 0);
-
-// memory_copy.wast:1536
-assert_return(() => call($12, "load8_u", [3801]), 0);
-
-// memory_copy.wast:1537
-assert_return(() => call($12, "load8_u", [4000]), 0);
-
-// memory_copy.wast:1538
-assert_return(() => call($12, "load8_u", [4199]), 0);
-
-// memory_copy.wast:1539
-assert_return(() => call($12, "load8_u", [4398]), 0);
-
-// memory_copy.wast:1540
-assert_return(() => call($12, "load8_u", [4597]), 0);
-
-// memory_copy.wast:1541
-assert_return(() => call($12, "load8_u", [4796]), 0);
-
-// memory_copy.wast:1542
-assert_return(() => call($12, "load8_u", [4995]), 0);
-
-// memory_copy.wast:1543
-assert_return(() => call($12, "load8_u", [5194]), 0);
-
-// memory_copy.wast:1544
-assert_return(() => call($12, "load8_u", [5393]), 0);
-
-// memory_copy.wast:1545
-assert_return(() => call($12, "load8_u", [5592]), 0);
-
-// memory_copy.wast:1546
-assert_return(() => call($12, "load8_u", [5791]), 0);
-
-// memory_copy.wast:1547
-assert_return(() => call($12, "load8_u", [5990]), 0);
-
-// memory_copy.wast:1548
-assert_return(() => call($12, "load8_u", [6189]), 0);
-
-// memory_copy.wast:1549
-assert_return(() => call($12, "load8_u", [6388]), 0);
-
-// memory_copy.wast:1550
-assert_return(() => call($12, "load8_u", [6587]), 0);
-
-// memory_copy.wast:1551
-assert_return(() => call($12, "load8_u", [6786]), 0);
-
-// memory_copy.wast:1552
-assert_return(() => call($12, "load8_u", [6985]), 0);
-
-// memory_copy.wast:1553
-assert_return(() => call($12, "load8_u", [7184]), 0);
-
-// memory_copy.wast:1554
-assert_return(() => call($12, "load8_u", [7383]), 0);
-
-// memory_copy.wast:1555
-assert_return(() => call($12, "load8_u", [7582]), 0);
-
-// memory_copy.wast:1556
-assert_return(() => call($12, "load8_u", [7781]), 0);
-
-// memory_copy.wast:1557
-assert_return(() => call($12, "load8_u", [7980]), 0);
-
-// memory_copy.wast:1558
-assert_return(() => call($12, "load8_u", [8179]), 0);
-
-// memory_copy.wast:1559
-assert_return(() => call($12, "load8_u", [8378]), 0);
-
-// memory_copy.wast:1560
-assert_return(() => call($12, "load8_u", [8577]), 0);
-
-// memory_copy.wast:1561
-assert_return(() => call($12, "load8_u", [8776]), 0);
-
-// memory_copy.wast:1562
-assert_return(() => call($12, "load8_u", [8975]), 0);
-
-// memory_copy.wast:1563
-assert_return(() => call($12, "load8_u", [9174]), 0);
-
-// memory_copy.wast:1564
-assert_return(() => call($12, "load8_u", [9373]), 0);
-
-// memory_copy.wast:1565
-assert_return(() => call($12, "load8_u", [9572]), 0);
-
-// memory_copy.wast:1566
-assert_return(() => call($12, "load8_u", [9771]), 0);
-
-// memory_copy.wast:1567
-assert_return(() => call($12, "load8_u", [9970]), 0);
-
-// memory_copy.wast:1568
-assert_return(() => call($12, "load8_u", [10169]), 0);
-
-// memory_copy.wast:1569
-assert_return(() => call($12, "load8_u", [10368]), 0);
-
-// memory_copy.wast:1570
-assert_return(() => call($12, "load8_u", [10567]), 0);
-
-// memory_copy.wast:1571
-assert_return(() => call($12, "load8_u", [10766]), 0);
-
-// memory_copy.wast:1572
-assert_return(() => call($12, "load8_u", [10965]), 0);
-
-// memory_copy.wast:1573
-assert_return(() => call($12, "load8_u", [11164]), 0);
-
-// memory_copy.wast:1574
-assert_return(() => call($12, "load8_u", [11363]), 0);
-
-// memory_copy.wast:1575
-assert_return(() => call($12, "load8_u", [11562]), 0);
-
-// memory_copy.wast:1576
-assert_return(() => call($12, "load8_u", [11761]), 0);
-
-// memory_copy.wast:1577
-assert_return(() => call($12, "load8_u", [11960]), 0);
-
-// memory_copy.wast:1578
-assert_return(() => call($12, "load8_u", [12159]), 0);
-
-// memory_copy.wast:1579
-assert_return(() => call($12, "load8_u", [12358]), 0);
-
-// memory_copy.wast:1580
-assert_return(() => call($12, "load8_u", [12557]), 0);
-
-// memory_copy.wast:1581
-assert_return(() => call($12, "load8_u", [12756]), 0);
-
-// memory_copy.wast:1582
-assert_return(() => call($12, "load8_u", [12955]), 0);
-
-// memory_copy.wast:1583
-assert_return(() => call($12, "load8_u", [13154]), 0);
-
-// memory_copy.wast:1584
-assert_return(() => call($12, "load8_u", [13353]), 0);
-
-// memory_copy.wast:1585
-assert_return(() => call($12, "load8_u", [13552]), 0);
-
-// memory_copy.wast:1586
-assert_return(() => call($12, "load8_u", [13751]), 0);
-
-// memory_copy.wast:1587
-assert_return(() => call($12, "load8_u", [13950]), 0);
-
-// memory_copy.wast:1588
-assert_return(() => call($12, "load8_u", [14149]), 0);
-
-// memory_copy.wast:1589
-assert_return(() => call($12, "load8_u", [14348]), 0);
-
-// memory_copy.wast:1590
-assert_return(() => call($12, "load8_u", [14547]), 0);
-
-// memory_copy.wast:1591
-assert_return(() => call($12, "load8_u", [14746]), 0);
-
-// memory_copy.wast:1592
-assert_return(() => call($12, "load8_u", [14945]), 0);
-
-// memory_copy.wast:1593
-assert_return(() => call($12, "load8_u", [15144]), 0);
-
-// memory_copy.wast:1594
-assert_return(() => call($12, "load8_u", [15343]), 0);
-
-// memory_copy.wast:1595
-assert_return(() => call($12, "load8_u", [15542]), 0);
-
-// memory_copy.wast:1596
-assert_return(() => call($12, "load8_u", [15741]), 0);
-
-// memory_copy.wast:1597
-assert_return(() => call($12, "load8_u", [15940]), 0);
-
-// memory_copy.wast:1598
-assert_return(() => call($12, "load8_u", [16139]), 0);
-
-// memory_copy.wast:1599
-assert_return(() => call($12, "load8_u", [16338]), 0);
-
-// memory_copy.wast:1600
-assert_return(() => call($12, "load8_u", [16537]), 0);
-
-// memory_copy.wast:1601
-assert_return(() => call($12, "load8_u", [16736]), 0);
-
-// memory_copy.wast:1602
-assert_return(() => call($12, "load8_u", [16935]), 0);
-
-// memory_copy.wast:1603
-assert_return(() => call($12, "load8_u", [17134]), 0);
-
-// memory_copy.wast:1604
-assert_return(() => call($12, "load8_u", [17333]), 0);
-
-// memory_copy.wast:1605
-assert_return(() => call($12, "load8_u", [17532]), 0);
-
-// memory_copy.wast:1606
-assert_return(() => call($12, "load8_u", [17731]), 0);
-
-// memory_copy.wast:1607
-assert_return(() => call($12, "load8_u", [17930]), 0);
-
-// memory_copy.wast:1608
-assert_return(() => call($12, "load8_u", [18129]), 0);
-
-// memory_copy.wast:1609
-assert_return(() => call($12, "load8_u", [18328]), 0);
-
-// memory_copy.wast:1610
-assert_return(() => call($12, "load8_u", [18527]), 0);
-
-// memory_copy.wast:1611
-assert_return(() => call($12, "load8_u", [18726]), 0);
-
-// memory_copy.wast:1612
-assert_return(() => call($12, "load8_u", [18925]), 0);
-
-// memory_copy.wast:1613
-assert_return(() => call($12, "load8_u", [19124]), 0);
-
-// memory_copy.wast:1614
-assert_return(() => call($12, "load8_u", [19323]), 0);
-
-// memory_copy.wast:1615
-assert_return(() => call($12, "load8_u", [19522]), 0);
-
-// memory_copy.wast:1616
-assert_return(() => call($12, "load8_u", [19721]), 0);
-
-// memory_copy.wast:1617
-assert_return(() => call($12, "load8_u", [19920]), 0);
-
-// memory_copy.wast:1618
-assert_return(() => call($12, "load8_u", [20119]), 0);
-
-// memory_copy.wast:1619
-assert_return(() => call($12, "load8_u", [20318]), 0);
-
-// memory_copy.wast:1620
-assert_return(() => call($12, "load8_u", [20517]), 0);
-
-// memory_copy.wast:1621
-assert_return(() => call($12, "load8_u", [20716]), 0);
-
-// memory_copy.wast:1622
-assert_return(() => call($12, "load8_u", [20915]), 0);
-
-// memory_copy.wast:1623
-assert_return(() => call($12, "load8_u", [21114]), 0);
-
-// memory_copy.wast:1624
-assert_return(() => call($12, "load8_u", [21313]), 0);
-
-// memory_copy.wast:1625
-assert_return(() => call($12, "load8_u", [21512]), 0);
-
-// memory_copy.wast:1626
-assert_return(() => call($12, "load8_u", [21711]), 0);
-
-// memory_copy.wast:1627
-assert_return(() => call($12, "load8_u", [21910]), 0);
-
-// memory_copy.wast:1628
-assert_return(() => call($12, "load8_u", [22109]), 0);
-
-// memory_copy.wast:1629
-assert_return(() => call($12, "load8_u", [22308]), 0);
-
-// memory_copy.wast:1630
-assert_return(() => call($12, "load8_u", [22507]), 0);
-
-// memory_copy.wast:1631
-assert_return(() => call($12, "load8_u", [22706]), 0);
-
-// memory_copy.wast:1632
-assert_return(() => call($12, "load8_u", [22905]), 0);
-
-// memory_copy.wast:1633
-assert_return(() => call($12, "load8_u", [23104]), 0);
-
-// memory_copy.wast:1634
-assert_return(() => call($12, "load8_u", [23303]), 0);
-
-// memory_copy.wast:1635
-assert_return(() => call($12, "load8_u", [23502]), 0);
-
-// memory_copy.wast:1636
-assert_return(() => call($12, "load8_u", [23701]), 0);
-
-// memory_copy.wast:1637
-assert_return(() => call($12, "load8_u", [23900]), 0);
-
-// memory_copy.wast:1638
-assert_return(() => call($12, "load8_u", [24099]), 0);
-
-// memory_copy.wast:1639
-assert_return(() => call($12, "load8_u", [24298]), 0);
-
-// memory_copy.wast:1640
-assert_return(() => call($12, "load8_u", [24497]), 0);
-
-// memory_copy.wast:1641
-assert_return(() => call($12, "load8_u", [24696]), 0);
-
-// memory_copy.wast:1642
-assert_return(() => call($12, "load8_u", [24895]), 0);
-
-// memory_copy.wast:1643
-assert_return(() => call($12, "load8_u", [25094]), 0);
-
-// memory_copy.wast:1644
-assert_return(() => call($12, "load8_u", [25293]), 0);
-
-// memory_copy.wast:1645
-assert_return(() => call($12, "load8_u", [25492]), 0);
-
-// memory_copy.wast:1646
-assert_return(() => call($12, "load8_u", [25691]), 0);
-
-// memory_copy.wast:1647
-assert_return(() => call($12, "load8_u", [25890]), 0);
-
-// memory_copy.wast:1648
-assert_return(() => call($12, "load8_u", [26089]), 0);
-
-// memory_copy.wast:1649
-assert_return(() => call($12, "load8_u", [26288]), 0);
-
-// memory_copy.wast:1650
-assert_return(() => call($12, "load8_u", [26487]), 0);
-
-// memory_copy.wast:1651
-assert_return(() => call($12, "load8_u", [26686]), 0);
-
-// memory_copy.wast:1652
-assert_return(() => call($12, "load8_u", [26885]), 0);
-
-// memory_copy.wast:1653
-assert_return(() => call($12, "load8_u", [27084]), 0);
-
-// memory_copy.wast:1654
-assert_return(() => call($12, "load8_u", [27283]), 0);
-
-// memory_copy.wast:1655
-assert_return(() => call($12, "load8_u", [27482]), 0);
-
-// memory_copy.wast:1656
-assert_return(() => call($12, "load8_u", [27681]), 0);
-
-// memory_copy.wast:1657
-assert_return(() => call($12, "load8_u", [27880]), 0);
-
-// memory_copy.wast:1658
-assert_return(() => call($12, "load8_u", [28079]), 0);
-
-// memory_copy.wast:1659
-assert_return(() => call($12, "load8_u", [28278]), 0);
-
-// memory_copy.wast:1660
-assert_return(() => call($12, "load8_u", [28477]), 0);
-
-// memory_copy.wast:1661
-assert_return(() => call($12, "load8_u", [28676]), 0);
-
-// memory_copy.wast:1662
-assert_return(() => call($12, "load8_u", [28875]), 0);
-
-// memory_copy.wast:1663
-assert_return(() => call($12, "load8_u", [29074]), 0);
-
-// memory_copy.wast:1664
-assert_return(() => call($12, "load8_u", [29273]), 0);
-
-// memory_copy.wast:1665
-assert_return(() => call($12, "load8_u", [29472]), 0);
-
-// memory_copy.wast:1666
-assert_return(() => call($12, "load8_u", [29671]), 0);
-
-// memory_copy.wast:1667
-assert_return(() => call($12, "load8_u", [29870]), 0);
-
-// memory_copy.wast:1668
-assert_return(() => call($12, "load8_u", [30069]), 0);
-
-// memory_copy.wast:1669
-assert_return(() => call($12, "load8_u", [30268]), 0);
-
-// memory_copy.wast:1670
-assert_return(() => call($12, "load8_u", [30467]), 0);
-
-// memory_copy.wast:1671
-assert_return(() => call($12, "load8_u", [30666]), 0);
-
-// memory_copy.wast:1672
-assert_return(() => call($12, "load8_u", [30865]), 0);
-
-// memory_copy.wast:1673
-assert_return(() => call($12, "load8_u", [31064]), 0);
-
-// memory_copy.wast:1674
-assert_return(() => call($12, "load8_u", [31263]), 0);
-
-// memory_copy.wast:1675
-assert_return(() => call($12, "load8_u", [31462]), 0);
-
-// memory_copy.wast:1676
-assert_return(() => call($12, "load8_u", [31661]), 0);
-
-// memory_copy.wast:1677
-assert_return(() => call($12, "load8_u", [31860]), 0);
-
-// memory_copy.wast:1678
-assert_return(() => call($12, "load8_u", [32059]), 0);
-
-// memory_copy.wast:1679
-assert_return(() => call($12, "load8_u", [32258]), 0);
-
-// memory_copy.wast:1680
-assert_return(() => call($12, "load8_u", [32457]), 0);
-
-// memory_copy.wast:1681
-assert_return(() => call($12, "load8_u", [32656]), 0);
-
-// memory_copy.wast:1682
-assert_return(() => call($12, "load8_u", [32855]), 0);
-
-// memory_copy.wast:1683
-assert_return(() => call($12, "load8_u", [33054]), 0);
-
-// memory_copy.wast:1684
-assert_return(() => call($12, "load8_u", [33253]), 0);
-
-// memory_copy.wast:1685
-assert_return(() => call($12, "load8_u", [33452]), 0);
-
-// memory_copy.wast:1686
-assert_return(() => call($12, "load8_u", [33651]), 0);
-
-// memory_copy.wast:1687
-assert_return(() => call($12, "load8_u", [33850]), 0);
-
-// memory_copy.wast:1688
-assert_return(() => call($12, "load8_u", [34049]), 0);
-
-// memory_copy.wast:1689
-assert_return(() => call($12, "load8_u", [34248]), 0);
-
-// memory_copy.wast:1690
-assert_return(() => call($12, "load8_u", [34447]), 0);
-
-// memory_copy.wast:1691
-assert_return(() => call($12, "load8_u", [34646]), 0);
-
-// memory_copy.wast:1692
-assert_return(() => call($12, "load8_u", [34845]), 0);
-
-// memory_copy.wast:1693
-assert_return(() => call($12, "load8_u", [35044]), 0);
-
-// memory_copy.wast:1694
-assert_return(() => call($12, "load8_u", [35243]), 0);
-
-// memory_copy.wast:1695
-assert_return(() => call($12, "load8_u", [35442]), 0);
-
-// memory_copy.wast:1696
-assert_return(() => call($12, "load8_u", [35641]), 0);
-
-// memory_copy.wast:1697
-assert_return(() => call($12, "load8_u", [35840]), 0);
-
-// memory_copy.wast:1698
-assert_return(() => call($12, "load8_u", [36039]), 0);
-
-// memory_copy.wast:1699
-assert_return(() => call($12, "load8_u", [36238]), 0);
-
-// memory_copy.wast:1700
-assert_return(() => call($12, "load8_u", [36437]), 0);
-
-// memory_copy.wast:1701
-assert_return(() => call($12, "load8_u", [36636]), 0);
-
-// memory_copy.wast:1702
-assert_return(() => call($12, "load8_u", [36835]), 0);
-
-// memory_copy.wast:1703
-assert_return(() => call($12, "load8_u", [37034]), 0);
-
-// memory_copy.wast:1704
-assert_return(() => call($12, "load8_u", [37233]), 0);
-
-// memory_copy.wast:1705
-assert_return(() => call($12, "load8_u", [37432]), 0);
-
-// memory_copy.wast:1706
-assert_return(() => call($12, "load8_u", [37631]), 0);
-
-// memory_copy.wast:1707
-assert_return(() => call($12, "load8_u", [37830]), 0);
-
-// memory_copy.wast:1708
-assert_return(() => call($12, "load8_u", [38029]), 0);
-
-// memory_copy.wast:1709
-assert_return(() => call($12, "load8_u", [38228]), 0);
-
-// memory_copy.wast:1710
-assert_return(() => call($12, "load8_u", [38427]), 0);
-
-// memory_copy.wast:1711
-assert_return(() => call($12, "load8_u", [38626]), 0);
-
-// memory_copy.wast:1712
-assert_return(() => call($12, "load8_u", [38825]), 0);
-
-// memory_copy.wast:1713
-assert_return(() => call($12, "load8_u", [39024]), 0);
-
-// memory_copy.wast:1714
-assert_return(() => call($12, "load8_u", [39223]), 0);
-
-// memory_copy.wast:1715
-assert_return(() => call($12, "load8_u", [39422]), 0);
-
-// memory_copy.wast:1716
-assert_return(() => call($12, "load8_u", [39621]), 0);
-
-// memory_copy.wast:1717
-assert_return(() => call($12, "load8_u", [39820]), 0);
-
-// memory_copy.wast:1718
-assert_return(() => call($12, "load8_u", [40019]), 0);
-
-// memory_copy.wast:1719
-assert_return(() => call($12, "load8_u", [40218]), 0);
-
-// memory_copy.wast:1720
-assert_return(() => call($12, "load8_u", [40417]), 0);
-
-// memory_copy.wast:1721
-assert_return(() => call($12, "load8_u", [40616]), 0);
-
-// memory_copy.wast:1722
-assert_return(() => call($12, "load8_u", [40815]), 0);
-
-// memory_copy.wast:1723
-assert_return(() => call($12, "load8_u", [41014]), 0);
-
-// memory_copy.wast:1724
-assert_return(() => call($12, "load8_u", [41213]), 0);
-
-// memory_copy.wast:1725
-assert_return(() => call($12, "load8_u", [41412]), 0);
-
-// memory_copy.wast:1726
-assert_return(() => call($12, "load8_u", [41611]), 0);
-
-// memory_copy.wast:1727
-assert_return(() => call($12, "load8_u", [41810]), 0);
-
-// memory_copy.wast:1728
-assert_return(() => call($12, "load8_u", [42009]), 0);
-
-// memory_copy.wast:1729
-assert_return(() => call($12, "load8_u", [42208]), 0);
-
-// memory_copy.wast:1730
-assert_return(() => call($12, "load8_u", [42407]), 0);
-
-// memory_copy.wast:1731
-assert_return(() => call($12, "load8_u", [42606]), 0);
-
-// memory_copy.wast:1732
-assert_return(() => call($12, "load8_u", [42805]), 0);
-
-// memory_copy.wast:1733
-assert_return(() => call($12, "load8_u", [43004]), 0);
-
-// memory_copy.wast:1734
-assert_return(() => call($12, "load8_u", [43203]), 0);
-
-// memory_copy.wast:1735
-assert_return(() => call($12, "load8_u", [43402]), 0);
-
-// memory_copy.wast:1736
-assert_return(() => call($12, "load8_u", [43601]), 0);
-
-// memory_copy.wast:1737
-assert_return(() => call($12, "load8_u", [43800]), 0);
-
-// memory_copy.wast:1738
-assert_return(() => call($12, "load8_u", [43999]), 0);
-
-// memory_copy.wast:1739
-assert_return(() => call($12, "load8_u", [44198]), 0);
-
-// memory_copy.wast:1740
-assert_return(() => call($12, "load8_u", [44397]), 0);
-
-// memory_copy.wast:1741
-assert_return(() => call($12, "load8_u", [44596]), 0);
-
-// memory_copy.wast:1742
-assert_return(() => call($12, "load8_u", [44795]), 0);
-
-// memory_copy.wast:1743
-assert_return(() => call($12, "load8_u", [44994]), 0);
-
-// memory_copy.wast:1744
-assert_return(() => call($12, "load8_u", [45193]), 0);
-
-// memory_copy.wast:1745
-assert_return(() => call($12, "load8_u", [45392]), 0);
-
-// memory_copy.wast:1746
-assert_return(() => call($12, "load8_u", [45591]), 0);
-
-// memory_copy.wast:1747
-assert_return(() => call($12, "load8_u", [45790]), 0);
-
-// memory_copy.wast:1748
-assert_return(() => call($12, "load8_u", [45989]), 0);
-
-// memory_copy.wast:1749
-assert_return(() => call($12, "load8_u", [46188]), 0);
-
-// memory_copy.wast:1750
-assert_return(() => call($12, "load8_u", [46387]), 0);
-
-// memory_copy.wast:1751
-assert_return(() => call($12, "load8_u", [46586]), 0);
-
-// memory_copy.wast:1752
-assert_return(() => call($12, "load8_u", [46785]), 0);
-
-// memory_copy.wast:1753
-assert_return(() => call($12, "load8_u", [46984]), 0);
-
-// memory_copy.wast:1754
-assert_return(() => call($12, "load8_u", [47183]), 0);
-
-// memory_copy.wast:1755
-assert_return(() => call($12, "load8_u", [47382]), 0);
-
-// memory_copy.wast:1756
-assert_return(() => call($12, "load8_u", [47581]), 0);
-
-// memory_copy.wast:1757
-assert_return(() => call($12, "load8_u", [47780]), 0);
-
-// memory_copy.wast:1758
-assert_return(() => call($12, "load8_u", [47979]), 0);
-
-// memory_copy.wast:1759
-assert_return(() => call($12, "load8_u", [48178]), 0);
-
-// memory_copy.wast:1760
-assert_return(() => call($12, "load8_u", [48377]), 0);
-
-// memory_copy.wast:1761
-assert_return(() => call($12, "load8_u", [48576]), 0);
-
-// memory_copy.wast:1762
-assert_return(() => call($12, "load8_u", [48775]), 0);
-
-// memory_copy.wast:1763
-assert_return(() => call($12, "load8_u", [48974]), 0);
-
-// memory_copy.wast:1764
-assert_return(() => call($12, "load8_u", [49173]), 0);
-
-// memory_copy.wast:1765
-assert_return(() => call($12, "load8_u", [49372]), 0);
-
-// memory_copy.wast:1766
-assert_return(() => call($12, "load8_u", [49571]), 0);
-
-// memory_copy.wast:1767
-assert_return(() => call($12, "load8_u", [49770]), 0);
-
-// memory_copy.wast:1768
-assert_return(() => call($12, "load8_u", [49969]), 0);
-
-// memory_copy.wast:1769
-assert_return(() => call($12, "load8_u", [50168]), 0);
-
-// memory_copy.wast:1770
-assert_return(() => call($12, "load8_u", [50367]), 0);
-
-// memory_copy.wast:1771
-assert_return(() => call($12, "load8_u", [50566]), 0);
-
-// memory_copy.wast:1772
-assert_return(() => call($12, "load8_u", [50765]), 0);
-
-// memory_copy.wast:1773
-assert_return(() => call($12, "load8_u", [50964]), 0);
-
-// memory_copy.wast:1774
-assert_return(() => call($12, "load8_u", [51163]), 0);
-
-// memory_copy.wast:1775
-assert_return(() => call($12, "load8_u", [51362]), 0);
-
-// memory_copy.wast:1776
-assert_return(() => call($12, "load8_u", [51561]), 0);
-
-// memory_copy.wast:1777
-assert_return(() => call($12, "load8_u", [51760]), 0);
-
-// memory_copy.wast:1778
-assert_return(() => call($12, "load8_u", [51959]), 0);
-
-// memory_copy.wast:1779
-assert_return(() => call($12, "load8_u", [52158]), 0);
-
-// memory_copy.wast:1780
-assert_return(() => call($12, "load8_u", [52357]), 0);
-
-// memory_copy.wast:1781
-assert_return(() => call($12, "load8_u", [52556]), 0);
-
-// memory_copy.wast:1782
-assert_return(() => call($12, "load8_u", [52755]), 0);
-
-// memory_copy.wast:1783
-assert_return(() => call($12, "load8_u", [52954]), 0);
-
-// memory_copy.wast:1784
-assert_return(() => call($12, "load8_u", [53153]), 0);
-
-// memory_copy.wast:1785
-assert_return(() => call($12, "load8_u", [53352]), 0);
-
-// memory_copy.wast:1786
-assert_return(() => call($12, "load8_u", [53551]), 0);
-
-// memory_copy.wast:1787
-assert_return(() => call($12, "load8_u", [53750]), 0);
-
-// memory_copy.wast:1788
-assert_return(() => call($12, "load8_u", [53949]), 0);
-
-// memory_copy.wast:1789
-assert_return(() => call($12, "load8_u", [54148]), 0);
-
-// memory_copy.wast:1790
-assert_return(() => call($12, "load8_u", [54347]), 0);
-
-// memory_copy.wast:1791
-assert_return(() => call($12, "load8_u", [54546]), 0);
-
-// memory_copy.wast:1792
-assert_return(() => call($12, "load8_u", [54745]), 0);
-
-// memory_copy.wast:1793
-assert_return(() => call($12, "load8_u", [54944]), 0);
-
-// memory_copy.wast:1794
-assert_return(() => call($12, "load8_u", [55143]), 0);
-
-// memory_copy.wast:1795
-assert_return(() => call($12, "load8_u", [55342]), 0);
-
-// memory_copy.wast:1796
-assert_return(() => call($12, "load8_u", [55541]), 0);
-
-// memory_copy.wast:1797
-assert_return(() => call($12, "load8_u", [55740]), 0);
-
-// memory_copy.wast:1798
-assert_return(() => call($12, "load8_u", [55939]), 0);
-
-// memory_copy.wast:1799
-assert_return(() => call($12, "load8_u", [56138]), 0);
-
-// memory_copy.wast:1800
-assert_return(() => call($12, "load8_u", [56337]), 0);
-
-// memory_copy.wast:1801
-assert_return(() => call($12, "load8_u", [56536]), 0);
-
-// memory_copy.wast:1802
-assert_return(() => call($12, "load8_u", [56735]), 0);
-
-// memory_copy.wast:1803
-assert_return(() => call($12, "load8_u", [56934]), 0);
-
-// memory_copy.wast:1804
-assert_return(() => call($12, "load8_u", [57133]), 0);
-
-// memory_copy.wast:1805
-assert_return(() => call($12, "load8_u", [57332]), 0);
-
-// memory_copy.wast:1806
-assert_return(() => call($12, "load8_u", [57531]), 0);
-
-// memory_copy.wast:1807
-assert_return(() => call($12, "load8_u", [57730]), 0);
-
-// memory_copy.wast:1808
-assert_return(() => call($12, "load8_u", [57929]), 0);
-
-// memory_copy.wast:1809
-assert_return(() => call($12, "load8_u", [58128]), 0);
-
-// memory_copy.wast:1810
-assert_return(() => call($12, "load8_u", [58327]), 0);
-
-// memory_copy.wast:1811
-assert_return(() => call($12, "load8_u", [58526]), 0);
-
-// memory_copy.wast:1812
-assert_return(() => call($12, "load8_u", [58725]), 0);
-
-// memory_copy.wast:1813
-assert_return(() => call($12, "load8_u", [58924]), 0);
-
-// memory_copy.wast:1814
-assert_return(() => call($12, "load8_u", [59123]), 0);
-
-// memory_copy.wast:1815
-assert_return(() => call($12, "load8_u", [59322]), 0);
-
-// memory_copy.wast:1816
-assert_return(() => call($12, "load8_u", [59521]), 0);
-
-// memory_copy.wast:1817
-assert_return(() => call($12, "load8_u", [59720]), 0);
-
-// memory_copy.wast:1818
-assert_return(() => call($12, "load8_u", [59919]), 0);
-
-// memory_copy.wast:1819
-assert_return(() => call($12, "load8_u", [60118]), 0);
-
-// memory_copy.wast:1820
-assert_return(() => call($12, "load8_u", [60317]), 0);
-
-// memory_copy.wast:1821
-assert_return(() => call($12, "load8_u", [60516]), 0);
-
-// memory_copy.wast:1822
-assert_return(() => call($12, "load8_u", [60715]), 0);
-
-// memory_copy.wast:1823
-assert_return(() => call($12, "load8_u", [60914]), 0);
-
-// memory_copy.wast:1824
-assert_return(() => call($12, "load8_u", [61113]), 0);
-
-// memory_copy.wast:1825
-assert_return(() => call($12, "load8_u", [61312]), 0);
-
-// memory_copy.wast:1826
-assert_return(() => call($12, "load8_u", [61511]), 0);
-
-// memory_copy.wast:1827
-assert_return(() => call($12, "load8_u", [61710]), 0);
-
-// memory_copy.wast:1828
-assert_return(() => call($12, "load8_u", [61909]), 0);
-
-// memory_copy.wast:1829
-assert_return(() => call($12, "load8_u", [62108]), 0);
-
-// memory_copy.wast:1830
-assert_return(() => call($12, "load8_u", [62307]), 0);
-
-// memory_copy.wast:1831
-assert_return(() => call($12, "load8_u", [62506]), 0);
-
-// memory_copy.wast:1832
-assert_return(() => call($12, "load8_u", [62705]), 0);
-
-// memory_copy.wast:1833
-assert_return(() => call($12, "load8_u", [62904]), 0);
-
-// memory_copy.wast:1834
-assert_return(() => call($12, "load8_u", [63103]), 0);
-
-// memory_copy.wast:1835
-assert_return(() => call($12, "load8_u", [63302]), 0);
-
-// memory_copy.wast:1836
-assert_return(() => call($12, "load8_u", [63501]), 0);
-
-// memory_copy.wast:1837
-assert_return(() => call($12, "load8_u", [63700]), 0);
-
-// memory_copy.wast:1838
-assert_return(() => call($12, "load8_u", [63899]), 0);
-
-// memory_copy.wast:1839
-assert_return(() => call($12, "load8_u", [64098]), 0);
-
-// memory_copy.wast:1840
-assert_return(() => call($12, "load8_u", [64297]), 0);
-
-// memory_copy.wast:1841
-assert_return(() => call($12, "load8_u", [64496]), 0);
-
-// memory_copy.wast:1842
-assert_return(() => call($12, "load8_u", [64695]), 0);
-
-// memory_copy.wast:1843
-assert_return(() => call($12, "load8_u", [64894]), 0);
-
-// memory_copy.wast:1844
-assert_return(() => call($12, "load8_u", [65093]), 0);
-
-// memory_copy.wast:1845
-assert_return(() => call($12, "load8_u", [65292]), 0);
-
-// memory_copy.wast:1846
-assert_return(() => call($12, "load8_u", [65491]), 0);
-
-// memory_copy.wast:1847
-assert_return(() => call($12, "load8_u", [65515]), 0);
-
-// memory_copy.wast:1848
-assert_return(() => call($12, "load8_u", [65516]), 1);
-
-// memory_copy.wast:1849
-assert_return(() => call($12, "load8_u", [65517]), 2);
-
-// memory_copy.wast:1850
-assert_return(() => call($12, "load8_u", [65518]), 3);
-
-// memory_copy.wast:1851
-assert_return(() => call($12, "load8_u", [65519]), 4);
-
-// memory_copy.wast:1852
-assert_return(() => call($12, "load8_u", [65520]), 5);
-
-// memory_copy.wast:1853
-assert_return(() => call($12, "load8_u", [65521]), 6);
-
-// memory_copy.wast:1854
-assert_return(() => call($12, "load8_u", [65522]), 7);
-
-// memory_copy.wast:1855
-assert_return(() => call($12, "load8_u", [65523]), 8);
-
-// memory_copy.wast:1856
-assert_return(() => call($12, "load8_u", [65524]), 9);
-
-// memory_copy.wast:1857
-assert_return(() => call($12, "load8_u", [65525]), 10);
-
-// memory_copy.wast:1858
-assert_return(() => call($12, "load8_u", [65526]), 11);
-
-// memory_copy.wast:1859
-assert_return(() => call($12, "load8_u", [65527]), 12);
-
-// memory_copy.wast:1860
-assert_return(() => call($12, "load8_u", [65528]), 13);
-
-// memory_copy.wast:1861
-assert_return(() => call($12, "load8_u", [65529]), 14);
-
-// memory_copy.wast:1862
-assert_return(() => call($12, "load8_u", [65530]), 15);
-
-// memory_copy.wast:1863
-assert_return(() => call($12, "load8_u", [65531]), 16);
-
-// memory_copy.wast:1864
-assert_return(() => call($12, "load8_u", [65532]), 17);
-
-// memory_copy.wast:1865
-assert_return(() => call($12, "load8_u", [65533]), 18);
-
-// memory_copy.wast:1866
-assert_return(() => call($12, "load8_u", [65534]), 19);
-
-// memory_copy.wast:1867
-assert_return(() => call($12, "load8_u", [65535]), 20);
-
-// memory_copy.wast:1869
-let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xce\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:1877
-assert_trap(() => call($13, "run", [65516, 65486, 40]));
-
-// memory_copy.wast:1880
-assert_return(() => call($13, "load8_u", [198]), 0);
-
-// memory_copy.wast:1881
-assert_return(() => call($13, "load8_u", [397]), 0);
-
-// memory_copy.wast:1882
-assert_return(() => call($13, "load8_u", [596]), 0);
-
-// memory_copy.wast:1883
-assert_return(() => call($13, "load8_u", [795]), 0);
-
-// memory_copy.wast:1884
-assert_return(() => call($13, "load8_u", [994]), 0);
-
-// memory_copy.wast:1885
-assert_return(() => call($13, "load8_u", [1193]), 0);
-
-// memory_copy.wast:1886
-assert_return(() => call($13, "load8_u", [1392]), 0);
-
-// memory_copy.wast:1887
-assert_return(() => call($13, "load8_u", [1591]), 0);
-
-// memory_copy.wast:1888
-assert_return(() => call($13, "load8_u", [1790]), 0);
-
-// memory_copy.wast:1889
-assert_return(() => call($13, "load8_u", [1989]), 0);
-
-// memory_copy.wast:1890
-assert_return(() => call($13, "load8_u", [2188]), 0);
-
-// memory_copy.wast:1891
-assert_return(() => call($13, "load8_u", [2387]), 0);
-
-// memory_copy.wast:1892
-assert_return(() => call($13, "load8_u", [2586]), 0);
-
-// memory_copy.wast:1893
-assert_return(() => call($13, "load8_u", [2785]), 0);
-
-// memory_copy.wast:1894
-assert_return(() => call($13, "load8_u", [2984]), 0);
-
-// memory_copy.wast:1895
-assert_return(() => call($13, "load8_u", [3183]), 0);
-
-// memory_copy.wast:1896
-assert_return(() => call($13, "load8_u", [3382]), 0);
-
-// memory_copy.wast:1897
-assert_return(() => call($13, "load8_u", [3581]), 0);
-
-// memory_copy.wast:1898
-assert_return(() => call($13, "load8_u", [3780]), 0);
-
-// memory_copy.wast:1899
-assert_return(() => call($13, "load8_u", [3979]), 0);
-
-// memory_copy.wast:1900
-assert_return(() => call($13, "load8_u", [4178]), 0);
-
-// memory_copy.wast:1901
-assert_return(() => call($13, "load8_u", [4377]), 0);
-
-// memory_copy.wast:1902
-assert_return(() => call($13, "load8_u", [4576]), 0);
-
-// memory_copy.wast:1903
-assert_return(() => call($13, "load8_u", [4775]), 0);
-
-// memory_copy.wast:1904
-assert_return(() => call($13, "load8_u", [4974]), 0);
-
-// memory_copy.wast:1905
-assert_return(() => call($13, "load8_u", [5173]), 0);
-
-// memory_copy.wast:1906
-assert_return(() => call($13, "load8_u", [5372]), 0);
-
-// memory_copy.wast:1907
-assert_return(() => call($13, "load8_u", [5571]), 0);
-
-// memory_copy.wast:1908
-assert_return(() => call($13, "load8_u", [5770]), 0);
-
-// memory_copy.wast:1909
-assert_return(() => call($13, "load8_u", [5969]), 0);
-
-// memory_copy.wast:1910
-assert_return(() => call($13, "load8_u", [6168]), 0);
-
-// memory_copy.wast:1911
-assert_return(() => call($13, "load8_u", [6367]), 0);
-
-// memory_copy.wast:1912
-assert_return(() => call($13, "load8_u", [6566]), 0);
-
-// memory_copy.wast:1913
-assert_return(() => call($13, "load8_u", [6765]), 0);
-
-// memory_copy.wast:1914
-assert_return(() => call($13, "load8_u", [6964]), 0);
-
-// memory_copy.wast:1915
-assert_return(() => call($13, "load8_u", [7163]), 0);
-
-// memory_copy.wast:1916
-assert_return(() => call($13, "load8_u", [7362]), 0);
-
-// memory_copy.wast:1917
-assert_return(() => call($13, "load8_u", [7561]), 0);
-
-// memory_copy.wast:1918
-assert_return(() => call($13, "load8_u", [7760]), 0);
-
-// memory_copy.wast:1919
-assert_return(() => call($13, "load8_u", [7959]), 0);
-
-// memory_copy.wast:1920
-assert_return(() => call($13, "load8_u", [8158]), 0);
-
-// memory_copy.wast:1921
-assert_return(() => call($13, "load8_u", [8357]), 0);
-
-// memory_copy.wast:1922
-assert_return(() => call($13, "load8_u", [8556]), 0);
-
-// memory_copy.wast:1923
-assert_return(() => call($13, "load8_u", [8755]), 0);
-
-// memory_copy.wast:1924
-assert_return(() => call($13, "load8_u", [8954]), 0);
-
-// memory_copy.wast:1925
-assert_return(() => call($13, "load8_u", [9153]), 0);
-
-// memory_copy.wast:1926
-assert_return(() => call($13, "load8_u", [9352]), 0);
-
-// memory_copy.wast:1927
-assert_return(() => call($13, "load8_u", [9551]), 0);
-
-// memory_copy.wast:1928
-assert_return(() => call($13, "load8_u", [9750]), 0);
-
-// memory_copy.wast:1929
-assert_return(() => call($13, "load8_u", [9949]), 0);
-
-// memory_copy.wast:1930
-assert_return(() => call($13, "load8_u", [10148]), 0);
-
-// memory_copy.wast:1931
-assert_return(() => call($13, "load8_u", [10347]), 0);
-
-// memory_copy.wast:1932
-assert_return(() => call($13, "load8_u", [10546]), 0);
-
-// memory_copy.wast:1933
-assert_return(() => call($13, "load8_u", [10745]), 0);
-
-// memory_copy.wast:1934
-assert_return(() => call($13, "load8_u", [10944]), 0);
-
-// memory_copy.wast:1935
-assert_return(() => call($13, "load8_u", [11143]), 0);
-
-// memory_copy.wast:1936
-assert_return(() => call($13, "load8_u", [11342]), 0);
-
-// memory_copy.wast:1937
-assert_return(() => call($13, "load8_u", [11541]), 0);
-
-// memory_copy.wast:1938
-assert_return(() => call($13, "load8_u", [11740]), 0);
-
-// memory_copy.wast:1939
-assert_return(() => call($13, "load8_u", [11939]), 0);
-
-// memory_copy.wast:1940
-assert_return(() => call($13, "load8_u", [12138]), 0);
-
-// memory_copy.wast:1941
-assert_return(() => call($13, "load8_u", [12337]), 0);
-
-// memory_copy.wast:1942
-assert_return(() => call($13, "load8_u", [12536]), 0);
-
-// memory_copy.wast:1943
-assert_return(() => call($13, "load8_u", [12735]), 0);
-
-// memory_copy.wast:1944
-assert_return(() => call($13, "load8_u", [12934]), 0);
-
-// memory_copy.wast:1945
-assert_return(() => call($13, "load8_u", [13133]), 0);
-
-// memory_copy.wast:1946
-assert_return(() => call($13, "load8_u", [13332]), 0);
-
-// memory_copy.wast:1947
-assert_return(() => call($13, "load8_u", [13531]), 0);
-
-// memory_copy.wast:1948
-assert_return(() => call($13, "load8_u", [13730]), 0);
-
-// memory_copy.wast:1949
-assert_return(() => call($13, "load8_u", [13929]), 0);
-
-// memory_copy.wast:1950
-assert_return(() => call($13, "load8_u", [14128]), 0);
-
-// memory_copy.wast:1951
-assert_return(() => call($13, "load8_u", [14327]), 0);
-
-// memory_copy.wast:1952
-assert_return(() => call($13, "load8_u", [14526]), 0);
-
-// memory_copy.wast:1953
-assert_return(() => call($13, "load8_u", [14725]), 0);
-
-// memory_copy.wast:1954
-assert_return(() => call($13, "load8_u", [14924]), 0);
-
-// memory_copy.wast:1955
-assert_return(() => call($13, "load8_u", [15123]), 0);
-
-// memory_copy.wast:1956
-assert_return(() => call($13, "load8_u", [15322]), 0);
-
-// memory_copy.wast:1957
-assert_return(() => call($13, "load8_u", [15521]), 0);
-
-// memory_copy.wast:1958
-assert_return(() => call($13, "load8_u", [15720]), 0);
-
-// memory_copy.wast:1959
-assert_return(() => call($13, "load8_u", [15919]), 0);
-
-// memory_copy.wast:1960
-assert_return(() => call($13, "load8_u", [16118]), 0);
-
-// memory_copy.wast:1961
-assert_return(() => call($13, "load8_u", [16317]), 0);
-
-// memory_copy.wast:1962
-assert_return(() => call($13, "load8_u", [16516]), 0);
-
-// memory_copy.wast:1963
-assert_return(() => call($13, "load8_u", [16715]), 0);
-
-// memory_copy.wast:1964
-assert_return(() => call($13, "load8_u", [16914]), 0);
-
-// memory_copy.wast:1965
-assert_return(() => call($13, "load8_u", [17113]), 0);
-
-// memory_copy.wast:1966
-assert_return(() => call($13, "load8_u", [17312]), 0);
-
-// memory_copy.wast:1967
-assert_return(() => call($13, "load8_u", [17511]), 0);
-
-// memory_copy.wast:1968
-assert_return(() => call($13, "load8_u", [17710]), 0);
-
-// memory_copy.wast:1969
-assert_return(() => call($13, "load8_u", [17909]), 0);
-
-// memory_copy.wast:1970
-assert_return(() => call($13, "load8_u", [18108]), 0);
-
-// memory_copy.wast:1971
-assert_return(() => call($13, "load8_u", [18307]), 0);
-
-// memory_copy.wast:1972
-assert_return(() => call($13, "load8_u", [18506]), 0);
-
-// memory_copy.wast:1973
-assert_return(() => call($13, "load8_u", [18705]), 0);
-
-// memory_copy.wast:1974
-assert_return(() => call($13, "load8_u", [18904]), 0);
-
-// memory_copy.wast:1975
-assert_return(() => call($13, "load8_u", [19103]), 0);
-
-// memory_copy.wast:1976
-assert_return(() => call($13, "load8_u", [19302]), 0);
-
-// memory_copy.wast:1977
-assert_return(() => call($13, "load8_u", [19501]), 0);
-
-// memory_copy.wast:1978
-assert_return(() => call($13, "load8_u", [19700]), 0);
-
-// memory_copy.wast:1979
-assert_return(() => call($13, "load8_u", [19899]), 0);
-
-// memory_copy.wast:1980
-assert_return(() => call($13, "load8_u", [20098]), 0);
-
-// memory_copy.wast:1981
-assert_return(() => call($13, "load8_u", [20297]), 0);
-
-// memory_copy.wast:1982
-assert_return(() => call($13, "load8_u", [20496]), 0);
-
-// memory_copy.wast:1983
-assert_return(() => call($13, "load8_u", [20695]), 0);
-
-// memory_copy.wast:1984
-assert_return(() => call($13, "load8_u", [20894]), 0);
-
-// memory_copy.wast:1985
-assert_return(() => call($13, "load8_u", [21093]), 0);
-
-// memory_copy.wast:1986
-assert_return(() => call($13, "load8_u", [21292]), 0);
-
-// memory_copy.wast:1987
-assert_return(() => call($13, "load8_u", [21491]), 0);
-
-// memory_copy.wast:1988
-assert_return(() => call($13, "load8_u", [21690]), 0);
-
-// memory_copy.wast:1989
-assert_return(() => call($13, "load8_u", [21889]), 0);
-
-// memory_copy.wast:1990
-assert_return(() => call($13, "load8_u", [22088]), 0);
-
-// memory_copy.wast:1991
-assert_return(() => call($13, "load8_u", [22287]), 0);
-
-// memory_copy.wast:1992
-assert_return(() => call($13, "load8_u", [22486]), 0);
-
-// memory_copy.wast:1993
-assert_return(() => call($13, "load8_u", [22685]), 0);
-
-// memory_copy.wast:1994
-assert_return(() => call($13, "load8_u", [22884]), 0);
-
-// memory_copy.wast:1995
-assert_return(() => call($13, "load8_u", [23083]), 0);
-
-// memory_copy.wast:1996
-assert_return(() => call($13, "load8_u", [23282]), 0);
-
-// memory_copy.wast:1997
-assert_return(() => call($13, "load8_u", [23481]), 0);
-
-// memory_copy.wast:1998
-assert_return(() => call($13, "load8_u", [23680]), 0);
-
-// memory_copy.wast:1999
-assert_return(() => call($13, "load8_u", [23879]), 0);
-
-// memory_copy.wast:2000
-assert_return(() => call($13, "load8_u", [24078]), 0);
-
-// memory_copy.wast:2001
-assert_return(() => call($13, "load8_u", [24277]), 0);
-
-// memory_copy.wast:2002
-assert_return(() => call($13, "load8_u", [24476]), 0);
-
-// memory_copy.wast:2003
-assert_return(() => call($13, "load8_u", [24675]), 0);
-
-// memory_copy.wast:2004
-assert_return(() => call($13, "load8_u", [24874]), 0);
-
-// memory_copy.wast:2005
-assert_return(() => call($13, "load8_u", [25073]), 0);
-
-// memory_copy.wast:2006
-assert_return(() => call($13, "load8_u", [25272]), 0);
-
-// memory_copy.wast:2007
-assert_return(() => call($13, "load8_u", [25471]), 0);
-
-// memory_copy.wast:2008
-assert_return(() => call($13, "load8_u", [25670]), 0);
-
-// memory_copy.wast:2009
-assert_return(() => call($13, "load8_u", [25869]), 0);
-
-// memory_copy.wast:2010
-assert_return(() => call($13, "load8_u", [26068]), 0);
-
-// memory_copy.wast:2011
-assert_return(() => call($13, "load8_u", [26267]), 0);
-
-// memory_copy.wast:2012
-assert_return(() => call($13, "load8_u", [26466]), 0);
-
-// memory_copy.wast:2013
-assert_return(() => call($13, "load8_u", [26665]), 0);
-
-// memory_copy.wast:2014
-assert_return(() => call($13, "load8_u", [26864]), 0);
-
-// memory_copy.wast:2015
-assert_return(() => call($13, "load8_u", [27063]), 0);
-
-// memory_copy.wast:2016
-assert_return(() => call($13, "load8_u", [27262]), 0);
-
-// memory_copy.wast:2017
-assert_return(() => call($13, "load8_u", [27461]), 0);
-
-// memory_copy.wast:2018
-assert_return(() => call($13, "load8_u", [27660]), 0);
-
-// memory_copy.wast:2019
-assert_return(() => call($13, "load8_u", [27859]), 0);
-
-// memory_copy.wast:2020
-assert_return(() => call($13, "load8_u", [28058]), 0);
-
-// memory_copy.wast:2021
-assert_return(() => call($13, "load8_u", [28257]), 0);
-
-// memory_copy.wast:2022
-assert_return(() => call($13, "load8_u", [28456]), 0);
-
-// memory_copy.wast:2023
-assert_return(() => call($13, "load8_u", [28655]), 0);
-
-// memory_copy.wast:2024
-assert_return(() => call($13, "load8_u", [28854]), 0);
-
-// memory_copy.wast:2025
-assert_return(() => call($13, "load8_u", [29053]), 0);
-
-// memory_copy.wast:2026
-assert_return(() => call($13, "load8_u", [29252]), 0);
-
-// memory_copy.wast:2027
-assert_return(() => call($13, "load8_u", [29451]), 0);
-
-// memory_copy.wast:2028
-assert_return(() => call($13, "load8_u", [29650]), 0);
-
-// memory_copy.wast:2029
-assert_return(() => call($13, "load8_u", [29849]), 0);
-
-// memory_copy.wast:2030
-assert_return(() => call($13, "load8_u", [30048]), 0);
-
-// memory_copy.wast:2031
-assert_return(() => call($13, "load8_u", [30247]), 0);
-
-// memory_copy.wast:2032
-assert_return(() => call($13, "load8_u", [30446]), 0);
-
-// memory_copy.wast:2033
-assert_return(() => call($13, "load8_u", [30645]), 0);
-
-// memory_copy.wast:2034
-assert_return(() => call($13, "load8_u", [30844]), 0);
-
-// memory_copy.wast:2035
-assert_return(() => call($13, "load8_u", [31043]), 0);
-
-// memory_copy.wast:2036
-assert_return(() => call($13, "load8_u", [31242]), 0);
-
-// memory_copy.wast:2037
-assert_return(() => call($13, "load8_u", [31441]), 0);
-
-// memory_copy.wast:2038
-assert_return(() => call($13, "load8_u", [31640]), 0);
-
-// memory_copy.wast:2039
-assert_return(() => call($13, "load8_u", [31839]), 0);
-
-// memory_copy.wast:2040
-assert_return(() => call($13, "load8_u", [32038]), 0);
-
-// memory_copy.wast:2041
-assert_return(() => call($13, "load8_u", [32237]), 0);
-
-// memory_copy.wast:2042
-assert_return(() => call($13, "load8_u", [32436]), 0);
-
-// memory_copy.wast:2043
-assert_return(() => call($13, "load8_u", [32635]), 0);
-
-// memory_copy.wast:2044
-assert_return(() => call($13, "load8_u", [32834]), 0);
-
-// memory_copy.wast:2045
-assert_return(() => call($13, "load8_u", [33033]), 0);
-
-// memory_copy.wast:2046
-assert_return(() => call($13, "load8_u", [33232]), 0);
-
-// memory_copy.wast:2047
-assert_return(() => call($13, "load8_u", [33431]), 0);
-
-// memory_copy.wast:2048
-assert_return(() => call($13, "load8_u", [33630]), 0);
-
-// memory_copy.wast:2049
-assert_return(() => call($13, "load8_u", [33829]), 0);
-
-// memory_copy.wast:2050
-assert_return(() => call($13, "load8_u", [34028]), 0);
-
-// memory_copy.wast:2051
-assert_return(() => call($13, "load8_u", [34227]), 0);
-
-// memory_copy.wast:2052
-assert_return(() => call($13, "load8_u", [34426]), 0);
-
-// memory_copy.wast:2053
-assert_return(() => call($13, "load8_u", [34625]), 0);
-
-// memory_copy.wast:2054
-assert_return(() => call($13, "load8_u", [34824]), 0);
-
-// memory_copy.wast:2055
-assert_return(() => call($13, "load8_u", [35023]), 0);
-
-// memory_copy.wast:2056
-assert_return(() => call($13, "load8_u", [35222]), 0);
-
-// memory_copy.wast:2057
-assert_return(() => call($13, "load8_u", [35421]), 0);
-
-// memory_copy.wast:2058
-assert_return(() => call($13, "load8_u", [35620]), 0);
-
-// memory_copy.wast:2059
-assert_return(() => call($13, "load8_u", [35819]), 0);
-
-// memory_copy.wast:2060
-assert_return(() => call($13, "load8_u", [36018]), 0);
-
-// memory_copy.wast:2061
-assert_return(() => call($13, "load8_u", [36217]), 0);
-
-// memory_copy.wast:2062
-assert_return(() => call($13, "load8_u", [36416]), 0);
-
-// memory_copy.wast:2063
-assert_return(() => call($13, "load8_u", [36615]), 0);
-
-// memory_copy.wast:2064
-assert_return(() => call($13, "load8_u", [36814]), 0);
-
-// memory_copy.wast:2065
-assert_return(() => call($13, "load8_u", [37013]), 0);
-
-// memory_copy.wast:2066
-assert_return(() => call($13, "load8_u", [37212]), 0);
-
-// memory_copy.wast:2067
-assert_return(() => call($13, "load8_u", [37411]), 0);
-
-// memory_copy.wast:2068
-assert_return(() => call($13, "load8_u", [37610]), 0);
-
-// memory_copy.wast:2069
-assert_return(() => call($13, "load8_u", [37809]), 0);
-
-// memory_copy.wast:2070
-assert_return(() => call($13, "load8_u", [38008]), 0);
-
-// memory_copy.wast:2071
-assert_return(() => call($13, "load8_u", [38207]), 0);
-
-// memory_copy.wast:2072
-assert_return(() => call($13, "load8_u", [38406]), 0);
-
-// memory_copy.wast:2073
-assert_return(() => call($13, "load8_u", [38605]), 0);
-
-// memory_copy.wast:2074
-assert_return(() => call($13, "load8_u", [38804]), 0);
-
-// memory_copy.wast:2075
-assert_return(() => call($13, "load8_u", [39003]), 0);
-
-// memory_copy.wast:2076
-assert_return(() => call($13, "load8_u", [39202]), 0);
-
-// memory_copy.wast:2077
-assert_return(() => call($13, "load8_u", [39401]), 0);
-
-// memory_copy.wast:2078
-assert_return(() => call($13, "load8_u", [39600]), 0);
-
-// memory_copy.wast:2079
-assert_return(() => call($13, "load8_u", [39799]), 0);
-
-// memory_copy.wast:2080
-assert_return(() => call($13, "load8_u", [39998]), 0);
-
-// memory_copy.wast:2081
-assert_return(() => call($13, "load8_u", [40197]), 0);
-
-// memory_copy.wast:2082
-assert_return(() => call($13, "load8_u", [40396]), 0);
-
-// memory_copy.wast:2083
-assert_return(() => call($13, "load8_u", [40595]), 0);
-
-// memory_copy.wast:2084
-assert_return(() => call($13, "load8_u", [40794]), 0);
-
-// memory_copy.wast:2085
-assert_return(() => call($13, "load8_u", [40993]), 0);
-
-// memory_copy.wast:2086
-assert_return(() => call($13, "load8_u", [41192]), 0);
-
-// memory_copy.wast:2087
-assert_return(() => call($13, "load8_u", [41391]), 0);
-
-// memory_copy.wast:2088
-assert_return(() => call($13, "load8_u", [41590]), 0);
-
-// memory_copy.wast:2089
-assert_return(() => call($13, "load8_u", [41789]), 0);
-
-// memory_copy.wast:2090
-assert_return(() => call($13, "load8_u", [41988]), 0);
-
-// memory_copy.wast:2091
-assert_return(() => call($13, "load8_u", [42187]), 0);
-
-// memory_copy.wast:2092
-assert_return(() => call($13, "load8_u", [42386]), 0);
-
-// memory_copy.wast:2093
-assert_return(() => call($13, "load8_u", [42585]), 0);
-
-// memory_copy.wast:2094
-assert_return(() => call($13, "load8_u", [42784]), 0);
-
-// memory_copy.wast:2095
-assert_return(() => call($13, "load8_u", [42983]), 0);
-
-// memory_copy.wast:2096
-assert_return(() => call($13, "load8_u", [43182]), 0);
-
-// memory_copy.wast:2097
-assert_return(() => call($13, "load8_u", [43381]), 0);
-
-// memory_copy.wast:2098
-assert_return(() => call($13, "load8_u", [43580]), 0);
-
-// memory_copy.wast:2099
-assert_return(() => call($13, "load8_u", [43779]), 0);
-
-// memory_copy.wast:2100
-assert_return(() => call($13, "load8_u", [43978]), 0);
-
-// memory_copy.wast:2101
-assert_return(() => call($13, "load8_u", [44177]), 0);
-
-// memory_copy.wast:2102
-assert_return(() => call($13, "load8_u", [44376]), 0);
-
-// memory_copy.wast:2103
-assert_return(() => call($13, "load8_u", [44575]), 0);
-
-// memory_copy.wast:2104
-assert_return(() => call($13, "load8_u", [44774]), 0);
-
-// memory_copy.wast:2105
-assert_return(() => call($13, "load8_u", [44973]), 0);
-
-// memory_copy.wast:2106
-assert_return(() => call($13, "load8_u", [45172]), 0);
-
-// memory_copy.wast:2107
-assert_return(() => call($13, "load8_u", [45371]), 0);
-
-// memory_copy.wast:2108
-assert_return(() => call($13, "load8_u", [45570]), 0);
-
-// memory_copy.wast:2109
-assert_return(() => call($13, "load8_u", [45769]), 0);
-
-// memory_copy.wast:2110
-assert_return(() => call($13, "load8_u", [45968]), 0);
-
-// memory_copy.wast:2111
-assert_return(() => call($13, "load8_u", [46167]), 0);
-
-// memory_copy.wast:2112
-assert_return(() => call($13, "load8_u", [46366]), 0);
-
-// memory_copy.wast:2113
-assert_return(() => call($13, "load8_u", [46565]), 0);
-
-// memory_copy.wast:2114
-assert_return(() => call($13, "load8_u", [46764]), 0);
-
-// memory_copy.wast:2115
-assert_return(() => call($13, "load8_u", [46963]), 0);
-
-// memory_copy.wast:2116
-assert_return(() => call($13, "load8_u", [47162]), 0);
-
-// memory_copy.wast:2117
-assert_return(() => call($13, "load8_u", [47361]), 0);
-
-// memory_copy.wast:2118
-assert_return(() => call($13, "load8_u", [47560]), 0);
-
-// memory_copy.wast:2119
-assert_return(() => call($13, "load8_u", [47759]), 0);
-
-// memory_copy.wast:2120
-assert_return(() => call($13, "load8_u", [47958]), 0);
-
-// memory_copy.wast:2121
-assert_return(() => call($13, "load8_u", [48157]), 0);
-
-// memory_copy.wast:2122
-assert_return(() => call($13, "load8_u", [48356]), 0);
-
-// memory_copy.wast:2123
-assert_return(() => call($13, "load8_u", [48555]), 0);
-
-// memory_copy.wast:2124
-assert_return(() => call($13, "load8_u", [48754]), 0);
-
-// memory_copy.wast:2125
-assert_return(() => call($13, "load8_u", [48953]), 0);
-
-// memory_copy.wast:2126
-assert_return(() => call($13, "load8_u", [49152]), 0);
-
-// memory_copy.wast:2127
-assert_return(() => call($13, "load8_u", [49351]), 0);
-
-// memory_copy.wast:2128
-assert_return(() => call($13, "load8_u", [49550]), 0);
-
-// memory_copy.wast:2129
-assert_return(() => call($13, "load8_u", [49749]), 0);
-
-// memory_copy.wast:2130
-assert_return(() => call($13, "load8_u", [49948]), 0);
-
-// memory_copy.wast:2131
-assert_return(() => call($13, "load8_u", [50147]), 0);
-
-// memory_copy.wast:2132
-assert_return(() => call($13, "load8_u", [50346]), 0);
-
-// memory_copy.wast:2133
-assert_return(() => call($13, "load8_u", [50545]), 0);
-
-// memory_copy.wast:2134
-assert_return(() => call($13, "load8_u", [50744]), 0);
-
-// memory_copy.wast:2135
-assert_return(() => call($13, "load8_u", [50943]), 0);
-
-// memory_copy.wast:2136
-assert_return(() => call($13, "load8_u", [51142]), 0);
-
-// memory_copy.wast:2137
-assert_return(() => call($13, "load8_u", [51341]), 0);
-
-// memory_copy.wast:2138
-assert_return(() => call($13, "load8_u", [51540]), 0);
-
-// memory_copy.wast:2139
-assert_return(() => call($13, "load8_u", [51739]), 0);
-
-// memory_copy.wast:2140
-assert_return(() => call($13, "load8_u", [51938]), 0);
-
-// memory_copy.wast:2141
-assert_return(() => call($13, "load8_u", [52137]), 0);
-
-// memory_copy.wast:2142
-assert_return(() => call($13, "load8_u", [52336]), 0);
-
-// memory_copy.wast:2143
-assert_return(() => call($13, "load8_u", [52535]), 0);
-
-// memory_copy.wast:2144
-assert_return(() => call($13, "load8_u", [52734]), 0);
-
-// memory_copy.wast:2145
-assert_return(() => call($13, "load8_u", [52933]), 0);
-
-// memory_copy.wast:2146
-assert_return(() => call($13, "load8_u", [53132]), 0);
-
-// memory_copy.wast:2147
-assert_return(() => call($13, "load8_u", [53331]), 0);
-
-// memory_copy.wast:2148
-assert_return(() => call($13, "load8_u", [53530]), 0);
-
-// memory_copy.wast:2149
-assert_return(() => call($13, "load8_u", [53729]), 0);
-
-// memory_copy.wast:2150
-assert_return(() => call($13, "load8_u", [53928]), 0);
-
-// memory_copy.wast:2151
-assert_return(() => call($13, "load8_u", [54127]), 0);
-
-// memory_copy.wast:2152
-assert_return(() => call($13, "load8_u", [54326]), 0);
-
-// memory_copy.wast:2153
-assert_return(() => call($13, "load8_u", [54525]), 0);
-
-// memory_copy.wast:2154
-assert_return(() => call($13, "load8_u", [54724]), 0);
-
-// memory_copy.wast:2155
-assert_return(() => call($13, "load8_u", [54923]), 0);
-
-// memory_copy.wast:2156
-assert_return(() => call($13, "load8_u", [55122]), 0);
-
-// memory_copy.wast:2157
-assert_return(() => call($13, "load8_u", [55321]), 0);
-
-// memory_copy.wast:2158
-assert_return(() => call($13, "load8_u", [55520]), 0);
-
-// memory_copy.wast:2159
-assert_return(() => call($13, "load8_u", [55719]), 0);
-
-// memory_copy.wast:2160
-assert_return(() => call($13, "load8_u", [55918]), 0);
-
-// memory_copy.wast:2161
-assert_return(() => call($13, "load8_u", [56117]), 0);
-
-// memory_copy.wast:2162
-assert_return(() => call($13, "load8_u", [56316]), 0);
-
-// memory_copy.wast:2163
-assert_return(() => call($13, "load8_u", [56515]), 0);
-
-// memory_copy.wast:2164
-assert_return(() => call($13, "load8_u", [56714]), 0);
-
-// memory_copy.wast:2165
-assert_return(() => call($13, "load8_u", [56913]), 0);
-
-// memory_copy.wast:2166
-assert_return(() => call($13, "load8_u", [57112]), 0);
-
-// memory_copy.wast:2167
-assert_return(() => call($13, "load8_u", [57311]), 0);
-
-// memory_copy.wast:2168
-assert_return(() => call($13, "load8_u", [57510]), 0);
-
-// memory_copy.wast:2169
-assert_return(() => call($13, "load8_u", [57709]), 0);
-
-// memory_copy.wast:2170
-assert_return(() => call($13, "load8_u", [57908]), 0);
-
-// memory_copy.wast:2171
-assert_return(() => call($13, "load8_u", [58107]), 0);
-
-// memory_copy.wast:2172
-assert_return(() => call($13, "load8_u", [58306]), 0);
-
-// memory_copy.wast:2173
-assert_return(() => call($13, "load8_u", [58505]), 0);
-
-// memory_copy.wast:2174
-assert_return(() => call($13, "load8_u", [58704]), 0);
-
-// memory_copy.wast:2175
-assert_return(() => call($13, "load8_u", [58903]), 0);
-
-// memory_copy.wast:2176
-assert_return(() => call($13, "load8_u", [59102]), 0);
-
-// memory_copy.wast:2177
-assert_return(() => call($13, "load8_u", [59301]), 0);
-
-// memory_copy.wast:2178
-assert_return(() => call($13, "load8_u", [59500]), 0);
-
-// memory_copy.wast:2179
-assert_return(() => call($13, "load8_u", [59699]), 0);
-
-// memory_copy.wast:2180
-assert_return(() => call($13, "load8_u", [59898]), 0);
-
-// memory_copy.wast:2181
-assert_return(() => call($13, "load8_u", [60097]), 0);
-
-// memory_copy.wast:2182
-assert_return(() => call($13, "load8_u", [60296]), 0);
-
-// memory_copy.wast:2183
-assert_return(() => call($13, "load8_u", [60495]), 0);
-
-// memory_copy.wast:2184
-assert_return(() => call($13, "load8_u", [60694]), 0);
-
-// memory_copy.wast:2185
-assert_return(() => call($13, "load8_u", [60893]), 0);
-
-// memory_copy.wast:2186
-assert_return(() => call($13, "load8_u", [61092]), 0);
-
-// memory_copy.wast:2187
-assert_return(() => call($13, "load8_u", [61291]), 0);
-
-// memory_copy.wast:2188
-assert_return(() => call($13, "load8_u", [61490]), 0);
-
-// memory_copy.wast:2189
-assert_return(() => call($13, "load8_u", [61689]), 0);
-
-// memory_copy.wast:2190
-assert_return(() => call($13, "load8_u", [61888]), 0);
-
-// memory_copy.wast:2191
-assert_return(() => call($13, "load8_u", [62087]), 0);
-
-// memory_copy.wast:2192
-assert_return(() => call($13, "load8_u", [62286]), 0);
-
-// memory_copy.wast:2193
-assert_return(() => call($13, "load8_u", [62485]), 0);
-
-// memory_copy.wast:2194
-assert_return(() => call($13, "load8_u", [62684]), 0);
-
-// memory_copy.wast:2195
-assert_return(() => call($13, "load8_u", [62883]), 0);
-
-// memory_copy.wast:2196
-assert_return(() => call($13, "load8_u", [63082]), 0);
-
-// memory_copy.wast:2197
-assert_return(() => call($13, "load8_u", [63281]), 0);
-
-// memory_copy.wast:2198
-assert_return(() => call($13, "load8_u", [63480]), 0);
-
-// memory_copy.wast:2199
-assert_return(() => call($13, "load8_u", [63679]), 0);
-
-// memory_copy.wast:2200
-assert_return(() => call($13, "load8_u", [63878]), 0);
-
-// memory_copy.wast:2201
-assert_return(() => call($13, "load8_u", [64077]), 0);
-
-// memory_copy.wast:2202
-assert_return(() => call($13, "load8_u", [64276]), 0);
-
-// memory_copy.wast:2203
-assert_return(() => call($13, "load8_u", [64475]), 0);
-
-// memory_copy.wast:2204
-assert_return(() => call($13, "load8_u", [64674]), 0);
-
-// memory_copy.wast:2205
-assert_return(() => call($13, "load8_u", [64873]), 0);
-
-// memory_copy.wast:2206
-assert_return(() => call($13, "load8_u", [65072]), 0);
-
-// memory_copy.wast:2207
-assert_return(() => call($13, "load8_u", [65271]), 0);
-
-// memory_copy.wast:2208
-assert_return(() => call($13, "load8_u", [65470]), 0);
-
-// memory_copy.wast:2209
-assert_return(() => call($13, "load8_u", [65486]), 0);
-
-// memory_copy.wast:2210
-assert_return(() => call($13, "load8_u", [65487]), 1);
-
-// memory_copy.wast:2211
-assert_return(() => call($13, "load8_u", [65488]), 2);
-
-// memory_copy.wast:2212
-assert_return(() => call($13, "load8_u", [65489]), 3);
-
-// memory_copy.wast:2213
-assert_return(() => call($13, "load8_u", [65490]), 4);
-
-// memory_copy.wast:2214
-assert_return(() => call($13, "load8_u", [65491]), 5);
-
-// memory_copy.wast:2215
-assert_return(() => call($13, "load8_u", [65492]), 6);
-
-// memory_copy.wast:2216
-assert_return(() => call($13, "load8_u", [65493]), 7);
-
-// memory_copy.wast:2217
-assert_return(() => call($13, "load8_u", [65494]), 8);
-
-// memory_copy.wast:2218
-assert_return(() => call($13, "load8_u", [65495]), 9);
-
-// memory_copy.wast:2219
-assert_return(() => call($13, "load8_u", [65496]), 10);
-
-// memory_copy.wast:2220
-assert_return(() => call($13, "load8_u", [65497]), 11);
-
-// memory_copy.wast:2221
-assert_return(() => call($13, "load8_u", [65498]), 12);
-
-// memory_copy.wast:2222
-assert_return(() => call($13, "load8_u", [65499]), 13);
-
-// memory_copy.wast:2223
-assert_return(() => call($13, "load8_u", [65500]), 14);
-
-// memory_copy.wast:2224
-assert_return(() => call($13, "load8_u", [65501]), 15);
-
-// memory_copy.wast:2225
-assert_return(() => call($13, "load8_u", [65502]), 16);
-
-// memory_copy.wast:2226
-assert_return(() => call($13, "load8_u", [65503]), 17);
-
-// memory_copy.wast:2227
-assert_return(() => call($13, "load8_u", [65504]), 18);
-
-// memory_copy.wast:2228
-assert_return(() => call($13, "load8_u", [65505]), 19);
-
-// memory_copy.wast:2230
-let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:2238
-assert_trap(() => call($14, "run", [65486, 65516, 40]));
-
-// memory_copy.wast:2241
-assert_return(() => call($14, "load8_u", [198]), 0);
-
-// memory_copy.wast:2242
-assert_return(() => call($14, "load8_u", [397]), 0);
-
-// memory_copy.wast:2243
-assert_return(() => call($14, "load8_u", [596]), 0);
-
-// memory_copy.wast:2244
-assert_return(() => call($14, "load8_u", [795]), 0);
-
-// memory_copy.wast:2245
-assert_return(() => call($14, "load8_u", [994]), 0);
-
-// memory_copy.wast:2246
-assert_return(() => call($14, "load8_u", [1193]), 0);
-
-// memory_copy.wast:2247
-assert_return(() => call($14, "load8_u", [1392]), 0);
-
-// memory_copy.wast:2248
-assert_return(() => call($14, "load8_u", [1591]), 0);
-
-// memory_copy.wast:2249
-assert_return(() => call($14, "load8_u", [1790]), 0);
-
-// memory_copy.wast:2250
-assert_return(() => call($14, "load8_u", [1989]), 0);
-
-// memory_copy.wast:2251
-assert_return(() => call($14, "load8_u", [2188]), 0);
-
-// memory_copy.wast:2252
-assert_return(() => call($14, "load8_u", [2387]), 0);
-
-// memory_copy.wast:2253
-assert_return(() => call($14, "load8_u", [2586]), 0);
-
-// memory_copy.wast:2254
-assert_return(() => call($14, "load8_u", [2785]), 0);
-
-// memory_copy.wast:2255
-assert_return(() => call($14, "load8_u", [2984]), 0);
-
-// memory_copy.wast:2256
-assert_return(() => call($14, "load8_u", [3183]), 0);
-
-// memory_copy.wast:2257
-assert_return(() => call($14, "load8_u", [3382]), 0);
-
-// memory_copy.wast:2258
-assert_return(() => call($14, "load8_u", [3581]), 0);
-
-// memory_copy.wast:2259
-assert_return(() => call($14, "load8_u", [3780]), 0);
-
-// memory_copy.wast:2260
-assert_return(() => call($14, "load8_u", [3979]), 0);
-
-// memory_copy.wast:2261
-assert_return(() => call($14, "load8_u", [4178]), 0);
-
-// memory_copy.wast:2262
-assert_return(() => call($14, "load8_u", [4377]), 0);
-
-// memory_copy.wast:2263
-assert_return(() => call($14, "load8_u", [4576]), 0);
-
-// memory_copy.wast:2264
-assert_return(() => call($14, "load8_u", [4775]), 0);
-
-// memory_copy.wast:2265
-assert_return(() => call($14, "load8_u", [4974]), 0);
-
-// memory_copy.wast:2266
-assert_return(() => call($14, "load8_u", [5173]), 0);
-
-// memory_copy.wast:2267
-assert_return(() => call($14, "load8_u", [5372]), 0);
-
-// memory_copy.wast:2268
-assert_return(() => call($14, "load8_u", [5571]), 0);
-
-// memory_copy.wast:2269
-assert_return(() => call($14, "load8_u", [5770]), 0);
-
-// memory_copy.wast:2270
-assert_return(() => call($14, "load8_u", [5969]), 0);
-
-// memory_copy.wast:2271
-assert_return(() => call($14, "load8_u", [6168]), 0);
-
-// memory_copy.wast:2272
-assert_return(() => call($14, "load8_u", [6367]), 0);
-
-// memory_copy.wast:2273
-assert_return(() => call($14, "load8_u", [6566]), 0);
-
-// memory_copy.wast:2274
-assert_return(() => call($14, "load8_u", [6765]), 0);
-
-// memory_copy.wast:2275
-assert_return(() => call($14, "load8_u", [6964]), 0);
-
-// memory_copy.wast:2276
-assert_return(() => call($14, "load8_u", [7163]), 0);
-
-// memory_copy.wast:2277
-assert_return(() => call($14, "load8_u", [7362]), 0);
-
-// memory_copy.wast:2278
-assert_return(() => call($14, "load8_u", [7561]), 0);
-
-// memory_copy.wast:2279
-assert_return(() => call($14, "load8_u", [7760]), 0);
-
-// memory_copy.wast:2280
-assert_return(() => call($14, "load8_u", [7959]), 0);
-
-// memory_copy.wast:2281
-assert_return(() => call($14, "load8_u", [8158]), 0);
-
-// memory_copy.wast:2282
-assert_return(() => call($14, "load8_u", [8357]), 0);
-
-// memory_copy.wast:2283
-assert_return(() => call($14, "load8_u", [8556]), 0);
-
-// memory_copy.wast:2284
-assert_return(() => call($14, "load8_u", [8755]), 0);
-
-// memory_copy.wast:2285
-assert_return(() => call($14, "load8_u", [8954]), 0);
-
-// memory_copy.wast:2286
-assert_return(() => call($14, "load8_u", [9153]), 0);
-
-// memory_copy.wast:2287
-assert_return(() => call($14, "load8_u", [9352]), 0);
-
-// memory_copy.wast:2288
-assert_return(() => call($14, "load8_u", [9551]), 0);
-
-// memory_copy.wast:2289
-assert_return(() => call($14, "load8_u", [9750]), 0);
-
-// memory_copy.wast:2290
-assert_return(() => call($14, "load8_u", [9949]), 0);
-
-// memory_copy.wast:2291
-assert_return(() => call($14, "load8_u", [10148]), 0);
-
-// memory_copy.wast:2292
-assert_return(() => call($14, "load8_u", [10347]), 0);
-
-// memory_copy.wast:2293
-assert_return(() => call($14, "load8_u", [10546]), 0);
-
-// memory_copy.wast:2294
-assert_return(() => call($14, "load8_u", [10745]), 0);
-
-// memory_copy.wast:2295
-assert_return(() => call($14, "load8_u", [10944]), 0);
-
-// memory_copy.wast:2296
-assert_return(() => call($14, "load8_u", [11143]), 0);
-
-// memory_copy.wast:2297
-assert_return(() => call($14, "load8_u", [11342]), 0);
-
-// memory_copy.wast:2298
-assert_return(() => call($14, "load8_u", [11541]), 0);
-
-// memory_copy.wast:2299
-assert_return(() => call($14, "load8_u", [11740]), 0);
-
-// memory_copy.wast:2300
-assert_return(() => call($14, "load8_u", [11939]), 0);
-
-// memory_copy.wast:2301
-assert_return(() => call($14, "load8_u", [12138]), 0);
-
-// memory_copy.wast:2302
-assert_return(() => call($14, "load8_u", [12337]), 0);
-
-// memory_copy.wast:2303
-assert_return(() => call($14, "load8_u", [12536]), 0);
-
-// memory_copy.wast:2304
-assert_return(() => call($14, "load8_u", [12735]), 0);
-
-// memory_copy.wast:2305
-assert_return(() => call($14, "load8_u", [12934]), 0);
-
-// memory_copy.wast:2306
-assert_return(() => call($14, "load8_u", [13133]), 0);
-
-// memory_copy.wast:2307
-assert_return(() => call($14, "load8_u", [13332]), 0);
-
-// memory_copy.wast:2308
-assert_return(() => call($14, "load8_u", [13531]), 0);
-
-// memory_copy.wast:2309
-assert_return(() => call($14, "load8_u", [13730]), 0);
-
-// memory_copy.wast:2310
-assert_return(() => call($14, "load8_u", [13929]), 0);
-
-// memory_copy.wast:2311
-assert_return(() => call($14, "load8_u", [14128]), 0);
-
-// memory_copy.wast:2312
-assert_return(() => call($14, "load8_u", [14327]), 0);
-
-// memory_copy.wast:2313
-assert_return(() => call($14, "load8_u", [14526]), 0);
-
-// memory_copy.wast:2314
-assert_return(() => call($14, "load8_u", [14725]), 0);
-
-// memory_copy.wast:2315
-assert_return(() => call($14, "load8_u", [14924]), 0);
-
-// memory_copy.wast:2316
-assert_return(() => call($14, "load8_u", [15123]), 0);
-
-// memory_copy.wast:2317
-assert_return(() => call($14, "load8_u", [15322]), 0);
-
-// memory_copy.wast:2318
-assert_return(() => call($14, "load8_u", [15521]), 0);
-
-// memory_copy.wast:2319
-assert_return(() => call($14, "load8_u", [15720]), 0);
-
-// memory_copy.wast:2320
-assert_return(() => call($14, "load8_u", [15919]), 0);
-
-// memory_copy.wast:2321
-assert_return(() => call($14, "load8_u", [16118]), 0);
-
-// memory_copy.wast:2322
-assert_return(() => call($14, "load8_u", [16317]), 0);
-
-// memory_copy.wast:2323
-assert_return(() => call($14, "load8_u", [16516]), 0);
-
-// memory_copy.wast:2324
-assert_return(() => call($14, "load8_u", [16715]), 0);
-
-// memory_copy.wast:2325
-assert_return(() => call($14, "load8_u", [16914]), 0);
-
-// memory_copy.wast:2326
-assert_return(() => call($14, "load8_u", [17113]), 0);
-
-// memory_copy.wast:2327
-assert_return(() => call($14, "load8_u", [17312]), 0);
-
-// memory_copy.wast:2328
-assert_return(() => call($14, "load8_u", [17511]), 0);
-
-// memory_copy.wast:2329
-assert_return(() => call($14, "load8_u", [17710]), 0);
-
-// memory_copy.wast:2330
-assert_return(() => call($14, "load8_u", [17909]), 0);
-
-// memory_copy.wast:2331
-assert_return(() => call($14, "load8_u", [18108]), 0);
-
-// memory_copy.wast:2332
-assert_return(() => call($14, "load8_u", [18307]), 0);
-
-// memory_copy.wast:2333
-assert_return(() => call($14, "load8_u", [18506]), 0);
-
-// memory_copy.wast:2334
-assert_return(() => call($14, "load8_u", [18705]), 0);
-
-// memory_copy.wast:2335
-assert_return(() => call($14, "load8_u", [18904]), 0);
-
-// memory_copy.wast:2336
-assert_return(() => call($14, "load8_u", [19103]), 0);
-
-// memory_copy.wast:2337
-assert_return(() => call($14, "load8_u", [19302]), 0);
-
-// memory_copy.wast:2338
-assert_return(() => call($14, "load8_u", [19501]), 0);
-
-// memory_copy.wast:2339
-assert_return(() => call($14, "load8_u", [19700]), 0);
-
-// memory_copy.wast:2340
-assert_return(() => call($14, "load8_u", [19899]), 0);
-
-// memory_copy.wast:2341
-assert_return(() => call($14, "load8_u", [20098]), 0);
-
-// memory_copy.wast:2342
-assert_return(() => call($14, "load8_u", [20297]), 0);
-
-// memory_copy.wast:2343
-assert_return(() => call($14, "load8_u", [20496]), 0);
-
-// memory_copy.wast:2344
-assert_return(() => call($14, "load8_u", [20695]), 0);
-
-// memory_copy.wast:2345
-assert_return(() => call($14, "load8_u", [20894]), 0);
-
-// memory_copy.wast:2346
-assert_return(() => call($14, "load8_u", [21093]), 0);
-
-// memory_copy.wast:2347
-assert_return(() => call($14, "load8_u", [21292]), 0);
-
-// memory_copy.wast:2348
-assert_return(() => call($14, "load8_u", [21491]), 0);
-
-// memory_copy.wast:2349
-assert_return(() => call($14, "load8_u", [21690]), 0);
-
-// memory_copy.wast:2350
-assert_return(() => call($14, "load8_u", [21889]), 0);
-
-// memory_copy.wast:2351
-assert_return(() => call($14, "load8_u", [22088]), 0);
-
-// memory_copy.wast:2352
-assert_return(() => call($14, "load8_u", [22287]), 0);
-
-// memory_copy.wast:2353
-assert_return(() => call($14, "load8_u", [22486]), 0);
-
-// memory_copy.wast:2354
-assert_return(() => call($14, "load8_u", [22685]), 0);
-
-// memory_copy.wast:2355
-assert_return(() => call($14, "load8_u", [22884]), 0);
-
-// memory_copy.wast:2356
-assert_return(() => call($14, "load8_u", [23083]), 0);
-
-// memory_copy.wast:2357
-assert_return(() => call($14, "load8_u", [23282]), 0);
-
-// memory_copy.wast:2358
-assert_return(() => call($14, "load8_u", [23481]), 0);
-
-// memory_copy.wast:2359
-assert_return(() => call($14, "load8_u", [23680]), 0);
-
-// memory_copy.wast:2360
-assert_return(() => call($14, "load8_u", [23879]), 0);
-
-// memory_copy.wast:2361
-assert_return(() => call($14, "load8_u", [24078]), 0);
-
-// memory_copy.wast:2362
-assert_return(() => call($14, "load8_u", [24277]), 0);
-
-// memory_copy.wast:2363
-assert_return(() => call($14, "load8_u", [24476]), 0);
-
-// memory_copy.wast:2364
-assert_return(() => call($14, "load8_u", [24675]), 0);
-
-// memory_copy.wast:2365
-assert_return(() => call($14, "load8_u", [24874]), 0);
-
-// memory_copy.wast:2366
-assert_return(() => call($14, "load8_u", [25073]), 0);
-
-// memory_copy.wast:2367
-assert_return(() => call($14, "load8_u", [25272]), 0);
-
-// memory_copy.wast:2368
-assert_return(() => call($14, "load8_u", [25471]), 0);
-
-// memory_copy.wast:2369
-assert_return(() => call($14, "load8_u", [25670]), 0);
-
-// memory_copy.wast:2370
-assert_return(() => call($14, "load8_u", [25869]), 0);
-
-// memory_copy.wast:2371
-assert_return(() => call($14, "load8_u", [26068]), 0);
-
-// memory_copy.wast:2372
-assert_return(() => call($14, "load8_u", [26267]), 0);
-
-// memory_copy.wast:2373
-assert_return(() => call($14, "load8_u", [26466]), 0);
-
-// memory_copy.wast:2374
-assert_return(() => call($14, "load8_u", [26665]), 0);
-
-// memory_copy.wast:2375
-assert_return(() => call($14, "load8_u", [26864]), 0);
-
-// memory_copy.wast:2376
-assert_return(() => call($14, "load8_u", [27063]), 0);
-
-// memory_copy.wast:2377
-assert_return(() => call($14, "load8_u", [27262]), 0);
-
-// memory_copy.wast:2378
-assert_return(() => call($14, "load8_u", [27461]), 0);
-
-// memory_copy.wast:2379
-assert_return(() => call($14, "load8_u", [27660]), 0);
-
-// memory_copy.wast:2380
-assert_return(() => call($14, "load8_u", [27859]), 0);
-
-// memory_copy.wast:2381
-assert_return(() => call($14, "load8_u", [28058]), 0);
-
-// memory_copy.wast:2382
-assert_return(() => call($14, "load8_u", [28257]), 0);
-
-// memory_copy.wast:2383
-assert_return(() => call($14, "load8_u", [28456]), 0);
-
-// memory_copy.wast:2384
-assert_return(() => call($14, "load8_u", [28655]), 0);
-
-// memory_copy.wast:2385
-assert_return(() => call($14, "load8_u", [28854]), 0);
-
-// memory_copy.wast:2386
-assert_return(() => call($14, "load8_u", [29053]), 0);
-
-// memory_copy.wast:2387
-assert_return(() => call($14, "load8_u", [29252]), 0);
-
-// memory_copy.wast:2388
-assert_return(() => call($14, "load8_u", [29451]), 0);
-
-// memory_copy.wast:2389
-assert_return(() => call($14, "load8_u", [29650]), 0);
-
-// memory_copy.wast:2390
-assert_return(() => call($14, "load8_u", [29849]), 0);
-
-// memory_copy.wast:2391
-assert_return(() => call($14, "load8_u", [30048]), 0);
-
-// memory_copy.wast:2392
-assert_return(() => call($14, "load8_u", [30247]), 0);
-
-// memory_copy.wast:2393
-assert_return(() => call($14, "load8_u", [30446]), 0);
-
-// memory_copy.wast:2394
-assert_return(() => call($14, "load8_u", [30645]), 0);
-
-// memory_copy.wast:2395
-assert_return(() => call($14, "load8_u", [30844]), 0);
-
-// memory_copy.wast:2396
-assert_return(() => call($14, "load8_u", [31043]), 0);
-
-// memory_copy.wast:2397
-assert_return(() => call($14, "load8_u", [31242]), 0);
-
-// memory_copy.wast:2398
-assert_return(() => call($14, "load8_u", [31441]), 0);
-
-// memory_copy.wast:2399
-assert_return(() => call($14, "load8_u", [31640]), 0);
-
-// memory_copy.wast:2400
-assert_return(() => call($14, "load8_u", [31839]), 0);
-
-// memory_copy.wast:2401
-assert_return(() => call($14, "load8_u", [32038]), 0);
-
-// memory_copy.wast:2402
-assert_return(() => call($14, "load8_u", [32237]), 0);
-
-// memory_copy.wast:2403
-assert_return(() => call($14, "load8_u", [32436]), 0);
-
-// memory_copy.wast:2404
-assert_return(() => call($14, "load8_u", [32635]), 0);
-
-// memory_copy.wast:2405
-assert_return(() => call($14, "load8_u", [32834]), 0);
-
-// memory_copy.wast:2406
-assert_return(() => call($14, "load8_u", [33033]), 0);
-
-// memory_copy.wast:2407
-assert_return(() => call($14, "load8_u", [33232]), 0);
-
-// memory_copy.wast:2408
-assert_return(() => call($14, "load8_u", [33431]), 0);
-
-// memory_copy.wast:2409
-assert_return(() => call($14, "load8_u", [33630]), 0);
-
-// memory_copy.wast:2410
-assert_return(() => call($14, "load8_u", [33829]), 0);
-
-// memory_copy.wast:2411
-assert_return(() => call($14, "load8_u", [34028]), 0);
-
-// memory_copy.wast:2412
-assert_return(() => call($14, "load8_u", [34227]), 0);
-
-// memory_copy.wast:2413
-assert_return(() => call($14, "load8_u", [34426]), 0);
-
-// memory_copy.wast:2414
-assert_return(() => call($14, "load8_u", [34625]), 0);
-
-// memory_copy.wast:2415
-assert_return(() => call($14, "load8_u", [34824]), 0);
-
-// memory_copy.wast:2416
-assert_return(() => call($14, "load8_u", [35023]), 0);
-
-// memory_copy.wast:2417
-assert_return(() => call($14, "load8_u", [35222]), 0);
-
-// memory_copy.wast:2418
-assert_return(() => call($14, "load8_u", [35421]), 0);
-
-// memory_copy.wast:2419
-assert_return(() => call($14, "load8_u", [35620]), 0);
-
-// memory_copy.wast:2420
-assert_return(() => call($14, "load8_u", [35819]), 0);
-
-// memory_copy.wast:2421
-assert_return(() => call($14, "load8_u", [36018]), 0);
-
-// memory_copy.wast:2422
-assert_return(() => call($14, "load8_u", [36217]), 0);
-
-// memory_copy.wast:2423
-assert_return(() => call($14, "load8_u", [36416]), 0);
-
-// memory_copy.wast:2424
-assert_return(() => call($14, "load8_u", [36615]), 0);
-
-// memory_copy.wast:2425
-assert_return(() => call($14, "load8_u", [36814]), 0);
-
-// memory_copy.wast:2426
-assert_return(() => call($14, "load8_u", [37013]), 0);
-
-// memory_copy.wast:2427
-assert_return(() => call($14, "load8_u", [37212]), 0);
-
-// memory_copy.wast:2428
-assert_return(() => call($14, "load8_u", [37411]), 0);
-
-// memory_copy.wast:2429
-assert_return(() => call($14, "load8_u", [37610]), 0);
-
-// memory_copy.wast:2430
-assert_return(() => call($14, "load8_u", [37809]), 0);
-
-// memory_copy.wast:2431
-assert_return(() => call($14, "load8_u", [38008]), 0);
-
-// memory_copy.wast:2432
-assert_return(() => call($14, "load8_u", [38207]), 0);
-
-// memory_copy.wast:2433
-assert_return(() => call($14, "load8_u", [38406]), 0);
-
-// memory_copy.wast:2434
-assert_return(() => call($14, "load8_u", [38605]), 0);
-
-// memory_copy.wast:2435
-assert_return(() => call($14, "load8_u", [38804]), 0);
-
-// memory_copy.wast:2436
-assert_return(() => call($14, "load8_u", [39003]), 0);
-
-// memory_copy.wast:2437
-assert_return(() => call($14, "load8_u", [39202]), 0);
-
-// memory_copy.wast:2438
-assert_return(() => call($14, "load8_u", [39401]), 0);
-
-// memory_copy.wast:2439
-assert_return(() => call($14, "load8_u", [39600]), 0);
-
-// memory_copy.wast:2440
-assert_return(() => call($14, "load8_u", [39799]), 0);
-
-// memory_copy.wast:2441
-assert_return(() => call($14, "load8_u", [39998]), 0);
-
-// memory_copy.wast:2442
-assert_return(() => call($14, "load8_u", [40197]), 0);
-
-// memory_copy.wast:2443
-assert_return(() => call($14, "load8_u", [40396]), 0);
-
-// memory_copy.wast:2444
-assert_return(() => call($14, "load8_u", [40595]), 0);
-
-// memory_copy.wast:2445
-assert_return(() => call($14, "load8_u", [40794]), 0);
-
-// memory_copy.wast:2446
-assert_return(() => call($14, "load8_u", [40993]), 0);
-
-// memory_copy.wast:2447
-assert_return(() => call($14, "load8_u", [41192]), 0);
-
-// memory_copy.wast:2448
-assert_return(() => call($14, "load8_u", [41391]), 0);
-
-// memory_copy.wast:2449
-assert_return(() => call($14, "load8_u", [41590]), 0);
-
-// memory_copy.wast:2450
-assert_return(() => call($14, "load8_u", [41789]), 0);
-
-// memory_copy.wast:2451
-assert_return(() => call($14, "load8_u", [41988]), 0);
-
-// memory_copy.wast:2452
-assert_return(() => call($14, "load8_u", [42187]), 0);
-
-// memory_copy.wast:2453
-assert_return(() => call($14, "load8_u", [42386]), 0);
-
-// memory_copy.wast:2454
-assert_return(() => call($14, "load8_u", [42585]), 0);
-
-// memory_copy.wast:2455
-assert_return(() => call($14, "load8_u", [42784]), 0);
-
-// memory_copy.wast:2456
-assert_return(() => call($14, "load8_u", [42983]), 0);
-
-// memory_copy.wast:2457
-assert_return(() => call($14, "load8_u", [43182]), 0);
-
-// memory_copy.wast:2458
-assert_return(() => call($14, "load8_u", [43381]), 0);
-
-// memory_copy.wast:2459
-assert_return(() => call($14, "load8_u", [43580]), 0);
-
-// memory_copy.wast:2460
-assert_return(() => call($14, "load8_u", [43779]), 0);
-
-// memory_copy.wast:2461
-assert_return(() => call($14, "load8_u", [43978]), 0);
-
-// memory_copy.wast:2462
-assert_return(() => call($14, "load8_u", [44177]), 0);
-
-// memory_copy.wast:2463
-assert_return(() => call($14, "load8_u", [44376]), 0);
-
-// memory_copy.wast:2464
-assert_return(() => call($14, "load8_u", [44575]), 0);
-
-// memory_copy.wast:2465
-assert_return(() => call($14, "load8_u", [44774]), 0);
-
-// memory_copy.wast:2466
-assert_return(() => call($14, "load8_u", [44973]), 0);
-
-// memory_copy.wast:2467
-assert_return(() => call($14, "load8_u", [45172]), 0);
-
-// memory_copy.wast:2468
-assert_return(() => call($14, "load8_u", [45371]), 0);
-
-// memory_copy.wast:2469
-assert_return(() => call($14, "load8_u", [45570]), 0);
-
-// memory_copy.wast:2470
-assert_return(() => call($14, "load8_u", [45769]), 0);
-
-// memory_copy.wast:2471
-assert_return(() => call($14, "load8_u", [45968]), 0);
-
-// memory_copy.wast:2472
-assert_return(() => call($14, "load8_u", [46167]), 0);
-
-// memory_copy.wast:2473
-assert_return(() => call($14, "load8_u", [46366]), 0);
-
-// memory_copy.wast:2474
-assert_return(() => call($14, "load8_u", [46565]), 0);
-
-// memory_copy.wast:2475
-assert_return(() => call($14, "load8_u", [46764]), 0);
-
-// memory_copy.wast:2476
-assert_return(() => call($14, "load8_u", [46963]), 0);
-
-// memory_copy.wast:2477
-assert_return(() => call($14, "load8_u", [47162]), 0);
-
-// memory_copy.wast:2478
-assert_return(() => call($14, "load8_u", [47361]), 0);
-
-// memory_copy.wast:2479
-assert_return(() => call($14, "load8_u", [47560]), 0);
-
-// memory_copy.wast:2480
-assert_return(() => call($14, "load8_u", [47759]), 0);
-
-// memory_copy.wast:2481
-assert_return(() => call($14, "load8_u", [47958]), 0);
-
-// memory_copy.wast:2482
-assert_return(() => call($14, "load8_u", [48157]), 0);
-
-// memory_copy.wast:2483
-assert_return(() => call($14, "load8_u", [48356]), 0);
-
-// memory_copy.wast:2484
-assert_return(() => call($14, "load8_u", [48555]), 0);
-
-// memory_copy.wast:2485
-assert_return(() => call($14, "load8_u", [48754]), 0);
-
-// memory_copy.wast:2486
-assert_return(() => call($14, "load8_u", [48953]), 0);
-
-// memory_copy.wast:2487
-assert_return(() => call($14, "load8_u", [49152]), 0);
-
-// memory_copy.wast:2488
-assert_return(() => call($14, "load8_u", [49351]), 0);
-
-// memory_copy.wast:2489
-assert_return(() => call($14, "load8_u", [49550]), 0);
-
-// memory_copy.wast:2490
-assert_return(() => call($14, "load8_u", [49749]), 0);
-
-// memory_copy.wast:2491
-assert_return(() => call($14, "load8_u", [49948]), 0);
-
-// memory_copy.wast:2492
-assert_return(() => call($14, "load8_u", [50147]), 0);
-
-// memory_copy.wast:2493
-assert_return(() => call($14, "load8_u", [50346]), 0);
-
-// memory_copy.wast:2494
-assert_return(() => call($14, "load8_u", [50545]), 0);
-
-// memory_copy.wast:2495
-assert_return(() => call($14, "load8_u", [50744]), 0);
-
-// memory_copy.wast:2496
-assert_return(() => call($14, "load8_u", [50943]), 0);
-
-// memory_copy.wast:2497
-assert_return(() => call($14, "load8_u", [51142]), 0);
-
-// memory_copy.wast:2498
-assert_return(() => call($14, "load8_u", [51341]), 0);
-
-// memory_copy.wast:2499
-assert_return(() => call($14, "load8_u", [51540]), 0);
-
-// memory_copy.wast:2500
-assert_return(() => call($14, "load8_u", [51739]), 0);
-
-// memory_copy.wast:2501
-assert_return(() => call($14, "load8_u", [51938]), 0);
-
-// memory_copy.wast:2502
-assert_return(() => call($14, "load8_u", [52137]), 0);
-
-// memory_copy.wast:2503
-assert_return(() => call($14, "load8_u", [52336]), 0);
-
-// memory_copy.wast:2504
-assert_return(() => call($14, "load8_u", [52535]), 0);
-
-// memory_copy.wast:2505
-assert_return(() => call($14, "load8_u", [52734]), 0);
-
-// memory_copy.wast:2506
-assert_return(() => call($14, "load8_u", [52933]), 0);
-
-// memory_copy.wast:2507
-assert_return(() => call($14, "load8_u", [53132]), 0);
-
-// memory_copy.wast:2508
-assert_return(() => call($14, "load8_u", [53331]), 0);
-
-// memory_copy.wast:2509
-assert_return(() => call($14, "load8_u", [53530]), 0);
-
-// memory_copy.wast:2510
-assert_return(() => call($14, "load8_u", [53729]), 0);
-
-// memory_copy.wast:2511
-assert_return(() => call($14, "load8_u", [53928]), 0);
-
-// memory_copy.wast:2512
-assert_return(() => call($14, "load8_u", [54127]), 0);
-
-// memory_copy.wast:2513
-assert_return(() => call($14, "load8_u", [54326]), 0);
-
-// memory_copy.wast:2514
-assert_return(() => call($14, "load8_u", [54525]), 0);
-
-// memory_copy.wast:2515
-assert_return(() => call($14, "load8_u", [54724]), 0);
-
-// memory_copy.wast:2516
-assert_return(() => call($14, "load8_u", [54923]), 0);
-
-// memory_copy.wast:2517
-assert_return(() => call($14, "load8_u", [55122]), 0);
-
-// memory_copy.wast:2518
-assert_return(() => call($14, "load8_u", [55321]), 0);
-
-// memory_copy.wast:2519
-assert_return(() => call($14, "load8_u", [55520]), 0);
-
-// memory_copy.wast:2520
-assert_return(() => call($14, "load8_u", [55719]), 0);
-
-// memory_copy.wast:2521
-assert_return(() => call($14, "load8_u", [55918]), 0);
-
-// memory_copy.wast:2522
-assert_return(() => call($14, "load8_u", [56117]), 0);
-
-// memory_copy.wast:2523
-assert_return(() => call($14, "load8_u", [56316]), 0);
-
-// memory_copy.wast:2524
-assert_return(() => call($14, "load8_u", [56515]), 0);
-
-// memory_copy.wast:2525
-assert_return(() => call($14, "load8_u", [56714]), 0);
-
-// memory_copy.wast:2526
-assert_return(() => call($14, "load8_u", [56913]), 0);
-
-// memory_copy.wast:2527
-assert_return(() => call($14, "load8_u", [57112]), 0);
-
-// memory_copy.wast:2528
-assert_return(() => call($14, "load8_u", [57311]), 0);
-
-// memory_copy.wast:2529
-assert_return(() => call($14, "load8_u", [57510]), 0);
-
-// memory_copy.wast:2530
-assert_return(() => call($14, "load8_u", [57709]), 0);
-
-// memory_copy.wast:2531
-assert_return(() => call($14, "load8_u", [57908]), 0);
-
-// memory_copy.wast:2532
-assert_return(() => call($14, "load8_u", [58107]), 0);
-
-// memory_copy.wast:2533
-assert_return(() => call($14, "load8_u", [58306]), 0);
-
-// memory_copy.wast:2534
-assert_return(() => call($14, "load8_u", [58505]), 0);
-
-// memory_copy.wast:2535
-assert_return(() => call($14, "load8_u", [58704]), 0);
-
-// memory_copy.wast:2536
-assert_return(() => call($14, "load8_u", [58903]), 0);
-
-// memory_copy.wast:2537
-assert_return(() => call($14, "load8_u", [59102]), 0);
-
-// memory_copy.wast:2538
-assert_return(() => call($14, "load8_u", [59301]), 0);
-
-// memory_copy.wast:2539
-assert_return(() => call($14, "load8_u", [59500]), 0);
-
-// memory_copy.wast:2540
-assert_return(() => call($14, "load8_u", [59699]), 0);
-
-// memory_copy.wast:2541
-assert_return(() => call($14, "load8_u", [59898]), 0);
-
-// memory_copy.wast:2542
-assert_return(() => call($14, "load8_u", [60097]), 0);
-
-// memory_copy.wast:2543
-assert_return(() => call($14, "load8_u", [60296]), 0);
-
-// memory_copy.wast:2544
-assert_return(() => call($14, "load8_u", [60495]), 0);
-
-// memory_copy.wast:2545
-assert_return(() => call($14, "load8_u", [60694]), 0);
-
-// memory_copy.wast:2546
-assert_return(() => call($14, "load8_u", [60893]), 0);
-
-// memory_copy.wast:2547
-assert_return(() => call($14, "load8_u", [61092]), 0);
-
-// memory_copy.wast:2548
-assert_return(() => call($14, "load8_u", [61291]), 0);
-
-// memory_copy.wast:2549
-assert_return(() => call($14, "load8_u", [61490]), 0);
-
-// memory_copy.wast:2550
-assert_return(() => call($14, "load8_u", [61689]), 0);
-
-// memory_copy.wast:2551
-assert_return(() => call($14, "load8_u", [61888]), 0);
-
-// memory_copy.wast:2552
-assert_return(() => call($14, "load8_u", [62087]), 0);
-
-// memory_copy.wast:2553
-assert_return(() => call($14, "load8_u", [62286]), 0);
-
-// memory_copy.wast:2554
-assert_return(() => call($14, "load8_u", [62485]), 0);
-
-// memory_copy.wast:2555
-assert_return(() => call($14, "load8_u", [62684]), 0);
-
-// memory_copy.wast:2556
-assert_return(() => call($14, "load8_u", [62883]), 0);
-
-// memory_copy.wast:2557
-assert_return(() => call($14, "load8_u", [63082]), 0);
-
-// memory_copy.wast:2558
-assert_return(() => call($14, "load8_u", [63281]), 0);
-
-// memory_copy.wast:2559
-assert_return(() => call($14, "load8_u", [63480]), 0);
-
-// memory_copy.wast:2560
-assert_return(() => call($14, "load8_u", [63679]), 0);
-
-// memory_copy.wast:2561
-assert_return(() => call($14, "load8_u", [63878]), 0);
-
-// memory_copy.wast:2562
-assert_return(() => call($14, "load8_u", [64077]), 0);
-
-// memory_copy.wast:2563
-assert_return(() => call($14, "load8_u", [64276]), 0);
-
-// memory_copy.wast:2564
-assert_return(() => call($14, "load8_u", [64475]), 0);
-
-// memory_copy.wast:2565
-assert_return(() => call($14, "load8_u", [64674]), 0);
-
-// memory_copy.wast:2566
-assert_return(() => call($14, "load8_u", [64873]), 0);
-
-// memory_copy.wast:2567
-assert_return(() => call($14, "load8_u", [65072]), 0);
-
-// memory_copy.wast:2568
-assert_return(() => call($14, "load8_u", [65271]), 0);
-
-// memory_copy.wast:2569
-assert_return(() => call($14, "load8_u", [65470]), 0);
-
-// memory_copy.wast:2570
-assert_return(() => call($14, "load8_u", [65486]), 0);
-
-// memory_copy.wast:2571
-assert_return(() => call($14, "load8_u", [65487]), 1);
-
-// memory_copy.wast:2572
-assert_return(() => call($14, "load8_u", [65488]), 2);
-
-// memory_copy.wast:2573
-assert_return(() => call($14, "load8_u", [65489]), 3);
-
-// memory_copy.wast:2574
-assert_return(() => call($14, "load8_u", [65490]), 4);
-
-// memory_copy.wast:2575
-assert_return(() => call($14, "load8_u", [65491]), 5);
-
-// memory_copy.wast:2576
-assert_return(() => call($14, "load8_u", [65492]), 6);
-
-// memory_copy.wast:2577
-assert_return(() => call($14, "load8_u", [65493]), 7);
-
-// memory_copy.wast:2578
-assert_return(() => call($14, "load8_u", [65494]), 8);
-
-// memory_copy.wast:2579
-assert_return(() => call($14, "load8_u", [65495]), 9);
-
-// memory_copy.wast:2580
-assert_return(() => call($14, "load8_u", [65496]), 10);
-
-// memory_copy.wast:2581
-assert_return(() => call($14, "load8_u", [65497]), 11);
-
-// memory_copy.wast:2582
-assert_return(() => call($14, "load8_u", [65498]), 12);
-
-// memory_copy.wast:2583
-assert_return(() => call($14, "load8_u", [65499]), 13);
-
-// memory_copy.wast:2584
-assert_return(() => call($14, "load8_u", [65500]), 14);
-
-// memory_copy.wast:2585
-assert_return(() => call($14, "load8_u", [65501]), 15);
-
-// memory_copy.wast:2586
-assert_return(() => call($14, "load8_u", [65502]), 16);
-
-// memory_copy.wast:2587
-assert_return(() => call($14, "load8_u", [65503]), 17);
-
-// memory_copy.wast:2588
-assert_return(() => call($14, "load8_u", [65504]), 18);
-
-// memory_copy.wast:2589
-assert_return(() => call($14, "load8_u", [65505]), 19);
-
-// memory_copy.wast:2590
-assert_return(() => call($14, "load8_u", [65516]), 0);
-
-// memory_copy.wast:2591
-assert_return(() => call($14, "load8_u", [65517]), 1);
-
-// memory_copy.wast:2592
-assert_return(() => call($14, "load8_u", [65518]), 2);
-
-// memory_copy.wast:2593
-assert_return(() => call($14, "load8_u", [65519]), 3);
-
-// memory_copy.wast:2594
-assert_return(() => call($14, "load8_u", [65520]), 4);
-
-// memory_copy.wast:2595
-assert_return(() => call($14, "load8_u", [65521]), 5);
-
-// memory_copy.wast:2596
-assert_return(() => call($14, "load8_u", [65522]), 6);
-
-// memory_copy.wast:2597
-assert_return(() => call($14, "load8_u", [65523]), 7);
-
-// memory_copy.wast:2598
-assert_return(() => call($14, "load8_u", [65524]), 8);
-
-// memory_copy.wast:2599
-assert_return(() => call($14, "load8_u", [65525]), 9);
-
-// memory_copy.wast:2600
-assert_return(() => call($14, "load8_u", [65526]), 10);
-
-// memory_copy.wast:2601
-assert_return(() => call($14, "load8_u", [65527]), 11);
-
-// memory_copy.wast:2602
-assert_return(() => call($14, "load8_u", [65528]), 12);
-
-// memory_copy.wast:2603
-assert_return(() => call($14, "load8_u", [65529]), 13);
-
-// memory_copy.wast:2604
-assert_return(() => call($14, "load8_u", [65530]), 14);
-
-// memory_copy.wast:2605
-assert_return(() => call($14, "load8_u", [65531]), 15);
-
-// memory_copy.wast:2606
-assert_return(() => call($14, "load8_u", [65532]), 16);
-
-// memory_copy.wast:2607
-assert_return(() => call($14, "load8_u", [65533]), 17);
-
-// memory_copy.wast:2608
-assert_return(() => call($14, "load8_u", [65534]), 18);
-
-// memory_copy.wast:2609
-assert_return(() => call($14, "load8_u", [65535]), 19);
-
-// memory_copy.wast:2611
-let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xe2\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:2619
-assert_trap(() => call($15, "run", [65516, 65506, 40]));
-
-// memory_copy.wast:2622
-assert_return(() => call($15, "load8_u", [198]), 0);
-
-// memory_copy.wast:2623
-assert_return(() => call($15, "load8_u", [397]), 0);
-
-// memory_copy.wast:2624
-assert_return(() => call($15, "load8_u", [596]), 0);
-
-// memory_copy.wast:2625
-assert_return(() => call($15, "load8_u", [795]), 0);
-
-// memory_copy.wast:2626
-assert_return(() => call($15, "load8_u", [994]), 0);
-
-// memory_copy.wast:2627
-assert_return(() => call($15, "load8_u", [1193]), 0);
-
-// memory_copy.wast:2628
-assert_return(() => call($15, "load8_u", [1392]), 0);
-
-// memory_copy.wast:2629
-assert_return(() => call($15, "load8_u", [1591]), 0);
-
-// memory_copy.wast:2630
-assert_return(() => call($15, "load8_u", [1790]), 0);
-
-// memory_copy.wast:2631
-assert_return(() => call($15, "load8_u", [1989]), 0);
-
-// memory_copy.wast:2632
-assert_return(() => call($15, "load8_u", [2188]), 0);
-
-// memory_copy.wast:2633
-assert_return(() => call($15, "load8_u", [2387]), 0);
-
-// memory_copy.wast:2634
-assert_return(() => call($15, "load8_u", [2586]), 0);
-
-// memory_copy.wast:2635
-assert_return(() => call($15, "load8_u", [2785]), 0);
-
-// memory_copy.wast:2636
-assert_return(() => call($15, "load8_u", [2984]), 0);
-
-// memory_copy.wast:2637
-assert_return(() => call($15, "load8_u", [3183]), 0);
-
-// memory_copy.wast:2638
-assert_return(() => call($15, "load8_u", [3382]), 0);
-
-// memory_copy.wast:2639
-assert_return(() => call($15, "load8_u", [3581]), 0);
-
-// memory_copy.wast:2640
-assert_return(() => call($15, "load8_u", [3780]), 0);
-
-// memory_copy.wast:2641
-assert_return(() => call($15, "load8_u", [3979]), 0);
-
-// memory_copy.wast:2642
-assert_return(() => call($15, "load8_u", [4178]), 0);
-
-// memory_copy.wast:2643
-assert_return(() => call($15, "load8_u", [4377]), 0);
-
-// memory_copy.wast:2644
-assert_return(() => call($15, "load8_u", [4576]), 0);
-
-// memory_copy.wast:2645
-assert_return(() => call($15, "load8_u", [4775]), 0);
-
-// memory_copy.wast:2646
-assert_return(() => call($15, "load8_u", [4974]), 0);
-
-// memory_copy.wast:2647
-assert_return(() => call($15, "load8_u", [5173]), 0);
-
-// memory_copy.wast:2648
-assert_return(() => call($15, "load8_u", [5372]), 0);
-
-// memory_copy.wast:2649
-assert_return(() => call($15, "load8_u", [5571]), 0);
-
-// memory_copy.wast:2650
-assert_return(() => call($15, "load8_u", [5770]), 0);
-
-// memory_copy.wast:2651
-assert_return(() => call($15, "load8_u", [5969]), 0);
-
-// memory_copy.wast:2652
-assert_return(() => call($15, "load8_u", [6168]), 0);
-
-// memory_copy.wast:2653
-assert_return(() => call($15, "load8_u", [6367]), 0);
-
-// memory_copy.wast:2654
-assert_return(() => call($15, "load8_u", [6566]), 0);
-
-// memory_copy.wast:2655
-assert_return(() => call($15, "load8_u", [6765]), 0);
-
-// memory_copy.wast:2656
-assert_return(() => call($15, "load8_u", [6964]), 0);
-
-// memory_copy.wast:2657
-assert_return(() => call($15, "load8_u", [7163]), 0);
-
-// memory_copy.wast:2658
-assert_return(() => call($15, "load8_u", [7362]), 0);
-
-// memory_copy.wast:2659
-assert_return(() => call($15, "load8_u", [7561]), 0);
-
-// memory_copy.wast:2660
-assert_return(() => call($15, "load8_u", [7760]), 0);
-
-// memory_copy.wast:2661
-assert_return(() => call($15, "load8_u", [7959]), 0);
-
-// memory_copy.wast:2662
-assert_return(() => call($15, "load8_u", [8158]), 0);
-
-// memory_copy.wast:2663
-assert_return(() => call($15, "load8_u", [8357]), 0);
-
-// memory_copy.wast:2664
-assert_return(() => call($15, "load8_u", [8556]), 0);
-
-// memory_copy.wast:2665
-assert_return(() => call($15, "load8_u", [8755]), 0);
-
-// memory_copy.wast:2666
-assert_return(() => call($15, "load8_u", [8954]), 0);
-
-// memory_copy.wast:2667
-assert_return(() => call($15, "load8_u", [9153]), 0);
-
-// memory_copy.wast:2668
-assert_return(() => call($15, "load8_u", [9352]), 0);
-
-// memory_copy.wast:2669
-assert_return(() => call($15, "load8_u", [9551]), 0);
-
-// memory_copy.wast:2670
-assert_return(() => call($15, "load8_u", [9750]), 0);
-
-// memory_copy.wast:2671
-assert_return(() => call($15, "load8_u", [9949]), 0);
-
-// memory_copy.wast:2672
-assert_return(() => call($15, "load8_u", [10148]), 0);
-
-// memory_copy.wast:2673
-assert_return(() => call($15, "load8_u", [10347]), 0);
-
-// memory_copy.wast:2674
-assert_return(() => call($15, "load8_u", [10546]), 0);
-
-// memory_copy.wast:2675
-assert_return(() => call($15, "load8_u", [10745]), 0);
-
-// memory_copy.wast:2676
-assert_return(() => call($15, "load8_u", [10944]), 0);
-
-// memory_copy.wast:2677
-assert_return(() => call($15, "load8_u", [11143]), 0);
-
-// memory_copy.wast:2678
-assert_return(() => call($15, "load8_u", [11342]), 0);
-
-// memory_copy.wast:2679
-assert_return(() => call($15, "load8_u", [11541]), 0);
-
-// memory_copy.wast:2680
-assert_return(() => call($15, "load8_u", [11740]), 0);
-
-// memory_copy.wast:2681
-assert_return(() => call($15, "load8_u", [11939]), 0);
-
-// memory_copy.wast:2682
-assert_return(() => call($15, "load8_u", [12138]), 0);
-
-// memory_copy.wast:2683
-assert_return(() => call($15, "load8_u", [12337]), 0);
-
-// memory_copy.wast:2684
-assert_return(() => call($15, "load8_u", [12536]), 0);
-
-// memory_copy.wast:2685
-assert_return(() => call($15, "load8_u", [12735]), 0);
-
-// memory_copy.wast:2686
-assert_return(() => call($15, "load8_u", [12934]), 0);
-
-// memory_copy.wast:2687
-assert_return(() => call($15, "load8_u", [13133]), 0);
-
-// memory_copy.wast:2688
-assert_return(() => call($15, "load8_u", [13332]), 0);
-
-// memory_copy.wast:2689
-assert_return(() => call($15, "load8_u", [13531]), 0);
-
-// memory_copy.wast:2690
-assert_return(() => call($15, "load8_u", [13730]), 0);
-
-// memory_copy.wast:2691
-assert_return(() => call($15, "load8_u", [13929]), 0);
-
-// memory_copy.wast:2692
-assert_return(() => call($15, "load8_u", [14128]), 0);
-
-// memory_copy.wast:2693
-assert_return(() => call($15, "load8_u", [14327]), 0);
-
-// memory_copy.wast:2694
-assert_return(() => call($15, "load8_u", [14526]), 0);
-
-// memory_copy.wast:2695
-assert_return(() => call($15, "load8_u", [14725]), 0);
-
-// memory_copy.wast:2696
-assert_return(() => call($15, "load8_u", [14924]), 0);
-
-// memory_copy.wast:2697
-assert_return(() => call($15, "load8_u", [15123]), 0);
-
-// memory_copy.wast:2698
-assert_return(() => call($15, "load8_u", [15322]), 0);
-
-// memory_copy.wast:2699
-assert_return(() => call($15, "load8_u", [15521]), 0);
-
-// memory_copy.wast:2700
-assert_return(() => call($15, "load8_u", [15720]), 0);
-
-// memory_copy.wast:2701
-assert_return(() => call($15, "load8_u", [15919]), 0);
-
-// memory_copy.wast:2702
-assert_return(() => call($15, "load8_u", [16118]), 0);
-
-// memory_copy.wast:2703
-assert_return(() => call($15, "load8_u", [16317]), 0);
-
-// memory_copy.wast:2704
-assert_return(() => call($15, "load8_u", [16516]), 0);
-
-// memory_copy.wast:2705
-assert_return(() => call($15, "load8_u", [16715]), 0);
-
-// memory_copy.wast:2706
-assert_return(() => call($15, "load8_u", [16914]), 0);
-
-// memory_copy.wast:2707
-assert_return(() => call($15, "load8_u", [17113]), 0);
-
-// memory_copy.wast:2708
-assert_return(() => call($15, "load8_u", [17312]), 0);
-
-// memory_copy.wast:2709
-assert_return(() => call($15, "load8_u", [17511]), 0);
-
-// memory_copy.wast:2710
-assert_return(() => call($15, "load8_u", [17710]), 0);
-
-// memory_copy.wast:2711
-assert_return(() => call($15, "load8_u", [17909]), 0);
-
-// memory_copy.wast:2712
-assert_return(() => call($15, "load8_u", [18108]), 0);
-
-// memory_copy.wast:2713
-assert_return(() => call($15, "load8_u", [18307]), 0);
-
-// memory_copy.wast:2714
-assert_return(() => call($15, "load8_u", [18506]), 0);
-
-// memory_copy.wast:2715
-assert_return(() => call($15, "load8_u", [18705]), 0);
-
-// memory_copy.wast:2716
-assert_return(() => call($15, "load8_u", [18904]), 0);
-
-// memory_copy.wast:2717
-assert_return(() => call($15, "load8_u", [19103]), 0);
-
-// memory_copy.wast:2718
-assert_return(() => call($15, "load8_u", [19302]), 0);
-
-// memory_copy.wast:2719
-assert_return(() => call($15, "load8_u", [19501]), 0);
-
-// memory_copy.wast:2720
-assert_return(() => call($15, "load8_u", [19700]), 0);
-
-// memory_copy.wast:2721
-assert_return(() => call($15, "load8_u", [19899]), 0);
-
-// memory_copy.wast:2722
-assert_return(() => call($15, "load8_u", [20098]), 0);
-
-// memory_copy.wast:2723
-assert_return(() => call($15, "load8_u", [20297]), 0);
-
-// memory_copy.wast:2724
-assert_return(() => call($15, "load8_u", [20496]), 0);
-
-// memory_copy.wast:2725
-assert_return(() => call($15, "load8_u", [20695]), 0);
-
-// memory_copy.wast:2726
-assert_return(() => call($15, "load8_u", [20894]), 0);
-
-// memory_copy.wast:2727
-assert_return(() => call($15, "load8_u", [21093]), 0);
-
-// memory_copy.wast:2728
-assert_return(() => call($15, "load8_u", [21292]), 0);
-
-// memory_copy.wast:2729
-assert_return(() => call($15, "load8_u", [21491]), 0);
-
-// memory_copy.wast:2730
-assert_return(() => call($15, "load8_u", [21690]), 0);
-
-// memory_copy.wast:2731
-assert_return(() => call($15, "load8_u", [21889]), 0);
-
-// memory_copy.wast:2732
-assert_return(() => call($15, "load8_u", [22088]), 0);
-
-// memory_copy.wast:2733
-assert_return(() => call($15, "load8_u", [22287]), 0);
-
-// memory_copy.wast:2734
-assert_return(() => call($15, "load8_u", [22486]), 0);
-
-// memory_copy.wast:2735
-assert_return(() => call($15, "load8_u", [22685]), 0);
-
-// memory_copy.wast:2736
-assert_return(() => call($15, "load8_u", [22884]), 0);
-
-// memory_copy.wast:2737
-assert_return(() => call($15, "load8_u", [23083]), 0);
-
-// memory_copy.wast:2738
-assert_return(() => call($15, "load8_u", [23282]), 0);
-
-// memory_copy.wast:2739
-assert_return(() => call($15, "load8_u", [23481]), 0);
-
-// memory_copy.wast:2740
-assert_return(() => call($15, "load8_u", [23680]), 0);
-
-// memory_copy.wast:2741
-assert_return(() => call($15, "load8_u", [23879]), 0);
-
-// memory_copy.wast:2742
-assert_return(() => call($15, "load8_u", [24078]), 0);
-
-// memory_copy.wast:2743
-assert_return(() => call($15, "load8_u", [24277]), 0);
-
-// memory_copy.wast:2744
-assert_return(() => call($15, "load8_u", [24476]), 0);
-
-// memory_copy.wast:2745
-assert_return(() => call($15, "load8_u", [24675]), 0);
-
-// memory_copy.wast:2746
-assert_return(() => call($15, "load8_u", [24874]), 0);
-
-// memory_copy.wast:2747
-assert_return(() => call($15, "load8_u", [25073]), 0);
-
-// memory_copy.wast:2748
-assert_return(() => call($15, "load8_u", [25272]), 0);
-
-// memory_copy.wast:2749
-assert_return(() => call($15, "load8_u", [25471]), 0);
-
-// memory_copy.wast:2750
-assert_return(() => call($15, "load8_u", [25670]), 0);
-
-// memory_copy.wast:2751
-assert_return(() => call($15, "load8_u", [25869]), 0);
-
-// memory_copy.wast:2752
-assert_return(() => call($15, "load8_u", [26068]), 0);
-
-// memory_copy.wast:2753
-assert_return(() => call($15, "load8_u", [26267]), 0);
-
-// memory_copy.wast:2754
-assert_return(() => call($15, "load8_u", [26466]), 0);
-
-// memory_copy.wast:2755
-assert_return(() => call($15, "load8_u", [26665]), 0);
-
-// memory_copy.wast:2756
-assert_return(() => call($15, "load8_u", [26864]), 0);
-
-// memory_copy.wast:2757
-assert_return(() => call($15, "load8_u", [27063]), 0);
-
-// memory_copy.wast:2758
-assert_return(() => call($15, "load8_u", [27262]), 0);
-
-// memory_copy.wast:2759
-assert_return(() => call($15, "load8_u", [27461]), 0);
-
-// memory_copy.wast:2760
-assert_return(() => call($15, "load8_u", [27660]), 0);
-
-// memory_copy.wast:2761
-assert_return(() => call($15, "load8_u", [27859]), 0);
-
-// memory_copy.wast:2762
-assert_return(() => call($15, "load8_u", [28058]), 0);
-
-// memory_copy.wast:2763
-assert_return(() => call($15, "load8_u", [28257]), 0);
-
-// memory_copy.wast:2764
-assert_return(() => call($15, "load8_u", [28456]), 0);
-
-// memory_copy.wast:2765
-assert_return(() => call($15, "load8_u", [28655]), 0);
-
-// memory_copy.wast:2766
-assert_return(() => call($15, "load8_u", [28854]), 0);
-
-// memory_copy.wast:2767
-assert_return(() => call($15, "load8_u", [29053]), 0);
-
-// memory_copy.wast:2768
-assert_return(() => call($15, "load8_u", [29252]), 0);
-
-// memory_copy.wast:2769
-assert_return(() => call($15, "load8_u", [29451]), 0);
-
-// memory_copy.wast:2770
-assert_return(() => call($15, "load8_u", [29650]), 0);
-
-// memory_copy.wast:2771
-assert_return(() => call($15, "load8_u", [29849]), 0);
-
-// memory_copy.wast:2772
-assert_return(() => call($15, "load8_u", [30048]), 0);
-
-// memory_copy.wast:2773
-assert_return(() => call($15, "load8_u", [30247]), 0);
-
-// memory_copy.wast:2774
-assert_return(() => call($15, "load8_u", [30446]), 0);
-
-// memory_copy.wast:2775
-assert_return(() => call($15, "load8_u", [30645]), 0);
-
-// memory_copy.wast:2776
-assert_return(() => call($15, "load8_u", [30844]), 0);
-
-// memory_copy.wast:2777
-assert_return(() => call($15, "load8_u", [31043]), 0);
-
-// memory_copy.wast:2778
-assert_return(() => call($15, "load8_u", [31242]), 0);
-
-// memory_copy.wast:2779
-assert_return(() => call($15, "load8_u", [31441]), 0);
-
-// memory_copy.wast:2780
-assert_return(() => call($15, "load8_u", [31640]), 0);
-
-// memory_copy.wast:2781
-assert_return(() => call($15, "load8_u", [31839]), 0);
-
-// memory_copy.wast:2782
-assert_return(() => call($15, "load8_u", [32038]), 0);
-
-// memory_copy.wast:2783
-assert_return(() => call($15, "load8_u", [32237]), 0);
-
-// memory_copy.wast:2784
-assert_return(() => call($15, "load8_u", [32436]), 0);
-
-// memory_copy.wast:2785
-assert_return(() => call($15, "load8_u", [32635]), 0);
-
-// memory_copy.wast:2786
-assert_return(() => call($15, "load8_u", [32834]), 0);
-
-// memory_copy.wast:2787
-assert_return(() => call($15, "load8_u", [33033]), 0);
-
-// memory_copy.wast:2788
-assert_return(() => call($15, "load8_u", [33232]), 0);
-
-// memory_copy.wast:2789
-assert_return(() => call($15, "load8_u", [33431]), 0);
-
-// memory_copy.wast:2790
-assert_return(() => call($15, "load8_u", [33630]), 0);
-
-// memory_copy.wast:2791
-assert_return(() => call($15, "load8_u", [33829]), 0);
-
-// memory_copy.wast:2792
-assert_return(() => call($15, "load8_u", [34028]), 0);
-
-// memory_copy.wast:2793
-assert_return(() => call($15, "load8_u", [34227]), 0);
-
-// memory_copy.wast:2794
-assert_return(() => call($15, "load8_u", [34426]), 0);
-
-// memory_copy.wast:2795
-assert_return(() => call($15, "load8_u", [34625]), 0);
-
-// memory_copy.wast:2796
-assert_return(() => call($15, "load8_u", [34824]), 0);
-
-// memory_copy.wast:2797
-assert_return(() => call($15, "load8_u", [35023]), 0);
-
-// memory_copy.wast:2798
-assert_return(() => call($15, "load8_u", [35222]), 0);
-
-// memory_copy.wast:2799
-assert_return(() => call($15, "load8_u", [35421]), 0);
-
-// memory_copy.wast:2800
-assert_return(() => call($15, "load8_u", [35620]), 0);
-
-// memory_copy.wast:2801
-assert_return(() => call($15, "load8_u", [35819]), 0);
-
-// memory_copy.wast:2802
-assert_return(() => call($15, "load8_u", [36018]), 0);
-
-// memory_copy.wast:2803
-assert_return(() => call($15, "load8_u", [36217]), 0);
-
-// memory_copy.wast:2804
-assert_return(() => call($15, "load8_u", [36416]), 0);
-
-// memory_copy.wast:2805
-assert_return(() => call($15, "load8_u", [36615]), 0);
-
-// memory_copy.wast:2806
-assert_return(() => call($15, "load8_u", [36814]), 0);
-
-// memory_copy.wast:2807
-assert_return(() => call($15, "load8_u", [37013]), 0);
-
-// memory_copy.wast:2808
-assert_return(() => call($15, "load8_u", [37212]), 0);
-
-// memory_copy.wast:2809
-assert_return(() => call($15, "load8_u", [37411]), 0);
-
-// memory_copy.wast:2810
-assert_return(() => call($15, "load8_u", [37610]), 0);
-
-// memory_copy.wast:2811
-assert_return(() => call($15, "load8_u", [37809]), 0);
-
-// memory_copy.wast:2812
-assert_return(() => call($15, "load8_u", [38008]), 0);
-
-// memory_copy.wast:2813
-assert_return(() => call($15, "load8_u", [38207]), 0);
-
-// memory_copy.wast:2814
-assert_return(() => call($15, "load8_u", [38406]), 0);
-
-// memory_copy.wast:2815
-assert_return(() => call($15, "load8_u", [38605]), 0);
-
-// memory_copy.wast:2816
-assert_return(() => call($15, "load8_u", [38804]), 0);
-
-// memory_copy.wast:2817
-assert_return(() => call($15, "load8_u", [39003]), 0);
-
-// memory_copy.wast:2818
-assert_return(() => call($15, "load8_u", [39202]), 0);
-
-// memory_copy.wast:2819
-assert_return(() => call($15, "load8_u", [39401]), 0);
-
-// memory_copy.wast:2820
-assert_return(() => call($15, "load8_u", [39600]), 0);
-
-// memory_copy.wast:2821
-assert_return(() => call($15, "load8_u", [39799]), 0);
-
-// memory_copy.wast:2822
-assert_return(() => call($15, "load8_u", [39998]), 0);
-
-// memory_copy.wast:2823
-assert_return(() => call($15, "load8_u", [40197]), 0);
-
-// memory_copy.wast:2824
-assert_return(() => call($15, "load8_u", [40396]), 0);
-
-// memory_copy.wast:2825
-assert_return(() => call($15, "load8_u", [40595]), 0);
-
-// memory_copy.wast:2826
-assert_return(() => call($15, "load8_u", [40794]), 0);
-
-// memory_copy.wast:2827
-assert_return(() => call($15, "load8_u", [40993]), 0);
-
-// memory_copy.wast:2828
-assert_return(() => call($15, "load8_u", [41192]), 0);
-
-// memory_copy.wast:2829
-assert_return(() => call($15, "load8_u", [41391]), 0);
-
-// memory_copy.wast:2830
-assert_return(() => call($15, "load8_u", [41590]), 0);
-
-// memory_copy.wast:2831
-assert_return(() => call($15, "load8_u", [41789]), 0);
-
-// memory_copy.wast:2832
-assert_return(() => call($15, "load8_u", [41988]), 0);
-
-// memory_copy.wast:2833
-assert_return(() => call($15, "load8_u", [42187]), 0);
-
-// memory_copy.wast:2834
-assert_return(() => call($15, "load8_u", [42386]), 0);
-
-// memory_copy.wast:2835
-assert_return(() => call($15, "load8_u", [42585]), 0);
-
-// memory_copy.wast:2836
-assert_return(() => call($15, "load8_u", [42784]), 0);
-
-// memory_copy.wast:2837
-assert_return(() => call($15, "load8_u", [42983]), 0);
-
-// memory_copy.wast:2838
-assert_return(() => call($15, "load8_u", [43182]), 0);
-
-// memory_copy.wast:2839
-assert_return(() => call($15, "load8_u", [43381]), 0);
-
-// memory_copy.wast:2840
-assert_return(() => call($15, "load8_u", [43580]), 0);
-
-// memory_copy.wast:2841
-assert_return(() => call($15, "load8_u", [43779]), 0);
-
-// memory_copy.wast:2842
-assert_return(() => call($15, "load8_u", [43978]), 0);
-
-// memory_copy.wast:2843
-assert_return(() => call($15, "load8_u", [44177]), 0);
-
-// memory_copy.wast:2844
-assert_return(() => call($15, "load8_u", [44376]), 0);
-
-// memory_copy.wast:2845
-assert_return(() => call($15, "load8_u", [44575]), 0);
-
-// memory_copy.wast:2846
-assert_return(() => call($15, "load8_u", [44774]), 0);
-
-// memory_copy.wast:2847
-assert_return(() => call($15, "load8_u", [44973]), 0);
-
-// memory_copy.wast:2848
-assert_return(() => call($15, "load8_u", [45172]), 0);
-
-// memory_copy.wast:2849
-assert_return(() => call($15, "load8_u", [45371]), 0);
-
-// memory_copy.wast:2850
-assert_return(() => call($15, "load8_u", [45570]), 0);
-
-// memory_copy.wast:2851
-assert_return(() => call($15, "load8_u", [45769]), 0);
-
-// memory_copy.wast:2852
-assert_return(() => call($15, "load8_u", [45968]), 0);
-
-// memory_copy.wast:2853
-assert_return(() => call($15, "load8_u", [46167]), 0);
-
-// memory_copy.wast:2854
-assert_return(() => call($15, "load8_u", [46366]), 0);
-
-// memory_copy.wast:2855
-assert_return(() => call($15, "load8_u", [46565]), 0);
-
-// memory_copy.wast:2856
-assert_return(() => call($15, "load8_u", [46764]), 0);
-
-// memory_copy.wast:2857
-assert_return(() => call($15, "load8_u", [46963]), 0);
-
-// memory_copy.wast:2858
-assert_return(() => call($15, "load8_u", [47162]), 0);
-
-// memory_copy.wast:2859
-assert_return(() => call($15, "load8_u", [47361]), 0);
-
-// memory_copy.wast:2860
-assert_return(() => call($15, "load8_u", [47560]), 0);
-
-// memory_copy.wast:2861
-assert_return(() => call($15, "load8_u", [47759]), 0);
-
-// memory_copy.wast:2862
-assert_return(() => call($15, "load8_u", [47958]), 0);
-
-// memory_copy.wast:2863
-assert_return(() => call($15, "load8_u", [48157]), 0);
-
-// memory_copy.wast:2864
-assert_return(() => call($15, "load8_u", [48356]), 0);
-
-// memory_copy.wast:2865
-assert_return(() => call($15, "load8_u", [48555]), 0);
-
-// memory_copy.wast:2866
-assert_return(() => call($15, "load8_u", [48754]), 0);
-
-// memory_copy.wast:2867
-assert_return(() => call($15, "load8_u", [48953]), 0);
-
-// memory_copy.wast:2868
-assert_return(() => call($15, "load8_u", [49152]), 0);
-
-// memory_copy.wast:2869
-assert_return(() => call($15, "load8_u", [49351]), 0);
-
-// memory_copy.wast:2870
-assert_return(() => call($15, "load8_u", [49550]), 0);
-
-// memory_copy.wast:2871
-assert_return(() => call($15, "load8_u", [49749]), 0);
-
-// memory_copy.wast:2872
-assert_return(() => call($15, "load8_u", [49948]), 0);
-
-// memory_copy.wast:2873
-assert_return(() => call($15, "load8_u", [50147]), 0);
-
-// memory_copy.wast:2874
-assert_return(() => call($15, "load8_u", [50346]), 0);
-
-// memory_copy.wast:2875
-assert_return(() => call($15, "load8_u", [50545]), 0);
-
-// memory_copy.wast:2876
-assert_return(() => call($15, "load8_u", [50744]), 0);
-
-// memory_copy.wast:2877
-assert_return(() => call($15, "load8_u", [50943]), 0);
-
-// memory_copy.wast:2878
-assert_return(() => call($15, "load8_u", [51142]), 0);
-
-// memory_copy.wast:2879
-assert_return(() => call($15, "load8_u", [51341]), 0);
-
-// memory_copy.wast:2880
-assert_return(() => call($15, "load8_u", [51540]), 0);
-
-// memory_copy.wast:2881
-assert_return(() => call($15, "load8_u", [51739]), 0);
-
-// memory_copy.wast:2882
-assert_return(() => call($15, "load8_u", [51938]), 0);
-
-// memory_copy.wast:2883
-assert_return(() => call($15, "load8_u", [52137]), 0);
-
-// memory_copy.wast:2884
-assert_return(() => call($15, "load8_u", [52336]), 0);
-
-// memory_copy.wast:2885
-assert_return(() => call($15, "load8_u", [52535]), 0);
-
-// memory_copy.wast:2886
-assert_return(() => call($15, "load8_u", [52734]), 0);
-
-// memory_copy.wast:2887
-assert_return(() => call($15, "load8_u", [52933]), 0);
-
-// memory_copy.wast:2888
-assert_return(() => call($15, "load8_u", [53132]), 0);
-
-// memory_copy.wast:2889
-assert_return(() => call($15, "load8_u", [53331]), 0);
-
-// memory_copy.wast:2890
-assert_return(() => call($15, "load8_u", [53530]), 0);
-
-// memory_copy.wast:2891
-assert_return(() => call($15, "load8_u", [53729]), 0);
-
-// memory_copy.wast:2892
-assert_return(() => call($15, "load8_u", [53928]), 0);
-
-// memory_copy.wast:2893
-assert_return(() => call($15, "load8_u", [54127]), 0);
-
-// memory_copy.wast:2894
-assert_return(() => call($15, "load8_u", [54326]), 0);
-
-// memory_copy.wast:2895
-assert_return(() => call($15, "load8_u", [54525]), 0);
-
-// memory_copy.wast:2896
-assert_return(() => call($15, "load8_u", [54724]), 0);
-
-// memory_copy.wast:2897
-assert_return(() => call($15, "load8_u", [54923]), 0);
-
-// memory_copy.wast:2898
-assert_return(() => call($15, "load8_u", [55122]), 0);
-
-// memory_copy.wast:2899
-assert_return(() => call($15, "load8_u", [55321]), 0);
-
-// memory_copy.wast:2900
-assert_return(() => call($15, "load8_u", [55520]), 0);
-
-// memory_copy.wast:2901
-assert_return(() => call($15, "load8_u", [55719]), 0);
-
-// memory_copy.wast:2902
-assert_return(() => call($15, "load8_u", [55918]), 0);
-
-// memory_copy.wast:2903
-assert_return(() => call($15, "load8_u", [56117]), 0);
-
-// memory_copy.wast:2904
-assert_return(() => call($15, "load8_u", [56316]), 0);
-
-// memory_copy.wast:2905
-assert_return(() => call($15, "load8_u", [56515]), 0);
-
-// memory_copy.wast:2906
-assert_return(() => call($15, "load8_u", [56714]), 0);
-
-// memory_copy.wast:2907
-assert_return(() => call($15, "load8_u", [56913]), 0);
-
-// memory_copy.wast:2908
-assert_return(() => call($15, "load8_u", [57112]), 0);
-
-// memory_copy.wast:2909
-assert_return(() => call($15, "load8_u", [57311]), 0);
-
-// memory_copy.wast:2910
-assert_return(() => call($15, "load8_u", [57510]), 0);
-
-// memory_copy.wast:2911
-assert_return(() => call($15, "load8_u", [57709]), 0);
-
-// memory_copy.wast:2912
-assert_return(() => call($15, "load8_u", [57908]), 0);
-
-// memory_copy.wast:2913
-assert_return(() => call($15, "load8_u", [58107]), 0);
-
-// memory_copy.wast:2914
-assert_return(() => call($15, "load8_u", [58306]), 0);
-
-// memory_copy.wast:2915
-assert_return(() => call($15, "load8_u", [58505]), 0);
-
-// memory_copy.wast:2916
-assert_return(() => call($15, "load8_u", [58704]), 0);
-
-// memory_copy.wast:2917
-assert_return(() => call($15, "load8_u", [58903]), 0);
-
-// memory_copy.wast:2918
-assert_return(() => call($15, "load8_u", [59102]), 0);
-
-// memory_copy.wast:2919
-assert_return(() => call($15, "load8_u", [59301]), 0);
-
-// memory_copy.wast:2920
-assert_return(() => call($15, "load8_u", [59500]), 0);
-
-// memory_copy.wast:2921
-assert_return(() => call($15, "load8_u", [59699]), 0);
-
-// memory_copy.wast:2922
-assert_return(() => call($15, "load8_u", [59898]), 0);
-
-// memory_copy.wast:2923
-assert_return(() => call($15, "load8_u", [60097]), 0);
-
-// memory_copy.wast:2924
-assert_return(() => call($15, "load8_u", [60296]), 0);
-
-// memory_copy.wast:2925
-assert_return(() => call($15, "load8_u", [60495]), 0);
-
-// memory_copy.wast:2926
-assert_return(() => call($15, "load8_u", [60694]), 0);
-
-// memory_copy.wast:2927
-assert_return(() => call($15, "load8_u", [60893]), 0);
-
-// memory_copy.wast:2928
-assert_return(() => call($15, "load8_u", [61092]), 0);
-
-// memory_copy.wast:2929
-assert_return(() => call($15, "load8_u", [61291]), 0);
-
-// memory_copy.wast:2930
-assert_return(() => call($15, "load8_u", [61490]), 0);
-
-// memory_copy.wast:2931
-assert_return(() => call($15, "load8_u", [61689]), 0);
-
-// memory_copy.wast:2932
-assert_return(() => call($15, "load8_u", [61888]), 0);
-
-// memory_copy.wast:2933
-assert_return(() => call($15, "load8_u", [62087]), 0);
-
-// memory_copy.wast:2934
-assert_return(() => call($15, "load8_u", [62286]), 0);
-
-// memory_copy.wast:2935
-assert_return(() => call($15, "load8_u", [62485]), 0);
-
-// memory_copy.wast:2936
-assert_return(() => call($15, "load8_u", [62684]), 0);
-
-// memory_copy.wast:2937
-assert_return(() => call($15, "load8_u", [62883]), 0);
-
-// memory_copy.wast:2938
-assert_return(() => call($15, "load8_u", [63082]), 0);
-
-// memory_copy.wast:2939
-assert_return(() => call($15, "load8_u", [63281]), 0);
-
-// memory_copy.wast:2940
-assert_return(() => call($15, "load8_u", [63480]), 0);
-
-// memory_copy.wast:2941
-assert_return(() => call($15, "load8_u", [63679]), 0);
-
-// memory_copy.wast:2942
-assert_return(() => call($15, "load8_u", [63878]), 0);
-
-// memory_copy.wast:2943
-assert_return(() => call($15, "load8_u", [64077]), 0);
-
-// memory_copy.wast:2944
-assert_return(() => call($15, "load8_u", [64276]), 0);
-
-// memory_copy.wast:2945
-assert_return(() => call($15, "load8_u", [64475]), 0);
-
-// memory_copy.wast:2946
-assert_return(() => call($15, "load8_u", [64674]), 0);
-
-// memory_copy.wast:2947
-assert_return(() => call($15, "load8_u", [64873]), 0);
-
-// memory_copy.wast:2948
-assert_return(() => call($15, "load8_u", [65072]), 0);
-
-// memory_copy.wast:2949
-assert_return(() => call($15, "load8_u", [65271]), 0);
-
-// memory_copy.wast:2950
-assert_return(() => call($15, "load8_u", [65470]), 0);
-
-// memory_copy.wast:2951
-assert_return(() => call($15, "load8_u", [65506]), 0);
-
-// memory_copy.wast:2952
-assert_return(() => call($15, "load8_u", [65507]), 1);
-
-// memory_copy.wast:2953
-assert_return(() => call($15, "load8_u", [65508]), 2);
-
-// memory_copy.wast:2954
-assert_return(() => call($15, "load8_u", [65509]), 3);
-
-// memory_copy.wast:2955
-assert_return(() => call($15, "load8_u", [65510]), 4);
-
-// memory_copy.wast:2956
-assert_return(() => call($15, "load8_u", [65511]), 5);
-
-// memory_copy.wast:2957
-assert_return(() => call($15, "load8_u", [65512]), 6);
-
-// memory_copy.wast:2958
-assert_return(() => call($15, "load8_u", [65513]), 7);
-
-// memory_copy.wast:2959
-assert_return(() => call($15, "load8_u", [65514]), 8);
-
-// memory_copy.wast:2960
-assert_return(() => call($15, "load8_u", [65515]), 9);
-
-// memory_copy.wast:2961
-assert_return(() => call($15, "load8_u", [65516]), 10);
-
-// memory_copy.wast:2962
-assert_return(() => call($15, "load8_u", [65517]), 11);
-
-// memory_copy.wast:2963
-assert_return(() => call($15, "load8_u", [65518]), 12);
-
-// memory_copy.wast:2964
-assert_return(() => call($15, "load8_u", [65519]), 13);
-
-// memory_copy.wast:2965
-assert_return(() => call($15, "load8_u", [65520]), 14);
-
-// memory_copy.wast:2966
-assert_return(() => call($15, "load8_u", [65521]), 15);
-
-// memory_copy.wast:2967
-assert_return(() => call($15, "load8_u", [65522]), 16);
-
-// memory_copy.wast:2968
-assert_return(() => call($15, "load8_u", [65523]), 17);
-
-// memory_copy.wast:2969
-assert_return(() => call($15, "load8_u", [65524]), 18);
-
-// memory_copy.wast:2970
-assert_return(() => call($15, "load8_u", [65525]), 19);
-
-// memory_copy.wast:2972
-let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:2980
-assert_trap(() => call($16, "run", [65506, 65516, 40]));
-
-// memory_copy.wast:2983
-assert_return(() => call($16, "load8_u", [198]), 0);
-
-// memory_copy.wast:2984
-assert_return(() => call($16, "load8_u", [397]), 0);
-
-// memory_copy.wast:2985
-assert_return(() => call($16, "load8_u", [596]), 0);
-
-// memory_copy.wast:2986
-assert_return(() => call($16, "load8_u", [795]), 0);
-
-// memory_copy.wast:2987
-assert_return(() => call($16, "load8_u", [994]), 0);
-
-// memory_copy.wast:2988
-assert_return(() => call($16, "load8_u", [1193]), 0);
-
-// memory_copy.wast:2989
-assert_return(() => call($16, "load8_u", [1392]), 0);
-
-// memory_copy.wast:2990
-assert_return(() => call($16, "load8_u", [1591]), 0);
-
-// memory_copy.wast:2991
-assert_return(() => call($16, "load8_u", [1790]), 0);
-
-// memory_copy.wast:2992
-assert_return(() => call($16, "load8_u", [1989]), 0);
-
-// memory_copy.wast:2993
-assert_return(() => call($16, "load8_u", [2188]), 0);
-
-// memory_copy.wast:2994
-assert_return(() => call($16, "load8_u", [2387]), 0);
-
-// memory_copy.wast:2995
-assert_return(() => call($16, "load8_u", [2586]), 0);
-
-// memory_copy.wast:2996
-assert_return(() => call($16, "load8_u", [2785]), 0);
-
-// memory_copy.wast:2997
-assert_return(() => call($16, "load8_u", [2984]), 0);
-
-// memory_copy.wast:2998
-assert_return(() => call($16, "load8_u", [3183]), 0);
-
-// memory_copy.wast:2999
-assert_return(() => call($16, "load8_u", [3382]), 0);
-
-// memory_copy.wast:3000
-assert_return(() => call($16, "load8_u", [3581]), 0);
-
-// memory_copy.wast:3001
-assert_return(() => call($16, "load8_u", [3780]), 0);
-
-// memory_copy.wast:3002
-assert_return(() => call($16, "load8_u", [3979]), 0);
-
-// memory_copy.wast:3003
-assert_return(() => call($16, "load8_u", [4178]), 0);
-
-// memory_copy.wast:3004
-assert_return(() => call($16, "load8_u", [4377]), 0);
-
-// memory_copy.wast:3005
-assert_return(() => call($16, "load8_u", [4576]), 0);
-
-// memory_copy.wast:3006
-assert_return(() => call($16, "load8_u", [4775]), 0);
-
-// memory_copy.wast:3007
-assert_return(() => call($16, "load8_u", [4974]), 0);
-
-// memory_copy.wast:3008
-assert_return(() => call($16, "load8_u", [5173]), 0);
-
-// memory_copy.wast:3009
-assert_return(() => call($16, "load8_u", [5372]), 0);
-
-// memory_copy.wast:3010
-assert_return(() => call($16, "load8_u", [5571]), 0);
-
-// memory_copy.wast:3011
-assert_return(() => call($16, "load8_u", [5770]), 0);
-
-// memory_copy.wast:3012
-assert_return(() => call($16, "load8_u", [5969]), 0);
-
-// memory_copy.wast:3013
-assert_return(() => call($16, "load8_u", [6168]), 0);
-
-// memory_copy.wast:3014
-assert_return(() => call($16, "load8_u", [6367]), 0);
-
-// memory_copy.wast:3015
-assert_return(() => call($16, "load8_u", [6566]), 0);
-
-// memory_copy.wast:3016
-assert_return(() => call($16, "load8_u", [6765]), 0);
-
-// memory_copy.wast:3017
-assert_return(() => call($16, "load8_u", [6964]), 0);
-
-// memory_copy.wast:3018
-assert_return(() => call($16, "load8_u", [7163]), 0);
-
-// memory_copy.wast:3019
-assert_return(() => call($16, "load8_u", [7362]), 0);
-
-// memory_copy.wast:3020
-assert_return(() => call($16, "load8_u", [7561]), 0);
-
-// memory_copy.wast:3021
-assert_return(() => call($16, "load8_u", [7760]), 0);
-
-// memory_copy.wast:3022
-assert_return(() => call($16, "load8_u", [7959]), 0);
-
-// memory_copy.wast:3023
-assert_return(() => call($16, "load8_u", [8158]), 0);
-
-// memory_copy.wast:3024
-assert_return(() => call($16, "load8_u", [8357]), 0);
-
-// memory_copy.wast:3025
-assert_return(() => call($16, "load8_u", [8556]), 0);
-
-// memory_copy.wast:3026
-assert_return(() => call($16, "load8_u", [8755]), 0);
-
-// memory_copy.wast:3027
-assert_return(() => call($16, "load8_u", [8954]), 0);
-
-// memory_copy.wast:3028
-assert_return(() => call($16, "load8_u", [9153]), 0);
-
-// memory_copy.wast:3029
-assert_return(() => call($16, "load8_u", [9352]), 0);
-
-// memory_copy.wast:3030
-assert_return(() => call($16, "load8_u", [9551]), 0);
-
-// memory_copy.wast:3031
-assert_return(() => call($16, "load8_u", [9750]), 0);
-
-// memory_copy.wast:3032
-assert_return(() => call($16, "load8_u", [9949]), 0);
-
-// memory_copy.wast:3033
-assert_return(() => call($16, "load8_u", [10148]), 0);
-
-// memory_copy.wast:3034
-assert_return(() => call($16, "load8_u", [10347]), 0);
-
-// memory_copy.wast:3035
-assert_return(() => call($16, "load8_u", [10546]), 0);
-
-// memory_copy.wast:3036
-assert_return(() => call($16, "load8_u", [10745]), 0);
-
-// memory_copy.wast:3037
-assert_return(() => call($16, "load8_u", [10944]), 0);
-
-// memory_copy.wast:3038
-assert_return(() => call($16, "load8_u", [11143]), 0);
-
-// memory_copy.wast:3039
-assert_return(() => call($16, "load8_u", [11342]), 0);
-
-// memory_copy.wast:3040
-assert_return(() => call($16, "load8_u", [11541]), 0);
-
-// memory_copy.wast:3041
-assert_return(() => call($16, "load8_u", [11740]), 0);
-
-// memory_copy.wast:3042
-assert_return(() => call($16, "load8_u", [11939]), 0);
-
-// memory_copy.wast:3043
-assert_return(() => call($16, "load8_u", [12138]), 0);
-
-// memory_copy.wast:3044
-assert_return(() => call($16, "load8_u", [12337]), 0);
-
-// memory_copy.wast:3045
-assert_return(() => call($16, "load8_u", [12536]), 0);
-
-// memory_copy.wast:3046
-assert_return(() => call($16, "load8_u", [12735]), 0);
-
-// memory_copy.wast:3047
-assert_return(() => call($16, "load8_u", [12934]), 0);
-
-// memory_copy.wast:3048
-assert_return(() => call($16, "load8_u", [13133]), 0);
-
-// memory_copy.wast:3049
-assert_return(() => call($16, "load8_u", [13332]), 0);
-
-// memory_copy.wast:3050
-assert_return(() => call($16, "load8_u", [13531]), 0);
-
-// memory_copy.wast:3051
-assert_return(() => call($16, "load8_u", [13730]), 0);
-
-// memory_copy.wast:3052
-assert_return(() => call($16, "load8_u", [13929]), 0);
-
-// memory_copy.wast:3053
-assert_return(() => call($16, "load8_u", [14128]), 0);
-
-// memory_copy.wast:3054
-assert_return(() => call($16, "load8_u", [14327]), 0);
-
-// memory_copy.wast:3055
-assert_return(() => call($16, "load8_u", [14526]), 0);
-
-// memory_copy.wast:3056
-assert_return(() => call($16, "load8_u", [14725]), 0);
-
-// memory_copy.wast:3057
-assert_return(() => call($16, "load8_u", [14924]), 0);
-
-// memory_copy.wast:3058
-assert_return(() => call($16, "load8_u", [15123]), 0);
-
-// memory_copy.wast:3059
-assert_return(() => call($16, "load8_u", [15322]), 0);
-
-// memory_copy.wast:3060
-assert_return(() => call($16, "load8_u", [15521]), 0);
-
-// memory_copy.wast:3061
-assert_return(() => call($16, "load8_u", [15720]), 0);
-
-// memory_copy.wast:3062
-assert_return(() => call($16, "load8_u", [15919]), 0);
-
-// memory_copy.wast:3063
-assert_return(() => call($16, "load8_u", [16118]), 0);
-
-// memory_copy.wast:3064
-assert_return(() => call($16, "load8_u", [16317]), 0);
-
-// memory_copy.wast:3065
-assert_return(() => call($16, "load8_u", [16516]), 0);
-
-// memory_copy.wast:3066
-assert_return(() => call($16, "load8_u", [16715]), 0);
-
-// memory_copy.wast:3067
-assert_return(() => call($16, "load8_u", [16914]), 0);
-
-// memory_copy.wast:3068
-assert_return(() => call($16, "load8_u", [17113]), 0);
-
-// memory_copy.wast:3069
-assert_return(() => call($16, "load8_u", [17312]), 0);
-
-// memory_copy.wast:3070
-assert_return(() => call($16, "load8_u", [17511]), 0);
-
-// memory_copy.wast:3071
-assert_return(() => call($16, "load8_u", [17710]), 0);
-
-// memory_copy.wast:3072
-assert_return(() => call($16, "load8_u", [17909]), 0);
-
-// memory_copy.wast:3073
-assert_return(() => call($16, "load8_u", [18108]), 0);
-
-// memory_copy.wast:3074
-assert_return(() => call($16, "load8_u", [18307]), 0);
-
-// memory_copy.wast:3075
-assert_return(() => call($16, "load8_u", [18506]), 0);
-
-// memory_copy.wast:3076
-assert_return(() => call($16, "load8_u", [18705]), 0);
-
-// memory_copy.wast:3077
-assert_return(() => call($16, "load8_u", [18904]), 0);
-
-// memory_copy.wast:3078
-assert_return(() => call($16, "load8_u", [19103]), 0);
-
-// memory_copy.wast:3079
-assert_return(() => call($16, "load8_u", [19302]), 0);
-
-// memory_copy.wast:3080
-assert_return(() => call($16, "load8_u", [19501]), 0);
-
-// memory_copy.wast:3081
-assert_return(() => call($16, "load8_u", [19700]), 0);
-
-// memory_copy.wast:3082
-assert_return(() => call($16, "load8_u", [19899]), 0);
-
-// memory_copy.wast:3083
-assert_return(() => call($16, "load8_u", [20098]), 0);
-
-// memory_copy.wast:3084
-assert_return(() => call($16, "load8_u", [20297]), 0);
-
-// memory_copy.wast:3085
-assert_return(() => call($16, "load8_u", [20496]), 0);
-
-// memory_copy.wast:3086
-assert_return(() => call($16, "load8_u", [20695]), 0);
-
-// memory_copy.wast:3087
-assert_return(() => call($16, "load8_u", [20894]), 0);
-
-// memory_copy.wast:3088
-assert_return(() => call($16, "load8_u", [21093]), 0);
-
-// memory_copy.wast:3089
-assert_return(() => call($16, "load8_u", [21292]), 0);
-
-// memory_copy.wast:3090
-assert_return(() => call($16, "load8_u", [21491]), 0);
-
-// memory_copy.wast:3091
-assert_return(() => call($16, "load8_u", [21690]), 0);
-
-// memory_copy.wast:3092
-assert_return(() => call($16, "load8_u", [21889]), 0);
-
-// memory_copy.wast:3093
-assert_return(() => call($16, "load8_u", [22088]), 0);
-
-// memory_copy.wast:3094
-assert_return(() => call($16, "load8_u", [22287]), 0);
-
-// memory_copy.wast:3095
-assert_return(() => call($16, "load8_u", [22486]), 0);
-
-// memory_copy.wast:3096
-assert_return(() => call($16, "load8_u", [22685]), 0);
-
-// memory_copy.wast:3097
-assert_return(() => call($16, "load8_u", [22884]), 0);
-
-// memory_copy.wast:3098
-assert_return(() => call($16, "load8_u", [23083]), 0);
-
-// memory_copy.wast:3099
-assert_return(() => call($16, "load8_u", [23282]), 0);
-
-// memory_copy.wast:3100
-assert_return(() => call($16, "load8_u", [23481]), 0);
-
-// memory_copy.wast:3101
-assert_return(() => call($16, "load8_u", [23680]), 0);
-
-// memory_copy.wast:3102
-assert_return(() => call($16, "load8_u", [23879]), 0);
-
-// memory_copy.wast:3103
-assert_return(() => call($16, "load8_u", [24078]), 0);
-
-// memory_copy.wast:3104
-assert_return(() => call($16, "load8_u", [24277]), 0);
-
-// memory_copy.wast:3105
-assert_return(() => call($16, "load8_u", [24476]), 0);
-
-// memory_copy.wast:3106
-assert_return(() => call($16, "load8_u", [24675]), 0);
-
-// memory_copy.wast:3107
-assert_return(() => call($16, "load8_u", [24874]), 0);
-
-// memory_copy.wast:3108
-assert_return(() => call($16, "load8_u", [25073]), 0);
-
-// memory_copy.wast:3109
-assert_return(() => call($16, "load8_u", [25272]), 0);
-
-// memory_copy.wast:3110
-assert_return(() => call($16, "load8_u", [25471]), 0);
-
-// memory_copy.wast:3111
-assert_return(() => call($16, "load8_u", [25670]), 0);
-
-// memory_copy.wast:3112
-assert_return(() => call($16, "load8_u", [25869]), 0);
-
-// memory_copy.wast:3113
-assert_return(() => call($16, "load8_u", [26068]), 0);
-
-// memory_copy.wast:3114
-assert_return(() => call($16, "load8_u", [26267]), 0);
-
-// memory_copy.wast:3115
-assert_return(() => call($16, "load8_u", [26466]), 0);
-
-// memory_copy.wast:3116
-assert_return(() => call($16, "load8_u", [26665]), 0);
-
-// memory_copy.wast:3117
-assert_return(() => call($16, "load8_u", [26864]), 0);
-
-// memory_copy.wast:3118
-assert_return(() => call($16, "load8_u", [27063]), 0);
-
-// memory_copy.wast:3119
-assert_return(() => call($16, "load8_u", [27262]), 0);
-
-// memory_copy.wast:3120
-assert_return(() => call($16, "load8_u", [27461]), 0);
-
-// memory_copy.wast:3121
-assert_return(() => call($16, "load8_u", [27660]), 0);
-
-// memory_copy.wast:3122
-assert_return(() => call($16, "load8_u", [27859]), 0);
-
-// memory_copy.wast:3123
-assert_return(() => call($16, "load8_u", [28058]), 0);
-
-// memory_copy.wast:3124
-assert_return(() => call($16, "load8_u", [28257]), 0);
-
-// memory_copy.wast:3125
-assert_return(() => call($16, "load8_u", [28456]), 0);
-
-// memory_copy.wast:3126
-assert_return(() => call($16, "load8_u", [28655]), 0);
-
-// memory_copy.wast:3127
-assert_return(() => call($16, "load8_u", [28854]), 0);
-
-// memory_copy.wast:3128
-assert_return(() => call($16, "load8_u", [29053]), 0);
-
-// memory_copy.wast:3129
-assert_return(() => call($16, "load8_u", [29252]), 0);
-
-// memory_copy.wast:3130
-assert_return(() => call($16, "load8_u", [29451]), 0);
-
-// memory_copy.wast:3131
-assert_return(() => call($16, "load8_u", [29650]), 0);
-
-// memory_copy.wast:3132
-assert_return(() => call($16, "load8_u", [29849]), 0);
-
-// memory_copy.wast:3133
-assert_return(() => call($16, "load8_u", [30048]), 0);
-
-// memory_copy.wast:3134
-assert_return(() => call($16, "load8_u", [30247]), 0);
-
-// memory_copy.wast:3135
-assert_return(() => call($16, "load8_u", [30446]), 0);
-
-// memory_copy.wast:3136
-assert_return(() => call($16, "load8_u", [30645]), 0);
-
-// memory_copy.wast:3137
-assert_return(() => call($16, "load8_u", [30844]), 0);
-
-// memory_copy.wast:3138
-assert_return(() => call($16, "load8_u", [31043]), 0);
-
-// memory_copy.wast:3139
-assert_return(() => call($16, "load8_u", [31242]), 0);
-
-// memory_copy.wast:3140
-assert_return(() => call($16, "load8_u", [31441]), 0);
-
-// memory_copy.wast:3141
-assert_return(() => call($16, "load8_u", [31640]), 0);
-
-// memory_copy.wast:3142
-assert_return(() => call($16, "load8_u", [31839]), 0);
-
-// memory_copy.wast:3143
-assert_return(() => call($16, "load8_u", [32038]), 0);
-
-// memory_copy.wast:3144
-assert_return(() => call($16, "load8_u", [32237]), 0);
-
-// memory_copy.wast:3145
-assert_return(() => call($16, "load8_u", [32436]), 0);
-
-// memory_copy.wast:3146
-assert_return(() => call($16, "load8_u", [32635]), 0);
-
-// memory_copy.wast:3147
-assert_return(() => call($16, "load8_u", [32834]), 0);
-
-// memory_copy.wast:3148
-assert_return(() => call($16, "load8_u", [33033]), 0);
-
-// memory_copy.wast:3149
-assert_return(() => call($16, "load8_u", [33232]), 0);
-
-// memory_copy.wast:3150
-assert_return(() => call($16, "load8_u", [33431]), 0);
-
-// memory_copy.wast:3151
-assert_return(() => call($16, "load8_u", [33630]), 0);
-
-// memory_copy.wast:3152
-assert_return(() => call($16, "load8_u", [33829]), 0);
-
-// memory_copy.wast:3153
-assert_return(() => call($16, "load8_u", [34028]), 0);
-
-// memory_copy.wast:3154
-assert_return(() => call($16, "load8_u", [34227]), 0);
-
-// memory_copy.wast:3155
-assert_return(() => call($16, "load8_u", [34426]), 0);
-
-// memory_copy.wast:3156
-assert_return(() => call($16, "load8_u", [34625]), 0);
-
-// memory_copy.wast:3157
-assert_return(() => call($16, "load8_u", [34824]), 0);
-
-// memory_copy.wast:3158
-assert_return(() => call($16, "load8_u", [35023]), 0);
-
-// memory_copy.wast:3159
-assert_return(() => call($16, "load8_u", [35222]), 0);
-
-// memory_copy.wast:3160
-assert_return(() => call($16, "load8_u", [35421]), 0);
-
-// memory_copy.wast:3161
-assert_return(() => call($16, "load8_u", [35620]), 0);
-
-// memory_copy.wast:3162
-assert_return(() => call($16, "load8_u", [35819]), 0);
-
-// memory_copy.wast:3163
-assert_return(() => call($16, "load8_u", [36018]), 0);
-
-// memory_copy.wast:3164
-assert_return(() => call($16, "load8_u", [36217]), 0);
-
-// memory_copy.wast:3165
-assert_return(() => call($16, "load8_u", [36416]), 0);
-
-// memory_copy.wast:3166
-assert_return(() => call($16, "load8_u", [36615]), 0);
-
-// memory_copy.wast:3167
-assert_return(() => call($16, "load8_u", [36814]), 0);
-
-// memory_copy.wast:3168
-assert_return(() => call($16, "load8_u", [37013]), 0);
-
-// memory_copy.wast:3169
-assert_return(() => call($16, "load8_u", [37212]), 0);
-
-// memory_copy.wast:3170
-assert_return(() => call($16, "load8_u", [37411]), 0);
-
-// memory_copy.wast:3171
-assert_return(() => call($16, "load8_u", [37610]), 0);
-
-// memory_copy.wast:3172
-assert_return(() => call($16, "load8_u", [37809]), 0);
-
-// memory_copy.wast:3173
-assert_return(() => call($16, "load8_u", [38008]), 0);
-
-// memory_copy.wast:3174
-assert_return(() => call($16, "load8_u", [38207]), 0);
-
-// memory_copy.wast:3175
-assert_return(() => call($16, "load8_u", [38406]), 0);
-
-// memory_copy.wast:3176
-assert_return(() => call($16, "load8_u", [38605]), 0);
-
-// memory_copy.wast:3177
-assert_return(() => call($16, "load8_u", [38804]), 0);
-
-// memory_copy.wast:3178
-assert_return(() => call($16, "load8_u", [39003]), 0);
-
-// memory_copy.wast:3179
-assert_return(() => call($16, "load8_u", [39202]), 0);
-
-// memory_copy.wast:3180
-assert_return(() => call($16, "load8_u", [39401]), 0);
-
-// memory_copy.wast:3181
-assert_return(() => call($16, "load8_u", [39600]), 0);
-
-// memory_copy.wast:3182
-assert_return(() => call($16, "load8_u", [39799]), 0);
-
-// memory_copy.wast:3183
-assert_return(() => call($16, "load8_u", [39998]), 0);
-
-// memory_copy.wast:3184
-assert_return(() => call($16, "load8_u", [40197]), 0);
-
-// memory_copy.wast:3185
-assert_return(() => call($16, "load8_u", [40396]), 0);
-
-// memory_copy.wast:3186
-assert_return(() => call($16, "load8_u", [40595]), 0);
-
-// memory_copy.wast:3187
-assert_return(() => call($16, "load8_u", [40794]), 0);
-
-// memory_copy.wast:3188
-assert_return(() => call($16, "load8_u", [40993]), 0);
-
-// memory_copy.wast:3189
-assert_return(() => call($16, "load8_u", [41192]), 0);
-
-// memory_copy.wast:3190
-assert_return(() => call($16, "load8_u", [41391]), 0);
-
-// memory_copy.wast:3191
-assert_return(() => call($16, "load8_u", [41590]), 0);
-
-// memory_copy.wast:3192
-assert_return(() => call($16, "load8_u", [41789]), 0);
-
-// memory_copy.wast:3193
-assert_return(() => call($16, "load8_u", [41988]), 0);
-
-// memory_copy.wast:3194
-assert_return(() => call($16, "load8_u", [42187]), 0);
-
-// memory_copy.wast:3195
-assert_return(() => call($16, "load8_u", [42386]), 0);
-
-// memory_copy.wast:3196
-assert_return(() => call($16, "load8_u", [42585]), 0);
-
-// memory_copy.wast:3197
-assert_return(() => call($16, "load8_u", [42784]), 0);
-
-// memory_copy.wast:3198
-assert_return(() => call($16, "load8_u", [42983]), 0);
-
-// memory_copy.wast:3199
-assert_return(() => call($16, "load8_u", [43182]), 0);
-
-// memory_copy.wast:3200
-assert_return(() => call($16, "load8_u", [43381]), 0);
-
-// memory_copy.wast:3201
-assert_return(() => call($16, "load8_u", [43580]), 0);
-
-// memory_copy.wast:3202
-assert_return(() => call($16, "load8_u", [43779]), 0);
-
-// memory_copy.wast:3203
-assert_return(() => call($16, "load8_u", [43978]), 0);
-
-// memory_copy.wast:3204
-assert_return(() => call($16, "load8_u", [44177]), 0);
-
-// memory_copy.wast:3205
-assert_return(() => call($16, "load8_u", [44376]), 0);
-
-// memory_copy.wast:3206
-assert_return(() => call($16, "load8_u", [44575]), 0);
-
-// memory_copy.wast:3207
-assert_return(() => call($16, "load8_u", [44774]), 0);
-
-// memory_copy.wast:3208
-assert_return(() => call($16, "load8_u", [44973]), 0);
-
-// memory_copy.wast:3209
-assert_return(() => call($16, "load8_u", [45172]), 0);
-
-// memory_copy.wast:3210
-assert_return(() => call($16, "load8_u", [45371]), 0);
-
-// memory_copy.wast:3211
-assert_return(() => call($16, "load8_u", [45570]), 0);
-
-// memory_copy.wast:3212
-assert_return(() => call($16, "load8_u", [45769]), 0);
-
-// memory_copy.wast:3213
-assert_return(() => call($16, "load8_u", [45968]), 0);
-
-// memory_copy.wast:3214
-assert_return(() => call($16, "load8_u", [46167]), 0);
-
-// memory_copy.wast:3215
-assert_return(() => call($16, "load8_u", [46366]), 0);
-
-// memory_copy.wast:3216
-assert_return(() => call($16, "load8_u", [46565]), 0);
-
-// memory_copy.wast:3217
-assert_return(() => call($16, "load8_u", [46764]), 0);
-
-// memory_copy.wast:3218
-assert_return(() => call($16, "load8_u", [46963]), 0);
-
-// memory_copy.wast:3219
-assert_return(() => call($16, "load8_u", [47162]), 0);
-
-// memory_copy.wast:3220
-assert_return(() => call($16, "load8_u", [47361]), 0);
-
-// memory_copy.wast:3221
-assert_return(() => call($16, "load8_u", [47560]), 0);
-
-// memory_copy.wast:3222
-assert_return(() => call($16, "load8_u", [47759]), 0);
-
-// memory_copy.wast:3223
-assert_return(() => call($16, "load8_u", [47958]), 0);
-
-// memory_copy.wast:3224
-assert_return(() => call($16, "load8_u", [48157]), 0);
-
-// memory_copy.wast:3225
-assert_return(() => call($16, "load8_u", [48356]), 0);
-
-// memory_copy.wast:3226
-assert_return(() => call($16, "load8_u", [48555]), 0);
-
-// memory_copy.wast:3227
-assert_return(() => call($16, "load8_u", [48754]), 0);
-
-// memory_copy.wast:3228
-assert_return(() => call($16, "load8_u", [48953]), 0);
-
-// memory_copy.wast:3229
-assert_return(() => call($16, "load8_u", [49152]), 0);
-
-// memory_copy.wast:3230
-assert_return(() => call($16, "load8_u", [49351]), 0);
-
-// memory_copy.wast:3231
-assert_return(() => call($16, "load8_u", [49550]), 0);
-
-// memory_copy.wast:3232
-assert_return(() => call($16, "load8_u", [49749]), 0);
-
-// memory_copy.wast:3233
-assert_return(() => call($16, "load8_u", [49948]), 0);
-
-// memory_copy.wast:3234
-assert_return(() => call($16, "load8_u", [50147]), 0);
-
-// memory_copy.wast:3235
-assert_return(() => call($16, "load8_u", [50346]), 0);
-
-// memory_copy.wast:3236
-assert_return(() => call($16, "load8_u", [50545]), 0);
-
-// memory_copy.wast:3237
-assert_return(() => call($16, "load8_u", [50744]), 0);
-
-// memory_copy.wast:3238
-assert_return(() => call($16, "load8_u", [50943]), 0);
-
-// memory_copy.wast:3239
-assert_return(() => call($16, "load8_u", [51142]), 0);
-
-// memory_copy.wast:3240
-assert_return(() => call($16, "load8_u", [51341]), 0);
-
-// memory_copy.wast:3241
-assert_return(() => call($16, "load8_u", [51540]), 0);
-
-// memory_copy.wast:3242
-assert_return(() => call($16, "load8_u", [51739]), 0);
-
-// memory_copy.wast:3243
-assert_return(() => call($16, "load8_u", [51938]), 0);
-
-// memory_copy.wast:3244
-assert_return(() => call($16, "load8_u", [52137]), 0);
-
-// memory_copy.wast:3245
-assert_return(() => call($16, "load8_u", [52336]), 0);
-
-// memory_copy.wast:3246
-assert_return(() => call($16, "load8_u", [52535]), 0);
-
-// memory_copy.wast:3247
-assert_return(() => call($16, "load8_u", [52734]), 0);
-
-// memory_copy.wast:3248
-assert_return(() => call($16, "load8_u", [52933]), 0);
-
-// memory_copy.wast:3249
-assert_return(() => call($16, "load8_u", [53132]), 0);
-
-// memory_copy.wast:3250
-assert_return(() => call($16, "load8_u", [53331]), 0);
-
-// memory_copy.wast:3251
-assert_return(() => call($16, "load8_u", [53530]), 0);
-
-// memory_copy.wast:3252
-assert_return(() => call($16, "load8_u", [53729]), 0);
-
-// memory_copy.wast:3253
-assert_return(() => call($16, "load8_u", [53928]), 0);
-
-// memory_copy.wast:3254
-assert_return(() => call($16, "load8_u", [54127]), 0);
-
-// memory_copy.wast:3255
-assert_return(() => call($16, "load8_u", [54326]), 0);
-
-// memory_copy.wast:3256
-assert_return(() => call($16, "load8_u", [54525]), 0);
-
-// memory_copy.wast:3257
-assert_return(() => call($16, "load8_u", [54724]), 0);
-
-// memory_copy.wast:3258
-assert_return(() => call($16, "load8_u", [54923]), 0);
-
-// memory_copy.wast:3259
-assert_return(() => call($16, "load8_u", [55122]), 0);
-
-// memory_copy.wast:3260
-assert_return(() => call($16, "load8_u", [55321]), 0);
-
-// memory_copy.wast:3261
-assert_return(() => call($16, "load8_u", [55520]), 0);
-
-// memory_copy.wast:3262
-assert_return(() => call($16, "load8_u", [55719]), 0);
-
-// memory_copy.wast:3263
-assert_return(() => call($16, "load8_u", [55918]), 0);
-
-// memory_copy.wast:3264
-assert_return(() => call($16, "load8_u", [56117]), 0);
-
-// memory_copy.wast:3265
-assert_return(() => call($16, "load8_u", [56316]), 0);
-
-// memory_copy.wast:3266
-assert_return(() => call($16, "load8_u", [56515]), 0);
-
-// memory_copy.wast:3267
-assert_return(() => call($16, "load8_u", [56714]), 0);
-
-// memory_copy.wast:3268
-assert_return(() => call($16, "load8_u", [56913]), 0);
-
-// memory_copy.wast:3269
-assert_return(() => call($16, "load8_u", [57112]), 0);
-
-// memory_copy.wast:3270
-assert_return(() => call($16, "load8_u", [57311]), 0);
-
-// memory_copy.wast:3271
-assert_return(() => call($16, "load8_u", [57510]), 0);
-
-// memory_copy.wast:3272
-assert_return(() => call($16, "load8_u", [57709]), 0);
-
-// memory_copy.wast:3273
-assert_return(() => call($16, "load8_u", [57908]), 0);
-
-// memory_copy.wast:3274
-assert_return(() => call($16, "load8_u", [58107]), 0);
-
-// memory_copy.wast:3275
-assert_return(() => call($16, "load8_u", [58306]), 0);
-
-// memory_copy.wast:3276
-assert_return(() => call($16, "load8_u", [58505]), 0);
-
-// memory_copy.wast:3277
-assert_return(() => call($16, "load8_u", [58704]), 0);
-
-// memory_copy.wast:3278
-assert_return(() => call($16, "load8_u", [58903]), 0);
-
-// memory_copy.wast:3279
-assert_return(() => call($16, "load8_u", [59102]), 0);
-
-// memory_copy.wast:3280
-assert_return(() => call($16, "load8_u", [59301]), 0);
-
-// memory_copy.wast:3281
-assert_return(() => call($16, "load8_u", [59500]), 0);
-
-// memory_copy.wast:3282
-assert_return(() => call($16, "load8_u", [59699]), 0);
-
-// memory_copy.wast:3283
-assert_return(() => call($16, "load8_u", [59898]), 0);
-
-// memory_copy.wast:3284
-assert_return(() => call($16, "load8_u", [60097]), 0);
-
-// memory_copy.wast:3285
-assert_return(() => call($16, "load8_u", [60296]), 0);
-
-// memory_copy.wast:3286
-assert_return(() => call($16, "load8_u", [60495]), 0);
-
-// memory_copy.wast:3287
-assert_return(() => call($16, "load8_u", [60694]), 0);
-
-// memory_copy.wast:3288
-assert_return(() => call($16, "load8_u", [60893]), 0);
-
-// memory_copy.wast:3289
-assert_return(() => call($16, "load8_u", [61092]), 0);
-
-// memory_copy.wast:3290
-assert_return(() => call($16, "load8_u", [61291]), 0);
-
-// memory_copy.wast:3291
-assert_return(() => call($16, "load8_u", [61490]), 0);
-
-// memory_copy.wast:3292
-assert_return(() => call($16, "load8_u", [61689]), 0);
-
-// memory_copy.wast:3293
-assert_return(() => call($16, "load8_u", [61888]), 0);
-
-// memory_copy.wast:3294
-assert_return(() => call($16, "load8_u", [62087]), 0);
-
-// memory_copy.wast:3295
-assert_return(() => call($16, "load8_u", [62286]), 0);
-
-// memory_copy.wast:3296
-assert_return(() => call($16, "load8_u", [62485]), 0);
-
-// memory_copy.wast:3297
-assert_return(() => call($16, "load8_u", [62684]), 0);
-
-// memory_copy.wast:3298
-assert_return(() => call($16, "load8_u", [62883]), 0);
-
-// memory_copy.wast:3299
-assert_return(() => call($16, "load8_u", [63082]), 0);
-
-// memory_copy.wast:3300
-assert_return(() => call($16, "load8_u", [63281]), 0);
-
-// memory_copy.wast:3301
-assert_return(() => call($16, "load8_u", [63480]), 0);
-
-// memory_copy.wast:3302
-assert_return(() => call($16, "load8_u", [63679]), 0);
-
-// memory_copy.wast:3303
-assert_return(() => call($16, "load8_u", [63878]), 0);
-
-// memory_copy.wast:3304
-assert_return(() => call($16, "load8_u", [64077]), 0);
-
-// memory_copy.wast:3305
-assert_return(() => call($16, "load8_u", [64276]), 0);
-
-// memory_copy.wast:3306
-assert_return(() => call($16, "load8_u", [64475]), 0);
-
-// memory_copy.wast:3307
-assert_return(() => call($16, "load8_u", [64674]), 0);
-
-// memory_copy.wast:3308
-assert_return(() => call($16, "load8_u", [64873]), 0);
-
-// memory_copy.wast:3309
-assert_return(() => call($16, "load8_u", [65072]), 0);
-
-// memory_copy.wast:3310
-assert_return(() => call($16, "load8_u", [65271]), 0);
-
-// memory_copy.wast:3311
-assert_return(() => call($16, "load8_u", [65470]), 0);
-
-// memory_copy.wast:3312
-assert_return(() => call($16, "load8_u", [65506]), 0);
-
-// memory_copy.wast:3313
-assert_return(() => call($16, "load8_u", [65507]), 1);
-
-// memory_copy.wast:3314
-assert_return(() => call($16, "load8_u", [65508]), 2);
-
-// memory_copy.wast:3315
-assert_return(() => call($16, "load8_u", [65509]), 3);
-
-// memory_copy.wast:3316
-assert_return(() => call($16, "load8_u", [65510]), 4);
-
-// memory_copy.wast:3317
-assert_return(() => call($16, "load8_u", [65511]), 5);
-
-// memory_copy.wast:3318
-assert_return(() => call($16, "load8_u", [65512]), 6);
-
-// memory_copy.wast:3319
-assert_return(() => call($16, "load8_u", [65513]), 7);
-
-// memory_copy.wast:3320
-assert_return(() => call($16, "load8_u", [65514]), 8);
-
-// memory_copy.wast:3321
-assert_return(() => call($16, "load8_u", [65515]), 9);
-
-// memory_copy.wast:3322
-assert_return(() => call($16, "load8_u", [65516]), 10);
-
-// memory_copy.wast:3323
-assert_return(() => call($16, "load8_u", [65517]), 11);
-
-// memory_copy.wast:3324
-assert_return(() => call($16, "load8_u", [65518]), 12);
-
-// memory_copy.wast:3325
-assert_return(() => call($16, "load8_u", [65519]), 13);
-
-// memory_copy.wast:3326
-assert_return(() => call($16, "load8_u", [65520]), 14);
-
-// memory_copy.wast:3327
-assert_return(() => call($16, "load8_u", [65521]), 15);
-
-// memory_copy.wast:3328
-assert_return(() => call($16, "load8_u", [65522]), 16);
-
-// memory_copy.wast:3329
-assert_return(() => call($16, "load8_u", [65523]), 17);
-
-// memory_copy.wast:3330
-assert_return(() => call($16, "load8_u", [65524]), 18);
-
-// memory_copy.wast:3331
-assert_return(() => call($16, "load8_u", [65525]), 19);
-
-// memory_copy.wast:3332
-assert_return(() => call($16, "load8_u", [65526]), 10);
-
-// memory_copy.wast:3333
-assert_return(() => call($16, "load8_u", [65527]), 11);
-
-// memory_copy.wast:3334
-assert_return(() => call($16, "load8_u", [65528]), 12);
-
-// memory_copy.wast:3335
-assert_return(() => call($16, "load8_u", [65529]), 13);
-
-// memory_copy.wast:3336
-assert_return(() => call($16, "load8_u", [65530]), 14);
-
-// memory_copy.wast:3337
-assert_return(() => call($16, "load8_u", [65531]), 15);
-
-// memory_copy.wast:3338
-assert_return(() => call($16, "load8_u", [65532]), 16);
-
-// memory_copy.wast:3339
-assert_return(() => call($16, "load8_u", [65533]), 17);
-
-// memory_copy.wast:3340
-assert_return(() => call($16, "load8_u", [65534]), 18);
-
-// memory_copy.wast:3341
-assert_return(() => call($16, "load8_u", [65535]), 19);
-
-// memory_copy.wast:3343
-let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:3351
-assert_trap(() => call($17, "run", [65516, 65516, 40]));
-
-// memory_copy.wast:3354
-assert_return(() => call($17, "load8_u", [198]), 0);
-
-// memory_copy.wast:3355
-assert_return(() => call($17, "load8_u", [397]), 0);
-
-// memory_copy.wast:3356
-assert_return(() => call($17, "load8_u", [596]), 0);
-
-// memory_copy.wast:3357
-assert_return(() => call($17, "load8_u", [795]), 0);
-
-// memory_copy.wast:3358
-assert_return(() => call($17, "load8_u", [994]), 0);
-
-// memory_copy.wast:3359
-assert_return(() => call($17, "load8_u", [1193]), 0);
-
-// memory_copy.wast:3360
-assert_return(() => call($17, "load8_u", [1392]), 0);
-
-// memory_copy.wast:3361
-assert_return(() => call($17, "load8_u", [1591]), 0);
-
-// memory_copy.wast:3362
-assert_return(() => call($17, "load8_u", [1790]), 0);
-
-// memory_copy.wast:3363
-assert_return(() => call($17, "load8_u", [1989]), 0);
-
-// memory_copy.wast:3364
-assert_return(() => call($17, "load8_u", [2188]), 0);
-
-// memory_copy.wast:3365
-assert_return(() => call($17, "load8_u", [2387]), 0);
-
-// memory_copy.wast:3366
-assert_return(() => call($17, "load8_u", [2586]), 0);
-
-// memory_copy.wast:3367
-assert_return(() => call($17, "load8_u", [2785]), 0);
-
-// memory_copy.wast:3368
-assert_return(() => call($17, "load8_u", [2984]), 0);
-
-// memory_copy.wast:3369
-assert_return(() => call($17, "load8_u", [3183]), 0);
-
-// memory_copy.wast:3370
-assert_return(() => call($17, "load8_u", [3382]), 0);
-
-// memory_copy.wast:3371
-assert_return(() => call($17, "load8_u", [3581]), 0);
-
-// memory_copy.wast:3372
-assert_return(() => call($17, "load8_u", [3780]), 0);
-
-// memory_copy.wast:3373
-assert_return(() => call($17, "load8_u", [3979]), 0);
-
-// memory_copy.wast:3374
-assert_return(() => call($17, "load8_u", [4178]), 0);
-
-// memory_copy.wast:3375
-assert_return(() => call($17, "load8_u", [4377]), 0);
-
-// memory_copy.wast:3376
-assert_return(() => call($17, "load8_u", [4576]), 0);
-
-// memory_copy.wast:3377
-assert_return(() => call($17, "load8_u", [4775]), 0);
-
-// memory_copy.wast:3378
-assert_return(() => call($17, "load8_u", [4974]), 0);
-
-// memory_copy.wast:3379
-assert_return(() => call($17, "load8_u", [5173]), 0);
-
-// memory_copy.wast:3380
-assert_return(() => call($17, "load8_u", [5372]), 0);
-
-// memory_copy.wast:3381
-assert_return(() => call($17, "load8_u", [5571]), 0);
-
-// memory_copy.wast:3382
-assert_return(() => call($17, "load8_u", [5770]), 0);
-
-// memory_copy.wast:3383
-assert_return(() => call($17, "load8_u", [5969]), 0);
-
-// memory_copy.wast:3384
-assert_return(() => call($17, "load8_u", [6168]), 0);
-
-// memory_copy.wast:3385
-assert_return(() => call($17, "load8_u", [6367]), 0);
-
-// memory_copy.wast:3386
-assert_return(() => call($17, "load8_u", [6566]), 0);
-
-// memory_copy.wast:3387
-assert_return(() => call($17, "load8_u", [6765]), 0);
-
-// memory_copy.wast:3388
-assert_return(() => call($17, "load8_u", [6964]), 0);
-
-// memory_copy.wast:3389
-assert_return(() => call($17, "load8_u", [7163]), 0);
-
-// memory_copy.wast:3390
-assert_return(() => call($17, "load8_u", [7362]), 0);
-
-// memory_copy.wast:3391
-assert_return(() => call($17, "load8_u", [7561]), 0);
-
-// memory_copy.wast:3392
-assert_return(() => call($17, "load8_u", [7760]), 0);
-
-// memory_copy.wast:3393
-assert_return(() => call($17, "load8_u", [7959]), 0);
-
-// memory_copy.wast:3394
-assert_return(() => call($17, "load8_u", [8158]), 0);
-
-// memory_copy.wast:3395
-assert_return(() => call($17, "load8_u", [8357]), 0);
-
-// memory_copy.wast:3396
-assert_return(() => call($17, "load8_u", [8556]), 0);
-
-// memory_copy.wast:3397
-assert_return(() => call($17, "load8_u", [8755]), 0);
-
-// memory_copy.wast:3398
-assert_return(() => call($17, "load8_u", [8954]), 0);
-
-// memory_copy.wast:3399
-assert_return(() => call($17, "load8_u", [9153]), 0);
-
-// memory_copy.wast:3400
-assert_return(() => call($17, "load8_u", [9352]), 0);
-
-// memory_copy.wast:3401
-assert_return(() => call($17, "load8_u", [9551]), 0);
-
-// memory_copy.wast:3402
-assert_return(() => call($17, "load8_u", [9750]), 0);
-
-// memory_copy.wast:3403
-assert_return(() => call($17, "load8_u", [9949]), 0);
-
-// memory_copy.wast:3404
-assert_return(() => call($17, "load8_u", [10148]), 0);
-
-// memory_copy.wast:3405
-assert_return(() => call($17, "load8_u", [10347]), 0);
-
-// memory_copy.wast:3406
-assert_return(() => call($17, "load8_u", [10546]), 0);
-
-// memory_copy.wast:3407
-assert_return(() => call($17, "load8_u", [10745]), 0);
-
-// memory_copy.wast:3408
-assert_return(() => call($17, "load8_u", [10944]), 0);
-
-// memory_copy.wast:3409
-assert_return(() => call($17, "load8_u", [11143]), 0);
-
-// memory_copy.wast:3410
-assert_return(() => call($17, "load8_u", [11342]), 0);
-
-// memory_copy.wast:3411
-assert_return(() => call($17, "load8_u", [11541]), 0);
-
-// memory_copy.wast:3412
-assert_return(() => call($17, "load8_u", [11740]), 0);
-
-// memory_copy.wast:3413
-assert_return(() => call($17, "load8_u", [11939]), 0);
-
-// memory_copy.wast:3414
-assert_return(() => call($17, "load8_u", [12138]), 0);
-
-// memory_copy.wast:3415
-assert_return(() => call($17, "load8_u", [12337]), 0);
-
-// memory_copy.wast:3416
-assert_return(() => call($17, "load8_u", [12536]), 0);
-
-// memory_copy.wast:3417
-assert_return(() => call($17, "load8_u", [12735]), 0);
-
-// memory_copy.wast:3418
-assert_return(() => call($17, "load8_u", [12934]), 0);
-
-// memory_copy.wast:3419
-assert_return(() => call($17, "load8_u", [13133]), 0);
-
-// memory_copy.wast:3420
-assert_return(() => call($17, "load8_u", [13332]), 0);
-
-// memory_copy.wast:3421
-assert_return(() => call($17, "load8_u", [13531]), 0);
-
-// memory_copy.wast:3422
-assert_return(() => call($17, "load8_u", [13730]), 0);
-
-// memory_copy.wast:3423
-assert_return(() => call($17, "load8_u", [13929]), 0);
-
-// memory_copy.wast:3424
-assert_return(() => call($17, "load8_u", [14128]), 0);
-
-// memory_copy.wast:3425
-assert_return(() => call($17, "load8_u", [14327]), 0);
-
-// memory_copy.wast:3426
-assert_return(() => call($17, "load8_u", [14526]), 0);
-
-// memory_copy.wast:3427
-assert_return(() => call($17, "load8_u", [14725]), 0);
-
-// memory_copy.wast:3428
-assert_return(() => call($17, "load8_u", [14924]), 0);
-
-// memory_copy.wast:3429
-assert_return(() => call($17, "load8_u", [15123]), 0);
-
-// memory_copy.wast:3430
-assert_return(() => call($17, "load8_u", [15322]), 0);
-
-// memory_copy.wast:3431
-assert_return(() => call($17, "load8_u", [15521]), 0);
-
-// memory_copy.wast:3432
-assert_return(() => call($17, "load8_u", [15720]), 0);
-
-// memory_copy.wast:3433
-assert_return(() => call($17, "load8_u", [15919]), 0);
-
-// memory_copy.wast:3434
-assert_return(() => call($17, "load8_u", [16118]), 0);
-
-// memory_copy.wast:3435
-assert_return(() => call($17, "load8_u", [16317]), 0);
-
-// memory_copy.wast:3436
-assert_return(() => call($17, "load8_u", [16516]), 0);
-
-// memory_copy.wast:3437
-assert_return(() => call($17, "load8_u", [16715]), 0);
-
-// memory_copy.wast:3438
-assert_return(() => call($17, "load8_u", [16914]), 0);
-
-// memory_copy.wast:3439
-assert_return(() => call($17, "load8_u", [17113]), 0);
-
-// memory_copy.wast:3440
-assert_return(() => call($17, "load8_u", [17312]), 0);
-
-// memory_copy.wast:3441
-assert_return(() => call($17, "load8_u", [17511]), 0);
-
-// memory_copy.wast:3442
-assert_return(() => call($17, "load8_u", [17710]), 0);
-
-// memory_copy.wast:3443
-assert_return(() => call($17, "load8_u", [17909]), 0);
-
-// memory_copy.wast:3444
-assert_return(() => call($17, "load8_u", [18108]), 0);
-
-// memory_copy.wast:3445
-assert_return(() => call($17, "load8_u", [18307]), 0);
-
-// memory_copy.wast:3446
-assert_return(() => call($17, "load8_u", [18506]), 0);
-
-// memory_copy.wast:3447
-assert_return(() => call($17, "load8_u", [18705]), 0);
-
-// memory_copy.wast:3448
-assert_return(() => call($17, "load8_u", [18904]), 0);
-
-// memory_copy.wast:3449
-assert_return(() => call($17, "load8_u", [19103]), 0);
-
-// memory_copy.wast:3450
-assert_return(() => call($17, "load8_u", [19302]), 0);
-
-// memory_copy.wast:3451
-assert_return(() => call($17, "load8_u", [19501]), 0);
-
-// memory_copy.wast:3452
-assert_return(() => call($17, "load8_u", [19700]), 0);
-
-// memory_copy.wast:3453
-assert_return(() => call($17, "load8_u", [19899]), 0);
-
-// memory_copy.wast:3454
-assert_return(() => call($17, "load8_u", [20098]), 0);
-
-// memory_copy.wast:3455
-assert_return(() => call($17, "load8_u", [20297]), 0);
-
-// memory_copy.wast:3456
-assert_return(() => call($17, "load8_u", [20496]), 0);
-
-// memory_copy.wast:3457
-assert_return(() => call($17, "load8_u", [20695]), 0);
-
-// memory_copy.wast:3458
-assert_return(() => call($17, "load8_u", [20894]), 0);
-
-// memory_copy.wast:3459
-assert_return(() => call($17, "load8_u", [21093]), 0);
-
-// memory_copy.wast:3460
-assert_return(() => call($17, "load8_u", [21292]), 0);
-
-// memory_copy.wast:3461
-assert_return(() => call($17, "load8_u", [21491]), 0);
-
-// memory_copy.wast:3462
-assert_return(() => call($17, "load8_u", [21690]), 0);
-
-// memory_copy.wast:3463
-assert_return(() => call($17, "load8_u", [21889]), 0);
-
-// memory_copy.wast:3464
-assert_return(() => call($17, "load8_u", [22088]), 0);
-
-// memory_copy.wast:3465
-assert_return(() => call($17, "load8_u", [22287]), 0);
-
-// memory_copy.wast:3466
-assert_return(() => call($17, "load8_u", [22486]), 0);
-
-// memory_copy.wast:3467
-assert_return(() => call($17, "load8_u", [22685]), 0);
-
-// memory_copy.wast:3468
-assert_return(() => call($17, "load8_u", [22884]), 0);
-
-// memory_copy.wast:3469
-assert_return(() => call($17, "load8_u", [23083]), 0);
-
-// memory_copy.wast:3470
-assert_return(() => call($17, "load8_u", [23282]), 0);
-
-// memory_copy.wast:3471
-assert_return(() => call($17, "load8_u", [23481]), 0);
-
-// memory_copy.wast:3472
-assert_return(() => call($17, "load8_u", [23680]), 0);
-
-// memory_copy.wast:3473
-assert_return(() => call($17, "load8_u", [23879]), 0);
-
-// memory_copy.wast:3474
-assert_return(() => call($17, "load8_u", [24078]), 0);
-
-// memory_copy.wast:3475
-assert_return(() => call($17, "load8_u", [24277]), 0);
-
-// memory_copy.wast:3476
-assert_return(() => call($17, "load8_u", [24476]), 0);
-
-// memory_copy.wast:3477
-assert_return(() => call($17, "load8_u", [24675]), 0);
-
-// memory_copy.wast:3478
-assert_return(() => call($17, "load8_u", [24874]), 0);
-
-// memory_copy.wast:3479
-assert_return(() => call($17, "load8_u", [25073]), 0);
-
-// memory_copy.wast:3480
-assert_return(() => call($17, "load8_u", [25272]), 0);
-
-// memory_copy.wast:3481
-assert_return(() => call($17, "load8_u", [25471]), 0);
-
-// memory_copy.wast:3482
-assert_return(() => call($17, "load8_u", [25670]), 0);
-
-// memory_copy.wast:3483
-assert_return(() => call($17, "load8_u", [25869]), 0);
-
-// memory_copy.wast:3484
-assert_return(() => call($17, "load8_u", [26068]), 0);
-
-// memory_copy.wast:3485
-assert_return(() => call($17, "load8_u", [26267]), 0);
-
-// memory_copy.wast:3486
-assert_return(() => call($17, "load8_u", [26466]), 0);
-
-// memory_copy.wast:3487
-assert_return(() => call($17, "load8_u", [26665]), 0);
-
-// memory_copy.wast:3488
-assert_return(() => call($17, "load8_u", [26864]), 0);
-
-// memory_copy.wast:3489
-assert_return(() => call($17, "load8_u", [27063]), 0);
-
-// memory_copy.wast:3490
-assert_return(() => call($17, "load8_u", [27262]), 0);
-
-// memory_copy.wast:3491
-assert_return(() => call($17, "load8_u", [27461]), 0);
-
-// memory_copy.wast:3492
-assert_return(() => call($17, "load8_u", [27660]), 0);
-
-// memory_copy.wast:3493
-assert_return(() => call($17, "load8_u", [27859]), 0);
-
-// memory_copy.wast:3494
-assert_return(() => call($17, "load8_u", [28058]), 0);
-
-// memory_copy.wast:3495
-assert_return(() => call($17, "load8_u", [28257]), 0);
-
-// memory_copy.wast:3496
-assert_return(() => call($17, "load8_u", [28456]), 0);
-
-// memory_copy.wast:3497
-assert_return(() => call($17, "load8_u", [28655]), 0);
-
-// memory_copy.wast:3498
-assert_return(() => call($17, "load8_u", [28854]), 0);
-
-// memory_copy.wast:3499
-assert_return(() => call($17, "load8_u", [29053]), 0);
-
-// memory_copy.wast:3500
-assert_return(() => call($17, "load8_u", [29252]), 0);
-
-// memory_copy.wast:3501
-assert_return(() => call($17, "load8_u", [29451]), 0);
-
-// memory_copy.wast:3502
-assert_return(() => call($17, "load8_u", [29650]), 0);
-
-// memory_copy.wast:3503
-assert_return(() => call($17, "load8_u", [29849]), 0);
-
-// memory_copy.wast:3504
-assert_return(() => call($17, "load8_u", [30048]), 0);
-
-// memory_copy.wast:3505
-assert_return(() => call($17, "load8_u", [30247]), 0);
-
-// memory_copy.wast:3506
-assert_return(() => call($17, "load8_u", [30446]), 0);
-
-// memory_copy.wast:3507
-assert_return(() => call($17, "load8_u", [30645]), 0);
-
-// memory_copy.wast:3508
-assert_return(() => call($17, "load8_u", [30844]), 0);
-
-// memory_copy.wast:3509
-assert_return(() => call($17, "load8_u", [31043]), 0);
-
-// memory_copy.wast:3510
-assert_return(() => call($17, "load8_u", [31242]), 0);
-
-// memory_copy.wast:3511
-assert_return(() => call($17, "load8_u", [31441]), 0);
-
-// memory_copy.wast:3512
-assert_return(() => call($17, "load8_u", [31640]), 0);
-
-// memory_copy.wast:3513
-assert_return(() => call($17, "load8_u", [31839]), 0);
-
-// memory_copy.wast:3514
-assert_return(() => call($17, "load8_u", [32038]), 0);
-
-// memory_copy.wast:3515
-assert_return(() => call($17, "load8_u", [32237]), 0);
-
-// memory_copy.wast:3516
-assert_return(() => call($17, "load8_u", [32436]), 0);
-
-// memory_copy.wast:3517
-assert_return(() => call($17, "load8_u", [32635]), 0);
-
-// memory_copy.wast:3518
-assert_return(() => call($17, "load8_u", [32834]), 0);
-
-// memory_copy.wast:3519
-assert_return(() => call($17, "load8_u", [33033]), 0);
-
-// memory_copy.wast:3520
-assert_return(() => call($17, "load8_u", [33232]), 0);
-
-// memory_copy.wast:3521
-assert_return(() => call($17, "load8_u", [33431]), 0);
-
-// memory_copy.wast:3522
-assert_return(() => call($17, "load8_u", [33630]), 0);
-
-// memory_copy.wast:3523
-assert_return(() => call($17, "load8_u", [33829]), 0);
-
-// memory_copy.wast:3524
-assert_return(() => call($17, "load8_u", [34028]), 0);
-
-// memory_copy.wast:3525
-assert_return(() => call($17, "load8_u", [34227]), 0);
-
-// memory_copy.wast:3526
-assert_return(() => call($17, "load8_u", [34426]), 0);
-
-// memory_copy.wast:3527
-assert_return(() => call($17, "load8_u", [34625]), 0);
-
-// memory_copy.wast:3528
-assert_return(() => call($17, "load8_u", [34824]), 0);
-
-// memory_copy.wast:3529
-assert_return(() => call($17, "load8_u", [35023]), 0);
-
-// memory_copy.wast:3530
-assert_return(() => call($17, "load8_u", [35222]), 0);
-
-// memory_copy.wast:3531
-assert_return(() => call($17, "load8_u", [35421]), 0);
-
-// memory_copy.wast:3532
-assert_return(() => call($17, "load8_u", [35620]), 0);
-
-// memory_copy.wast:3533
-assert_return(() => call($17, "load8_u", [35819]), 0);
-
-// memory_copy.wast:3534
-assert_return(() => call($17, "load8_u", [36018]), 0);
-
-// memory_copy.wast:3535
-assert_return(() => call($17, "load8_u", [36217]), 0);
-
-// memory_copy.wast:3536
-assert_return(() => call($17, "load8_u", [36416]), 0);
-
-// memory_copy.wast:3537
-assert_return(() => call($17, "load8_u", [36615]), 0);
-
-// memory_copy.wast:3538
-assert_return(() => call($17, "load8_u", [36814]), 0);
-
-// memory_copy.wast:3539
-assert_return(() => call($17, "load8_u", [37013]), 0);
-
-// memory_copy.wast:3540
-assert_return(() => call($17, "load8_u", [37212]), 0);
-
-// memory_copy.wast:3541
-assert_return(() => call($17, "load8_u", [37411]), 0);
-
-// memory_copy.wast:3542
-assert_return(() => call($17, "load8_u", [37610]), 0);
-
-// memory_copy.wast:3543
-assert_return(() => call($17, "load8_u", [37809]), 0);
-
-// memory_copy.wast:3544
-assert_return(() => call($17, "load8_u", [38008]), 0);
-
-// memory_copy.wast:3545
-assert_return(() => call($17, "load8_u", [38207]), 0);
-
-// memory_copy.wast:3546
-assert_return(() => call($17, "load8_u", [38406]), 0);
-
-// memory_copy.wast:3547
-assert_return(() => call($17, "load8_u", [38605]), 0);
-
-// memory_copy.wast:3548
-assert_return(() => call($17, "load8_u", [38804]), 0);
-
-// memory_copy.wast:3549
-assert_return(() => call($17, "load8_u", [39003]), 0);
-
-// memory_copy.wast:3550
-assert_return(() => call($17, "load8_u", [39202]), 0);
-
-// memory_copy.wast:3551
-assert_return(() => call($17, "load8_u", [39401]), 0);
-
-// memory_copy.wast:3552
-assert_return(() => call($17, "load8_u", [39600]), 0);
-
-// memory_copy.wast:3553
-assert_return(() => call($17, "load8_u", [39799]), 0);
-
-// memory_copy.wast:3554
-assert_return(() => call($17, "load8_u", [39998]), 0);
-
-// memory_copy.wast:3555
-assert_return(() => call($17, "load8_u", [40197]), 0);
-
-// memory_copy.wast:3556
-assert_return(() => call($17, "load8_u", [40396]), 0);
-
-// memory_copy.wast:3557
-assert_return(() => call($17, "load8_u", [40595]), 0);
-
-// memory_copy.wast:3558
-assert_return(() => call($17, "load8_u", [40794]), 0);
-
-// memory_copy.wast:3559
-assert_return(() => call($17, "load8_u", [40993]), 0);
-
-// memory_copy.wast:3560
-assert_return(() => call($17, "load8_u", [41192]), 0);
-
-// memory_copy.wast:3561
-assert_return(() => call($17, "load8_u", [41391]), 0);
-
-// memory_copy.wast:3562
-assert_return(() => call($17, "load8_u", [41590]), 0);
-
-// memory_copy.wast:3563
-assert_return(() => call($17, "load8_u", [41789]), 0);
-
-// memory_copy.wast:3564
-assert_return(() => call($17, "load8_u", [41988]), 0);
-
-// memory_copy.wast:3565
-assert_return(() => call($17, "load8_u", [42187]), 0);
-
-// memory_copy.wast:3566
-assert_return(() => call($17, "load8_u", [42386]), 0);
-
-// memory_copy.wast:3567
-assert_return(() => call($17, "load8_u", [42585]), 0);
-
-// memory_copy.wast:3568
-assert_return(() => call($17, "load8_u", [42784]), 0);
-
-// memory_copy.wast:3569
-assert_return(() => call($17, "load8_u", [42983]), 0);
-
-// memory_copy.wast:3570
-assert_return(() => call($17, "load8_u", [43182]), 0);
-
-// memory_copy.wast:3571
-assert_return(() => call($17, "load8_u", [43381]), 0);
-
-// memory_copy.wast:3572
-assert_return(() => call($17, "load8_u", [43580]), 0);
-
-// memory_copy.wast:3573
-assert_return(() => call($17, "load8_u", [43779]), 0);
-
-// memory_copy.wast:3574
-assert_return(() => call($17, "load8_u", [43978]), 0);
-
-// memory_copy.wast:3575
-assert_return(() => call($17, "load8_u", [44177]), 0);
-
-// memory_copy.wast:3576
-assert_return(() => call($17, "load8_u", [44376]), 0);
-
-// memory_copy.wast:3577
-assert_return(() => call($17, "load8_u", [44575]), 0);
-
-// memory_copy.wast:3578
-assert_return(() => call($17, "load8_u", [44774]), 0);
-
-// memory_copy.wast:3579
-assert_return(() => call($17, "load8_u", [44973]), 0);
-
-// memory_copy.wast:3580
-assert_return(() => call($17, "load8_u", [45172]), 0);
-
-// memory_copy.wast:3581
-assert_return(() => call($17, "load8_u", [45371]), 0);
-
-// memory_copy.wast:3582
-assert_return(() => call($17, "load8_u", [45570]), 0);
-
-// memory_copy.wast:3583
-assert_return(() => call($17, "load8_u", [45769]), 0);
-
-// memory_copy.wast:3584
-assert_return(() => call($17, "load8_u", [45968]), 0);
-
-// memory_copy.wast:3585
-assert_return(() => call($17, "load8_u", [46167]), 0);
-
-// memory_copy.wast:3586
-assert_return(() => call($17, "load8_u", [46366]), 0);
-
-// memory_copy.wast:3587
-assert_return(() => call($17, "load8_u", [46565]), 0);
-
-// memory_copy.wast:3588
-assert_return(() => call($17, "load8_u", [46764]), 0);
-
-// memory_copy.wast:3589
-assert_return(() => call($17, "load8_u", [46963]), 0);
-
-// memory_copy.wast:3590
-assert_return(() => call($17, "load8_u", [47162]), 0);
-
-// memory_copy.wast:3591
-assert_return(() => call($17, "load8_u", [47361]), 0);
-
-// memory_copy.wast:3592
-assert_return(() => call($17, "load8_u", [47560]), 0);
-
-// memory_copy.wast:3593
-assert_return(() => call($17, "load8_u", [47759]), 0);
-
-// memory_copy.wast:3594
-assert_return(() => call($17, "load8_u", [47958]), 0);
-
-// memory_copy.wast:3595
-assert_return(() => call($17, "load8_u", [48157]), 0);
-
-// memory_copy.wast:3596
-assert_return(() => call($17, "load8_u", [48356]), 0);
-
-// memory_copy.wast:3597
-assert_return(() => call($17, "load8_u", [48555]), 0);
-
-// memory_copy.wast:3598
-assert_return(() => call($17, "load8_u", [48754]), 0);
-
-// memory_copy.wast:3599
-assert_return(() => call($17, "load8_u", [48953]), 0);
-
-// memory_copy.wast:3600
-assert_return(() => call($17, "load8_u", [49152]), 0);
-
-// memory_copy.wast:3601
-assert_return(() => call($17, "load8_u", [49351]), 0);
-
-// memory_copy.wast:3602
-assert_return(() => call($17, "load8_u", [49550]), 0);
-
-// memory_copy.wast:3603
-assert_return(() => call($17, "load8_u", [49749]), 0);
-
-// memory_copy.wast:3604
-assert_return(() => call($17, "load8_u", [49948]), 0);
-
-// memory_copy.wast:3605
-assert_return(() => call($17, "load8_u", [50147]), 0);
-
-// memory_copy.wast:3606
-assert_return(() => call($17, "load8_u", [50346]), 0);
-
-// memory_copy.wast:3607
-assert_return(() => call($17, "load8_u", [50545]), 0);
-
-// memory_copy.wast:3608
-assert_return(() => call($17, "load8_u", [50744]), 0);
-
-// memory_copy.wast:3609
-assert_return(() => call($17, "load8_u", [50943]), 0);
-
-// memory_copy.wast:3610
-assert_return(() => call($17, "load8_u", [51142]), 0);
-
-// memory_copy.wast:3611
-assert_return(() => call($17, "load8_u", [51341]), 0);
-
-// memory_copy.wast:3612
-assert_return(() => call($17, "load8_u", [51540]), 0);
-
-// memory_copy.wast:3613
-assert_return(() => call($17, "load8_u", [51739]), 0);
-
-// memory_copy.wast:3614
-assert_return(() => call($17, "load8_u", [51938]), 0);
-
-// memory_copy.wast:3615
-assert_return(() => call($17, "load8_u", [52137]), 0);
-
-// memory_copy.wast:3616
-assert_return(() => call($17, "load8_u", [52336]), 0);
-
-// memory_copy.wast:3617
-assert_return(() => call($17, "load8_u", [52535]), 0);
-
-// memory_copy.wast:3618
-assert_return(() => call($17, "load8_u", [52734]), 0);
-
-// memory_copy.wast:3619
-assert_return(() => call($17, "load8_u", [52933]), 0);
-
-// memory_copy.wast:3620
-assert_return(() => call($17, "load8_u", [53132]), 0);
-
-// memory_copy.wast:3621
-assert_return(() => call($17, "load8_u", [53331]), 0);
-
-// memory_copy.wast:3622
-assert_return(() => call($17, "load8_u", [53530]), 0);
-
-// memory_copy.wast:3623
-assert_return(() => call($17, "load8_u", [53729]), 0);
-
-// memory_copy.wast:3624
-assert_return(() => call($17, "load8_u", [53928]), 0);
-
-// memory_copy.wast:3625
-assert_return(() => call($17, "load8_u", [54127]), 0);
-
-// memory_copy.wast:3626
-assert_return(() => call($17, "load8_u", [54326]), 0);
-
-// memory_copy.wast:3627
-assert_return(() => call($17, "load8_u", [54525]), 0);
-
-// memory_copy.wast:3628
-assert_return(() => call($17, "load8_u", [54724]), 0);
-
-// memory_copy.wast:3629
-assert_return(() => call($17, "load8_u", [54923]), 0);
-
-// memory_copy.wast:3630
-assert_return(() => call($17, "load8_u", [55122]), 0);
-
-// memory_copy.wast:3631
-assert_return(() => call($17, "load8_u", [55321]), 0);
-
-// memory_copy.wast:3632
-assert_return(() => call($17, "load8_u", [55520]), 0);
-
-// memory_copy.wast:3633
-assert_return(() => call($17, "load8_u", [55719]), 0);
-
-// memory_copy.wast:3634
-assert_return(() => call($17, "load8_u", [55918]), 0);
-
-// memory_copy.wast:3635
-assert_return(() => call($17, "load8_u", [56117]), 0);
-
-// memory_copy.wast:3636
-assert_return(() => call($17, "load8_u", [56316]), 0);
-
-// memory_copy.wast:3637
-assert_return(() => call($17, "load8_u", [56515]), 0);
-
-// memory_copy.wast:3638
-assert_return(() => call($17, "load8_u", [56714]), 0);
-
-// memory_copy.wast:3639
-assert_return(() => call($17, "load8_u", [56913]), 0);
-
-// memory_copy.wast:3640
-assert_return(() => call($17, "load8_u", [57112]), 0);
-
-// memory_copy.wast:3641
-assert_return(() => call($17, "load8_u", [57311]), 0);
-
-// memory_copy.wast:3642
-assert_return(() => call($17, "load8_u", [57510]), 0);
-
-// memory_copy.wast:3643
-assert_return(() => call($17, "load8_u", [57709]), 0);
-
-// memory_copy.wast:3644
-assert_return(() => call($17, "load8_u", [57908]), 0);
-
-// memory_copy.wast:3645
-assert_return(() => call($17, "load8_u", [58107]), 0);
-
-// memory_copy.wast:3646
-assert_return(() => call($17, "load8_u", [58306]), 0);
-
-// memory_copy.wast:3647
-assert_return(() => call($17, "load8_u", [58505]), 0);
-
-// memory_copy.wast:3648
-assert_return(() => call($17, "load8_u", [58704]), 0);
-
-// memory_copy.wast:3649
-assert_return(() => call($17, "load8_u", [58903]), 0);
-
-// memory_copy.wast:3650
-assert_return(() => call($17, "load8_u", [59102]), 0);
-
-// memory_copy.wast:3651
-assert_return(() => call($17, "load8_u", [59301]), 0);
-
-// memory_copy.wast:3652
-assert_return(() => call($17, "load8_u", [59500]), 0);
-
-// memory_copy.wast:3653
-assert_return(() => call($17, "load8_u", [59699]), 0);
-
-// memory_copy.wast:3654
-assert_return(() => call($17, "load8_u", [59898]), 0);
-
-// memory_copy.wast:3655
-assert_return(() => call($17, "load8_u", [60097]), 0);
-
-// memory_copy.wast:3656
-assert_return(() => call($17, "load8_u", [60296]), 0);
-
-// memory_copy.wast:3657
-assert_return(() => call($17, "load8_u", [60495]), 0);
-
-// memory_copy.wast:3658
-assert_return(() => call($17, "load8_u", [60694]), 0);
-
-// memory_copy.wast:3659
-assert_return(() => call($17, "load8_u", [60893]), 0);
-
-// memory_copy.wast:3660
-assert_return(() => call($17, "load8_u", [61092]), 0);
-
-// memory_copy.wast:3661
-assert_return(() => call($17, "load8_u", [61291]), 0);
-
-// memory_copy.wast:3662
-assert_return(() => call($17, "load8_u", [61490]), 0);
-
-// memory_copy.wast:3663
-assert_return(() => call($17, "load8_u", [61689]), 0);
-
-// memory_copy.wast:3664
-assert_return(() => call($17, "load8_u", [61888]), 0);
-
-// memory_copy.wast:3665
-assert_return(() => call($17, "load8_u", [62087]), 0);
-
-// memory_copy.wast:3666
-assert_return(() => call($17, "load8_u", [62286]), 0);
-
-// memory_copy.wast:3667
-assert_return(() => call($17, "load8_u", [62485]), 0);
-
-// memory_copy.wast:3668
-assert_return(() => call($17, "load8_u", [62684]), 0);
-
-// memory_copy.wast:3669
-assert_return(() => call($17, "load8_u", [62883]), 0);
-
-// memory_copy.wast:3670
-assert_return(() => call($17, "load8_u", [63082]), 0);
-
-// memory_copy.wast:3671
-assert_return(() => call($17, "load8_u", [63281]), 0);
-
-// memory_copy.wast:3672
-assert_return(() => call($17, "load8_u", [63480]), 0);
-
-// memory_copy.wast:3673
-assert_return(() => call($17, "load8_u", [63679]), 0);
-
-// memory_copy.wast:3674
-assert_return(() => call($17, "load8_u", [63878]), 0);
-
-// memory_copy.wast:3675
-assert_return(() => call($17, "load8_u", [64077]), 0);
-
-// memory_copy.wast:3676
-assert_return(() => call($17, "load8_u", [64276]), 0);
-
-// memory_copy.wast:3677
-assert_return(() => call($17, "load8_u", [64475]), 0);
-
-// memory_copy.wast:3678
-assert_return(() => call($17, "load8_u", [64674]), 0);
-
-// memory_copy.wast:3679
-assert_return(() => call($17, "load8_u", [64873]), 0);
-
-// memory_copy.wast:3680
-assert_return(() => call($17, "load8_u", [65072]), 0);
-
-// memory_copy.wast:3681
-assert_return(() => call($17, "load8_u", [65271]), 0);
-
-// memory_copy.wast:3682
-assert_return(() => call($17, "load8_u", [65470]), 0);
-
-// memory_copy.wast:3683
-assert_return(() => call($17, "load8_u", [65516]), 0);
-
-// memory_copy.wast:3684
-assert_return(() => call($17, "load8_u", [65517]), 1);
-
-// memory_copy.wast:3685
-assert_return(() => call($17, "load8_u", [65518]), 2);
-
-// memory_copy.wast:3686
-assert_return(() => call($17, "load8_u", [65519]), 3);
-
-// memory_copy.wast:3687
-assert_return(() => call($17, "load8_u", [65520]), 4);
-
-// memory_copy.wast:3688
-assert_return(() => call($17, "load8_u", [65521]), 5);
-
-// memory_copy.wast:3689
-assert_return(() => call($17, "load8_u", [65522]), 6);
-
-// memory_copy.wast:3690
-assert_return(() => call($17, "load8_u", [65523]), 7);
-
-// memory_copy.wast:3691
-assert_return(() => call($17, "load8_u", [65524]), 8);
-
-// memory_copy.wast:3692
-assert_return(() => call($17, "load8_u", [65525]), 9);
-
-// memory_copy.wast:3693
-assert_return(() => call($17, "load8_u", [65526]), 10);
-
-// memory_copy.wast:3694
-assert_return(() => call($17, "load8_u", [65527]), 11);
-
-// memory_copy.wast:3695
-assert_return(() => call($17, "load8_u", [65528]), 12);
-
-// memory_copy.wast:3696
-assert_return(() => call($17, "load8_u", [65529]), 13);
-
-// memory_copy.wast:3697
-assert_return(() => call($17, "load8_u", [65530]), 14);
-
-// memory_copy.wast:3698
-assert_return(() => call($17, "load8_u", [65531]), 15);
-
-// memory_copy.wast:3699
-assert_return(() => call($17, "load8_u", [65532]), 16);
-
-// memory_copy.wast:3700
-assert_return(() => call($17, "load8_u", [65533]), 17);
-
-// memory_copy.wast:3701
-assert_return(() => call($17, "load8_u", [65534]), 18);
-
-// memory_copy.wast:3702
-assert_return(() => call($17, "load8_u", [65535]), 19);
-
-// memory_copy.wast:3704
-let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:3712
-assert_trap(() => call($18, "run", [0, 65516, -4096]));
-
-// memory_copy.wast:3715
-assert_return(() => call($18, "load8_u", [0]), 0);
-
-// memory_copy.wast:3716
-assert_return(() => call($18, "load8_u", [1]), 1);
-
-// memory_copy.wast:3717
-assert_return(() => call($18, "load8_u", [2]), 2);
-
-// memory_copy.wast:3718
-assert_return(() => call($18, "load8_u", [3]), 3);
-
-// memory_copy.wast:3719
-assert_return(() => call($18, "load8_u", [4]), 4);
-
-// memory_copy.wast:3720
-assert_return(() => call($18, "load8_u", [5]), 5);
-
-// memory_copy.wast:3721
-assert_return(() => call($18, "load8_u", [6]), 6);
-
-// memory_copy.wast:3722
-assert_return(() => call($18, "load8_u", [7]), 7);
-
-// memory_copy.wast:3723
-assert_return(() => call($18, "load8_u", [8]), 8);
-
-// memory_copy.wast:3724
-assert_return(() => call($18, "load8_u", [9]), 9);
-
-// memory_copy.wast:3725
-assert_return(() => call($18, "load8_u", [10]), 10);
-
-// memory_copy.wast:3726
-assert_return(() => call($18, "load8_u", [11]), 11);
-
-// memory_copy.wast:3727
-assert_return(() => call($18, "load8_u", [12]), 12);
-
-// memory_copy.wast:3728
-assert_return(() => call($18, "load8_u", [13]), 13);
-
-// memory_copy.wast:3729
-assert_return(() => call($18, "load8_u", [14]), 14);
-
-// memory_copy.wast:3730
-assert_return(() => call($18, "load8_u", [15]), 15);
-
-// memory_copy.wast:3731
-assert_return(() => call($18, "load8_u", [16]), 16);
-
-// memory_copy.wast:3732
-assert_return(() => call($18, "load8_u", [17]), 17);
-
-// memory_copy.wast:3733
-assert_return(() => call($18, "load8_u", [18]), 18);
-
-// memory_copy.wast:3734
-assert_return(() => call($18, "load8_u", [19]), 19);
-
-// memory_copy.wast:3735
-assert_return(() => call($18, "load8_u", [218]), 0);
-
-// memory_copy.wast:3736
-assert_return(() => call($18, "load8_u", [417]), 0);
-
-// memory_copy.wast:3737
-assert_return(() => call($18, "load8_u", [616]), 0);
-
-// memory_copy.wast:3738
-assert_return(() => call($18, "load8_u", [815]), 0);
-
-// memory_copy.wast:3739
-assert_return(() => call($18, "load8_u", [1014]), 0);
-
-// memory_copy.wast:3740
-assert_return(() => call($18, "load8_u", [1213]), 0);
-
-// memory_copy.wast:3741
-assert_return(() => call($18, "load8_u", [1412]), 0);
-
-// memory_copy.wast:3742
-assert_return(() => call($18, "load8_u", [1611]), 0);
-
-// memory_copy.wast:3743
-assert_return(() => call($18, "load8_u", [1810]), 0);
-
-// memory_copy.wast:3744
-assert_return(() => call($18, "load8_u", [2009]), 0);
-
-// memory_copy.wast:3745
-assert_return(() => call($18, "load8_u", [2208]), 0);
-
-// memory_copy.wast:3746
-assert_return(() => call($18, "load8_u", [2407]), 0);
-
-// memory_copy.wast:3747
-assert_return(() => call($18, "load8_u", [2606]), 0);
-
-// memory_copy.wast:3748
-assert_return(() => call($18, "load8_u", [2805]), 0);
-
-// memory_copy.wast:3749
-assert_return(() => call($18, "load8_u", [3004]), 0);
-
-// memory_copy.wast:3750
-assert_return(() => call($18, "load8_u", [3203]), 0);
-
-// memory_copy.wast:3751
-assert_return(() => call($18, "load8_u", [3402]), 0);
-
-// memory_copy.wast:3752
-assert_return(() => call($18, "load8_u", [3601]), 0);
-
-// memory_copy.wast:3753
-assert_return(() => call($18, "load8_u", [3800]), 0);
-
-// memory_copy.wast:3754
-assert_return(() => call($18, "load8_u", [3999]), 0);
-
-// memory_copy.wast:3755
-assert_return(() => call($18, "load8_u", [4198]), 0);
-
-// memory_copy.wast:3756
-assert_return(() => call($18, "load8_u", [4397]), 0);
-
-// memory_copy.wast:3757
-assert_return(() => call($18, "load8_u", [4596]), 0);
-
-// memory_copy.wast:3758
-assert_return(() => call($18, "load8_u", [4795]), 0);
-
-// memory_copy.wast:3759
-assert_return(() => call($18, "load8_u", [4994]), 0);
-
-// memory_copy.wast:3760
-assert_return(() => call($18, "load8_u", [5193]), 0);
-
-// memory_copy.wast:3761
-assert_return(() => call($18, "load8_u", [5392]), 0);
-
-// memory_copy.wast:3762
-assert_return(() => call($18, "load8_u", [5591]), 0);
-
-// memory_copy.wast:3763
-assert_return(() => call($18, "load8_u", [5790]), 0);
-
-// memory_copy.wast:3764
-assert_return(() => call($18, "load8_u", [5989]), 0);
-
-// memory_copy.wast:3765
-assert_return(() => call($18, "load8_u", [6188]), 0);
-
-// memory_copy.wast:3766
-assert_return(() => call($18, "load8_u", [6387]), 0);
-
-// memory_copy.wast:3767
-assert_return(() => call($18, "load8_u", [6586]), 0);
-
-// memory_copy.wast:3768
-assert_return(() => call($18, "load8_u", [6785]), 0);
-
-// memory_copy.wast:3769
-assert_return(() => call($18, "load8_u", [6984]), 0);
-
-// memory_copy.wast:3770
-assert_return(() => call($18, "load8_u", [7183]), 0);
-
-// memory_copy.wast:3771
-assert_return(() => call($18, "load8_u", [7382]), 0);
-
-// memory_copy.wast:3772
-assert_return(() => call($18, "load8_u", [7581]), 0);
-
-// memory_copy.wast:3773
-assert_return(() => call($18, "load8_u", [7780]), 0);
-
-// memory_copy.wast:3774
-assert_return(() => call($18, "load8_u", [7979]), 0);
-
-// memory_copy.wast:3775
-assert_return(() => call($18, "load8_u", [8178]), 0);
-
-// memory_copy.wast:3776
-assert_return(() => call($18, "load8_u", [8377]), 0);
-
-// memory_copy.wast:3777
-assert_return(() => call($18, "load8_u", [8576]), 0);
-
-// memory_copy.wast:3778
-assert_return(() => call($18, "load8_u", [8775]), 0);
-
-// memory_copy.wast:3779
-assert_return(() => call($18, "load8_u", [8974]), 0);
-
-// memory_copy.wast:3780
-assert_return(() => call($18, "load8_u", [9173]), 0);
-
-// memory_copy.wast:3781
-assert_return(() => call($18, "load8_u", [9372]), 0);
-
-// memory_copy.wast:3782
-assert_return(() => call($18, "load8_u", [9571]), 0);
-
-// memory_copy.wast:3783
-assert_return(() => call($18, "load8_u", [9770]), 0);
-
-// memory_copy.wast:3784
-assert_return(() => call($18, "load8_u", [9969]), 0);
-
-// memory_copy.wast:3785
-assert_return(() => call($18, "load8_u", [10168]), 0);
-
-// memory_copy.wast:3786
-assert_return(() => call($18, "load8_u", [10367]), 0);
-
-// memory_copy.wast:3787
-assert_return(() => call($18, "load8_u", [10566]), 0);
-
-// memory_copy.wast:3788
-assert_return(() => call($18, "load8_u", [10765]), 0);
-
-// memory_copy.wast:3789
-assert_return(() => call($18, "load8_u", [10964]), 0);
-
-// memory_copy.wast:3790
-assert_return(() => call($18, "load8_u", [11163]), 0);
-
-// memory_copy.wast:3791
-assert_return(() => call($18, "load8_u", [11362]), 0);
-
-// memory_copy.wast:3792
-assert_return(() => call($18, "load8_u", [11561]), 0);
-
-// memory_copy.wast:3793
-assert_return(() => call($18, "load8_u", [11760]), 0);
-
-// memory_copy.wast:3794
-assert_return(() => call($18, "load8_u", [11959]), 0);
-
-// memory_copy.wast:3795
-assert_return(() => call($18, "load8_u", [12158]), 0);
-
-// memory_copy.wast:3796
-assert_return(() => call($18, "load8_u", [12357]), 0);
-
-// memory_copy.wast:3797
-assert_return(() => call($18, "load8_u", [12556]), 0);
-
-// memory_copy.wast:3798
-assert_return(() => call($18, "load8_u", [12755]), 0);
-
-// memory_copy.wast:3799
-assert_return(() => call($18, "load8_u", [12954]), 0);
-
-// memory_copy.wast:3800
-assert_return(() => call($18, "load8_u", [13153]), 0);
-
-// memory_copy.wast:3801
-assert_return(() => call($18, "load8_u", [13352]), 0);
-
-// memory_copy.wast:3802
-assert_return(() => call($18, "load8_u", [13551]), 0);
-
-// memory_copy.wast:3803
-assert_return(() => call($18, "load8_u", [13750]), 0);
-
-// memory_copy.wast:3804
-assert_return(() => call($18, "load8_u", [13949]), 0);
-
-// memory_copy.wast:3805
-assert_return(() => call($18, "load8_u", [14148]), 0);
-
-// memory_copy.wast:3806
-assert_return(() => call($18, "load8_u", [14347]), 0);
-
-// memory_copy.wast:3807
-assert_return(() => call($18, "load8_u", [14546]), 0);
-
-// memory_copy.wast:3808
-assert_return(() => call($18, "load8_u", [14745]), 0);
-
-// memory_copy.wast:3809
-assert_return(() => call($18, "load8_u", [14944]), 0);
-
-// memory_copy.wast:3810
-assert_return(() => call($18, "load8_u", [15143]), 0);
-
-// memory_copy.wast:3811
-assert_return(() => call($18, "load8_u", [15342]), 0);
-
-// memory_copy.wast:3812
-assert_return(() => call($18, "load8_u", [15541]), 0);
-
-// memory_copy.wast:3813
-assert_return(() => call($18, "load8_u", [15740]), 0);
-
-// memory_copy.wast:3814
-assert_return(() => call($18, "load8_u", [15939]), 0);
-
-// memory_copy.wast:3815
-assert_return(() => call($18, "load8_u", [16138]), 0);
-
-// memory_copy.wast:3816
-assert_return(() => call($18, "load8_u", [16337]), 0);
-
-// memory_copy.wast:3817
-assert_return(() => call($18, "load8_u", [16536]), 0);
-
-// memory_copy.wast:3818
-assert_return(() => call($18, "load8_u", [16735]), 0);
-
-// memory_copy.wast:3819
-assert_return(() => call($18, "load8_u", [16934]), 0);
-
-// memory_copy.wast:3820
-assert_return(() => call($18, "load8_u", [17133]), 0);
-
-// memory_copy.wast:3821
-assert_return(() => call($18, "load8_u", [17332]), 0);
-
-// memory_copy.wast:3822
-assert_return(() => call($18, "load8_u", [17531]), 0);
-
-// memory_copy.wast:3823
-assert_return(() => call($18, "load8_u", [17730]), 0);
-
-// memory_copy.wast:3824
-assert_return(() => call($18, "load8_u", [17929]), 0);
-
-// memory_copy.wast:3825
-assert_return(() => call($18, "load8_u", [18128]), 0);
-
-// memory_copy.wast:3826
-assert_return(() => call($18, "load8_u", [18327]), 0);
-
-// memory_copy.wast:3827
-assert_return(() => call($18, "load8_u", [18526]), 0);
-
-// memory_copy.wast:3828
-assert_return(() => call($18, "load8_u", [18725]), 0);
-
-// memory_copy.wast:3829
-assert_return(() => call($18, "load8_u", [18924]), 0);
-
-// memory_copy.wast:3830
-assert_return(() => call($18, "load8_u", [19123]), 0);
-
-// memory_copy.wast:3831
-assert_return(() => call($18, "load8_u", [19322]), 0);
-
-// memory_copy.wast:3832
-assert_return(() => call($18, "load8_u", [19521]), 0);
-
-// memory_copy.wast:3833
-assert_return(() => call($18, "load8_u", [19720]), 0);
-
-// memory_copy.wast:3834
-assert_return(() => call($18, "load8_u", [19919]), 0);
-
-// memory_copy.wast:3835
-assert_return(() => call($18, "load8_u", [20118]), 0);
-
-// memory_copy.wast:3836
-assert_return(() => call($18, "load8_u", [20317]), 0);
-
-// memory_copy.wast:3837
-assert_return(() => call($18, "load8_u", [20516]), 0);
-
-// memory_copy.wast:3838
-assert_return(() => call($18, "load8_u", [20715]), 0);
-
-// memory_copy.wast:3839
-assert_return(() => call($18, "load8_u", [20914]), 0);
-
-// memory_copy.wast:3840
-assert_return(() => call($18, "load8_u", [21113]), 0);
-
-// memory_copy.wast:3841
-assert_return(() => call($18, "load8_u", [21312]), 0);
-
-// memory_copy.wast:3842
-assert_return(() => call($18, "load8_u", [21511]), 0);
-
-// memory_copy.wast:3843
-assert_return(() => call($18, "load8_u", [21710]), 0);
-
-// memory_copy.wast:3844
-assert_return(() => call($18, "load8_u", [21909]), 0);
-
-// memory_copy.wast:3845
-assert_return(() => call($18, "load8_u", [22108]), 0);
-
-// memory_copy.wast:3846
-assert_return(() => call($18, "load8_u", [22307]), 0);
-
-// memory_copy.wast:3847
-assert_return(() => call($18, "load8_u", [22506]), 0);
-
-// memory_copy.wast:3848
-assert_return(() => call($18, "load8_u", [22705]), 0);
-
-// memory_copy.wast:3849
-assert_return(() => call($18, "load8_u", [22904]), 0);
-
-// memory_copy.wast:3850
-assert_return(() => call($18, "load8_u", [23103]), 0);
-
-// memory_copy.wast:3851
-assert_return(() => call($18, "load8_u", [23302]), 0);
-
-// memory_copy.wast:3852
-assert_return(() => call($18, "load8_u", [23501]), 0);
-
-// memory_copy.wast:3853
-assert_return(() => call($18, "load8_u", [23700]), 0);
-
-// memory_copy.wast:3854
-assert_return(() => call($18, "load8_u", [23899]), 0);
-
-// memory_copy.wast:3855
-assert_return(() => call($18, "load8_u", [24098]), 0);
-
-// memory_copy.wast:3856
-assert_return(() => call($18, "load8_u", [24297]), 0);
-
-// memory_copy.wast:3857
-assert_return(() => call($18, "load8_u", [24496]), 0);
-
-// memory_copy.wast:3858
-assert_return(() => call($18, "load8_u", [24695]), 0);
-
-// memory_copy.wast:3859
-assert_return(() => call($18, "load8_u", [24894]), 0);
-
-// memory_copy.wast:3860
-assert_return(() => call($18, "load8_u", [25093]), 0);
-
-// memory_copy.wast:3861
-assert_return(() => call($18, "load8_u", [25292]), 0);
-
-// memory_copy.wast:3862
-assert_return(() => call($18, "load8_u", [25491]), 0);
-
-// memory_copy.wast:3863
-assert_return(() => call($18, "load8_u", [25690]), 0);
-
-// memory_copy.wast:3864
-assert_return(() => call($18, "load8_u", [25889]), 0);
-
-// memory_copy.wast:3865
-assert_return(() => call($18, "load8_u", [26088]), 0);
-
-// memory_copy.wast:3866
-assert_return(() => call($18, "load8_u", [26287]), 0);
-
-// memory_copy.wast:3867
-assert_return(() => call($18, "load8_u", [26486]), 0);
-
-// memory_copy.wast:3868
-assert_return(() => call($18, "load8_u", [26685]), 0);
-
-// memory_copy.wast:3869
-assert_return(() => call($18, "load8_u", [26884]), 0);
-
-// memory_copy.wast:3870
-assert_return(() => call($18, "load8_u", [27083]), 0);
-
-// memory_copy.wast:3871
-assert_return(() => call($18, "load8_u", [27282]), 0);
-
-// memory_copy.wast:3872
-assert_return(() => call($18, "load8_u", [27481]), 0);
-
-// memory_copy.wast:3873
-assert_return(() => call($18, "load8_u", [27680]), 0);
-
-// memory_copy.wast:3874
-assert_return(() => call($18, "load8_u", [27879]), 0);
-
-// memory_copy.wast:3875
-assert_return(() => call($18, "load8_u", [28078]), 0);
-
-// memory_copy.wast:3876
-assert_return(() => call($18, "load8_u", [28277]), 0);
-
-// memory_copy.wast:3877
-assert_return(() => call($18, "load8_u", [28476]), 0);
-
-// memory_copy.wast:3878
-assert_return(() => call($18, "load8_u", [28675]), 0);
-
-// memory_copy.wast:3879
-assert_return(() => call($18, "load8_u", [28874]), 0);
-
-// memory_copy.wast:3880
-assert_return(() => call($18, "load8_u", [29073]), 0);
-
-// memory_copy.wast:3881
-assert_return(() => call($18, "load8_u", [29272]), 0);
-
-// memory_copy.wast:3882
-assert_return(() => call($18, "load8_u", [29471]), 0);
-
-// memory_copy.wast:3883
-assert_return(() => call($18, "load8_u", [29670]), 0);
-
-// memory_copy.wast:3884
-assert_return(() => call($18, "load8_u", [29869]), 0);
-
-// memory_copy.wast:3885
-assert_return(() => call($18, "load8_u", [30068]), 0);
-
-// memory_copy.wast:3886
-assert_return(() => call($18, "load8_u", [30267]), 0);
-
-// memory_copy.wast:3887
-assert_return(() => call($18, "load8_u", [30466]), 0);
-
-// memory_copy.wast:3888
-assert_return(() => call($18, "load8_u", [30665]), 0);
-
-// memory_copy.wast:3889
-assert_return(() => call($18, "load8_u", [30864]), 0);
-
-// memory_copy.wast:3890
-assert_return(() => call($18, "load8_u", [31063]), 0);
-
-// memory_copy.wast:3891
-assert_return(() => call($18, "load8_u", [31262]), 0);
-
-// memory_copy.wast:3892
-assert_return(() => call($18, "load8_u", [31461]), 0);
-
-// memory_copy.wast:3893
-assert_return(() => call($18, "load8_u", [31660]), 0);
-
-// memory_copy.wast:3894
-assert_return(() => call($18, "load8_u", [31859]), 0);
-
-// memory_copy.wast:3895
-assert_return(() => call($18, "load8_u", [32058]), 0);
-
-// memory_copy.wast:3896
-assert_return(() => call($18, "load8_u", [32257]), 0);
-
-// memory_copy.wast:3897
-assert_return(() => call($18, "load8_u", [32456]), 0);
-
-// memory_copy.wast:3898
-assert_return(() => call($18, "load8_u", [32655]), 0);
-
-// memory_copy.wast:3899
-assert_return(() => call($18, "load8_u", [32854]), 0);
-
-// memory_copy.wast:3900
-assert_return(() => call($18, "load8_u", [33053]), 0);
-
-// memory_copy.wast:3901
-assert_return(() => call($18, "load8_u", [33252]), 0);
-
-// memory_copy.wast:3902
-assert_return(() => call($18, "load8_u", [33451]), 0);
-
-// memory_copy.wast:3903
-assert_return(() => call($18, "load8_u", [33650]), 0);
-
-// memory_copy.wast:3904
-assert_return(() => call($18, "load8_u", [33849]), 0);
-
-// memory_copy.wast:3905
-assert_return(() => call($18, "load8_u", [34048]), 0);
-
-// memory_copy.wast:3906
-assert_return(() => call($18, "load8_u", [34247]), 0);
-
-// memory_copy.wast:3907
-assert_return(() => call($18, "load8_u", [34446]), 0);
-
-// memory_copy.wast:3908
-assert_return(() => call($18, "load8_u", [34645]), 0);
-
-// memory_copy.wast:3909
-assert_return(() => call($18, "load8_u", [34844]), 0);
-
-// memory_copy.wast:3910
-assert_return(() => call($18, "load8_u", [35043]), 0);
-
-// memory_copy.wast:3911
-assert_return(() => call($18, "load8_u", [35242]), 0);
-
-// memory_copy.wast:3912
-assert_return(() => call($18, "load8_u", [35441]), 0);
-
-// memory_copy.wast:3913
-assert_return(() => call($18, "load8_u", [35640]), 0);
-
-// memory_copy.wast:3914
-assert_return(() => call($18, "load8_u", [35839]), 0);
-
-// memory_copy.wast:3915
-assert_return(() => call($18, "load8_u", [36038]), 0);
-
-// memory_copy.wast:3916
-assert_return(() => call($18, "load8_u", [36237]), 0);
-
-// memory_copy.wast:3917
-assert_return(() => call($18, "load8_u", [36436]), 0);
-
-// memory_copy.wast:3918
-assert_return(() => call($18, "load8_u", [36635]), 0);
-
-// memory_copy.wast:3919
-assert_return(() => call($18, "load8_u", [36834]), 0);
-
-// memory_copy.wast:3920
-assert_return(() => call($18, "load8_u", [37033]), 0);
-
-// memory_copy.wast:3921
-assert_return(() => call($18, "load8_u", [37232]), 0);
-
-// memory_copy.wast:3922
-assert_return(() => call($18, "load8_u", [37431]), 0);
-
-// memory_copy.wast:3923
-assert_return(() => call($18, "load8_u", [37630]), 0);
-
-// memory_copy.wast:3924
-assert_return(() => call($18, "load8_u", [37829]), 0);
-
-// memory_copy.wast:3925
-assert_return(() => call($18, "load8_u", [38028]), 0);
-
-// memory_copy.wast:3926
-assert_return(() => call($18, "load8_u", [38227]), 0);
-
-// memory_copy.wast:3927
-assert_return(() => call($18, "load8_u", [38426]), 0);
-
-// memory_copy.wast:3928
-assert_return(() => call($18, "load8_u", [38625]), 0);
-
-// memory_copy.wast:3929
-assert_return(() => call($18, "load8_u", [38824]), 0);
-
-// memory_copy.wast:3930
-assert_return(() => call($18, "load8_u", [39023]), 0);
-
-// memory_copy.wast:3931
-assert_return(() => call($18, "load8_u", [39222]), 0);
-
-// memory_copy.wast:3932
-assert_return(() => call($18, "load8_u", [39421]), 0);
-
-// memory_copy.wast:3933
-assert_return(() => call($18, "load8_u", [39620]), 0);
-
-// memory_copy.wast:3934
-assert_return(() => call($18, "load8_u", [39819]), 0);
-
-// memory_copy.wast:3935
-assert_return(() => call($18, "load8_u", [40018]), 0);
-
-// memory_copy.wast:3936
-assert_return(() => call($18, "load8_u", [40217]), 0);
-
-// memory_copy.wast:3937
-assert_return(() => call($18, "load8_u", [40416]), 0);
-
-// memory_copy.wast:3938
-assert_return(() => call($18, "load8_u", [40615]), 0);
-
-// memory_copy.wast:3939
-assert_return(() => call($18, "load8_u", [40814]), 0);
-
-// memory_copy.wast:3940
-assert_return(() => call($18, "load8_u", [41013]), 0);
-
-// memory_copy.wast:3941
-assert_return(() => call($18, "load8_u", [41212]), 0);
-
-// memory_copy.wast:3942
-assert_return(() => call($18, "load8_u", [41411]), 0);
-
-// memory_copy.wast:3943
-assert_return(() => call($18, "load8_u", [41610]), 0);
-
-// memory_copy.wast:3944
-assert_return(() => call($18, "load8_u", [41809]), 0);
-
-// memory_copy.wast:3945
-assert_return(() => call($18, "load8_u", [42008]), 0);
-
-// memory_copy.wast:3946
-assert_return(() => call($18, "load8_u", [42207]), 0);
-
-// memory_copy.wast:3947
-assert_return(() => call($18, "load8_u", [42406]), 0);
-
-// memory_copy.wast:3948
-assert_return(() => call($18, "load8_u", [42605]), 0);
-
-// memory_copy.wast:3949
-assert_return(() => call($18, "load8_u", [42804]), 0);
-
-// memory_copy.wast:3950
-assert_return(() => call($18, "load8_u", [43003]), 0);
-
-// memory_copy.wast:3951
-assert_return(() => call($18, "load8_u", [43202]), 0);
-
-// memory_copy.wast:3952
-assert_return(() => call($18, "load8_u", [43401]), 0);
-
-// memory_copy.wast:3953
-assert_return(() => call($18, "load8_u", [43600]), 0);
-
-// memory_copy.wast:3954
-assert_return(() => call($18, "load8_u", [43799]), 0);
-
-// memory_copy.wast:3955
-assert_return(() => call($18, "load8_u", [43998]), 0);
-
-// memory_copy.wast:3956
-assert_return(() => call($18, "load8_u", [44197]), 0);
-
-// memory_copy.wast:3957
-assert_return(() => call($18, "load8_u", [44396]), 0);
-
-// memory_copy.wast:3958
-assert_return(() => call($18, "load8_u", [44595]), 0);
-
-// memory_copy.wast:3959
-assert_return(() => call($18, "load8_u", [44794]), 0);
-
-// memory_copy.wast:3960
-assert_return(() => call($18, "load8_u", [44993]), 0);
-
-// memory_copy.wast:3961
-assert_return(() => call($18, "load8_u", [45192]), 0);
-
-// memory_copy.wast:3962
-assert_return(() => call($18, "load8_u", [45391]), 0);
-
-// memory_copy.wast:3963
-assert_return(() => call($18, "load8_u", [45590]), 0);
-
-// memory_copy.wast:3964
-assert_return(() => call($18, "load8_u", [45789]), 0);
-
-// memory_copy.wast:3965
-assert_return(() => call($18, "load8_u", [45988]), 0);
-
-// memory_copy.wast:3966
-assert_return(() => call($18, "load8_u", [46187]), 0);
-
-// memory_copy.wast:3967
-assert_return(() => call($18, "load8_u", [46386]), 0);
-
-// memory_copy.wast:3968
-assert_return(() => call($18, "load8_u", [46585]), 0);
-
-// memory_copy.wast:3969
-assert_return(() => call($18, "load8_u", [46784]), 0);
-
-// memory_copy.wast:3970
-assert_return(() => call($18, "load8_u", [46983]), 0);
-
-// memory_copy.wast:3971
-assert_return(() => call($18, "load8_u", [47182]), 0);
-
-// memory_copy.wast:3972
-assert_return(() => call($18, "load8_u", [47381]), 0);
-
-// memory_copy.wast:3973
-assert_return(() => call($18, "load8_u", [47580]), 0);
-
-// memory_copy.wast:3974
-assert_return(() => call($18, "load8_u", [47779]), 0);
-
-// memory_copy.wast:3975
-assert_return(() => call($18, "load8_u", [47978]), 0);
-
-// memory_copy.wast:3976
-assert_return(() => call($18, "load8_u", [48177]), 0);
-
-// memory_copy.wast:3977
-assert_return(() => call($18, "load8_u", [48376]), 0);
-
-// memory_copy.wast:3978
-assert_return(() => call($18, "load8_u", [48575]), 0);
-
-// memory_copy.wast:3979
-assert_return(() => call($18, "load8_u", [48774]), 0);
-
-// memory_copy.wast:3980
-assert_return(() => call($18, "load8_u", [48973]), 0);
-
-// memory_copy.wast:3981
-assert_return(() => call($18, "load8_u", [49172]), 0);
-
-// memory_copy.wast:3982
-assert_return(() => call($18, "load8_u", [49371]), 0);
-
-// memory_copy.wast:3983
-assert_return(() => call($18, "load8_u", [49570]), 0);
-
-// memory_copy.wast:3984
-assert_return(() => call($18, "load8_u", [49769]), 0);
-
-// memory_copy.wast:3985
-assert_return(() => call($18, "load8_u", [49968]), 0);
-
-// memory_copy.wast:3986
-assert_return(() => call($18, "load8_u", [50167]), 0);
-
-// memory_copy.wast:3987
-assert_return(() => call($18, "load8_u", [50366]), 0);
-
-// memory_copy.wast:3988
-assert_return(() => call($18, "load8_u", [50565]), 0);
-
-// memory_copy.wast:3989
-assert_return(() => call($18, "load8_u", [50764]), 0);
-
-// memory_copy.wast:3990
-assert_return(() => call($18, "load8_u", [50963]), 0);
-
-// memory_copy.wast:3991
-assert_return(() => call($18, "load8_u", [51162]), 0);
-
-// memory_copy.wast:3992
-assert_return(() => call($18, "load8_u", [51361]), 0);
-
-// memory_copy.wast:3993
-assert_return(() => call($18, "load8_u", [51560]), 0);
-
-// memory_copy.wast:3994
-assert_return(() => call($18, "load8_u", [51759]), 0);
-
-// memory_copy.wast:3995
-assert_return(() => call($18, "load8_u", [51958]), 0);
-
-// memory_copy.wast:3996
-assert_return(() => call($18, "load8_u", [52157]), 0);
-
-// memory_copy.wast:3997
-assert_return(() => call($18, "load8_u", [52356]), 0);
-
-// memory_copy.wast:3998
-assert_return(() => call($18, "load8_u", [52555]), 0);
-
-// memory_copy.wast:3999
-assert_return(() => call($18, "load8_u", [52754]), 0);
-
-// memory_copy.wast:4000
-assert_return(() => call($18, "load8_u", [52953]), 0);
-
-// memory_copy.wast:4001
-assert_return(() => call($18, "load8_u", [53152]), 0);
-
-// memory_copy.wast:4002
-assert_return(() => call($18, "load8_u", [53351]), 0);
-
-// memory_copy.wast:4003
-assert_return(() => call($18, "load8_u", [53550]), 0);
-
-// memory_copy.wast:4004
-assert_return(() => call($18, "load8_u", [53749]), 0);
-
-// memory_copy.wast:4005
-assert_return(() => call($18, "load8_u", [53948]), 0);
-
-// memory_copy.wast:4006
-assert_return(() => call($18, "load8_u", [54147]), 0);
-
-// memory_copy.wast:4007
-assert_return(() => call($18, "load8_u", [54346]), 0);
-
-// memory_copy.wast:4008
-assert_return(() => call($18, "load8_u", [54545]), 0);
-
-// memory_copy.wast:4009
-assert_return(() => call($18, "load8_u", [54744]), 0);
-
-// memory_copy.wast:4010
-assert_return(() => call($18, "load8_u", [54943]), 0);
-
-// memory_copy.wast:4011
-assert_return(() => call($18, "load8_u", [55142]), 0);
-
-// memory_copy.wast:4012
-assert_return(() => call($18, "load8_u", [55341]), 0);
-
-// memory_copy.wast:4013
-assert_return(() => call($18, "load8_u", [55540]), 0);
-
-// memory_copy.wast:4014
-assert_return(() => call($18, "load8_u", [55739]), 0);
-
-// memory_copy.wast:4015
-assert_return(() => call($18, "load8_u", [55938]), 0);
-
-// memory_copy.wast:4016
-assert_return(() => call($18, "load8_u", [56137]), 0);
-
-// memory_copy.wast:4017
-assert_return(() => call($18, "load8_u", [56336]), 0);
-
-// memory_copy.wast:4018
-assert_return(() => call($18, "load8_u", [56535]), 0);
-
-// memory_copy.wast:4019
-assert_return(() => call($18, "load8_u", [56734]), 0);
-
-// memory_copy.wast:4020
-assert_return(() => call($18, "load8_u", [56933]), 0);
-
-// memory_copy.wast:4021
-assert_return(() => call($18, "load8_u", [57132]), 0);
-
-// memory_copy.wast:4022
-assert_return(() => call($18, "load8_u", [57331]), 0);
-
-// memory_copy.wast:4023
-assert_return(() => call($18, "load8_u", [57530]), 0);
-
-// memory_copy.wast:4024
-assert_return(() => call($18, "load8_u", [57729]), 0);
-
-// memory_copy.wast:4025
-assert_return(() => call($18, "load8_u", [57928]), 0);
-
-// memory_copy.wast:4026
-assert_return(() => call($18, "load8_u", [58127]), 0);
-
-// memory_copy.wast:4027
-assert_return(() => call($18, "load8_u", [58326]), 0);
-
-// memory_copy.wast:4028
-assert_return(() => call($18, "load8_u", [58525]), 0);
-
-// memory_copy.wast:4029
-assert_return(() => call($18, "load8_u", [58724]), 0);
-
-// memory_copy.wast:4030
-assert_return(() => call($18, "load8_u", [58923]), 0);
-
-// memory_copy.wast:4031
-assert_return(() => call($18, "load8_u", [59122]), 0);
-
-// memory_copy.wast:4032
-assert_return(() => call($18, "load8_u", [59321]), 0);
-
-// memory_copy.wast:4033
-assert_return(() => call($18, "load8_u", [59520]), 0);
-
-// memory_copy.wast:4034
-assert_return(() => call($18, "load8_u", [59719]), 0);
-
-// memory_copy.wast:4035
-assert_return(() => call($18, "load8_u", [59918]), 0);
-
-// memory_copy.wast:4036
-assert_return(() => call($18, "load8_u", [60117]), 0);
-
-// memory_copy.wast:4037
-assert_return(() => call($18, "load8_u", [60316]), 0);
-
-// memory_copy.wast:4038
-assert_return(() => call($18, "load8_u", [60515]), 0);
-
-// memory_copy.wast:4039
-assert_return(() => call($18, "load8_u", [60714]), 0);
-
-// memory_copy.wast:4040
-assert_return(() => call($18, "load8_u", [60913]), 0);
-
-// memory_copy.wast:4041
-assert_return(() => call($18, "load8_u", [61112]), 0);
-
-// memory_copy.wast:4042
-assert_return(() => call($18, "load8_u", [61311]), 0);
-
-// memory_copy.wast:4043
-assert_return(() => call($18, "load8_u", [61510]), 0);
-
-// memory_copy.wast:4044
-assert_return(() => call($18, "load8_u", [61709]), 0);
-
-// memory_copy.wast:4045
-assert_return(() => call($18, "load8_u", [61908]), 0);
-
-// memory_copy.wast:4046
-assert_return(() => call($18, "load8_u", [62107]), 0);
-
-// memory_copy.wast:4047
-assert_return(() => call($18, "load8_u", [62306]), 0);
-
-// memory_copy.wast:4048
-assert_return(() => call($18, "load8_u", [62505]), 0);
-
-// memory_copy.wast:4049
-assert_return(() => call($18, "load8_u", [62704]), 0);
-
-// memory_copy.wast:4050
-assert_return(() => call($18, "load8_u", [62903]), 0);
-
-// memory_copy.wast:4051
-assert_return(() => call($18, "load8_u", [63102]), 0);
-
-// memory_copy.wast:4052
-assert_return(() => call($18, "load8_u", [63301]), 0);
-
-// memory_copy.wast:4053
-assert_return(() => call($18, "load8_u", [63500]), 0);
-
-// memory_copy.wast:4054
-assert_return(() => call($18, "load8_u", [63699]), 0);
-
-// memory_copy.wast:4055
-assert_return(() => call($18, "load8_u", [63898]), 0);
-
-// memory_copy.wast:4056
-assert_return(() => call($18, "load8_u", [64097]), 0);
-
-// memory_copy.wast:4057
-assert_return(() => call($18, "load8_u", [64296]), 0);
-
-// memory_copy.wast:4058
-assert_return(() => call($18, "load8_u", [64495]), 0);
-
-// memory_copy.wast:4059
-assert_return(() => call($18, "load8_u", [64694]), 0);
-
-// memory_copy.wast:4060
-assert_return(() => call($18, "load8_u", [64893]), 0);
-
-// memory_copy.wast:4061
-assert_return(() => call($18, "load8_u", [65092]), 0);
-
-// memory_copy.wast:4062
-assert_return(() => call($18, "load8_u", [65291]), 0);
-
-// memory_copy.wast:4063
-assert_return(() => call($18, "load8_u", [65490]), 0);
-
-// memory_copy.wast:4064
-assert_return(() => call($18, "load8_u", [65516]), 0);
-
-// memory_copy.wast:4065
-assert_return(() => call($18, "load8_u", [65517]), 1);
-
-// memory_copy.wast:4066
-assert_return(() => call($18, "load8_u", [65518]), 2);
-
-// memory_copy.wast:4067
-assert_return(() => call($18, "load8_u", [65519]), 3);
-
-// memory_copy.wast:4068
-assert_return(() => call($18, "load8_u", [65520]), 4);
-
-// memory_copy.wast:4069
-assert_return(() => call($18, "load8_u", [65521]), 5);
-
-// memory_copy.wast:4070
-assert_return(() => call($18, "load8_u", [65522]), 6);
-
-// memory_copy.wast:4071
-assert_return(() => call($18, "load8_u", [65523]), 7);
-
-// memory_copy.wast:4072
-assert_return(() => call($18, "load8_u", [65524]), 8);
-
-// memory_copy.wast:4073
-assert_return(() => call($18, "load8_u", [65525]), 9);
-
-// memory_copy.wast:4074
-assert_return(() => call($18, "load8_u", [65526]), 10);
-
-// memory_copy.wast:4075
-assert_return(() => call($18, "load8_u", [65527]), 11);
-
-// memory_copy.wast:4076
-assert_return(() => call($18, "load8_u", [65528]), 12);
-
-// memory_copy.wast:4077
-assert_return(() => call($18, "load8_u", [65529]), 13);
-
-// memory_copy.wast:4078
-assert_return(() => call($18, "load8_u", [65530]), 14);
-
-// memory_copy.wast:4079
-assert_return(() => call($18, "load8_u", [65531]), 15);
-
-// memory_copy.wast:4080
-assert_return(() => call($18, "load8_u", [65532]), 16);
-
-// memory_copy.wast:4081
-assert_return(() => call($18, "load8_u", [65533]), 17);
-
-// memory_copy.wast:4082
-assert_return(() => call($18, "load8_u", [65534]), 18);
-
-// memory_copy.wast:4083
-assert_return(() => call($18, "load8_u", [65535]), 19);
-
-// memory_copy.wast:4085
-let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\x80\xe0\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
-
-// memory_copy.wast:4093
-assert_trap(() => call($19, "run", [65516, 61440, -256]));
-
-// memory_copy.wast:4096
-assert_return(() => call($19, "load8_u", [198]), 0);
-
-// memory_copy.wast:4097
-assert_return(() => call($19, "load8_u", [397]), 0);
-
-// memory_copy.wast:4098
-assert_return(() => call($19, "load8_u", [596]), 0);
-
-// memory_copy.wast:4099
-assert_return(() => call($19, "load8_u", [795]), 0);
-
-// memory_copy.wast:4100
-assert_return(() => call($19, "load8_u", [994]), 0);
-
-// memory_copy.wast:4101
-assert_return(() => call($19, "load8_u", [1193]), 0);
-
-// memory_copy.wast:4102
-assert_return(() => call($19, "load8_u", [1392]), 0);
-
-// memory_copy.wast:4103
-assert_return(() => call($19, "load8_u", [1591]), 0);
-
-// memory_copy.wast:4104
-assert_return(() => call($19, "load8_u", [1790]), 0);
-
-// memory_copy.wast:4105
-assert_return(() => call($19, "load8_u", [1989]), 0);
-
-// memory_copy.wast:4106
-assert_return(() => call($19, "load8_u", [2188]), 0);
-
-// memory_copy.wast:4107
-assert_return(() => call($19, "load8_u", [2387]), 0);
-
-// memory_copy.wast:4108
-assert_return(() => call($19, "load8_u", [2586]), 0);
-
-// memory_copy.wast:4109
-assert_return(() => call($19, "load8_u", [2785]), 0);
-
-// memory_copy.wast:4110
-assert_return(() => call($19, "load8_u", [2984]), 0);
-
-// memory_copy.wast:4111
-assert_return(() => call($19, "load8_u", [3183]), 0);
-
-// memory_copy.wast:4112
-assert_return(() => call($19, "load8_u", [3382]), 0);
-
-// memory_copy.wast:4113
-assert_return(() => call($19, "load8_u", [3581]), 0);
-
-// memory_copy.wast:4114
-assert_return(() => call($19, "load8_u", [3780]), 0);
-
-// memory_copy.wast:4115
-assert_return(() => call($19, "load8_u", [3979]), 0);
-
-// memory_copy.wast:4116
-assert_return(() => call($19, "load8_u", [4178]), 0);
-
-// memory_copy.wast:4117
-assert_return(() => call($19, "load8_u", [4377]), 0);
-
-// memory_copy.wast:4118
-assert_return(() => call($19, "load8_u", [4576]), 0);
-
-// memory_copy.wast:4119
-assert_return(() => call($19, "load8_u", [4775]), 0);
-
-// memory_copy.wast:4120
-assert_return(() => call($19, "load8_u", [4974]), 0);
-
-// memory_copy.wast:4121
-assert_return(() => call($19, "load8_u", [5173]), 0);
-
-// memory_copy.wast:4122
-assert_return(() => call($19, "load8_u", [5372]), 0);
-
-// memory_copy.wast:4123
-assert_return(() => call($19, "load8_u", [5571]), 0);
-
-// memory_copy.wast:4124
-assert_return(() => call($19, "load8_u", [5770]), 0);
-
-// memory_copy.wast:4125
-assert_return(() => call($19, "load8_u", [5969]), 0);
-
-// memory_copy.wast:4126
-assert_return(() => call($19, "load8_u", [6168]), 0);
-
-// memory_copy.wast:4127
-assert_return(() => call($19, "load8_u", [6367]), 0);
-
-// memory_copy.wast:4128
-assert_return(() => call($19, "load8_u", [6566]), 0);
-
-// memory_copy.wast:4129
-assert_return(() => call($19, "load8_u", [6765]), 0);
-
-// memory_copy.wast:4130
-assert_return(() => call($19, "load8_u", [6964]), 0);
-
-// memory_copy.wast:4131
-assert_return(() => call($19, "load8_u", [7163]), 0);
-
-// memory_copy.wast:4132
-assert_return(() => call($19, "load8_u", [7362]), 0);
-
-// memory_copy.wast:4133
-assert_return(() => call($19, "load8_u", [7561]), 0);
-
-// memory_copy.wast:4134
-assert_return(() => call($19, "load8_u", [7760]), 0);
-
-// memory_copy.wast:4135
-assert_return(() => call($19, "load8_u", [7959]), 0);
-
-// memory_copy.wast:4136
-assert_return(() => call($19, "load8_u", [8158]), 0);
-
-// memory_copy.wast:4137
-assert_return(() => call($19, "load8_u", [8357]), 0);
-
-// memory_copy.wast:4138
-assert_return(() => call($19, "load8_u", [8556]), 0);
-
-// memory_copy.wast:4139
-assert_return(() => call($19, "load8_u", [8755]), 0);
-
-// memory_copy.wast:4140
-assert_return(() => call($19, "load8_u", [8954]), 0);
-
-// memory_copy.wast:4141
-assert_return(() => call($19, "load8_u", [9153]), 0);
-
-// memory_copy.wast:4142
-assert_return(() => call($19, "load8_u", [9352]), 0);
-
-// memory_copy.wast:4143
-assert_return(() => call($19, "load8_u", [9551]), 0);
-
-// memory_copy.wast:4144
-assert_return(() => call($19, "load8_u", [9750]), 0);
-
-// memory_copy.wast:4145
-assert_return(() => call($19, "load8_u", [9949]), 0);
-
-// memory_copy.wast:4146
-assert_return(() => call($19, "load8_u", [10148]), 0);
-
-// memory_copy.wast:4147
-assert_return(() => call($19, "load8_u", [10347]), 0);
-
-// memory_copy.wast:4148
-assert_return(() => call($19, "load8_u", [10546]), 0);
-
-// memory_copy.wast:4149
-assert_return(() => call($19, "load8_u", [10745]), 0);
-
-// memory_copy.wast:4150
-assert_return(() => call($19, "load8_u", [10944]), 0);
-
-// memory_copy.wast:4151
-assert_return(() => call($19, "load8_u", [11143]), 0);
-
-// memory_copy.wast:4152
-assert_return(() => call($19, "load8_u", [11342]), 0);
-
-// memory_copy.wast:4153
-assert_return(() => call($19, "load8_u", [11541]), 0);
-
-// memory_copy.wast:4154
-assert_return(() => call($19, "load8_u", [11740]), 0);
-
-// memory_copy.wast:4155
-assert_return(() => call($19, "load8_u", [11939]), 0);
-
-// memory_copy.wast:4156
-assert_return(() => call($19, "load8_u", [12138]), 0);
-
-// memory_copy.wast:4157
-assert_return(() => call($19, "load8_u", [12337]), 0);
-
-// memory_copy.wast:4158
-assert_return(() => call($19, "load8_u", [12536]), 0);
-
-// memory_copy.wast:4159
-assert_return(() => call($19, "load8_u", [12735]), 0);
-
-// memory_copy.wast:4160
-assert_return(() => call($19, "load8_u", [12934]), 0);
-
-// memory_copy.wast:4161
-assert_return(() => call($19, "load8_u", [13133]), 0);
-
-// memory_copy.wast:4162
-assert_return(() => call($19, "load8_u", [13332]), 0);
-
-// memory_copy.wast:4163
-assert_return(() => call($19, "load8_u", [13531]), 0);
-
-// memory_copy.wast:4164
-assert_return(() => call($19, "load8_u", [13730]), 0);
-
-// memory_copy.wast:4165
-assert_return(() => call($19, "load8_u", [13929]), 0);
-
-// memory_copy.wast:4166
-assert_return(() => call($19, "load8_u", [14128]), 0);
-
-// memory_copy.wast:4167
-assert_return(() => call($19, "load8_u", [14327]), 0);
-
-// memory_copy.wast:4168
-assert_return(() => call($19, "load8_u", [14526]), 0);
-
-// memory_copy.wast:4169
-assert_return(() => call($19, "load8_u", [14725]), 0);
-
-// memory_copy.wast:4170
-assert_return(() => call($19, "load8_u", [14924]), 0);
-
-// memory_copy.wast:4171
-assert_return(() => call($19, "load8_u", [15123]), 0);
-
-// memory_copy.wast:4172
-assert_return(() => call($19, "load8_u", [15322]), 0);
-
-// memory_copy.wast:4173
-assert_return(() => call($19, "load8_u", [15521]), 0);
-
-// memory_copy.wast:4174
-assert_return(() => call($19, "load8_u", [15720]), 0);
-
-// memory_copy.wast:4175
-assert_return(() => call($19, "load8_u", [15919]), 0);
-
-// memory_copy.wast:4176
-assert_return(() => call($19, "load8_u", [16118]), 0);
-
-// memory_copy.wast:4177
-assert_return(() => call($19, "load8_u", [16317]), 0);
-
-// memory_copy.wast:4178
-assert_return(() => call($19, "load8_u", [16516]), 0);
-
-// memory_copy.wast:4179
-assert_return(() => call($19, "load8_u", [16715]), 0);
-
-// memory_copy.wast:4180
-assert_return(() => call($19, "load8_u", [16914]), 0);
-
-// memory_copy.wast:4181
-assert_return(() => call($19, "load8_u", [17113]), 0);
-
-// memory_copy.wast:4182
-assert_return(() => call($19, "load8_u", [17312]), 0);
-
-// memory_copy.wast:4183
-assert_return(() => call($19, "load8_u", [17511]), 0);
-
-// memory_copy.wast:4184
-assert_return(() => call($19, "load8_u", [17710]), 0);
-
-// memory_copy.wast:4185
-assert_return(() => call($19, "load8_u", [17909]), 0);
-
-// memory_copy.wast:4186
-assert_return(() => call($19, "load8_u", [18108]), 0);
-
-// memory_copy.wast:4187
-assert_return(() => call($19, "load8_u", [18307]), 0);
-
-// memory_copy.wast:4188
-assert_return(() => call($19, "load8_u", [18506]), 0);
-
-// memory_copy.wast:4189
-assert_return(() => call($19, "load8_u", [18705]), 0);
-
-// memory_copy.wast:4190
-assert_return(() => call($19, "load8_u", [18904]), 0);
-
-// memory_copy.wast:4191
-assert_return(() => call($19, "load8_u", [19103]), 0);
-
-// memory_copy.wast:4192
-assert_return(() => call($19, "load8_u", [19302]), 0);
-
-// memory_copy.wast:4193
-assert_return(() => call($19, "load8_u", [19501]), 0);
-
-// memory_copy.wast:4194
-assert_return(() => call($19, "load8_u", [19700]), 0);
-
-// memory_copy.wast:4195
-assert_return(() => call($19, "load8_u", [19899]), 0);
-
-// memory_copy.wast:4196
-assert_return(() => call($19, "load8_u", [20098]), 0);
-
-// memory_copy.wast:4197
-assert_return(() => call($19, "load8_u", [20297]), 0);
-
-// memory_copy.wast:4198
-assert_return(() => call($19, "load8_u", [20496]), 0);
-
-// memory_copy.wast:4199
-assert_return(() => call($19, "load8_u", [20695]), 0);
-
-// memory_copy.wast:4200
-assert_return(() => call($19, "load8_u", [20894]), 0);
-
-// memory_copy.wast:4201
-assert_return(() => call($19, "load8_u", [21093]), 0);
-
-// memory_copy.wast:4202
-assert_return(() => call($19, "load8_u", [21292]), 0);
-
-// memory_copy.wast:4203
-assert_return(() => call($19, "load8_u", [21491]), 0);
-
-// memory_copy.wast:4204
-assert_return(() => call($19, "load8_u", [21690]), 0);
-
-// memory_copy.wast:4205
-assert_return(() => call($19, "load8_u", [21889]), 0);
-
-// memory_copy.wast:4206
-assert_return(() => call($19, "load8_u", [22088]), 0);
-
-// memory_copy.wast:4207
-assert_return(() => call($19, "load8_u", [22287]), 0);
-
-// memory_copy.wast:4208
-assert_return(() => call($19, "load8_u", [22486]), 0);
-
-// memory_copy.wast:4209
-assert_return(() => call($19, "load8_u", [22685]), 0);
-
-// memory_copy.wast:4210
-assert_return(() => call($19, "load8_u", [22884]), 0);
-
-// memory_copy.wast:4211
-assert_return(() => call($19, "load8_u", [23083]), 0);
-
-// memory_copy.wast:4212
-assert_return(() => call($19, "load8_u", [23282]), 0);
-
-// memory_copy.wast:4213
-assert_return(() => call($19, "load8_u", [23481]), 0);
-
-// memory_copy.wast:4214
-assert_return(() => call($19, "load8_u", [23680]), 0);
-
-// memory_copy.wast:4215
-assert_return(() => call($19, "load8_u", [23879]), 0);
-
-// memory_copy.wast:4216
-assert_return(() => call($19, "load8_u", [24078]), 0);
-
-// memory_copy.wast:4217
-assert_return(() => call($19, "load8_u", [24277]), 0);
-
-// memory_copy.wast:4218
-assert_return(() => call($19, "load8_u", [24476]), 0);
-
-// memory_copy.wast:4219
-assert_return(() => call($19, "load8_u", [24675]), 0);
-
-// memory_copy.wast:4220
-assert_return(() => call($19, "load8_u", [24874]), 0);
-
-// memory_copy.wast:4221
-assert_return(() => call($19, "load8_u", [25073]), 0);
-
-// memory_copy.wast:4222
-assert_return(() => call($19, "load8_u", [25272]), 0);
-
-// memory_copy.wast:4223
-assert_return(() => call($19, "load8_u", [25471]), 0);
-
-// memory_copy.wast:4224
-assert_return(() => call($19, "load8_u", [25670]), 0);
-
-// memory_copy.wast:4225
-assert_return(() => call($19, "load8_u", [25869]), 0);
-
-// memory_copy.wast:4226
-assert_return(() => call($19, "load8_u", [26068]), 0);
-
-// memory_copy.wast:4227
-assert_return(() => call($19, "load8_u", [26267]), 0);
-
-// memory_copy.wast:4228
-assert_return(() => call($19, "load8_u", [26466]), 0);
-
-// memory_copy.wast:4229
-assert_return(() => call($19, "load8_u", [26665]), 0);
-
-// memory_copy.wast:4230
-assert_return(() => call($19, "load8_u", [26864]), 0);
-
-// memory_copy.wast:4231
-assert_return(() => call($19, "load8_u", [27063]), 0);
-
-// memory_copy.wast:4232
-assert_return(() => call($19, "load8_u", [27262]), 0);
-
-// memory_copy.wast:4233
-assert_return(() => call($19, "load8_u", [27461]), 0);
-
-// memory_copy.wast:4234
-assert_return(() => call($19, "load8_u", [27660]), 0);
-
-// memory_copy.wast:4235
-assert_return(() => call($19, "load8_u", [27859]), 0);
-
-// memory_copy.wast:4236
-assert_return(() => call($19, "load8_u", [28058]), 0);
-
-// memory_copy.wast:4237
-assert_return(() => call($19, "load8_u", [28257]), 0);
-
-// memory_copy.wast:4238
-assert_return(() => call($19, "load8_u", [28456]), 0);
-
-// memory_copy.wast:4239
-assert_return(() => call($19, "load8_u", [28655]), 0);
-
-// memory_copy.wast:4240
-assert_return(() => call($19, "load8_u", [28854]), 0);
-
-// memory_copy.wast:4241
-assert_return(() => call($19, "load8_u", [29053]), 0);
-
-// memory_copy.wast:4242
-assert_return(() => call($19, "load8_u", [29252]), 0);
-
-// memory_copy.wast:4243
-assert_return(() => call($19, "load8_u", [29451]), 0);
-
-// memory_copy.wast:4244
-assert_return(() => call($19, "load8_u", [29650]), 0);
-
-// memory_copy.wast:4245
-assert_return(() => call($19, "load8_u", [29849]), 0);
-
-// memory_copy.wast:4246
-assert_return(() => call($19, "load8_u", [30048]), 0);
-
-// memory_copy.wast:4247
-assert_return(() => call($19, "load8_u", [30247]), 0);
-
-// memory_copy.wast:4248
-assert_return(() => call($19, "load8_u", [30446]), 0);
-
-// memory_copy.wast:4249
-assert_return(() => call($19, "load8_u", [30645]), 0);
-
-// memory_copy.wast:4250
-assert_return(() => call($19, "load8_u", [30844]), 0);
-
-// memory_copy.wast:4251
-assert_return(() => call($19, "load8_u", [31043]), 0);
-
-// memory_copy.wast:4252
-assert_return(() => call($19, "load8_u", [31242]), 0);
-
-// memory_copy.wast:4253
-assert_return(() => call($19, "load8_u", [31441]), 0);
-
-// memory_copy.wast:4254
-assert_return(() => call($19, "load8_u", [31640]), 0);
-
-// memory_copy.wast:4255
-assert_return(() => call($19, "load8_u", [31839]), 0);
-
-// memory_copy.wast:4256
-assert_return(() => call($19, "load8_u", [32038]), 0);
-
-// memory_copy.wast:4257
-assert_return(() => call($19, "load8_u", [32237]), 0);
-
-// memory_copy.wast:4258
-assert_return(() => call($19, "load8_u", [32436]), 0);
-
-// memory_copy.wast:4259
-assert_return(() => call($19, "load8_u", [32635]), 0);
-
-// memory_copy.wast:4260
-assert_return(() => call($19, "load8_u", [32834]), 0);
-
-// memory_copy.wast:4261
-assert_return(() => call($19, "load8_u", [33033]), 0);
-
-// memory_copy.wast:4262
-assert_return(() => call($19, "load8_u", [33232]), 0);
-
-// memory_copy.wast:4263
-assert_return(() => call($19, "load8_u", [33431]), 0);
-
-// memory_copy.wast:4264
-assert_return(() => call($19, "load8_u", [33630]), 0);
-
-// memory_copy.wast:4265
-assert_return(() => call($19, "load8_u", [33829]), 0);
-
-// memory_copy.wast:4266
-assert_return(() => call($19, "load8_u", [34028]), 0);
-
-// memory_copy.wast:4267
-assert_return(() => call($19, "load8_u", [34227]), 0);
-
-// memory_copy.wast:4268
-assert_return(() => call($19, "load8_u", [34426]), 0);
-
-// memory_copy.wast:4269
-assert_return(() => call($19, "load8_u", [34625]), 0);
-
-// memory_copy.wast:4270
-assert_return(() => call($19, "load8_u", [34824]), 0);
-
-// memory_copy.wast:4271
-assert_return(() => call($19, "load8_u", [35023]), 0);
-
-// memory_copy.wast:4272
-assert_return(() => call($19, "load8_u", [35222]), 0);
-
-// memory_copy.wast:4273
-assert_return(() => call($19, "load8_u", [35421]), 0);
-
-// memory_copy.wast:4274
-assert_return(() => call($19, "load8_u", [35620]), 0);
-
-// memory_copy.wast:4275
-assert_return(() => call($19, "load8_u", [35819]), 0);
-
-// memory_copy.wast:4276
-assert_return(() => call($19, "load8_u", [36018]), 0);
-
-// memory_copy.wast:4277
-assert_return(() => call($19, "load8_u", [36217]), 0);
-
-// memory_copy.wast:4278
-assert_return(() => call($19, "load8_u", [36416]), 0);
-
-// memory_copy.wast:4279
-assert_return(() => call($19, "load8_u", [36615]), 0);
-
-// memory_copy.wast:4280
-assert_return(() => call($19, "load8_u", [36814]), 0);
-
-// memory_copy.wast:4281
-assert_return(() => call($19, "load8_u", [37013]), 0);
-
-// memory_copy.wast:4282
-assert_return(() => call($19, "load8_u", [37212]), 0);
-
-// memory_copy.wast:4283
-assert_return(() => call($19, "load8_u", [37411]), 0);
-
-// memory_copy.wast:4284
-assert_return(() => call($19, "load8_u", [37610]), 0);
-
-// memory_copy.wast:4285
-assert_return(() => call($19, "load8_u", [37809]), 0);
-
-// memory_copy.wast:4286
-assert_return(() => call($19, "load8_u", [38008]), 0);
-
-// memory_copy.wast:4287
-assert_return(() => call($19, "load8_u", [38207]), 0);
-
-// memory_copy.wast:4288
-assert_return(() => call($19, "load8_u", [38406]), 0);
-
-// memory_copy.wast:4289
-assert_return(() => call($19, "load8_u", [38605]), 0);
-
-// memory_copy.wast:4290
-assert_return(() => call($19, "load8_u", [38804]), 0);
-
-// memory_copy.wast:4291
-assert_return(() => call($19, "load8_u", [39003]), 0);
-
-// memory_copy.wast:4292
-assert_return(() => call($19, "load8_u", [39202]), 0);
-
-// memory_copy.wast:4293
-assert_return(() => call($19, "load8_u", [39401]), 0);
-
-// memory_copy.wast:4294
-assert_return(() => call($19, "load8_u", [39600]), 0);
-
-// memory_copy.wast:4295
-assert_return(() => call($19, "load8_u", [39799]), 0);
-
-// memory_copy.wast:4296
-assert_return(() => call($19, "load8_u", [39998]), 0);
-
-// memory_copy.wast:4297
-assert_return(() => call($19, "load8_u", [40197]), 0);
-
-// memory_copy.wast:4298
-assert_return(() => call($19, "load8_u", [40396]), 0);
-
-// memory_copy.wast:4299
-assert_return(() => call($19, "load8_u", [40595]), 0);
-
-// memory_copy.wast:4300
-assert_return(() => call($19, "load8_u", [40794]), 0);
-
-// memory_copy.wast:4301
-assert_return(() => call($19, "load8_u", [40993]), 0);
-
-// memory_copy.wast:4302
-assert_return(() => call($19, "load8_u", [41192]), 0);
-
-// memory_copy.wast:4303
-assert_return(() => call($19, "load8_u", [41391]), 0);
-
-// memory_copy.wast:4304
-assert_return(() => call($19, "load8_u", [41590]), 0);
-
-// memory_copy.wast:4305
-assert_return(() => call($19, "load8_u", [41789]), 0);
-
-// memory_copy.wast:4306
-assert_return(() => call($19, "load8_u", [41988]), 0);
-
-// memory_copy.wast:4307
-assert_return(() => call($19, "load8_u", [42187]), 0);
-
-// memory_copy.wast:4308
-assert_return(() => call($19, "load8_u", [42386]), 0);
-
-// memory_copy.wast:4309
-assert_return(() => call($19, "load8_u", [42585]), 0);
-
-// memory_copy.wast:4310
-assert_return(() => call($19, "load8_u", [42784]), 0);
-
-// memory_copy.wast:4311
-assert_return(() => call($19, "load8_u", [42983]), 0);
-
-// memory_copy.wast:4312
-assert_return(() => call($19, "load8_u", [43182]), 0);
-
-// memory_copy.wast:4313
-assert_return(() => call($19, "load8_u", [43381]), 0);
-
-// memory_copy.wast:4314
-assert_return(() => call($19, "load8_u", [43580]), 0);
-
-// memory_copy.wast:4315
-assert_return(() => call($19, "load8_u", [43779]), 0);
-
-// memory_copy.wast:4316
-assert_return(() => call($19, "load8_u", [43978]), 0);
-
-// memory_copy.wast:4317
-assert_return(() => call($19, "load8_u", [44177]), 0);
-
-// memory_copy.wast:4318
-assert_return(() => call($19, "load8_u", [44376]), 0);
-
-// memory_copy.wast:4319
-assert_return(() => call($19, "load8_u", [44575]), 0);
-
-// memory_copy.wast:4320
-assert_return(() => call($19, "load8_u", [44774]), 0);
-
-// memory_copy.wast:4321
-assert_return(() => call($19, "load8_u", [44973]), 0);
-
-// memory_copy.wast:4322
-assert_return(() => call($19, "load8_u", [45172]), 0);
-
-// memory_copy.wast:4323
-assert_return(() => call($19, "load8_u", [45371]), 0);
-
-// memory_copy.wast:4324
-assert_return(() => call($19, "load8_u", [45570]), 0);
-
-// memory_copy.wast:4325
-assert_return(() => call($19, "load8_u", [45769]), 0);
-
-// memory_copy.wast:4326
-assert_return(() => call($19, "load8_u", [45968]), 0);
-
-// memory_copy.wast:4327
-assert_return(() => call($19, "load8_u", [46167]), 0);
-
-// memory_copy.wast:4328
-assert_return(() => call($19, "load8_u", [46366]), 0);
-
-// memory_copy.wast:4329
-assert_return(() => call($19, "load8_u", [46565]), 0);
-
-// memory_copy.wast:4330
-assert_return(() => call($19, "load8_u", [46764]), 0);
-
-// memory_copy.wast:4331
-assert_return(() => call($19, "load8_u", [46963]), 0);
-
-// memory_copy.wast:4332
-assert_return(() => call($19, "load8_u", [47162]), 0);
-
-// memory_copy.wast:4333
-assert_return(() => call($19, "load8_u", [47361]), 0);
-
-// memory_copy.wast:4334
-assert_return(() => call($19, "load8_u", [47560]), 0);
-
-// memory_copy.wast:4335
-assert_return(() => call($19, "load8_u", [47759]), 0);
-
-// memory_copy.wast:4336
-assert_return(() => call($19, "load8_u", [47958]), 0);
-
-// memory_copy.wast:4337
-assert_return(() => call($19, "load8_u", [48157]), 0);
-
-// memory_copy.wast:4338
-assert_return(() => call($19, "load8_u", [48356]), 0);
-
-// memory_copy.wast:4339
-assert_return(() => call($19, "load8_u", [48555]), 0);
-
-// memory_copy.wast:4340
-assert_return(() => call($19, "load8_u", [48754]), 0);
-
-// memory_copy.wast:4341
-assert_return(() => call($19, "load8_u", [48953]), 0);
-
-// memory_copy.wast:4342
-assert_return(() => call($19, "load8_u", [49152]), 0);
-
-// memory_copy.wast:4343
-assert_return(() => call($19, "load8_u", [49351]), 0);
-
-// memory_copy.wast:4344
-assert_return(() => call($19, "load8_u", [49550]), 0);
-
-// memory_copy.wast:4345
-assert_return(() => call($19, "load8_u", [49749]), 0);
-
-// memory_copy.wast:4346
-assert_return(() => call($19, "load8_u", [49948]), 0);
-
-// memory_copy.wast:4347
-assert_return(() => call($19, "load8_u", [50147]), 0);
-
-// memory_copy.wast:4348
-assert_return(() => call($19, "load8_u", [50346]), 0);
-
-// memory_copy.wast:4349
-assert_return(() => call($19, "load8_u", [50545]), 0);
-
-// memory_copy.wast:4350
-assert_return(() => call($19, "load8_u", [50744]), 0);
-
-// memory_copy.wast:4351
-assert_return(() => call($19, "load8_u", [50943]), 0);
-
-// memory_copy.wast:4352
-assert_return(() => call($19, "load8_u", [51142]), 0);
-
-// memory_copy.wast:4353
-assert_return(() => call($19, "load8_u", [51341]), 0);
-
-// memory_copy.wast:4354
-assert_return(() => call($19, "load8_u", [51540]), 0);
-
-// memory_copy.wast:4355
-assert_return(() => call($19, "load8_u", [51739]), 0);
-
-// memory_copy.wast:4356
-assert_return(() => call($19, "load8_u", [51938]), 0);
-
-// memory_copy.wast:4357
-assert_return(() => call($19, "load8_u", [52137]), 0);
-
-// memory_copy.wast:4358
-assert_return(() => call($19, "load8_u", [52336]), 0);
-
-// memory_copy.wast:4359
-assert_return(() => call($19, "load8_u", [52535]), 0);
-
-// memory_copy.wast:4360
-assert_return(() => call($19, "load8_u", [52734]), 0);
-
-// memory_copy.wast:4361
-assert_return(() => call($19, "load8_u", [52933]), 0);
-
-// memory_copy.wast:4362
-assert_return(() => call($19, "load8_u", [53132]), 0);
-
-// memory_copy.wast:4363
-assert_return(() => call($19, "load8_u", [53331]), 0);
-
-// memory_copy.wast:4364
-assert_return(() => call($19, "load8_u", [53530]), 0);
-
-// memory_copy.wast:4365
-assert_return(() => call($19, "load8_u", [53729]), 0);
-
-// memory_copy.wast:4366
-assert_return(() => call($19, "load8_u", [53928]), 0);
-
-// memory_copy.wast:4367
-assert_return(() => call($19, "load8_u", [54127]), 0);
-
-// memory_copy.wast:4368
-assert_return(() => call($19, "load8_u", [54326]), 0);
-
-// memory_copy.wast:4369
-assert_return(() => call($19, "load8_u", [54525]), 0);
-
-// memory_copy.wast:4370
-assert_return(() => call($19, "load8_u", [54724]), 0);
-
-// memory_copy.wast:4371
-assert_return(() => call($19, "load8_u", [54923]), 0);
-
-// memory_copy.wast:4372
-assert_return(() => call($19, "load8_u", [55122]), 0);
-
-// memory_copy.wast:4373
-assert_return(() => call($19, "load8_u", [55321]), 0);
-
-// memory_copy.wast:4374
-assert_return(() => call($19, "load8_u", [55520]), 0);
-
-// memory_copy.wast:4375
-assert_return(() => call($19, "load8_u", [55719]), 0);
-
-// memory_copy.wast:4376
-assert_return(() => call($19, "load8_u", [55918]), 0);
-
-// memory_copy.wast:4377
-assert_return(() => call($19, "load8_u", [56117]), 0);
-
-// memory_copy.wast:4378
-assert_return(() => call($19, "load8_u", [56316]), 0);
-
-// memory_copy.wast:4379
-assert_return(() => call($19, "load8_u", [56515]), 0);
-
-// memory_copy.wast:4380
-assert_return(() => call($19, "load8_u", [56714]), 0);
-
-// memory_copy.wast:4381
-assert_return(() => call($19, "load8_u", [56913]), 0);
-
-// memory_copy.wast:4382
-assert_return(() => call($19, "load8_u", [57112]), 0);
-
-// memory_copy.wast:4383
-assert_return(() => call($19, "load8_u", [57311]), 0);
-
-// memory_copy.wast:4384
-assert_return(() => call($19, "load8_u", [57510]), 0);
-
-// memory_copy.wast:4385
-assert_return(() => call($19, "load8_u", [57709]), 0);
-
-// memory_copy.wast:4386
-assert_return(() => call($19, "load8_u", [57908]), 0);
-
-// memory_copy.wast:4387
-assert_return(() => call($19, "load8_u", [58107]), 0);
-
-// memory_copy.wast:4388
-assert_return(() => call($19, "load8_u", [58306]), 0);
-
-// memory_copy.wast:4389
-assert_return(() => call($19, "load8_u", [58505]), 0);
-
-// memory_copy.wast:4390
-assert_return(() => call($19, "load8_u", [58704]), 0);
-
-// memory_copy.wast:4391
-assert_return(() => call($19, "load8_u", [58903]), 0);
-
-// memory_copy.wast:4392
-assert_return(() => call($19, "load8_u", [59102]), 0);
-
-// memory_copy.wast:4393
-assert_return(() => call($19, "load8_u", [59301]), 0);
-
-// memory_copy.wast:4394
-assert_return(() => call($19, "load8_u", [59500]), 0);
-
-// memory_copy.wast:4395
-assert_return(() => call($19, "load8_u", [59699]), 0);
-
-// memory_copy.wast:4396
-assert_return(() => call($19, "load8_u", [59898]), 0);
-
-// memory_copy.wast:4397
-assert_return(() => call($19, "load8_u", [60097]), 0);
-
-// memory_copy.wast:4398
-assert_return(() => call($19, "load8_u", [60296]), 0);
-
-// memory_copy.wast:4399
-assert_return(() => call($19, "load8_u", [60495]), 0);
-
-// memory_copy.wast:4400
-assert_return(() => call($19, "load8_u", [60694]), 0);
-
-// memory_copy.wast:4401
-assert_return(() => call($19, "load8_u", [60893]), 0);
-
-// memory_copy.wast:4402
-assert_return(() => call($19, "load8_u", [61092]), 0);
-
-// memory_copy.wast:4403
-assert_return(() => call($19, "load8_u", [61291]), 0);
-
-// memory_copy.wast:4404
-assert_return(() => call($19, "load8_u", [61440]), 0);
-
-// memory_copy.wast:4405
-assert_return(() => call($19, "load8_u", [61441]), 1);
-
-// memory_copy.wast:4406
-assert_return(() => call($19, "load8_u", [61442]), 2);
-
-// memory_copy.wast:4407
-assert_return(() => call($19, "load8_u", [61443]), 3);
-
-// memory_copy.wast:4408
-assert_return(() => call($19, "load8_u", [61444]), 4);
-
-// memory_copy.wast:4409
-assert_return(() => call($19, "load8_u", [61445]), 5);
-
-// memory_copy.wast:4410
-assert_return(() => call($19, "load8_u", [61446]), 6);
-
-// memory_copy.wast:4411
-assert_return(() => call($19, "load8_u", [61447]), 7);
-
-// memory_copy.wast:4412
-assert_return(() => call($19, "load8_u", [61448]), 8);
-
-// memory_copy.wast:4413
-assert_return(() => call($19, "load8_u", [61449]), 9);
-
-// memory_copy.wast:4414
-assert_return(() => call($19, "load8_u", [61450]), 10);
-
-// memory_copy.wast:4415
-assert_return(() => call($19, "load8_u", [61451]), 11);
-
-// memory_copy.wast:4416
-assert_return(() => call($19, "load8_u", [61452]), 12);
-
-// memory_copy.wast:4417
-assert_return(() => call($19, "load8_u", [61453]), 13);
-
-// memory_copy.wast:4418
-assert_return(() => call($19, "load8_u", [61454]), 14);
-
-// memory_copy.wast:4419
-assert_return(() => call($19, "load8_u", [61455]), 15);
-
-// memory_copy.wast:4420
-assert_return(() => call($19, "load8_u", [61456]), 16);
-
-// memory_copy.wast:4421
-assert_return(() => call($19, "load8_u", [61457]), 17);
-
-// memory_copy.wast:4422
-assert_return(() => call($19, "load8_u", [61458]), 18);
-
-// memory_copy.wast:4423
-assert_return(() => call($19, "load8_u", [61459]), 19);
-
-// memory_copy.wast:4424
-assert_return(() => call($19, "load8_u", [61510]), 0);
-
-// memory_copy.wast:4425
-assert_return(() => call($19, "load8_u", [61709]), 0);
-
-// memory_copy.wast:4426
-assert_return(() => call($19, "load8_u", [61908]), 0);
-
-// memory_copy.wast:4427
-assert_return(() => call($19, "load8_u", [62107]), 0);
-
-// memory_copy.wast:4428
-assert_return(() => call($19, "load8_u", [62306]), 0);
-
-// memory_copy.wast:4429
-assert_return(() => call($19, "load8_u", [62505]), 0);
-
-// memory_copy.wast:4430
-assert_return(() => call($19, "load8_u", [62704]), 0);
-
-// memory_copy.wast:4431
-assert_return(() => call($19, "load8_u", [62903]), 0);
-
-// memory_copy.wast:4432
-assert_return(() => call($19, "load8_u", [63102]), 0);
-
-// memory_copy.wast:4433
-assert_return(() => call($19, "load8_u", [63301]), 0);
-
-// memory_copy.wast:4434
-assert_return(() => call($19, "load8_u", [63500]), 0);
-
-// memory_copy.wast:4435
-assert_return(() => call($19, "load8_u", [63699]), 0);
-
-// memory_copy.wast:4436
-assert_return(() => call($19, "load8_u", [63898]), 0);
-
-// memory_copy.wast:4437
-assert_return(() => call($19, "load8_u", [64097]), 0);
-
-// memory_copy.wast:4438
-assert_return(() => call($19, "load8_u", [64296]), 0);
-
-// memory_copy.wast:4439
-assert_return(() => call($19, "load8_u", [64495]), 0);
-
-// memory_copy.wast:4440
-assert_return(() => call($19, "load8_u", [64694]), 0);
-
-// memory_copy.wast:4441
-assert_return(() => call($19, "load8_u", [64893]), 0);
-
-// memory_copy.wast:4442
-assert_return(() => call($19, "load8_u", [65092]), 0);
-
-// memory_copy.wast:4443
-assert_return(() => call($19, "load8_u", [65291]), 0);
-
-// memory_copy.wast:4444
-assert_return(() => call($19, "load8_u", [65490]), 0);
-
-// memory_copy.wast:4446
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4452
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4459
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4466
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4473
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4480
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4487
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4494
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4501
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4508
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4515
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4522
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4529
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4536
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4543
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4550
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4557
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4564
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4571
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4578
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4585
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4592
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4599
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4606
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4613
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4620
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4627
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4634
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4641
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4648
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4655
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4662
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4669
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4676
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4683
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4690
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4697
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4704
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4711
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4718
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4725
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4732
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4739
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4746
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4753
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4760
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4767
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4774
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4781
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4788
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4795
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4802
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4809
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4816
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4823
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4830
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4837
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4844
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4851
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4858
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4865
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4872
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4879
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4886
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa7\x80\x80\x80\x00\x01\xa1\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4894
-let $20 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x00\x00\x60\x03\x7f\x7f\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x00\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x01\x0a\xc8\x80\x80\x80\x00\x02\x96\x80\x80\x80\x00\x00\x41\x0a\x41\xd5\x00\x41\x0a\xfc\x0b\x00\x41\x09\x41\x0a\x41\x05\xfc\x0a\x00\x00\x0b\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b");
-
-// memory_copy.wast:4911
-run(() => call($20, "test", []));
-
-// memory_copy.wast:4913
-assert_return(() => call($20, "checkRange", [0, 9, 0]), -1);
-
-// memory_copy.wast:4915
-assert_return(() => call($20, "checkRange", [9, 20, 85]), -1);
-
-// memory_copy.wast:4917
-assert_return(() => call($20, "checkRange", [20, 65536, 0]), -1);
-
-// memory_copy.wast:4920
-let $21 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x00\x00\x60\x03\x7f\x7f\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x00\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x01\x0a\xc8\x80\x80\x80\x00\x02\x96\x80\x80\x80\x00\x00\x41\x0a\x41\xd5\x00\x41\x0a\xfc\x0b\x00\x41\x10\x41\x0f\x41\x05\xfc\x0a\x00\x00\x0b\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b");
-
-// memory_copy.wast:4937
-run(() => call($21, "test", []));
-
-// memory_copy.wast:4939
-assert_return(() => call($21, "checkRange", [0, 10, 0]), -1);
-
-// memory_copy.wast:4941
-assert_return(() => call($21, "checkRange", [10, 21, 85]), -1);
-
-// memory_copy.wast:4943
-assert_return(() => call($21, "checkRange", [21, 65536, 0]), -1);
-
-// memory_copy.wast:4946
-let $22 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x41\x80\xfe\x03\x41\x80\x80\x02\x41\x81\x02\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4950
-assert_trap(() => call($22, "test", []));
-
-// memory_copy.wast:4952
-let $23 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\x41\x80\x7e\x41\x80\x80\x01\x41\x81\x02\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4956
-assert_trap(() => call($23, "test", []));
-
-// memory_copy.wast:4958
-let $24 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x41\x80\x80\x02\x41\x80\xfe\x03\x41\x81\x02\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4962
-assert_trap(() => call($24, "test", []));
-
-// memory_copy.wast:4964
-let $25 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\x41\x80\x80\x01\x41\x80\x7e\x41\x81\x02\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4968
-assert_trap(() => call($25, "test", []));
-
-// memory_copy.wast:4970
-let $26 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x00\x00\x60\x03\x7f\x7f\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x00\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x01\x0a\xdc\x80\x80\x80\x00\x02\xaa\x80\x80\x80\x00\x00\x41\x00\x41\xd5\x00\x41\x80\x80\x02\xfc\x0b\x00\x41\x80\x80\x02\x41\xaa\x01\x41\x80\x80\x02\xfc\x0b\x00\x41\x80\xa0\x02\x41\x80\xe0\x01\x41\x00\xfc\x0a\x00\x00\x0b\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b");
-
-// memory_copy.wast:4988
-run(() => call($26, "test", []));
-
-// memory_copy.wast:4990
-assert_return(() => call($26, "checkRange", [0, 32768, 85]), -1);
-
-// memory_copy.wast:4992
-assert_return(() => call($26, "checkRange", [32768, 65536, 170]), -1);
-
-// memory_copy.wast:4994
-let $27 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\x41\x80\x80\x04\x41\x80\xe0\x01\x41\x00\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:4998
-run(() => call($27, "test", []));
-
-// memory_copy.wast:5000
-let $28 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\x41\x80\xa0\x02\x41\x80\x80\x04\x41\x00\xfc\x0a\x00\x00\x0b");
-
-// memory_copy.wast:5004
-run(() => call($28, "test", []));
-
-// memory_copy.wast:5006
-let $29 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x00\x00\x60\x03\x7f\x7f\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x00\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x01\x0a\xbe\x95\x80\x80\x00\x02\x8c\x95\x80\x80\x00\x00\x41\xe7\x8a\x01\x41\x01\x41\xc0\x0a\xfc\x0b\x00\x41\xe9\xb0\x02\x41\x02\x41\x9f\x08\xfc\x0b\x00\x41\xd1\xb8\x03\x41\x03\x41\xdc\x07\xfc\x0b\x00\x41\xca\xa8\x02\x41\x04\x41\xc2\x02\xfc\x0b\x00\x41\xa9\x3e\x41\x05\x41\xca\x0f\xfc\x0b\x00\x41\xba\xb1\x01\x41\x06\x41\xdc\x17\xfc\x0b\x00\x41\xf2\x83\x01\x41\x07\x41\xc4\x12\xfc\x0b\x00\x41\xe3\xd3\x02\x41\x08\x41\xc3\x06\xfc\x0b\x00\x41\xfc\x00\x41\x09\x41\xf1\x0a\xfc\x0b\x00\x41\xd4\x10\x41\x0a\x41\xc6\x15\xfc\x0b\x00\x41\x9b\xc6\x00\x41\x0b\x41\x9a\x18\xfc\x0b\x00\x41\xe7\x9b\x03\x41\x0c\x41\xe5\x05\xfc\x0b\x00\x41\xf6\x1e\x41\x0d\x41\x87\x16\xfc\x0b\x00\x41\xb3\x84\x03\x41\x0e\x41\x80\x0a\xfc\x0b\x00\x41\xc9\x89\x03\x41\x0f\x41\xba\x0b\xfc\x0b\x00\x41\x8d\xa0\x01\x41\x10\x41\xd6\x18\xfc\x0b\x00\x41\xb1\xf4\x02\x41\x11\x41\xa0\x04\xfc\x0b\x00\x41\xa3\xe1\x00\x41\x12\x41\xed\x14\xfc\x0b\x00\x41\xa5\xc2\x01\x41\x13\x41\xdb\x14\xfc\x0b\x00\x41\x85\xe2\x02\x41\x14\x41\xa2\x0c\xfc\x0b\x00\x41\xd8\xd0\x02\x41\x15\x41\x9b\x0d\xfc\x0b\x00\x41\xde\x88\x02\x41\x16\x41\x86\x05\xfc\x0b\x00\x41\xab\xfb\x02\x41\x17\x41\xc2\x0e\xfc\x0b\x00\x41\xcd\xa1\x03\x41\x18\x41\xe1\x14\xfc\x0b\x00\x41\x9b\xed\x01\x41\x19\x41\xd5\x07\xfc\x0b\x00\x41\xd4\xc8\x00\x41\x1a\x41\x8f\x0e\xfc\x0b\x00\x41\x8e\x88\x03\x41\x1b\x41\xe7\x03\xfc\x0b\x00\x41\xa1\xea\x03\x41\x1c\x41\x92\x04\xfc\x0b\x00\x41\xdc\x9b\x02\x41\x1d\x41\xaf\x07\xfc\x0b\x00\x41\xf0\x34\x41\x1e\x41\xfd\x02\xfc\x0b\x00\x41\xbe\x90\x03\x41\x1f\x41\x91\x18\xfc\x0b\x00\x41\xc1\x84\x03\x41\x20\x41\x92\x05\xfc\x0b\x00\x41\xfc\xdb\x02\x41\x21\x41\xa6\x0d\xfc\x0b\x00\x41\xbe\x84\x02\x41\x22\x41\xc4\x08\xfc\x0b\x00\x41\xfe\x8c\x03\x41\x23\x41\x8
2\x0b\xfc\x0b\x00\x41\xea\xf3\x02\x41\x24\x41\x9c\x11\xfc\x0b\x00\x41\xeb\xa6\x03\x41\x25\x41\xda\x12\xfc\x0b\x00\x41\x8f\xaf\x03\x41\x26\x41\xfa\x01\xfc\x0b\x00\x41\xdc\xb0\x01\x41\x27\x41\xb1\x10\xfc\x0b\x00\x41\xec\x85\x01\x41\x28\x41\xc0\x19\xfc\x0b\x00\x41\xbb\xa8\x03\x41\x29\x41\xe3\x19\xfc\x0b\x00\x41\xb2\xb4\x02\x41\x2a\x41\xec\x15\xfc\x0b\x00\x41\xbc\x9a\x02\x41\x2b\x41\x96\x10\xfc\x0b\x00\x41\xec\x93\x02\x41\x2c\x41\xcb\x15\xfc\x0b\x00\x41\xdb\xff\x01\x41\x2d\x41\xb8\x02\xfc\x0b\x00\x41\x82\xf2\x03\x41\x2e\x41\xc0\x01\xfc\x0b\x00\x41\xfe\xf1\x01\x41\x2f\x41\xd4\x04\xfc\x0b\x00\x41\xfb\x81\x01\x41\x30\x41\xf5\x03\xfc\x0b\x00\x41\xaa\xbd\x03\x41\x31\x41\xae\x05\xfc\x0b\x00\x41\xfb\x8b\x02\x41\x32\x41\x81\x03\xfc\x0b\x00\x41\xd1\xdb\x03\x41\x33\x41\x87\x07\xfc\x0b\x00\x41\x85\xe0\x03\x41\x34\x41\xd6\x12\xfc\x0b\x00\x41\xfc\xee\x02\x41\x35\x41\xa1\x0b\xfc\x0b\x00\x41\xf5\xca\x01\x41\x36\x41\xda\x18\xfc\x0b\x00\x41\xbe\x2b\x41\x37\x41\xd7\x10\xfc\x0b\x00\x41\x89\x99\x02\x41\x38\x41\x87\x04\xfc\x0b\x00\x41\xdc\xde\x02\x41\x39\x41\xd0\x19\xfc\x0b\x00\x41\xa8\xed\x02\x41\x3a\x41\x8e\x0d\xfc\x0b\x00\x41\x8f\xec\x02\x41\x3b\x41\xe0\x18\xfc\x0b\x00\x41\xb1\xaf\x01\x41\x3c\x41\xa1\x0b\xfc\x0b\x00\x41\xf1\xc9\x03\x41\x3d\x41\x97\x05\xfc\x0b\x00\x41\x85\xfc\x01\x41\x3e\x41\x87\x0d\xfc\x0b\x00\x41\xf7\x17\x41\x3f\x41\xd1\x05\xfc\x0b\x00\x41\xe9\x89\x02\x41\xc0\x00\x41\xd4\x00\xfc\x0b\x00\x41\xba\x84\x02\x41\xc1\x00\x41\xed\x0f\xfc\x0b\x00\x41\xca\x9f\x02\x41\xc2\x00\x41\x1d\xfc\x0b\x00\x41\xcb\x95\x01\x41\xc3\x00\x41\xda\x17\xfc\x0b\x00\x41\xc8\xe2\x00\x41\xc4\x00\x41\x93\x08\xfc\x0b\x00\x41\xe4\x8e\x01\x41\xc5\x00\x41\xfc\x19\xfc\x0b\x00\x41\x9f\x24\x41\xc6\x00\x41\xc3\x08\xfc\x0b\x00\x41\x9e\xfe\x00\x41\xc7\x00\x41\xcd\x0f\xfc\x0b\x00\x41\x9c\x8e\x01\x41\xc8\x00\x41\xd3\x11\xfc\x0b\x00\x41\xe4\x8a\x03\x41\xc9\x00\x41\xf5\x18\xfc\x0b\x00\x41\x94\xd6\x00\x41\xca\x00\x41\xb0\x0f\xfc\x0b\x00\x41\xda\xfc\x00\x41\xcb\x00\x41\xaf\x0b\xfc\x0b\x00\x41\xde\xe2\x02\x41\xcc\x00\x4
1\x99\x09\xfc\x0b\x00\x41\xf9\xa6\x03\x41\xcd\x00\x41\xa0\x0c\xfc\x0b\x00\x41\xbb\x82\x02\x41\xce\x00\x41\xea\x0c\xfc\x0b\x00\x41\xe4\xdc\x03\x41\xcf\x00\x41\xd4\x19\xfc\x0b\x00\x41\x91\x94\x03\x41\xd0\x00\x41\xdf\x01\xfc\x0b\x00\x41\x89\x22\x41\xd1\x00\x41\xfb\x10\xfc\x0b\x00\x41\xaa\xc1\x03\x41\xd2\x00\x41\xaa\x0a\xfc\x0b\x00\x41\xac\xb3\x03\x41\xd3\x00\x41\xd8\x14\xfc\x0b\x00\x41\x9b\xbc\x01\x41\xd4\x00\x41\x95\x08\xfc\x0b\x00\x41\xaf\xd1\x02\x41\xd5\x00\x41\x99\x18\xfc\x0b\x00\x41\xb3\xfc\x01\x41\xd6\x00\x41\xec\x15\xfc\x0b\x00\x41\xe3\x1d\x41\xd7\x00\x41\xda\x0f\xfc\x0b\x00\x41\xc8\xac\x03\x41\xd8\x00\x41\x00\xfc\x0b\x00\x41\x95\x86\x03\x41\xd9\x00\x41\x95\x10\xfc\x0b\x00\x41\xbb\x9f\x01\x41\xda\x00\x41\xd0\x16\xfc\x0b\x00\x41\xa2\x88\x02\x41\xdb\x00\x41\xc0\x01\xfc\x0b\x00\x41\xba\xc9\x00\x41\xdc\x00\x41\x93\x11\xfc\x0b\x00\x41\xfd\xe0\x00\x41\xdd\x00\x41\x18\xfc\x0b\x00\x41\x8b\xee\x00\x41\xde\x00\x41\xc1\x04\xfc\x0b\x00\x41\x9a\xd8\x02\x41\xdf\x00\x41\xa9\x10\xfc\x0b\x00\x41\xff\x9e\x02\x41\xe0\x00\x41\xec\x1a\xfc\x0b\x00\x41\xf8\xb5\x01\x41\xe1\x00\x41\xcd\x15\xfc\x0b\x00\x41\xf8\x31\x41\xe2\x00\x41\xbe\x06\xfc\x0b\x00\x41\x9b\x84\x02\x41\xe3\x00\x41\x92\x0f\xfc\x0b\x00\x41\xb5\xab\x01\x41\xe4\x00\x41\xbe\x15\xfc\x0b\x00\x41\xce\xce\x03\x41\xe8\xa7\x03\x41\xb2\x10\xfc\x0a\x00\x00\x41\xb2\xec\x03\x41\xb8\xb2\x02\x41\xe6\x01\xfc\x0a\x00\x00\x41\xf9\x94\x03\x41\xcd\xb8\x01\x41\xfc\x11\xfc\x0a\x00\x00\x41\xb4\x34\x41\xbc\xbb\x01\x41\xff\x04\xfc\x0a\x00\x00\x41\xce\x36\x41\xf7\x84\x02\x41\xc9\x08\xfc\x0a\x00\x00\x41\xcb\x97\x01\x41\xec\xd0\x00\x41\xfd\x18\xfc\x0a\x00\x00\x41\xac\xd5\x01\x41\x86\xa9\x03\x41\xe4\x00\xfc\x0a\x00\x00\x41\xd5\xd4\x01\x41\xa2\xd5\x02\x41\xb5\x0d\xfc\x0a\x00\x00\x41\xf0\xd8\x03\x41\xb5\xc3\x00\x41\xf7\x00\xfc\x0a\x00\x00\x41\xbb\x2e\x41\x84\x12\x41\x92\x05\xfc\x0a\x00\x00\x41\xb3\x25\x41\xaf\x93\x03\x41\xdd\x11\xfc\x0a\x00\x00\x41\xc9\xe2\x00\x41\xfd\x95\x01\x41\xc1\x06\xfc\x0a\x00\x00\x41\xce\xdc\x00\x41\xa9\xeb\x02\x41\xe4\x19\xfc\x0
a\x00\x00\x41\xf0\xd8\x00\x41\xd4\xdf\x02\x41\xe9\x11\xfc\x0a\x00\x00\x41\x8a\x8b\x02\x41\xa9\x34\x41\x8c\x14\xfc\x0a\x00\x00\x41\xc8\x26\x41\x9a\x0d\x41\xb0\x0a\xfc\x0a\x00\x00\x41\xbc\xed\x03\x41\xd5\x3b\x41\x86\x0d\xfc\x0a\x00\x00\x41\x98\xdc\x02\x41\xa8\x8f\x01\x41\x21\xfc\x0a\x00\x00\x41\x8e\xd7\x02\x41\xcc\xae\x01\x41\x93\x0b\xfc\x0a\x00\x00\x41\xad\xec\x02\x41\x9b\x85\x03\x41\x9a\x0b\xfc\x0a\x00\x00\x41\xc4\xf1\x03\x41\xb3\xc4\x00\x41\xc2\x06\xfc\x0a\x00\x00\x41\xcd\x85\x02\x41\xa3\x9d\x01\x41\xf5\x19\xfc\x0a\x00\x00\x41\xff\xbc\x02\x41\xad\xa8\x03\x41\x81\x19\xfc\x0a\x00\x00\x41\xd4\xc9\x01\x41\xf6\xce\x03\x41\x94\x13\xfc\x0a\x00\x00\x41\xde\x99\x01\x41\xb2\xbc\x03\x41\xda\x02\xfc\x0a\x00\x00\x41\xec\xfb\x00\x41\xca\x98\x02\x41\xfe\x12\xfc\x0a\x00\x00\x41\xb0\xdc\x00\x41\xf6\x95\x02\x41\xac\x02\xfc\x0a\x00\x00\x41\xa3\xd0\x03\x41\x85\xed\x00\x41\xd1\x18\xfc\x0a\x00\x00\x41\xfb\x8b\x02\x41\xb2\xd9\x03\x41\x81\x0a\xfc\x0a\x00\x00\x41\x84\xc6\x00\x41\xf4\xdf\x00\x41\xaf\x07\xfc\x0a\x00\x00\x41\x8b\x16\x41\xb9\xd1\x00\x41\xdf\x0e\xfc\x0a\x00\x00\x41\xba\xd1\x02\x41\x86\xd7\x02\x41\xe2\x05\xfc\x0a\x00\x00\x41\xbe\xec\x03\x41\x85\x94\x01\x41\xfa\x00\xfc\x0a\x00\x00\x41\xec\xbb\x01\x41\xd9\xdd\x02\x41\xdb\x0d\xfc\x0a\x00\x00\x41\xd0\xb0\x01\x41\xa3\xf3\x00\x41\xbe\x05\xfc\x0a\x00\x00\x41\x94\xd8\x00\x41\xd3\xcf\x01\x41\xa6\x0e\xfc\x0a\x00\x00\x41\xb4\xb4\x01\x41\xf7\x9f\x01\x41\xa8\x08\xfc\x0a\x00\x00\x41\xa0\xbf\x03\x41\xf2\xab\x03\x41\xc7\x14\xfc\x0a\x00\x00\x41\x94\xc7\x01\x41\x81\x08\x41\xa9\x18\xfc\x0a\x00\x00\x41\xb4\x83\x03\x41\xbc\xd9\x02\x41\xcf\x07\xfc\x0a\x00\x00\x41\xf8\xdc\x01\x41\xfa\xc5\x02\x41\xa0\x12\xfc\x0a\x00\x00\x41\xe9\xde\x03\x41\xe6\x01\x41\xb8\x16\xfc\x0a\x00\x00\x41\xd0\xaf\x01\x41\x9a\x9a\x03\x41\x95\x11\xfc\x0a\x00\x00\x41\xe9\xbc\x02\x41\xea\xca\x00\x41\xa6\x0f\xfc\x0a\x00\x00\x41\xcc\xe2\x01\x41\xfe\xa2\x01\x41\x8a\x11\xfc\x0a\x00\x00\x41\xa5\x9e\x03\x41\xb3\xd7\x02\x41\x8d\x08\xfc\x0a\x00\x00\x41\x84\xc7\x01\x41\xd3\x96\x02\x41\xf2\x0
c\xfc\x0a\x00\x00\x41\x94\xc9\x03\x41\xfb\xe5\x02\x41\xc2\x0f\xfc\x0a\x00\x00\x41\x99\xab\x02\x41\x90\x2d\x41\xa3\x0f\xfc\x0a\x00\x00\x41\xd7\xde\x01\x41\xc4\xb0\x03\x41\xc0\x12\xfc\x0a\x00\x00\x41\x9b\xe9\x03\x41\xbc\x8d\x01\x41\xcc\x0a\xfc\x0a\x00\x00\x41\xe5\x87\x03\x41\xa5\xec\x00\x41\xfe\x02\xfc\x0a\x00\x00\x41\x88\x84\x01\x41\xf5\x9b\x02\x41\xec\x0e\xfc\x0a\x00\x00\x41\xe2\xf7\x02\x41\xde\xd8\x00\x41\xf7\x15\xfc\x0a\x00\x00\x41\xe0\xde\x01\x41\xaa\xbb\x02\x41\xc3\x02\xfc\x0a\x00\x00\x41\xb2\x95\x02\x41\xd0\xd9\x01\x41\x86\x0d\xfc\x0a\x00\x00\x41\xfa\xeb\x03\x41\xd4\xa0\x03\x41\xbd\x0a\xfc\x0a\x00\x00\x41\xb5\xee\x00\x41\xe8\xe9\x02\x41\x84\x05\xfc\x0a\x00\x00\x41\xe6\xe2\x01\x41\x82\x95\x01\x41\xf0\x03\xfc\x0a\x00\x00\x41\x98\xdf\x02\x41\xd9\xf3\x02\x41\xe0\x15\xfc\x0a\x00\x00\x41\x87\xb5\x02\x41\xf5\xdc\x02\x41\xc6\x0a\xfc\x0a\x00\x00\x41\xf0\xd0\x00\x41\xda\xe4\x01\x41\xc3\x0b\xfc\x0a\x00\x00\x41\xbf\xee\x02\x41\xe2\xe8\x02\x41\xbb\x0b\xfc\x0a\x00\x00\x41\xa9\x26\x41\xc4\xe0\x01\x41\xe7\x0e\xfc\x0a\x00\x00\x41\xfc\xa8\x02\x41\xa5\xbf\x03\x41\xd7\x0d\xfc\x0a\x00\x00\x41\xce\xce\x01\x41\xd7\xd4\x01\x41\xe7\x08\xfc\x0a\x00\x00\x41\xd3\xcb\x03\x41\xd1\xc0\x01\x41\xa7\x08\xfc\x0a\x00\x00\x41\xac\xdf\x03\x41\x86\xaf\x02\x41\xfe\x05\xfc\x0a\x00\x00\x41\x80\xd9\x02\x41\xec\x11\x41\xf0\x0b\xfc\x0a\x00\x00\x41\xe4\xff\x01\x41\x85\xf1\x02\x41\xc6\x17\xfc\x0a\x00\x00\x41\x8c\xd7\x00\x41\x8c\xa6\x01\x41\xf3\x07\xfc\x0a\x00\x00\x41\xf1\x3b\x41\xfc\xf6\x01\x41\xda\x17\xfc\x0a\x00\x00\x41\xfc\x8c\x01\x41\xbb\xe5\x00\x41\xf8\x19\xfc\x0a\x00\x00\x41\xda\xbf\x03\x41\xe1\xb4\x03\x41\xb4\x02\xfc\x0a\x00\x00\x41\xe3\xc0\x01\x41\xaf\x83\x01\x41\x83\x09\xfc\x0a\x00\x00\x41\xbc\x9b\x01\x41\x83\xcf\x00\x41\xd2\x05\xfc\x0a\x00\x00\x41\xe9\x16\x41\xaf\x2e\x41\xc2\x12\xfc\x0a\x00\x00\x41\xff\xfb\x01\x41\xaf\x87\x03\x41\xee\x16\xfc\x0a\x00\x00\x41\x96\xf6\x00\x41\x93\x87\x01\x41\xaf\x14\xfc\x0a\x00\x00\x41\x87\xe4\x02\x41\x9f\xde\x01\x41\xfd\x0f\xfc\x0a\x00\x00\x41\xed\xae\x03\x41\x91\x9
a\x02\x41\xa4\x14\xfc\x0a\x00\x00\x41\xad\xde\x01\x41\x8d\xa7\x03\x41\x90\x09\xfc\x0a\x00\x00\x41\xcf\xf6\x02\x41\x89\xa1\x03\x41\xc1\x18\xfc\x0a\x00\x00\x41\xb6\xef\x01\x41\xe3\xe0\x02\x41\xd9\x14\xfc\x0a\x00\x00\x41\xc1\x27\x41\xc7\x21\x41\x34\xfc\x0a\x00\x00\x41\xa4\x34\x41\x83\xbd\x01\x41\xb9\x03\xfc\x0a\x00\x00\x41\xd8\x81\x02\x41\xed\xd3\x01\x41\xf5\x1a\xfc\x0a\x00\x00\x41\x92\xfe\x01\x41\xec\xcf\x03\x41\xe1\x15\xfc\x0a\x00\x00\x41\xb9\x8c\x02\x41\x82\xc6\x00\x41\xe6\x12\xfc\x0a\x00\x00\x41\xe5\x8b\x01\x41\x8a\xaa\x03\x41\xb5\x1a\xfc\x0a\x00\x00\x41\x9d\xb1\x01\x41\xf7\xd8\x02\x41\x88\x01\xfc\x0a\x00\x00\x41\xd1\xcd\x03\x41\xa5\x37\x41\x95\x08\xfc\x0a\x00\x00\x41\xc1\xcf\x02\x41\xf4\xad\x03\x41\xd5\x12\xfc\x0a\x00\x00\x41\x95\xdd\x02\x41\xaa\x9d\x01\x41\xed\x06\xfc\x0a\x00\x00\x41\xca\x9f\x02\x41\xec\xc4\x01\x41\xf7\x1a\xfc\x0a\x00\x00\x41\xae\xe5\x02\x41\x90\xf9\x01\x41\xd6\x06\xfc\x0a\x00\x00\x41\xac\xbd\x01\x41\xfa\xf8\x01\x41\xe1\x0a\xfc\x0a\x00\x00\x41\xf2\x87\x02\x41\xb4\x05\x41\xba\x0c\xfc\x0a\x00\x00\x41\xca\xd9\x03\x41\x99\x91\x01\x41\xab\x17\xfc\x0a\x00\x00\x41\xc2\x89\x03\x41\xb7\xc2\x02\x41\xfe\x0a\xfc\x0a\x00\x00\x0b\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b");
-
-// memory_copy.wast:5222
-run(() => call($29, "test", []));
-
-// memory_copy.wast:5224
-assert_return(() => call($29, "checkRange", [0, 124, 0]), -1);
-
-// memory_copy.wast:5226
-assert_return(() => call($29, "checkRange", [124, 1517, 9]), -1);
-
-// memory_copy.wast:5228
-assert_return(() => call($29, "checkRange", [1517, 2132, 0]), -1);
-
-// memory_copy.wast:5230
-assert_return(() => call($29, "checkRange", [2132, 2827, 10]), -1);
-
-// memory_copy.wast:5232
-assert_return(() => call($29, "checkRange", [2827, 2921, 92]), -1);
-
-// memory_copy.wast:5234
-assert_return(() => call($29, "checkRange", [2921, 3538, 83]), -1);
-
-// memory_copy.wast:5236
-assert_return(() => call($29, "checkRange", [3538, 3786, 77]), -1);
-
-// memory_copy.wast:5238
-assert_return(() => call($29, "checkRange", [3786, 4042, 97]), -1);
-
-// memory_copy.wast:5240
-assert_return(() => call($29, "checkRange", [4042, 4651, 99]), -1);
-
-// memory_copy.wast:5242
-assert_return(() => call($29, "checkRange", [4651, 5057, 0]), -1);
-
-// memory_copy.wast:5244
-assert_return(() => call($29, "checkRange", [5057, 5109, 99]), -1);
-
-// memory_copy.wast:5246
-assert_return(() => call($29, "checkRange", [5109, 5291, 0]), -1);
-
-// memory_copy.wast:5248
-assert_return(() => call($29, "checkRange", [5291, 5524, 72]), -1);
-
-// memory_copy.wast:5250
-assert_return(() => call($29, "checkRange", [5524, 5691, 92]), -1);
-
-// memory_copy.wast:5252
-assert_return(() => call($29, "checkRange", [5691, 6552, 83]), -1);
-
-// memory_copy.wast:5254
-assert_return(() => call($29, "checkRange", [6552, 7133, 77]), -1);
-
-// memory_copy.wast:5256
-assert_return(() => call($29, "checkRange", [7133, 7665, 99]), -1);
-
-// memory_copy.wast:5258
-assert_return(() => call($29, "checkRange", [7665, 8314, 0]), -1);
-
-// memory_copy.wast:5260
-assert_return(() => call($29, "checkRange", [8314, 8360, 62]), -1);
-
-// memory_copy.wast:5262
-assert_return(() => call($29, "checkRange", [8360, 8793, 86]), -1);
-
-// memory_copy.wast:5264
-assert_return(() => call($29, "checkRange", [8793, 8979, 83]), -1);
-
-// memory_copy.wast:5266
-assert_return(() => call($29, "checkRange", [8979, 9373, 79]), -1);
-
-// memory_copy.wast:5268
-assert_return(() => call($29, "checkRange", [9373, 9518, 95]), -1);
-
-// memory_copy.wast:5270
-assert_return(() => call($29, "checkRange", [9518, 9934, 59]), -1);
-
-// memory_copy.wast:5272
-assert_return(() => call($29, "checkRange", [9934, 10087, 77]), -1);
-
-// memory_copy.wast:5274
-assert_return(() => call($29, "checkRange", [10087, 10206, 5]), -1);
-
-// memory_copy.wast:5276
-assert_return(() => call($29, "checkRange", [10206, 10230, 77]), -1);
-
-// memory_copy.wast:5278
-assert_return(() => call($29, "checkRange", [10230, 10249, 41]), -1);
-
-// memory_copy.wast:5280
-assert_return(() => call($29, "checkRange", [10249, 11148, 83]), -1);
-
-// memory_copy.wast:5282
-assert_return(() => call($29, "checkRange", [11148, 11356, 74]), -1);
-
-// memory_copy.wast:5284
-assert_return(() => call($29, "checkRange", [11356, 11380, 93]), -1);
-
-// memory_copy.wast:5286
-assert_return(() => call($29, "checkRange", [11380, 11939, 74]), -1);
-
-// memory_copy.wast:5288
-assert_return(() => call($29, "checkRange", [11939, 12159, 68]), -1);
-
-// memory_copy.wast:5290
-assert_return(() => call($29, "checkRange", [12159, 12575, 83]), -1);
-
-// memory_copy.wast:5292
-assert_return(() => call($29, "checkRange", [12575, 12969, 79]), -1);
-
-// memory_copy.wast:5294
-assert_return(() => call($29, "checkRange", [12969, 13114, 95]), -1);
-
-// memory_copy.wast:5296
-assert_return(() => call($29, "checkRange", [13114, 14133, 59]), -1);
-
-// memory_copy.wast:5298
-assert_return(() => call($29, "checkRange", [14133, 14404, 76]), -1);
-
-// memory_copy.wast:5300
-assert_return(() => call($29, "checkRange", [14404, 14428, 57]), -1);
-
-// memory_copy.wast:5302
-assert_return(() => call($29, "checkRange", [14428, 14458, 59]), -1);
-
-// memory_copy.wast:5304
-assert_return(() => call($29, "checkRange", [14458, 14580, 32]), -1);
-
-// memory_copy.wast:5306
-assert_return(() => call($29, "checkRange", [14580, 14777, 89]), -1);
-
-// memory_copy.wast:5308
-assert_return(() => call($29, "checkRange", [14777, 15124, 59]), -1);
-
-// memory_copy.wast:5310
-assert_return(() => call($29, "checkRange", [15124, 15126, 36]), -1);
-
-// memory_copy.wast:5312
-assert_return(() => call($29, "checkRange", [15126, 15192, 100]), -1);
-
-// memory_copy.wast:5314
-assert_return(() => call($29, "checkRange", [15192, 15871, 96]), -1);
-
-// memory_copy.wast:5316
-assert_return(() => call($29, "checkRange", [15871, 15998, 95]), -1);
-
-// memory_copy.wast:5318
-assert_return(() => call($29, "checkRange", [15998, 17017, 59]), -1);
-
-// memory_copy.wast:5320
-assert_return(() => call($29, "checkRange", [17017, 17288, 76]), -1);
-
-// memory_copy.wast:5322
-assert_return(() => call($29, "checkRange", [17288, 17312, 57]), -1);
-
-// memory_copy.wast:5324
-assert_return(() => call($29, "checkRange", [17312, 17342, 59]), -1);
-
-// memory_copy.wast:5326
-assert_return(() => call($29, "checkRange", [17342, 17464, 32]), -1);
-
-// memory_copy.wast:5328
-assert_return(() => call($29, "checkRange", [17464, 17661, 89]), -1);
-
-// memory_copy.wast:5330
-assert_return(() => call($29, "checkRange", [17661, 17727, 59]), -1);
-
-// memory_copy.wast:5332
-assert_return(() => call($29, "checkRange", [17727, 17733, 5]), -1);
-
-// memory_copy.wast:5334
-assert_return(() => call($29, "checkRange", [17733, 17893, 96]), -1);
-
-// memory_copy.wast:5336
-assert_return(() => call($29, "checkRange", [17893, 18553, 77]), -1);
-
-// memory_copy.wast:5338
-assert_return(() => call($29, "checkRange", [18553, 18744, 42]), -1);
-
-// memory_copy.wast:5340
-assert_return(() => call($29, "checkRange", [18744, 18801, 76]), -1);
-
-// memory_copy.wast:5342
-assert_return(() => call($29, "checkRange", [18801, 18825, 57]), -1);
-
-// memory_copy.wast:5344
-assert_return(() => call($29, "checkRange", [18825, 18876, 59]), -1);
-
-// memory_copy.wast:5346
-assert_return(() => call($29, "checkRange", [18876, 18885, 77]), -1);
-
-// memory_copy.wast:5348
-assert_return(() => call($29, "checkRange", [18885, 18904, 41]), -1);
-
-// memory_copy.wast:5350
-assert_return(() => call($29, "checkRange", [18904, 19567, 83]), -1);
-
-// memory_copy.wast:5352
-assert_return(() => call($29, "checkRange", [19567, 20403, 96]), -1);
-
-// memory_copy.wast:5354
-assert_return(() => call($29, "checkRange", [20403, 21274, 77]), -1);
-
-// memory_copy.wast:5356
-assert_return(() => call($29, "checkRange", [21274, 21364, 100]), -1);
-
-// memory_copy.wast:5358
-assert_return(() => call($29, "checkRange", [21364, 21468, 74]), -1);
-
-// memory_copy.wast:5360
-assert_return(() => call($29, "checkRange", [21468, 21492, 93]), -1);
-
-// memory_copy.wast:5362
-assert_return(() => call($29, "checkRange", [21492, 22051, 74]), -1);
-
-// memory_copy.wast:5364
-assert_return(() => call($29, "checkRange", [22051, 22480, 68]), -1);
-
-// memory_copy.wast:5366
-assert_return(() => call($29, "checkRange", [22480, 22685, 100]), -1);
-
-// memory_copy.wast:5368
-assert_return(() => call($29, "checkRange", [22685, 22694, 68]), -1);
-
-// memory_copy.wast:5370
-assert_return(() => call($29, "checkRange", [22694, 22821, 10]), -1);
-
-// memory_copy.wast:5372
-assert_return(() => call($29, "checkRange", [22821, 22869, 100]), -1);
-
-// memory_copy.wast:5374
-assert_return(() => call($29, "checkRange", [22869, 24107, 97]), -1);
-
-// memory_copy.wast:5376
-assert_return(() => call($29, "checkRange", [24107, 24111, 37]), -1);
-
-// memory_copy.wast:5378
-assert_return(() => call($29, "checkRange", [24111, 24236, 77]), -1);
-
-// memory_copy.wast:5380
-assert_return(() => call($29, "checkRange", [24236, 24348, 72]), -1);
-
-// memory_copy.wast:5382
-assert_return(() => call($29, "checkRange", [24348, 24515, 92]), -1);
-
-// memory_copy.wast:5384
-assert_return(() => call($29, "checkRange", [24515, 24900, 83]), -1);
-
-// memory_copy.wast:5386
-assert_return(() => call($29, "checkRange", [24900, 25136, 95]), -1);
-
-// memory_copy.wast:5388
-assert_return(() => call($29, "checkRange", [25136, 25182, 85]), -1);
-
-// memory_copy.wast:5390
-assert_return(() => call($29, "checkRange", [25182, 25426, 68]), -1);
-
-// memory_copy.wast:5392
-assert_return(() => call($29, "checkRange", [25426, 25613, 89]), -1);
-
-// memory_copy.wast:5394
-assert_return(() => call($29, "checkRange", [25613, 25830, 96]), -1);
-
-// memory_copy.wast:5396
-assert_return(() => call($29, "checkRange", [25830, 26446, 100]), -1);
-
-// memory_copy.wast:5398
-assert_return(() => call($29, "checkRange", [26446, 26517, 10]), -1);
-
-// memory_copy.wast:5400
-assert_return(() => call($29, "checkRange", [26517, 27468, 92]), -1);
-
-// memory_copy.wast:5402
-assert_return(() => call($29, "checkRange", [27468, 27503, 95]), -1);
-
-// memory_copy.wast:5404
-assert_return(() => call($29, "checkRange", [27503, 27573, 77]), -1);
-
-// memory_copy.wast:5406
-assert_return(() => call($29, "checkRange", [27573, 28245, 92]), -1);
-
-// memory_copy.wast:5408
-assert_return(() => call($29, "checkRange", [28245, 28280, 95]), -1);
-
-// memory_copy.wast:5410
-assert_return(() => call($29, "checkRange", [28280, 29502, 77]), -1);
-
-// memory_copy.wast:5412
-assert_return(() => call($29, "checkRange", [29502, 29629, 42]), -1);
-
-// memory_copy.wast:5414
-assert_return(() => call($29, "checkRange", [29629, 30387, 83]), -1);
-
-// memory_copy.wast:5416
-assert_return(() => call($29, "checkRange", [30387, 30646, 77]), -1);
-
-// memory_copy.wast:5418
-assert_return(() => call($29, "checkRange", [30646, 31066, 92]), -1);
-
-// memory_copy.wast:5420
-assert_return(() => call($29, "checkRange", [31066, 31131, 77]), -1);
-
-// memory_copy.wast:5422
-assert_return(() => call($29, "checkRange", [31131, 31322, 42]), -1);
-
-// memory_copy.wast:5424
-assert_return(() => call($29, "checkRange", [31322, 31379, 76]), -1);
-
-// memory_copy.wast:5426
-assert_return(() => call($29, "checkRange", [31379, 31403, 57]), -1);
-
-// memory_copy.wast:5428
-assert_return(() => call($29, "checkRange", [31403, 31454, 59]), -1);
-
-// memory_copy.wast:5430
-assert_return(() => call($29, "checkRange", [31454, 31463, 77]), -1);
-
-// memory_copy.wast:5432
-assert_return(() => call($29, "checkRange", [31463, 31482, 41]), -1);
-
-// memory_copy.wast:5434
-assert_return(() => call($29, "checkRange", [31482, 31649, 83]), -1);
-
-// memory_copy.wast:5436
-assert_return(() => call($29, "checkRange", [31649, 31978, 72]), -1);
-
-// memory_copy.wast:5438
-assert_return(() => call($29, "checkRange", [31978, 32145, 92]), -1);
-
-// memory_copy.wast:5440
-assert_return(() => call($29, "checkRange", [32145, 32530, 83]), -1);
-
-// memory_copy.wast:5442
-assert_return(() => call($29, "checkRange", [32530, 32766, 95]), -1);
-
-// memory_copy.wast:5444
-assert_return(() => call($29, "checkRange", [32766, 32812, 85]), -1);
-
-// memory_copy.wast:5446
-assert_return(() => call($29, "checkRange", [32812, 33056, 68]), -1);
-
-// memory_copy.wast:5448
-assert_return(() => call($29, "checkRange", [33056, 33660, 89]), -1);
-
-// memory_copy.wast:5450
-assert_return(() => call($29, "checkRange", [33660, 33752, 59]), -1);
-
-// memory_copy.wast:5452
-assert_return(() => call($29, "checkRange", [33752, 33775, 36]), -1);
-
-// memory_copy.wast:5454
-assert_return(() => call($29, "checkRange", [33775, 33778, 32]), -1);
-
-// memory_copy.wast:5456
-assert_return(() => call($29, "checkRange", [33778, 34603, 9]), -1);
-
-// memory_copy.wast:5458
-assert_return(() => call($29, "checkRange", [34603, 35218, 0]), -1);
-
-// memory_copy.wast:5460
-assert_return(() => call($29, "checkRange", [35218, 35372, 10]), -1);
-
-// memory_copy.wast:5462
-assert_return(() => call($29, "checkRange", [35372, 35486, 77]), -1);
-
-// memory_copy.wast:5464
-assert_return(() => call($29, "checkRange", [35486, 35605, 5]), -1);
-
-// memory_copy.wast:5466
-assert_return(() => call($29, "checkRange", [35605, 35629, 77]), -1);
-
-// memory_copy.wast:5468
-assert_return(() => call($29, "checkRange", [35629, 35648, 41]), -1);
-
-// memory_copy.wast:5470
-assert_return(() => call($29, "checkRange", [35648, 36547, 83]), -1);
-
-// memory_copy.wast:5472
-assert_return(() => call($29, "checkRange", [36547, 36755, 74]), -1);
-
-// memory_copy.wast:5474
-assert_return(() => call($29, "checkRange", [36755, 36767, 93]), -1);
-
-// memory_copy.wast:5476
-assert_return(() => call($29, "checkRange", [36767, 36810, 83]), -1);
-
-// memory_copy.wast:5478
-assert_return(() => call($29, "checkRange", [36810, 36839, 100]), -1);
-
-// memory_copy.wast:5480
-assert_return(() => call($29, "checkRange", [36839, 37444, 96]), -1);
-
-// memory_copy.wast:5482
-assert_return(() => call($29, "checkRange", [37444, 38060, 100]), -1);
-
-// memory_copy.wast:5484
-assert_return(() => call($29, "checkRange", [38060, 38131, 10]), -1);
-
-// memory_copy.wast:5486
-assert_return(() => call($29, "checkRange", [38131, 39082, 92]), -1);
-
-// memory_copy.wast:5488
-assert_return(() => call($29, "checkRange", [39082, 39117, 95]), -1);
-
-// memory_copy.wast:5490
-assert_return(() => call($29, "checkRange", [39117, 39187, 77]), -1);
-
-// memory_copy.wast:5492
-assert_return(() => call($29, "checkRange", [39187, 39859, 92]), -1);
-
-// memory_copy.wast:5494
-assert_return(() => call($29, "checkRange", [39859, 39894, 95]), -1);
-
-// memory_copy.wast:5496
-assert_return(() => call($29, "checkRange", [39894, 40257, 77]), -1);
-
-// memory_copy.wast:5498
-assert_return(() => call($29, "checkRange", [40257, 40344, 89]), -1);
-
-// memory_copy.wast:5500
-assert_return(() => call($29, "checkRange", [40344, 40371, 59]), -1);
-
-// memory_copy.wast:5502
-assert_return(() => call($29, "checkRange", [40371, 40804, 77]), -1);
-
-// memory_copy.wast:5504
-assert_return(() => call($29, "checkRange", [40804, 40909, 5]), -1);
-
-// memory_copy.wast:5506
-assert_return(() => call($29, "checkRange", [40909, 42259, 92]), -1);
-
-// memory_copy.wast:5508
-assert_return(() => call($29, "checkRange", [42259, 42511, 77]), -1);
-
-// memory_copy.wast:5510
-assert_return(() => call($29, "checkRange", [42511, 42945, 83]), -1);
-
-// memory_copy.wast:5512
-assert_return(() => call($29, "checkRange", [42945, 43115, 77]), -1);
-
-// memory_copy.wast:5514
-assert_return(() => call($29, "checkRange", [43115, 43306, 42]), -1);
-
-// memory_copy.wast:5516
-assert_return(() => call($29, "checkRange", [43306, 43363, 76]), -1);
-
-// memory_copy.wast:5518
-assert_return(() => call($29, "checkRange", [43363, 43387, 57]), -1);
-
-// memory_copy.wast:5520
-assert_return(() => call($29, "checkRange", [43387, 43438, 59]), -1);
-
-// memory_copy.wast:5522
-assert_return(() => call($29, "checkRange", [43438, 43447, 77]), -1);
-
-// memory_copy.wast:5524
-assert_return(() => call($29, "checkRange", [43447, 43466, 41]), -1);
-
-// memory_copy.wast:5526
-assert_return(() => call($29, "checkRange", [43466, 44129, 83]), -1);
-
-// memory_copy.wast:5528
-assert_return(() => call($29, "checkRange", [44129, 44958, 96]), -1);
-
-// memory_copy.wast:5530
-assert_return(() => call($29, "checkRange", [44958, 45570, 77]), -1);
-
-// memory_copy.wast:5532
-assert_return(() => call($29, "checkRange", [45570, 45575, 92]), -1);
-
-// memory_copy.wast:5534
-assert_return(() => call($29, "checkRange", [45575, 45640, 77]), -1);
-
-// memory_copy.wast:5536
-assert_return(() => call($29, "checkRange", [45640, 45742, 42]), -1);
-
-// memory_copy.wast:5538
-assert_return(() => call($29, "checkRange", [45742, 45832, 72]), -1);
-
-// memory_copy.wast:5540
-assert_return(() => call($29, "checkRange", [45832, 45999, 92]), -1);
-
-// memory_copy.wast:5542
-assert_return(() => call($29, "checkRange", [45999, 46384, 83]), -1);
-
-// memory_copy.wast:5544
-assert_return(() => call($29, "checkRange", [46384, 46596, 95]), -1);
-
-// memory_copy.wast:5546
-assert_return(() => call($29, "checkRange", [46596, 46654, 92]), -1);
-
-// memory_copy.wast:5548
-assert_return(() => call($29, "checkRange", [46654, 47515, 83]), -1);
-
-// memory_copy.wast:5550
-assert_return(() => call($29, "checkRange", [47515, 47620, 77]), -1);
-
-// memory_copy.wast:5552
-assert_return(() => call($29, "checkRange", [47620, 47817, 79]), -1);
-
-// memory_copy.wast:5554
-assert_return(() => call($29, "checkRange", [47817, 47951, 95]), -1);
-
-// memory_copy.wast:5556
-assert_return(() => call($29, "checkRange", [47951, 48632, 100]), -1);
-
-// memory_copy.wast:5558
-assert_return(() => call($29, "checkRange", [48632, 48699, 97]), -1);
-
-// memory_copy.wast:5560
-assert_return(() => call($29, "checkRange", [48699, 48703, 37]), -1);
-
-// memory_copy.wast:5562
-assert_return(() => call($29, "checkRange", [48703, 49764, 77]), -1);
-
-// memory_copy.wast:5564
-assert_return(() => call($29, "checkRange", [49764, 49955, 42]), -1);
-
-// memory_copy.wast:5566
-assert_return(() => call($29, "checkRange", [49955, 50012, 76]), -1);
-
-// memory_copy.wast:5568
-assert_return(() => call($29, "checkRange", [50012, 50036, 57]), -1);
-
-// memory_copy.wast:5570
-assert_return(() => call($29, "checkRange", [50036, 50087, 59]), -1);
-
-// memory_copy.wast:5572
-assert_return(() => call($29, "checkRange", [50087, 50096, 77]), -1);
-
-// memory_copy.wast:5574
-assert_return(() => call($29, "checkRange", [50096, 50115, 41]), -1);
-
-// memory_copy.wast:5576
-assert_return(() => call($29, "checkRange", [50115, 50370, 83]), -1);
-
-// memory_copy.wast:5578
-assert_return(() => call($29, "checkRange", [50370, 51358, 92]), -1);
-
-// memory_copy.wast:5580
-assert_return(() => call($29, "checkRange", [51358, 51610, 77]), -1);
-
-// memory_copy.wast:5582
-assert_return(() => call($29, "checkRange", [51610, 51776, 83]), -1);
-
-// memory_copy.wast:5584
-assert_return(() => call($29, "checkRange", [51776, 51833, 89]), -1);
-
-// memory_copy.wast:5586
-assert_return(() => call($29, "checkRange", [51833, 52895, 100]), -1);
-
-// memory_copy.wast:5588
-assert_return(() => call($29, "checkRange", [52895, 53029, 97]), -1);
-
-// memory_copy.wast:5590
-assert_return(() => call($29, "checkRange", [53029, 53244, 68]), -1);
-
-// memory_copy.wast:5592
-assert_return(() => call($29, "checkRange", [53244, 54066, 100]), -1);
-
-// memory_copy.wast:5594
-assert_return(() => call($29, "checkRange", [54066, 54133, 97]), -1);
-
-// memory_copy.wast:5596
-assert_return(() => call($29, "checkRange", [54133, 54137, 37]), -1);
-
-// memory_copy.wast:5598
-assert_return(() => call($29, "checkRange", [54137, 55198, 77]), -1);
-
-// memory_copy.wast:5600
-assert_return(() => call($29, "checkRange", [55198, 55389, 42]), -1);
-
-// memory_copy.wast:5602
-assert_return(() => call($29, "checkRange", [55389, 55446, 76]), -1);
-
-// memory_copy.wast:5604
-assert_return(() => call($29, "checkRange", [55446, 55470, 57]), -1);
-
-// memory_copy.wast:5606
-assert_return(() => call($29, "checkRange", [55470, 55521, 59]), -1);
-
-// memory_copy.wast:5608
-assert_return(() => call($29, "checkRange", [55521, 55530, 77]), -1);
-
-// memory_copy.wast:5610
-assert_return(() => call($29, "checkRange", [55530, 55549, 41]), -1);
-
-// memory_copy.wast:5612
-assert_return(() => call($29, "checkRange", [55549, 56212, 83]), -1);
-
-// memory_copy.wast:5614
-assert_return(() => call($29, "checkRange", [56212, 57048, 96]), -1);
-
-// memory_copy.wast:5616
-assert_return(() => call($29, "checkRange", [57048, 58183, 77]), -1);
-
-// memory_copy.wast:5618
-assert_return(() => call($29, "checkRange", [58183, 58202, 41]), -1);
-
-// memory_copy.wast:5620
-assert_return(() => call($29, "checkRange", [58202, 58516, 83]), -1);
-
-// memory_copy.wast:5622
-assert_return(() => call($29, "checkRange", [58516, 58835, 95]), -1);
-
-// memory_copy.wast:5624
-assert_return(() => call($29, "checkRange", [58835, 58855, 77]), -1);
-
-// memory_copy.wast:5626
-assert_return(() => call($29, "checkRange", [58855, 59089, 95]), -1);
-
-// memory_copy.wast:5628
-assert_return(() => call($29, "checkRange", [59089, 59145, 77]), -1);
-
-// memory_copy.wast:5630
-assert_return(() => call($29, "checkRange", [59145, 59677, 99]), -1);
-
-// memory_copy.wast:5632
-assert_return(() => call($29, "checkRange", [59677, 60134, 0]), -1);
-
-// memory_copy.wast:5634
-assert_return(() => call($29, "checkRange", [60134, 60502, 89]), -1);
-
-// memory_copy.wast:5636
-assert_return(() => call($29, "checkRange", [60502, 60594, 59]), -1);
-
-// memory_copy.wast:5638
-assert_return(() => call($29, "checkRange", [60594, 60617, 36]), -1);
-
-// memory_copy.wast:5640
-assert_return(() => call($29, "checkRange", [60617, 60618, 32]), -1);
-
-// memory_copy.wast:5642
-assert_return(() => call($29, "checkRange", [60618, 60777, 42]), -1);
-
-// memory_copy.wast:5644
-assert_return(() => call($29, "checkRange", [60777, 60834, 76]), -1);
-
-// memory_copy.wast:5646
-assert_return(() => call($29, "checkRange", [60834, 60858, 57]), -1);
-
-// memory_copy.wast:5648
-assert_return(() => call($29, "checkRange", [60858, 60909, 59]), -1);
-
-// memory_copy.wast:5650
-assert_return(() => call($29, "checkRange", [60909, 60918, 77]), -1);
-
-// memory_copy.wast:5652
-assert_return(() => call($29, "checkRange", [60918, 60937, 41]), -1);
-
-// memory_copy.wast:5654
-assert_return(() => call($29, "checkRange", [60937, 61600, 83]), -1);
-
-// memory_copy.wast:5656
-assert_return(() => call($29, "checkRange", [61600, 62436, 96]), -1);
-
-// memory_copy.wast:5658
-assert_return(() => call($29, "checkRange", [62436, 63307, 77]), -1);
-
-// memory_copy.wast:5660
-assert_return(() => call($29, "checkRange", [63307, 63397, 100]), -1);
-
-// memory_copy.wast:5662
-assert_return(() => call($29, "checkRange", [63397, 63501, 74]), -1);
-
-// memory_copy.wast:5664
-assert_return(() => call($29, "checkRange", [63501, 63525, 93]), -1);
-
-// memory_copy.wast:5666
-assert_return(() => call($29, "checkRange", [63525, 63605, 74]), -1);
-
-// memory_copy.wast:5668
-assert_return(() => call($29, "checkRange", [63605, 63704, 100]), -1);
-
-// memory_copy.wast:5670
-assert_return(() => call($29, "checkRange", [63704, 63771, 97]), -1);
-
-// memory_copy.wast:5672
-assert_return(() => call($29, "checkRange", [63771, 63775, 37]), -1);
-
-// memory_copy.wast:5674
-assert_return(() => call($29, "checkRange", [63775, 64311, 77]), -1);
-
-// memory_copy.wast:5676
-assert_return(() => call($29, "checkRange", [64311, 64331, 26]), -1);
-
-// memory_copy.wast:5678
-assert_return(() => call($29, "checkRange", [64331, 64518, 92]), -1);
-
-// memory_copy.wast:5680
-assert_return(() => call($29, "checkRange", [64518, 64827, 11]), -1);
-
-// memory_copy.wast:5682
-assert_return(() => call($29, "checkRange", [64827, 64834, 26]), -1);
-
-// memory_copy.wast:5684
-assert_return(() => call($29, "checkRange", [64834, 65536, 0]), -1);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast
deleted file mode 100644
index 8cc21af317..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast
+++ /dev/null
@@ -1,673 +0,0 @@
-;;
-;; Generated by ../meta/generate_memory_fill.js
-;;
-
-(module
- (memory 1 1)
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "test")
- (memory.fill (i32.const 0xFF00) (i32.const 0x55) (i32.const 256))))
-(invoke "test")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65280) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65280) (i32.const 65536) (i32.const 85))
- (i32.const -1))
-(module
- (memory 1 1)
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "test")
- (memory.fill (i32.const 0xFF00) (i32.const 0x55) (i32.const 257))))
-(assert_trap (invoke "test") "out of bounds memory access")
-
-(module
- (memory 1 1)
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "test")
- (memory.fill (i32.const 0xFFFFFF00) (i32.const 0x55) (i32.const 257))))
-(assert_trap (invoke "test") "out of bounds memory access")
-
-(module
- (memory 1 1)
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "test")
- (memory.fill (i32.const 0x12) (i32.const 0x55) (i32.const 0))))
-(invoke "test")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-(module
- (memory 1 1)
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "test")
- (memory.fill (i32.const 0x10000) (i32.const 0x55) (i32.const 0))))
-(invoke "test")
-
-(module
- (memory 1 1)
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "test")
- (memory.fill (i32.const 0x1) (i32.const 0xAA) (i32.const 0xFFFE))))
-(invoke "test")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 1) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 1) (i32.const 65535) (i32.const 170))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65535) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-
-(module
- (memory 1 1)
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "test")
- (memory.fill (i32.const 0x12) (i32.const 0x55) (i32.const 10))
- (memory.fill (i32.const 0x15) (i32.const 0xAA) (i32.const 4))))
-(invoke "test")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 18) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 18) (i32.const 21) (i32.const 85))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 21) (i32.const 25) (i32.const 170))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 25) (i32.const 28) (i32.const 85))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 28) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-(assert_invalid
- (module
- (func (export "testfn")
- (memory.fill (i32.const 10) (i32.const 20) (i32.const 30))))
- "unknown memory 0")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (i32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (i32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (i32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (f32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (f32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (f32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (f32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (i64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (i64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (i64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (i64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (f64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (f64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (f64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i32.const 10) (f64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (i32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (i32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (i32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (i32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (f32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (f32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (f32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (f32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (i64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (i64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (i64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (i64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (f64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (f64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (f64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f32.const 10) (f64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (i32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (i32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (i32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (i32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (f32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (f32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (f32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (f32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (i64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (i64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (i64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (i64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (f64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (f64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (f64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (i64.const 10) (f64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (i32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (i32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (i32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (i32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (f32.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (f32.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (f32.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (f32.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (i64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (i64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (i64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (i64.const 20) (f64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (f64.const 20) (i32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (f64.const 20) (f32.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (f64.const 20) (i64.const 30))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1 1)
- (func (export "testfn")
- (memory.fill (f64.const 10) (f64.const 20) (f64.const 30))))
- "type mismatch")
-
-(module
- (memory 1 1 )
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $val i32) (param $len i32)
- (memory.fill (local.get $offs) (local.get $val) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 65280) (i32.const 37) (i32.const 512))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 65280) (i32.const 65536) (i32.const 37))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65280) (i32.const 0))
- (i32.const -1))
-(module
- (memory 1 1 )
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $val i32) (param $len i32)
- (memory.fill (local.get $offs) (local.get $val) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 65279) (i32.const 37) (i32.const 514))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 65279) (i32.const 65536) (i32.const 37))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65279) (i32.const 0))
- (i32.const -1))
-(module
- (memory 1 1 )
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $val i32) (param $len i32)
- (memory.fill (local.get $offs) (local.get $val) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 65279) (i32.const 37) (i32.const 4294967295))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 65279) (i32.const 65536) (i32.const 37))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65279) (i32.const 0))
- (i32.const -1))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js
deleted file mode 100644
index 5277588dcd..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js
+++ /dev/null
@@ -1,440 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// memory_fill.wast:5
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc1\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8f\x80\x80\x80\x00\x00\x41\x80\xfe\x03\x41\xd5\x00\x41\x80\x02\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:21
-run(() => call($1, "test", []));
-
-// memory_fill.wast:23
-assert_return(() => call($1, "checkRange", [0, 65280, 0]), -1);
-
-// memory_fill.wast:25
-assert_return(() => call($1, "checkRange", [65280, 65536, 85]), -1);
-
-// memory_fill.wast:27
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc1\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8f\x80\x80\x80\x00\x00\x41\x80\xfe\x03\x41\xd5\x00\x41\x81\x02\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:43
-assert_trap(() => call($2, "test", []));
-
-// memory_fill.wast:45
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc0\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8e\x80\x80\x80\x00\x00\x41\x80\x7e\x41\xd5\x00\x41\x81\x02\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:61
-assert_trap(() => call($3, "test", []));
-
-// memory_fill.wast:63
-let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x41\x12\x41\xd5\x00\x41\x00\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:79
-run(() => call($4, "test", []));
-
-// memory_fill.wast:81
-assert_return(() => call($4, "checkRange", [0, 65536, 0]), -1);
-
-// memory_fill.wast:83
-let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc0\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8e\x80\x80\x80\x00\x00\x41\x80\x80\x04\x41\xd5\x00\x41\x00\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:99
-run(() => call($5, "test", []));
-
-// memory_fill.wast:101
-let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc0\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8e\x80\x80\x80\x00\x00\x41\x01\x41\xaa\x01\x41\xfe\xff\x03\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:117
-run(() => call($6, "test", []));
-
-// memory_fill.wast:119
-assert_return(() => call($6, "checkRange", [0, 1, 0]), -1);
-
-// memory_fill.wast:121
-assert_return(() => call($6, "checkRange", [1, 65535, 170]), -1);
-
-// memory_fill.wast:123
-assert_return(() => call($6, "checkRange", [65535, 65536, 0]), -1);
-
-// memory_fill.wast:126
-let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc8\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x96\x80\x80\x80\x00\x00\x41\x12\x41\xd5\x00\x41\x0a\xfc\x0b\x00\x41\x15\x41\xaa\x01\x41\x04\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:143
-run(() => call($7, "test", []));
-
-// memory_fill.wast:145
-assert_return(() => call($7, "checkRange", [0, 18, 0]), -1);
-
-// memory_fill.wast:147
-assert_return(() => call($7, "checkRange", [18, 21, 85]), -1);
-
-// memory_fill.wast:149
-assert_return(() => call($7, "checkRange", [21, 25, 170]), -1);
-
-// memory_fill.wast:151
-assert_return(() => call($7, "checkRange", [25, 28, 85]), -1);
-
-// memory_fill.wast:153
-assert_return(() => call($7, "checkRange", [28, 65536, 0]), -1);
-
-// memory_fill.wast:155
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:161
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:168
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:175
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:182
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:189
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:196
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:203
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:210
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:217
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:224
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:231
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:238
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:245
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:252
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:259
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:266
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:273
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:280
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:287
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:294
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:301
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9a\x80\x80\x80\x00\x01\x94\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:308
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:315
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9e\x80\x80\x80\x00\x01\x98\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:322
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:329
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:336
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:343
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:350
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:357
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9e\x80\x80\x80\x00\x01\x98\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:364
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:371
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa2\x80\x80\x80\x00\x01\x9c\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:378
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:385
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:392
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:399
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:406
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:413
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:420
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:427
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:434
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:441
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:448
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:455
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:462
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:469
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:476
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:483
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:490
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:497
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:504
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:511
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:518
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:525
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9e\x80\x80\x80\x00\x01\x98\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:532
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:539
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa2\x80\x80\x80\x00\x01\x9c\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:546
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:553
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:560
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:567
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:574
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:581
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa2\x80\x80\x80\x00\x01\x9c\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:588
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:595
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa6\x80\x80\x80\x00\x01\xa0\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:602
-let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8e\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0a\xbd\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8b\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:619
-assert_trap(() => call($8, "run", [65280, 37, 512]));
-
-// memory_fill.wast:622
-assert_return(() => call($8, "checkRange", [65280, 65536, 37]), -1);
-
-// memory_fill.wast:624
-assert_return(() => call($8, "checkRange", [0, 65280, 0]), -1);
-
-// memory_fill.wast:626
-let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8e\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0a\xbd\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8b\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:643
-assert_trap(() => call($9, "run", [65279, 37, 514]));
-
-// memory_fill.wast:646
-assert_return(() => call($9, "checkRange", [65279, 65536, 37]), -1);
-
-// memory_fill.wast:648
-assert_return(() => call($9, "checkRange", [0, 65279, 0]), -1);
-
-// memory_fill.wast:650
-let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8e\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0a\xbd\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8b\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0b\x00\x0b");
-
-// memory_fill.wast:667
-assert_trap(() => call($10, "run", [65279, 37, -1]));
-
-// memory_fill.wast:670
-assert_return(() => call($10, "checkRange", [65279, 65536, 37]), -1);
-
-// memory_fill.wast:672
-assert_return(() => call($10, "checkRange", [0, 65279, 0]), -1);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast
deleted file mode 100644
index 59c9fe8606..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast
+++ /dev/null
@@ -1,947 +0,0 @@
-;;
-;; Generated by ../meta/generate_memory_init.js
-;;
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data passive "\02\07\01\08")
- (data (i32.const 12) "\07\05\02\03\06")
- (data passive "\05\09\02\07\06")
- (func (export "test")
- (nop))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data passive "\02\07\01\08")
- (data (i32.const 12) "\07\05\02\03\06")
- (data passive "\05\09\02\07\06")
- (func (export "test")
- (memory.init 1 (i32.const 7) (i32.const 0) (i32.const 4)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data passive "\02\07\01\08")
- (data (i32.const 12) "\07\05\02\03\06")
- (data passive "\05\09\02\07\06")
- (func (export "test")
- (memory.init 3 (i32.const 15) (i32.const 1) (i32.const 3)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-
-(module
- (memory (export "memory0") 1 1)
- (data (i32.const 2) "\03\01\04\01")
- (data passive "\02\07\01\08")
- (data (i32.const 12) "\07\05\02\03\06")
- (data passive "\05\09\02\07\06")
- (func (export "test")
- (memory.init 1 (i32.const 7) (i32.const 0) (i32.const 4))
- (data.drop 1)
- (memory.init 3 (i32.const 15) (i32.const 1) (i32.const 3))
- (data.drop 3)
- (memory.copy (i32.const 20) (i32.const 15) (i32.const 5))
- (memory.copy (i32.const 21) (i32.const 29) (i32.const 1))
- (memory.copy (i32.const 24) (i32.const 10) (i32.const 1))
- (memory.copy (i32.const 13) (i32.const 11) (i32.const 4))
- (memory.copy (i32.const 19) (i32.const 20) (i32.const 5)))
- (func (export "load8_u") (param i32) (result i32)
- (i32.load8_u (local.get 0))))
-
-(invoke "test")
-
-(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 1))
-(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 5))
-(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 2))
-(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 9))
-(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 7))
-(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 8))
-(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
-(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
-(assert_invalid
- (module
- (func (export "test")
- (data.drop 0)))
- "unknown memory 0")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (data.drop 4)))
- "unknown data segment")
-
-(module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (data.drop 0)
- (data.drop 0)))
-(assert_trap (invoke "test") "data segment dropped")
-
-(module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (data.drop 0)
- (memory.init 0 (i32.const 1234) (i32.const 1) (i32.const 1))))
-(assert_trap (invoke "test") "data segment dropped")
-
-(module
- (memory 1)
- (data (i32.const 0) "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1234) (i32.const 1) (i32.const 1))))
-(assert_trap (invoke "test") "data segment dropped")
-
-(assert_invalid
- (module
- (func (export "test")
- (memory.init 1 (i32.const 1234) (i32.const 1) (i32.const 1))))
- "unknown memory 0")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 1 (i32.const 1234) (i32.const 1) (i32.const 1))))
- "unknown data segment 1")
-
-(module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (i32.const 0) (i32.const 1))
- (memory.init 0 (i32.const 1) (i32.const 0) (i32.const 1))))
-(invoke "test")
-
-(module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1234) (i32.const 0) (i32.const 5))))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1234) (i32.const 2) (i32.const 3))))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 0xFFFE) (i32.const 1) (i32.const 3))))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1234) (i32.const 4) (i32.const 0))))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 0x10000) (i32.const 2) (i32.const 0))))
-(assert_trap (invoke "test") "out of bounds")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (i32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (i32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (i32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (f32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (f32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (f32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (f32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (i64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (i64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (i64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (i64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (f64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (f64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (f64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i32.const 1) (f64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (i32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (i32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (i32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (i32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (f32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (f32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (f32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (f32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (i64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (i64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (i64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (i64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (f64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (f64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (f64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f32.const 1) (f64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (i32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (i32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (i32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (i32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (f32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (f32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (f32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (f32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (i64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (i64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (i64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (i64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (f64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (f64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (f64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (i64.const 1) (f64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (i32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (i32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (i32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (i32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (f32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (f32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (f32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (f32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (i64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (i64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (i64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (i64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (f64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (f64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (f64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (memory 1)
- (data passive "\37")
- (func (export "test")
- (memory.init 0 (f64.const 1) (f64.const 1) (f64.const 1))))
- "type mismatch")
-
-(module
- (memory 1 1 )
- (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $len i32)
- (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 65528) (i32.const 16))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65528) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65528) (i32.const 65536) (i32.const 66))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65536) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-(module
- (memory 1 1 )
- (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $len i32)
- (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 65527) (i32.const 16))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65527) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65527) (i32.const 65536) (i32.const 66))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65536) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-(module
- (memory 1 1 )
- (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $len i32)
- (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 65472) (i32.const 30))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65472) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65472) (i32.const 65488) (i32.const 66))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65488) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-(module
- (memory 1 1 )
- (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $len i32)
- (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 65473) (i32.const 31))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65473) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65473) (i32.const 65489) (i32.const 66))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65489) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-(module
- (memory 1 )
- (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $len i32)
- (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 65528) (i32.const 4294967040))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65528) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65528) (i32.const 65536) (i32.const 66))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 65536) (i32.const 65536) (i32.const 0))
- (i32.const -1))
-(module
- (memory 1 )
- (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
-
- (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
- (loop $cont
- (if (i32.eq (local.get $from) (local.get $to))
- (then
- (return (i32.const -1))))
- (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
- (then
- (local.set $from (i32.add (local.get $from) (i32.const 1)))
- (br $cont))))
- (return (local.get $from)))
-
- (func (export "run") (param $offs i32) (param $len i32)
- (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 0) (i32.const 4294967292))
- "out of bounds")
-
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 0) (i32.const 0))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 0) (i32.const 16) (i32.const 66))
- (i32.const -1))
-(assert_return (invoke "checkRange" (i32.const 16) (i32.const 65536) (i32.const 0))
- (i32.const -1))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js
deleted file mode 100644
index 04b43c73ff..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js
+++ /dev/null
@@ -1,866 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// memory_init.wast:5
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x95\x80\x80\x80\x00\x02\x83\x80\x80\x80\x00\x00\x01\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\xa1\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x04\x02\x07\x01\x08\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x05\x05\x09\x02\x07\x06");
-
-// memory_init.wast:16
-run(() => call($1, "test", []));
-
-// memory_init.wast:18
-assert_return(() => call($1, "load8_u", [0]), 0);
-
-// memory_init.wast:19
-assert_return(() => call($1, "load8_u", [1]), 0);
-
-// memory_init.wast:20
-assert_return(() => call($1, "load8_u", [2]), 3);
-
-// memory_init.wast:21
-assert_return(() => call($1, "load8_u", [3]), 1);
-
-// memory_init.wast:22
-assert_return(() => call($1, "load8_u", [4]), 4);
-
-// memory_init.wast:23
-assert_return(() => call($1, "load8_u", [5]), 1);
-
-// memory_init.wast:24
-assert_return(() => call($1, "load8_u", [6]), 0);
-
-// memory_init.wast:25
-assert_return(() => call($1, "load8_u", [7]), 0);
-
-// memory_init.wast:26
-assert_return(() => call($1, "load8_u", [8]), 0);
-
-// memory_init.wast:27
-assert_return(() => call($1, "load8_u", [9]), 0);
-
-// memory_init.wast:28
-assert_return(() => call($1, "load8_u", [10]), 0);
-
-// memory_init.wast:29
-assert_return(() => call($1, "load8_u", [11]), 0);
-
-// memory_init.wast:30
-assert_return(() => call($1, "load8_u", [12]), 7);
-
-// memory_init.wast:31
-assert_return(() => call($1, "load8_u", [13]), 5);
-
-// memory_init.wast:32
-assert_return(() => call($1, "load8_u", [14]), 2);
-
-// memory_init.wast:33
-assert_return(() => call($1, "load8_u", [15]), 3);
-
-// memory_init.wast:34
-assert_return(() => call($1, "load8_u", [16]), 6);
-
-// memory_init.wast:35
-assert_return(() => call($1, "load8_u", [17]), 0);
-
-// memory_init.wast:36
-assert_return(() => call($1, "load8_u", [18]), 0);
-
-// memory_init.wast:37
-assert_return(() => call($1, "load8_u", [19]), 0);
-
-// memory_init.wast:38
-assert_return(() => call($1, "load8_u", [20]), 0);
-
-// memory_init.wast:39
-assert_return(() => call($1, "load8_u", [21]), 0);
-
-// memory_init.wast:40
-assert_return(() => call($1, "load8_u", [22]), 0);
-
-// memory_init.wast:41
-assert_return(() => call($1, "load8_u", [23]), 0);
-
-// memory_init.wast:42
-assert_return(() => call($1, "load8_u", [24]), 0);
-
-// memory_init.wast:43
-assert_return(() => call($1, "load8_u", [25]), 0);
-
-// memory_init.wast:44
-assert_return(() => call($1, "load8_u", [26]), 0);
-
-// memory_init.wast:45
-assert_return(() => call($1, "load8_u", [27]), 0);
-
-// memory_init.wast:46
-assert_return(() => call($1, "load8_u", [28]), 0);
-
-// memory_init.wast:47
-assert_return(() => call($1, "load8_u", [29]), 0);
-
-// memory_init.wast:49
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0c\x81\x80\x80\x80\x00\x04\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x07\x41\x00\x41\x04\xfc\x08\x01\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\xa1\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x04\x02\x07\x01\x08\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x05\x05\x09\x02\x07\x06");
-
-// memory_init.wast:60
-run(() => call($2, "test", []));
-
-// memory_init.wast:62
-assert_return(() => call($2, "load8_u", [0]), 0);
-
-// memory_init.wast:63
-assert_return(() => call($2, "load8_u", [1]), 0);
-
-// memory_init.wast:64
-assert_return(() => call($2, "load8_u", [2]), 3);
-
-// memory_init.wast:65
-assert_return(() => call($2, "load8_u", [3]), 1);
-
-// memory_init.wast:66
-assert_return(() => call($2, "load8_u", [4]), 4);
-
-// memory_init.wast:67
-assert_return(() => call($2, "load8_u", [5]), 1);
-
-// memory_init.wast:68
-assert_return(() => call($2, "load8_u", [6]), 0);
-
-// memory_init.wast:69
-assert_return(() => call($2, "load8_u", [7]), 2);
-
-// memory_init.wast:70
-assert_return(() => call($2, "load8_u", [8]), 7);
-
-// memory_init.wast:71
-assert_return(() => call($2, "load8_u", [9]), 1);
-
-// memory_init.wast:72
-assert_return(() => call($2, "load8_u", [10]), 8);
-
-// memory_init.wast:73
-assert_return(() => call($2, "load8_u", [11]), 0);
-
-// memory_init.wast:74
-assert_return(() => call($2, "load8_u", [12]), 7);
-
-// memory_init.wast:75
-assert_return(() => call($2, "load8_u", [13]), 5);
-
-// memory_init.wast:76
-assert_return(() => call($2, "load8_u", [14]), 2);
-
-// memory_init.wast:77
-assert_return(() => call($2, "load8_u", [15]), 3);
-
-// memory_init.wast:78
-assert_return(() => call($2, "load8_u", [16]), 6);
-
-// memory_init.wast:79
-assert_return(() => call($2, "load8_u", [17]), 0);
-
-// memory_init.wast:80
-assert_return(() => call($2, "load8_u", [18]), 0);
-
-// memory_init.wast:81
-assert_return(() => call($2, "load8_u", [19]), 0);
-
-// memory_init.wast:82
-assert_return(() => call($2, "load8_u", [20]), 0);
-
-// memory_init.wast:83
-assert_return(() => call($2, "load8_u", [21]), 0);
-
-// memory_init.wast:84
-assert_return(() => call($2, "load8_u", [22]), 0);
-
-// memory_init.wast:85
-assert_return(() => call($2, "load8_u", [23]), 0);
-
-// memory_init.wast:86
-assert_return(() => call($2, "load8_u", [24]), 0);
-
-// memory_init.wast:87
-assert_return(() => call($2, "load8_u", [25]), 0);
-
-// memory_init.wast:88
-assert_return(() => call($2, "load8_u", [26]), 0);
-
-// memory_init.wast:89
-assert_return(() => call($2, "load8_u", [27]), 0);
-
-// memory_init.wast:90
-assert_return(() => call($2, "load8_u", [28]), 0);
-
-// memory_init.wast:91
-assert_return(() => call($2, "load8_u", [29]), 0);
-
-// memory_init.wast:93
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0c\x81\x80\x80\x80\x00\x04\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x01\x41\x03\xfc\x08\x03\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\xa1\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x04\x02\x07\x01\x08\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x05\x05\x09\x02\x07\x06");
-
-// memory_init.wast:104
-run(() => call($3, "test", []));
-
-// memory_init.wast:106
-assert_return(() => call($3, "load8_u", [0]), 0);
-
-// memory_init.wast:107
-assert_return(() => call($3, "load8_u", [1]), 0);
-
-// memory_init.wast:108
-assert_return(() => call($3, "load8_u", [2]), 3);
-
-// memory_init.wast:109
-assert_return(() => call($3, "load8_u", [3]), 1);
-
-// memory_init.wast:110
-assert_return(() => call($3, "load8_u", [4]), 4);
-
-// memory_init.wast:111
-assert_return(() => call($3, "load8_u", [5]), 1);
-
-// memory_init.wast:112
-assert_return(() => call($3, "load8_u", [6]), 0);
-
-// memory_init.wast:113
-assert_return(() => call($3, "load8_u", [7]), 0);
-
-// memory_init.wast:114
-assert_return(() => call($3, "load8_u", [8]), 0);
-
-// memory_init.wast:115
-assert_return(() => call($3, "load8_u", [9]), 0);
-
-// memory_init.wast:116
-assert_return(() => call($3, "load8_u", [10]), 0);
-
-// memory_init.wast:117
-assert_return(() => call($3, "load8_u", [11]), 0);
-
-// memory_init.wast:118
-assert_return(() => call($3, "load8_u", [12]), 7);
-
-// memory_init.wast:119
-assert_return(() => call($3, "load8_u", [13]), 5);
-
-// memory_init.wast:120
-assert_return(() => call($3, "load8_u", [14]), 2);
-
-// memory_init.wast:121
-assert_return(() => call($3, "load8_u", [15]), 9);
-
-// memory_init.wast:122
-assert_return(() => call($3, "load8_u", [16]), 2);
-
-// memory_init.wast:123
-assert_return(() => call($3, "load8_u", [17]), 7);
-
-// memory_init.wast:124
-assert_return(() => call($3, "load8_u", [18]), 0);
-
-// memory_init.wast:125
-assert_return(() => call($3, "load8_u", [19]), 0);
-
-// memory_init.wast:126
-assert_return(() => call($3, "load8_u", [20]), 0);
-
-// memory_init.wast:127
-assert_return(() => call($3, "load8_u", [21]), 0);
-
-// memory_init.wast:128
-assert_return(() => call($3, "load8_u", [22]), 0);
-
-// memory_init.wast:129
-assert_return(() => call($3, "load8_u", [23]), 0);
-
-// memory_init.wast:130
-assert_return(() => call($3, "load8_u", [24]), 0);
-
-// memory_init.wast:131
-assert_return(() => call($3, "load8_u", [25]), 0);
-
-// memory_init.wast:132
-assert_return(() => call($3, "load8_u", [26]), 0);
-
-// memory_init.wast:133
-assert_return(() => call($3, "load8_u", [27]), 0);
-
-// memory_init.wast:134
-assert_return(() => call($3, "load8_u", [28]), 0);
-
-// memory_init.wast:135
-assert_return(() => call($3, "load8_u", [29]), 0);
-
-// memory_init.wast:137
-let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0c\x81\x80\x80\x80\x00\x04\x0a\xe0\x80\x80\x80\x00\x02\xce\x80\x80\x80\x00\x00\x41\x07\x41\x00\x41\x04\xfc\x08\x01\x00\xfc\x09\x01\x41\x0f\x41\x01\x41\x03\xfc\x08\x03\x00\xfc\x09\x03\x41\x14\x41\x0f\x41\x05\xfc\x0a\x00\x00\x41\x15\x41\x1d\x41\x01\xfc\x0a\x00\x00\x41\x18\x41\x0a\x41\x01\xfc\x0a\x00\x00\x41\x0d\x41\x0b\x41\x04\xfc\x0a\x00\x00\x41\x13\x41\x14\x41\x05\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\xa1\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x04\x02\x07\x01\x08\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x05\x05\x09\x02\x07\x06");
-
-// memory_init.wast:156
-run(() => call($4, "test", []));
-
-// memory_init.wast:158
-assert_return(() => call($4, "load8_u", [0]), 0);
-
-// memory_init.wast:159
-assert_return(() => call($4, "load8_u", [1]), 0);
-
-// memory_init.wast:160
-assert_return(() => call($4, "load8_u", [2]), 3);
-
-// memory_init.wast:161
-assert_return(() => call($4, "load8_u", [3]), 1);
-
-// memory_init.wast:162
-assert_return(() => call($4, "load8_u", [4]), 4);
-
-// memory_init.wast:163
-assert_return(() => call($4, "load8_u", [5]), 1);
-
-// memory_init.wast:164
-assert_return(() => call($4, "load8_u", [6]), 0);
-
-// memory_init.wast:165
-assert_return(() => call($4, "load8_u", [7]), 2);
-
-// memory_init.wast:166
-assert_return(() => call($4, "load8_u", [8]), 7);
-
-// memory_init.wast:167
-assert_return(() => call($4, "load8_u", [9]), 1);
-
-// memory_init.wast:168
-assert_return(() => call($4, "load8_u", [10]), 8);
-
-// memory_init.wast:169
-assert_return(() => call($4, "load8_u", [11]), 0);
-
-// memory_init.wast:170
-assert_return(() => call($4, "load8_u", [12]), 7);
-
-// memory_init.wast:171
-assert_return(() => call($4, "load8_u", [13]), 0);
-
-// memory_init.wast:172
-assert_return(() => call($4, "load8_u", [14]), 7);
-
-// memory_init.wast:173
-assert_return(() => call($4, "load8_u", [15]), 5);
-
-// memory_init.wast:174
-assert_return(() => call($4, "load8_u", [16]), 2);
-
-// memory_init.wast:175
-assert_return(() => call($4, "load8_u", [17]), 7);
-
-// memory_init.wast:176
-assert_return(() => call($4, "load8_u", [18]), 0);
-
-// memory_init.wast:177
-assert_return(() => call($4, "load8_u", [19]), 9);
-
-// memory_init.wast:178
-assert_return(() => call($4, "load8_u", [20]), 0);
-
-// memory_init.wast:179
-assert_return(() => call($4, "load8_u", [21]), 7);
-
-// memory_init.wast:180
-assert_return(() => call($4, "load8_u", [22]), 0);
-
-// memory_init.wast:181
-assert_return(() => call($4, "load8_u", [23]), 8);
-
-// memory_init.wast:182
-assert_return(() => call($4, "load8_u", [24]), 8);
-
-// memory_init.wast:183
-assert_return(() => call($4, "load8_u", [25]), 0);
-
-// memory_init.wast:184
-assert_return(() => call($4, "load8_u", [26]), 0);
-
-// memory_init.wast:185
-assert_return(() => call($4, "load8_u", [27]), 0);
-
-// memory_init.wast:186
-assert_return(() => call($4, "load8_u", [28]), 0);
-
-// memory_init.wast:187
-assert_return(() => call($4, "load8_u", [29]), 0);
-
-// memory_init.wast:188
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x00\x0a\x8b\x80\x80\x80\x00\x01\x85\x80\x80\x80\x00\x00\xfc\x09\x00\x0b");
-
-// memory_init.wast:194
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x8b\x80\x80\x80\x00\x01\x85\x80\x80\x80\x00\x00\xfc\x09\x04\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:202
-let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x8e\x80\x80\x80\x00\x01\x88\x80\x80\x80\x00\x00\xfc\x09\x00\xfc\x09\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:208
-assert_trap(() => call($5, "test", []));
-
-// memory_init.wast:210
-let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\xfc\x09\x00\x41\xd2\x09\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:216
-assert_trap(() => call($6, "test", []));
-
-// memory_init.wast:218
-let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x87\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x01\x37");
-
-// memory_init.wast:223
-assert_trap(() => call($7, "test", []));
-
-// memory_init.wast:225
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x00\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x01\x41\x01\xfc\x08\x01\x00\x0b");
-
-// memory_init.wast:231
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x01\x41\x01\xfc\x08\x01\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:239
-let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x01\x41\x00\x41\x01\xfc\x08\x00\x00\x41\x01\x41\x00\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:245
-run(() => call($8, "test", []));
-
-// memory_init.wast:247
-let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x00\x41\x05\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:252
-assert_trap(() => call($9, "test", []));
-
-// memory_init.wast:254
-let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x02\x41\x03\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:259
-assert_trap(() => call($10, "test", []));
-
-// memory_init.wast:261
-let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\xfe\xff\x03\x41\x01\x41\x03\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:266
-assert_trap(() => call($11, "test", []));
-
-// memory_init.wast:268
-let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x04\x41\x00\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:273
-assert_trap(() => call($12, "test", []));
-
-// memory_init.wast:275
-let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x80\x80\x04\x41\x02\x41\x00\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:280
-assert_trap(() => call($13, "test", []));
-
-// memory_init.wast:282
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x01\x41\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:290
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x01\x41\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:298
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x01\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:306
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:314
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:322
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:330
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:338
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x01\x42\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:346
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x01\x42\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:354
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x01\x42\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:362
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x01\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:370
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:378
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:386
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:394
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:402
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:410
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:418
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:426
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:434
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:442
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:450
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:458
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:466
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:474
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:482
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:490
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:498
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:506
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:514
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:522
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:530
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x01\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:538
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x01\x41\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:546
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x01\x41\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:554
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x01\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:562
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:570
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:578
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:586
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:594
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x01\x42\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:602
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x01\x42\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:610
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x01\x42\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:618
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x01\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:626
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:634
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:642
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:650
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:658
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:666
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:674
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:682
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:690
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:698
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:706
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:714
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:722
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:730
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:738
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:746
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:754
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:762
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:770
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:778
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa7\x80\x80\x80\x00\x01\xa1\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
-
-// memory_init.wast:786
-let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
-
-// memory_init.wast:804
-assert_trap(() => call($14, "run", [65528, 16]));
-
-// memory_init.wast:807
-assert_return(() => call($14, "checkRange", [0, 65528, 0]), -1);
-
-// memory_init.wast:809
-assert_return(() => call($14, "checkRange", [65528, 65536, 66]), -1);
-
-// memory_init.wast:811
-assert_return(() => call($14, "checkRange", [65536, 65536, 0]), -1);
-
-// memory_init.wast:813
-let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
-
-// memory_init.wast:831
-assert_trap(() => call($15, "run", [65527, 16]));
-
-// memory_init.wast:834
-assert_return(() => call($15, "checkRange", [0, 65527, 0]), -1);
-
-// memory_init.wast:836
-assert_return(() => call($15, "checkRange", [65527, 65536, 66]), -1);
-
-// memory_init.wast:838
-assert_return(() => call($15, "checkRange", [65536, 65536, 0]), -1);
-
-// memory_init.wast:840
-let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
-
-// memory_init.wast:858
-assert_trap(() => call($16, "run", [65472, 30]));
-
-// memory_init.wast:861
-assert_return(() => call($16, "checkRange", [0, 65472, 0]), -1);
-
-// memory_init.wast:863
-assert_return(() => call($16, "checkRange", [65472, 65488, 66]), -1);
-
-// memory_init.wast:865
-assert_return(() => call($16, "checkRange", [65488, 65536, 0]), -1);
-
-// memory_init.wast:867
-let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
-
-// memory_init.wast:885
-assert_trap(() => call($17, "run", [65473, 31]));
-
-// memory_init.wast:888
-assert_return(() => call($17, "checkRange", [0, 65473, 0]), -1);
-
-// memory_init.wast:890
-assert_return(() => call($17, "checkRange", [65473, 65489, 66]), -1);
-
-// memory_init.wast:892
-assert_return(() => call($17, "checkRange", [65489, 65536, 0]), -1);
-
-// memory_init.wast:894
-let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
-
-// memory_init.wast:912
-assert_trap(() => call($18, "run", [65528, -256]));
-
-// memory_init.wast:915
-assert_return(() => call($18, "checkRange", [0, 65528, 0]), -1);
-
-// memory_init.wast:917
-assert_return(() => call($18, "checkRange", [65528, 65536, 66]), -1);
-
-// memory_init.wast:919
-assert_return(() => call($18, "checkRange", [65536, 65536, 0]), -1);
-
-// memory_init.wast:921
-let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
-
-// memory_init.wast:939
-assert_trap(() => call($19, "run", [0, -4]));
-
-// memory_init.wast:942
-assert_return(() => call($19, "checkRange", [0, 0, 0]), -1);
-
-// memory_init.wast:944
-assert_return(() => call($19, "checkRange", [0, 16, 66]), -1);
-
-// memory_init.wast:946
-assert_return(() => call($19, "checkRange", [16, 65536, 0]), -1);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast
deleted file mode 100644
index 51c4ae148b..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast
+++ /dev/null
@@ -1,1469 +0,0 @@
-;;
-;; Generated by ../meta/generate_table_copy.js
-;;
-
-(module
- (func (export "ef0") (result i32) (i32.const 0))
- (func (export "ef1") (result i32) (i32.const 1))
- (func (export "ef2") (result i32) (i32.const 2))
- (func (export "ef3") (result i32) (i32.const 3))
- (func (export "ef4") (result i32) (i32.const 4))
-)
-(register "a")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (nop))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.copy (i32.const 13) (i32.const 2) (i32.const 3)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 13)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 14)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.copy (i32.const 25) (i32.const 15) (i32.const 2)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 25)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 26)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.copy (i32.const 13) (i32.const 25) (i32.const 3)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_trap (invoke "check" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 15)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.copy (i32.const 20) (i32.const 22) (i32.const 4)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.copy (i32.const 25) (i32.const 1) (i32.const 3)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 26)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 27)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.copy (i32.const 10) (i32.const 12) (i32.const 7)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 10)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 11)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 12)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 13)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 14)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.copy (i32.const 12) (i32.const 10) (i32.const 7)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 13)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 14)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 17)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 18)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.copy (i32.const 28) (i32.const 1) (i32.const 3))
- ))
-
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.copy (i32.const 0xFFFFFFFE) (i32.const 1) (i32.const 2))
- ))
-
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.copy (i32.const 15) (i32.const 25) (i32.const 6))
- ))
-
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.copy (i32.const 15) (i32.const 0xFFFFFFFE) (i32.const 2))
- ))
-
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.copy (i32.const 15) (i32.const 25) (i32.const 0))
- ))
-
-(invoke "test")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.copy (i32.const 30) (i32.const 15) (i32.const 0))
- ))
-
-(invoke "test")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.copy (i32.const 15) (i32.const 30) (i32.const 0))
- ))
-
-(invoke "test")
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 0)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 24) (i32.const 0) (i32.const 16))
- "out of bounds")
-(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 0)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 23) (i32.const 0) (i32.const 15))
- "out of bounds")
-(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 8)) (i32.const 8))
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 24)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 8))
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 24)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 0) (i32.const 24) (i32.const 16))
- "out of bounds")
-(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 23)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 0) (i32.const 23) (i32.const 15))
- "out of bounds")
-(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 8)) (i32.const 8))
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 23)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 24)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 8))
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 11)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 24) (i32.const 11) (i32.const 16))
- "out of bounds")
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 12)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 13)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 14)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 15)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 16)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 17)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 18)) (i32.const 7))
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 24)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 11) (i32.const 24) (i32.const 16))
- "out of bounds")
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 11)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 12)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 13)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 14)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 15)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 16)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 17)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 18)) (i32.const 7))
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 21)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 24) (i32.const 21) (i32.const 16))
- "out of bounds")
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 22)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 23)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 24)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 7))
-(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 24)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 21) (i32.const 24) (i32.const 16))
- "out of bounds")
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 22)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 23)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 24)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem (i32.const 21)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 21) (i32.const 21) (i32.const 16))
- "out of bounds")
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 21)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 22)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 23)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 24)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 8))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 9))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 10))
-
-(module
- (type (func (result i32)))
- (table 128 128 funcref)
- (elem (i32.const 112)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 0) (i32.const 112) (i32.const 4294967264))
- "out of bounds")
-(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 8)) (i32.const 8))
-(assert_return (invoke "test" (i32.const 9)) (i32.const 9))
-(assert_return (invoke "test" (i32.const 10)) (i32.const 10))
-(assert_return (invoke "test" (i32.const 11)) (i32.const 11))
-(assert_return (invoke "test" (i32.const 12)) (i32.const 12))
-(assert_return (invoke "test" (i32.const 13)) (i32.const 13))
-(assert_return (invoke "test" (i32.const 14)) (i32.const 14))
-(assert_return (invoke "test" (i32.const 15)) (i32.const 15))
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 48)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 49)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 50)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 51)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 52)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 53)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 54)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 55)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 56)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 57)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 58)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 59)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 60)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 61)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 62)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 63)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 64)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 65)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 66)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 67)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 68)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 69)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 70)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 71)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 72)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 73)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 74)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 75)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 76)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 77)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 78)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 79)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 80)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 81)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 82)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 83)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 84)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 85)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 86)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 87)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 88)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 89)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 90)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 91)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 92)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 93)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 94)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 95)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 96)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 97)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 98)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 99)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 100)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 101)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 102)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 103)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 104)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 105)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 106)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 107)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 108)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 109)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 110)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 111)) "uninitialized element")
-(assert_return (invoke "test" (i32.const 112)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 113)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 114)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 115)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 116)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 117)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 118)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 119)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 120)) (i32.const 8))
-(assert_return (invoke "test" (i32.const 121)) (i32.const 9))
-(assert_return (invoke "test" (i32.const 122)) (i32.const 10))
-(assert_return (invoke "test" (i32.const 123)) (i32.const 11))
-(assert_return (invoke "test" (i32.const 124)) (i32.const 12))
-(assert_return (invoke "test" (i32.const 125)) (i32.const 13))
-(assert_return (invoke "test" (i32.const 126)) (i32.const 14))
-(assert_return (invoke "test" (i32.const 127)) (i32.const 15))
-
-(module
- (type (func (result i32)))
- (table 128 128 funcref)
- (elem (i32.const 0)
- $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
- (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
-
-(assert_trap (invoke "run" (i32.const 112) (i32.const 0) (i32.const 4294967264))
- "out of bounds")
-(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 8)) (i32.const 8))
-(assert_return (invoke "test" (i32.const 9)) (i32.const 9))
-(assert_return (invoke "test" (i32.const 10)) (i32.const 10))
-(assert_return (invoke "test" (i32.const 11)) (i32.const 11))
-(assert_return (invoke "test" (i32.const 12)) (i32.const 12))
-(assert_return (invoke "test" (i32.const 13)) (i32.const 13))
-(assert_return (invoke "test" (i32.const 14)) (i32.const 14))
-(assert_return (invoke "test" (i32.const 15)) (i32.const 15))
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 48)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 49)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 50)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 51)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 52)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 53)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 54)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 55)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 56)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 57)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 58)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 59)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 60)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 61)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 62)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 63)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 64)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 65)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 66)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 67)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 68)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 69)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 70)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 71)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 72)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 73)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 74)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 75)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 76)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 77)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 78)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 79)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 80)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 81)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 82)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 83)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 84)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 85)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 86)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 87)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 88)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 89)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 90)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 91)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 92)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 93)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 94)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 95)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 96)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 97)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 98)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 99)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 100)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 101)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 102)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 103)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 104)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 105)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 106)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 107)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 108)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 109)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 110)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 111)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 112)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 113)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 114)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 115)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 116)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 117)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 118)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 119)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 120)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 121)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 122)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 123)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 124)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 125)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 126)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 127)) "uninitialized element")
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js
deleted file mode 100644
index 67c1e94e0e..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js
+++ /dev/null
@@ -1,2651 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// table_copy.wast:5
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x03\x86\x80\x80\x80\x00\x05\x00\x00\x00\x00\x00\x07\x9f\x80\x80\x80\x00\x05\x03\x65\x66\x30\x00\x00\x03\x65\x66\x31\x00\x01\x03\x65\x66\x32\x00\x02\x03\x65\x66\x33\x00\x03\x03\x65\x66\x34\x00\x04\x0a\xae\x80\x80\x80\x00\x05\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b");
-
-// table_copy.wast:12
-register("a", $1)
-
-// table_copy.wast:14
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xc2\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x83\x80\x80\x80\x00\x00\x01\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_copy.wast:37
-run(() => call($2, "test", []));
-
-// table_copy.wast:38
-assert_trap(() => call($2, "check", [0]));
-
-// table_copy.wast:39
-assert_trap(() => call($2, "check", [1]));
-
-// table_copy.wast:40
-assert_return(() => call($2, "check", [2]), 3);
-
-// table_copy.wast:41
-assert_return(() => call($2, "check", [3]), 1);
-
-// table_copy.wast:42
-assert_return(() => call($2, "check", [4]), 4);
-
-// table_copy.wast:43
-assert_return(() => call($2, "check", [5]), 1);
-
-// table_copy.wast:44
-assert_trap(() => call($2, "check", [6]));
-
-// table_copy.wast:45
-assert_trap(() => call($2, "check", [7]));
-
-// table_copy.wast:46
-assert_trap(() => call($2, "check", [8]));
-
-// table_copy.wast:47
-assert_trap(() => call($2, "check", [9]));
-
-// table_copy.wast:48
-assert_trap(() => call($2, "check", [10]));
-
-// table_copy.wast:49
-assert_trap(() => call($2, "check", [11]));
-
-// table_copy.wast:50
-assert_return(() => call($2, "check", [12]), 7);
-
-// table_copy.wast:51
-assert_return(() => call($2, "check", [13]), 5);
-
-// table_copy.wast:52
-assert_return(() => call($2, "check", [14]), 2);
-
-// table_copy.wast:53
-assert_return(() => call($2, "check", [15]), 3);
-
-// table_copy.wast:54
-assert_return(() => call($2, "check", [16]), 6);
-
-// table_copy.wast:55
-assert_trap(() => call($2, "check", [17]));
-
-// table_copy.wast:56
-assert_trap(() => call($2, "check", [18]));
-
-// table_copy.wast:57
-assert_trap(() => call($2, "check", [19]));
-
-// table_copy.wast:58
-assert_trap(() => call($2, "check", [20]));
-
-// table_copy.wast:59
-assert_trap(() => call($2, "check", [21]));
-
-// table_copy.wast:60
-assert_trap(() => call($2, "check", [22]));
-
-// table_copy.wast:61
-assert_trap(() => call($2, "check", [23]));
-
-// table_copy.wast:62
-assert_trap(() => call($2, "check", [24]));
-
-// table_copy.wast:63
-assert_trap(() => call($2, "check", [25]));
-
-// table_copy.wast:64
-assert_trap(() => call($2, "check", [26]));
-
-// table_copy.wast:65
-assert_trap(() => call($2, "check", [27]));
-
-// table_copy.wast:66
-assert_trap(() => call($2, "check", [28]));
-
-// table_copy.wast:67
-assert_trap(() => call($2, "check", [29]));
-
-// table_copy.wast:69
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0d\x41\x02\x41\x03\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_copy.wast:92
-run(() => call($3, "test", []));
-
-// table_copy.wast:93
-assert_trap(() => call($3, "check", [0]));
-
-// table_copy.wast:94
-assert_trap(() => call($3, "check", [1]));
-
-// table_copy.wast:95
-assert_return(() => call($3, "check", [2]), 3);
-
-// table_copy.wast:96
-assert_return(() => call($3, "check", [3]), 1);
-
-// table_copy.wast:97
-assert_return(() => call($3, "check", [4]), 4);
-
-// table_copy.wast:98
-assert_return(() => call($3, "check", [5]), 1);
-
-// table_copy.wast:99
-assert_trap(() => call($3, "check", [6]));
-
-// table_copy.wast:100
-assert_trap(() => call($3, "check", [7]));
-
-// table_copy.wast:101
-assert_trap(() => call($3, "check", [8]));
-
-// table_copy.wast:102
-assert_trap(() => call($3, "check", [9]));
-
-// table_copy.wast:103
-assert_trap(() => call($3, "check", [10]));
-
-// table_copy.wast:104
-assert_trap(() => call($3, "check", [11]));
-
-// table_copy.wast:105
-assert_return(() => call($3, "check", [12]), 7);
-
-// table_copy.wast:106
-assert_return(() => call($3, "check", [13]), 3);
-
-// table_copy.wast:107
-assert_return(() => call($3, "check", [14]), 1);
-
-// table_copy.wast:108
-assert_return(() => call($3, "check", [15]), 4);
-
-// table_copy.wast:109
-assert_return(() => call($3, "check", [16]), 6);
-
-// table_copy.wast:110
-assert_trap(() => call($3, "check", [17]));
-
-// table_copy.wast:111
-assert_trap(() => call($3, "check", [18]));
-
-// table_copy.wast:112
-assert_trap(() => call($3, "check", [19]));
-
-// table_copy.wast:113
-assert_trap(() => call($3, "check", [20]));
-
-// table_copy.wast:114
-assert_trap(() => call($3, "check", [21]));
-
-// table_copy.wast:115
-assert_trap(() => call($3, "check", [22]));
-
-// table_copy.wast:116
-assert_trap(() => call($3, "check", [23]));
-
-// table_copy.wast:117
-assert_trap(() => call($3, "check", [24]));
-
-// table_copy.wast:118
-assert_trap(() => call($3, "check", [25]));
-
-// table_copy.wast:119
-assert_trap(() => call($3, "check", [26]));
-
-// table_copy.wast:120
-assert_trap(() => call($3, "check", [27]));
-
-// table_copy.wast:121
-assert_trap(() => call($3, "check", [28]));
-
-// table_copy.wast:122
-assert_trap(() => call($3, "check", [29]));
-
-// table_copy.wast:124
-let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x19\x41\x0f\x41\x02\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_copy.wast:147
-run(() => call($4, "test", []));
-
-// table_copy.wast:148
-assert_trap(() => call($4, "check", [0]));
-
-// table_copy.wast:149
-assert_trap(() => call($4, "check", [1]));
-
-// table_copy.wast:150
-assert_return(() => call($4, "check", [2]), 3);
-
-// table_copy.wast:151
-assert_return(() => call($4, "check", [3]), 1);
-
-// table_copy.wast:152
-assert_return(() => call($4, "check", [4]), 4);
-
-// table_copy.wast:153
-assert_return(() => call($4, "check", [5]), 1);
-
-// table_copy.wast:154
-assert_trap(() => call($4, "check", [6]));
-
-// table_copy.wast:155
-assert_trap(() => call($4, "check", [7]));
-
-// table_copy.wast:156
-assert_trap(() => call($4, "check", [8]));
-
-// table_copy.wast:157
-assert_trap(() => call($4, "check", [9]));
-
-// table_copy.wast:158
-assert_trap(() => call($4, "check", [10]));
-
-// table_copy.wast:159
-assert_trap(() => call($4, "check", [11]));
-
-// table_copy.wast:160
-assert_return(() => call($4, "check", [12]), 7);
-
-// table_copy.wast:161
-assert_return(() => call($4, "check", [13]), 5);
-
-// table_copy.wast:162
-assert_return(() => call($4, "check", [14]), 2);
-
-// table_copy.wast:163
-assert_return(() => call($4, "check", [15]), 3);
-
-// table_copy.wast:164
-assert_return(() => call($4, "check", [16]), 6);
-
-// table_copy.wast:165
-assert_trap(() => call($4, "check", [17]));
-
-// table_copy.wast:166
-assert_trap(() => call($4, "check", [18]));
-
-// table_copy.wast:167
-assert_trap(() => call($4, "check", [19]));
-
-// table_copy.wast:168
-assert_trap(() => call($4, "check", [20]));
-
-// table_copy.wast:169
-assert_trap(() => call($4, "check", [21]));
-
-// table_copy.wast:170
-assert_trap(() => call($4, "check", [22]));
-
-// table_copy.wast:171
-assert_trap(() => call($4, "check", [23]));
-
-// table_copy.wast:172
-assert_trap(() => call($4, "check", [24]));
-
-// table_copy.wast:173
-assert_return(() => call($4, "check", [25]), 3);
-
-// table_copy.wast:174
-assert_return(() => call($4, "check", [26]), 6);
-
-// table_copy.wast:175
-assert_trap(() => call($4, "check", [27]));
-
-// table_copy.wast:176
-assert_trap(() => call($4, "check", [28]));
-
-// table_copy.wast:177
-assert_trap(() => call($4, "check", [29]));
-
-// table_copy.wast:179
-let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0d\x41\x19\x41\x03\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_copy.wast:202
-run(() => call($5, "test", []));
-
-// table_copy.wast:203
-assert_trap(() => call($5, "check", [0]));
-
-// table_copy.wast:204
-assert_trap(() => call($5, "check", [1]));
-
-// table_copy.wast:205
-assert_return(() => call($5, "check", [2]), 3);
-
-// table_copy.wast:206
-assert_return(() => call($5, "check", [3]), 1);
-
-// table_copy.wast:207
-assert_return(() => call($5, "check", [4]), 4);
-
-// table_copy.wast:208
-assert_return(() => call($5, "check", [5]), 1);
-
-// table_copy.wast:209
-assert_trap(() => call($5, "check", [6]));
-
-// table_copy.wast:210
-assert_trap(() => call($5, "check", [7]));
-
-// table_copy.wast:211
-assert_trap(() => call($5, "check", [8]));
-
-// table_copy.wast:212
-assert_trap(() => call($5, "check", [9]));
-
-// table_copy.wast:213
-assert_trap(() => call($5, "check", [10]));
-
-// table_copy.wast:214
-assert_trap(() => call($5, "check", [11]));
-
-// table_copy.wast:215
-assert_return(() => call($5, "check", [12]), 7);
-
-// table_copy.wast:216
-assert_trap(() => call($5, "check", [13]));
-
-// table_copy.wast:217
-assert_trap(() => call($5, "check", [14]));
-
-// table_copy.wast:218
-assert_trap(() => call($5, "check", [15]));
-
-// table_copy.wast:219
-assert_return(() => call($5, "check", [16]), 6);
-
-// table_copy.wast:220
-assert_trap(() => call($5, "check", [17]));
-
-// table_copy.wast:221
-assert_trap(() => call($5, "check", [18]));
-
-// table_copy.wast:222
-assert_trap(() => call($5, "check", [19]));
-
-// table_copy.wast:223
-assert_trap(() => call($5, "check", [20]));
-
-// table_copy.wast:224
-assert_trap(() => call($5, "check", [21]));
-
-// table_copy.wast:225
-assert_trap(() => call($5, "check", [22]));
-
-// table_copy.wast:226
-assert_trap(() => call($5, "check", [23]));
-
-// table_copy.wast:227
-assert_trap(() => call($5, "check", [24]));
-
-// table_copy.wast:228
-assert_trap(() => call($5, "check", [25]));
-
-// table_copy.wast:229
-assert_trap(() => call($5, "check", [26]));
-
-// table_copy.wast:230
-assert_trap(() => call($5, "check", [27]));
-
-// table_copy.wast:231
-assert_trap(() => call($5, "check", [28]));
-
-// table_copy.wast:232
-assert_trap(() => call($5, "check", [29]));
-
-// table_copy.wast:234
-let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x14\x41\x16\x41\x04\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_copy.wast:257
-run(() => call($6, "test", []));
-
-// table_copy.wast:258
-assert_trap(() => call($6, "check", [0]));
-
-// table_copy.wast:259
-assert_trap(() => call($6, "check", [1]));
-
-// table_copy.wast:260
-assert_return(() => call($6, "check", [2]), 3);
-
-// table_copy.wast:261
-assert_return(() => call($6, "check", [3]), 1);
-
-// table_copy.wast:262
-assert_return(() => call($6, "check", [4]), 4);
-
-// table_copy.wast:263
-assert_return(() => call($6, "check", [5]), 1);
-
-// table_copy.wast:264
-assert_trap(() => call($6, "check", [6]));
-
-// table_copy.wast:265
-assert_trap(() => call($6, "check", [7]));
-
-// table_copy.wast:266
-assert_trap(() => call($6, "check", [8]));
-
-// table_copy.wast:267
-assert_trap(() => call($6, "check", [9]));
-
-// table_copy.wast:268
-assert_trap(() => call($6, "check", [10]));
-
-// table_copy.wast:269
-assert_trap(() => call($6, "check", [11]));
-
-// table_copy.wast:270
-assert_return(() => call($6, "check", [12]), 7);
-
-// table_copy.wast:271
-assert_return(() => call($6, "check", [13]), 5);
-
-// table_copy.wast:272
-assert_return(() => call($6, "check", [14]), 2);
-
-// table_copy.wast:273
-assert_return(() => call($6, "check", [15]), 3);
-
-// table_copy.wast:274
-assert_return(() => call($6, "check", [16]), 6);
-
-// table_copy.wast:275
-assert_trap(() => call($6, "check", [17]));
-
-// table_copy.wast:276
-assert_trap(() => call($6, "check", [18]));
-
-// table_copy.wast:277
-assert_trap(() => call($6, "check", [19]));
-
-// table_copy.wast:278
-assert_trap(() => call($6, "check", [20]));
-
-// table_copy.wast:279
-assert_trap(() => call($6, "check", [21]));
-
-// table_copy.wast:280
-assert_trap(() => call($6, "check", [22]));
-
-// table_copy.wast:281
-assert_trap(() => call($6, "check", [23]));
-
-// table_copy.wast:282
-assert_trap(() => call($6, "check", [24]));
-
-// table_copy.wast:283
-assert_trap(() => call($6, "check", [25]));
-
-// table_copy.wast:284
-assert_trap(() => call($6, "check", [26]));
-
-// table_copy.wast:285
-assert_trap(() => call($6, "check", [27]));
-
-// table_copy.wast:286
-assert_trap(() => call($6, "check", [28]));
-
-// table_copy.wast:287
-assert_trap(() => call($6, "check", [29]));
-
-// table_copy.wast:289
-let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x19\x41\x01\x41\x03\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_copy.wast:312
-run(() => call($7, "test", []));
-
-// table_copy.wast:313
-assert_trap(() => call($7, "check", [0]));
-
-// table_copy.wast:314
-assert_trap(() => call($7, "check", [1]));
-
-// table_copy.wast:315
-assert_return(() => call($7, "check", [2]), 3);
-
-// table_copy.wast:316
-assert_return(() => call($7, "check", [3]), 1);
-
-// table_copy.wast:317
-assert_return(() => call($7, "check", [4]), 4);
-
-// table_copy.wast:318
-assert_return(() => call($7, "check", [5]), 1);
-
-// table_copy.wast:319
-assert_trap(() => call($7, "check", [6]));
-
-// table_copy.wast:320
-assert_trap(() => call($7, "check", [7]));
-
-// table_copy.wast:321
-assert_trap(() => call($7, "check", [8]));
-
-// table_copy.wast:322
-assert_trap(() => call($7, "check", [9]));
-
-// table_copy.wast:323
-assert_trap(() => call($7, "check", [10]));
-
-// table_copy.wast:324
-assert_trap(() => call($7, "check", [11]));
-
-// table_copy.wast:325
-assert_return(() => call($7, "check", [12]), 7);
-
-// table_copy.wast:326
-assert_return(() => call($7, "check", [13]), 5);
-
-// table_copy.wast:327
-assert_return(() => call($7, "check", [14]), 2);
-
-// table_copy.wast:328
-assert_return(() => call($7, "check", [15]), 3);
-
-// table_copy.wast:329
-assert_return(() => call($7, "check", [16]), 6);
-
-// table_copy.wast:330
-assert_trap(() => call($7, "check", [17]));
-
-// table_copy.wast:331
-assert_trap(() => call($7, "check", [18]));
-
-// table_copy.wast:332
-assert_trap(() => call($7, "check", [19]));
-
-// table_copy.wast:333
-assert_trap(() => call($7, "check", [20]));
-
-// table_copy.wast:334
-assert_trap(() => call($7, "check", [21]));
-
-// table_copy.wast:335
-assert_trap(() => call($7, "check", [22]));
-
-// table_copy.wast:336
-assert_trap(() => call($7, "check", [23]));
-
-// table_copy.wast:337
-assert_trap(() => call($7, "check", [24]));
-
-// table_copy.wast:338
-assert_trap(() => call($7, "check", [25]));
-
-// table_copy.wast:339
-assert_return(() => call($7, "check", [26]), 3);
-
-// table_copy.wast:340
-assert_return(() => call($7, "check", [27]), 1);
-
-// table_copy.wast:341
-assert_trap(() => call($7, "check", [28]));
-
-// table_copy.wast:342
-assert_trap(() => call($7, "check", [29]));
-
-// table_copy.wast:344
-let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0a\x41\x0c\x41\x07\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_copy.wast:367
-run(() => call($8, "test", []));
-
-// table_copy.wast:368
-assert_trap(() => call($8, "check", [0]));
-
-// table_copy.wast:369
-assert_trap(() => call($8, "check", [1]));
-
-// table_copy.wast:370
-assert_return(() => call($8, "check", [2]), 3);
-
-// table_copy.wast:371
-assert_return(() => call($8, "check", [3]), 1);
-
-// table_copy.wast:372
-assert_return(() => call($8, "check", [4]), 4);
-
-// table_copy.wast:373
-assert_return(() => call($8, "check", [5]), 1);
-
-// table_copy.wast:374
-assert_trap(() => call($8, "check", [6]));
-
-// table_copy.wast:375
-assert_trap(() => call($8, "check", [7]));
-
-// table_copy.wast:376
-assert_trap(() => call($8, "check", [8]));
-
-// table_copy.wast:377
-assert_trap(() => call($8, "check", [9]));
-
-// table_copy.wast:378
-assert_return(() => call($8, "check", [10]), 7);
-
-// table_copy.wast:379
-assert_return(() => call($8, "check", [11]), 5);
-
-// table_copy.wast:380
-assert_return(() => call($8, "check", [12]), 2);
-
-// table_copy.wast:381
-assert_return(() => call($8, "check", [13]), 3);
-
-// table_copy.wast:382
-assert_return(() => call($8, "check", [14]), 6);
-
-// table_copy.wast:383
-assert_trap(() => call($8, "check", [15]));
-
-// table_copy.wast:384
-assert_trap(() => call($8, "check", [16]));
-
-// table_copy.wast:385
-assert_trap(() => call($8, "check", [17]));
-
-// table_copy.wast:386
-assert_trap(() => call($8, "check", [18]));
-
-// table_copy.wast:387
-assert_trap(() => call($8, "check", [19]));
-
-// table_copy.wast:388
-assert_trap(() => call($8, "check", [20]));
-
-// table_copy.wast:389
-assert_trap(() => call($8, "check", [21]));
-
-// table_copy.wast:390
-assert_trap(() => call($8, "check", [22]));
-
-// table_copy.wast:391
-assert_trap(() => call($8, "check", [23]));
-
-// table_copy.wast:392
-assert_trap(() => call($8, "check", [24]));
-
-// table_copy.wast:393
-assert_trap(() => call($8, "check", [25]));
-
-// table_copy.wast:394
-assert_trap(() => call($8, "check", [26]));
-
-// table_copy.wast:395
-assert_trap(() => call($8, "check", [27]));
-
-// table_copy.wast:396
-assert_trap(() => call($8, "check", [28]));
-
-// table_copy.wast:397
-assert_trap(() => call($8, "check", [29]));
-
-// table_copy.wast:399
-let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x0a\x41\x07\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_copy.wast:422
-run(() => call($9, "test", []));
-
-// table_copy.wast:423
-assert_trap(() => call($9, "check", [0]));
-
-// table_copy.wast:424
-assert_trap(() => call($9, "check", [1]));
-
-// table_copy.wast:425
-assert_return(() => call($9, "check", [2]), 3);
-
-// table_copy.wast:426
-assert_return(() => call($9, "check", [3]), 1);
-
-// table_copy.wast:427
-assert_return(() => call($9, "check", [4]), 4);
-
-// table_copy.wast:428
-assert_return(() => call($9, "check", [5]), 1);
-
-// table_copy.wast:429
-assert_trap(() => call($9, "check", [6]));
-
-// table_copy.wast:430
-assert_trap(() => call($9, "check", [7]));
-
-// table_copy.wast:431
-assert_trap(() => call($9, "check", [8]));
-
-// table_copy.wast:432
-assert_trap(() => call($9, "check", [9]));
-
-// table_copy.wast:433
-assert_trap(() => call($9, "check", [10]));
-
-// table_copy.wast:434
-assert_trap(() => call($9, "check", [11]));
-
-// table_copy.wast:435
-assert_trap(() => call($9, "check", [12]));
-
-// table_copy.wast:436
-assert_trap(() => call($9, "check", [13]));
-
-// table_copy.wast:437
-assert_return(() => call($9, "check", [14]), 7);
-
-// table_copy.wast:438
-assert_return(() => call($9, "check", [15]), 5);
-
-// table_copy.wast:439
-assert_return(() => call($9, "check", [16]), 2);
-
-// table_copy.wast:440
-assert_return(() => call($9, "check", [17]), 3);
-
-// table_copy.wast:441
-assert_return(() => call($9, "check", [18]), 6);
-
-// table_copy.wast:442
-assert_trap(() => call($9, "check", [19]));
-
-// table_copy.wast:443
-assert_trap(() => call($9, "check", [20]));
-
-// table_copy.wast:444
-assert_trap(() => call($9, "check", [21]));
-
-// table_copy.wast:445
-assert_trap(() => call($9, "check", [22]));
-
-// table_copy.wast:446
-assert_trap(() => call($9, "check", [23]));
-
-// table_copy.wast:447
-assert_trap(() => call($9, "check", [24]));
-
-// table_copy.wast:448
-assert_trap(() => call($9, "check", [25]));
-
-// table_copy.wast:449
-assert_trap(() => call($9, "check", [26]));
-
-// table_copy.wast:450
-assert_trap(() => call($9, "check", [27]));
-
-// table_copy.wast:451
-assert_trap(() => call($9, "check", [28]));
-
-// table_copy.wast:452
-assert_trap(() => call($9, "check", [29]));
-
-// table_copy.wast:454
-let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x1c\x41\x01\x41\x03\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:474
-assert_trap(() => call($10, "test", []));
-
-// table_copy.wast:476
-let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x7e\x41\x01\x41\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:496
-assert_trap(() => call($11, "test", []));
-
-// table_copy.wast:498
-let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x19\x41\x06\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:518
-assert_trap(() => call($12, "test", []));
-
-// table_copy.wast:520
-let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x7e\x41\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:540
-assert_trap(() => call($13, "test", []));
-
-// table_copy.wast:542
-let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x19\x41\x00\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:562
-run(() => call($14, "test", []));
-
-// table_copy.wast:564
-let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x1e\x41\x0f\x41\x00\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:584
-run(() => call($15, "test", []));
-
-// table_copy.wast:586
-let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x1e\x41\x00\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:606
-run(() => call($16, "test", []));
-
-// table_copy.wast:608
-let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:634
-assert_trap(() => call($17, "run", [24, 0, 16]));
-
-// table_copy.wast:636
-assert_return(() => call($17, "test", [0]), 0);
-
-// table_copy.wast:637
-assert_return(() => call($17, "test", [1]), 1);
-
-// table_copy.wast:638
-assert_return(() => call($17, "test", [2]), 2);
-
-// table_copy.wast:639
-assert_return(() => call($17, "test", [3]), 3);
-
-// table_copy.wast:640
-assert_return(() => call($17, "test", [4]), 4);
-
-// table_copy.wast:641
-assert_return(() => call($17, "test", [5]), 5);
-
-// table_copy.wast:642
-assert_return(() => call($17, "test", [6]), 6);
-
-// table_copy.wast:643
-assert_return(() => call($17, "test", [7]), 7);
-
-// table_copy.wast:644
-assert_trap(() => call($17, "test", [8]));
-
-// table_copy.wast:645
-assert_trap(() => call($17, "test", [9]));
-
-// table_copy.wast:646
-assert_trap(() => call($17, "test", [10]));
-
-// table_copy.wast:647
-assert_trap(() => call($17, "test", [11]));
-
-// table_copy.wast:648
-assert_trap(() => call($17, "test", [12]));
-
-// table_copy.wast:649
-assert_trap(() => call($17, "test", [13]));
-
-// table_copy.wast:650
-assert_trap(() => call($17, "test", [14]));
-
-// table_copy.wast:651
-assert_trap(() => call($17, "test", [15]));
-
-// table_copy.wast:652
-assert_trap(() => call($17, "test", [16]));
-
-// table_copy.wast:653
-assert_trap(() => call($17, "test", [17]));
-
-// table_copy.wast:654
-assert_trap(() => call($17, "test", [18]));
-
-// table_copy.wast:655
-assert_trap(() => call($17, "test", [19]));
-
-// table_copy.wast:656
-assert_trap(() => call($17, "test", [20]));
-
-// table_copy.wast:657
-assert_trap(() => call($17, "test", [21]));
-
-// table_copy.wast:658
-assert_trap(() => call($17, "test", [22]));
-
-// table_copy.wast:659
-assert_trap(() => call($17, "test", [23]));
-
-// table_copy.wast:660
-assert_return(() => call($17, "test", [24]), 0);
-
-// table_copy.wast:661
-assert_return(() => call($17, "test", [25]), 1);
-
-// table_copy.wast:662
-assert_return(() => call($17, "test", [26]), 2);
-
-// table_copy.wast:663
-assert_return(() => call($17, "test", [27]), 3);
-
-// table_copy.wast:664
-assert_return(() => call($17, "test", [28]), 4);
-
-// table_copy.wast:665
-assert_return(() => call($17, "test", [29]), 5);
-
-// table_copy.wast:666
-assert_return(() => call($17, "test", [30]), 6);
-
-// table_copy.wast:667
-assert_return(() => call($17, "test", [31]), 7);
-
-// table_copy.wast:669
-let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8f\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x09\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:695
-assert_trap(() => call($18, "run", [23, 0, 15]));
-
-// table_copy.wast:697
-assert_return(() => call($18, "test", [0]), 0);
-
-// table_copy.wast:698
-assert_return(() => call($18, "test", [1]), 1);
-
-// table_copy.wast:699
-assert_return(() => call($18, "test", [2]), 2);
-
-// table_copy.wast:700
-assert_return(() => call($18, "test", [3]), 3);
-
-// table_copy.wast:701
-assert_return(() => call($18, "test", [4]), 4);
-
-// table_copy.wast:702
-assert_return(() => call($18, "test", [5]), 5);
-
-// table_copy.wast:703
-assert_return(() => call($18, "test", [6]), 6);
-
-// table_copy.wast:704
-assert_return(() => call($18, "test", [7]), 7);
-
-// table_copy.wast:705
-assert_return(() => call($18, "test", [8]), 8);
-
-// table_copy.wast:706
-assert_trap(() => call($18, "test", [9]));
-
-// table_copy.wast:707
-assert_trap(() => call($18, "test", [10]));
-
-// table_copy.wast:708
-assert_trap(() => call($18, "test", [11]));
-
-// table_copy.wast:709
-assert_trap(() => call($18, "test", [12]));
-
-// table_copy.wast:710
-assert_trap(() => call($18, "test", [13]));
-
-// table_copy.wast:711
-assert_trap(() => call($18, "test", [14]));
-
-// table_copy.wast:712
-assert_trap(() => call($18, "test", [15]));
-
-// table_copy.wast:713
-assert_trap(() => call($18, "test", [16]));
-
-// table_copy.wast:714
-assert_trap(() => call($18, "test", [17]));
-
-// table_copy.wast:715
-assert_trap(() => call($18, "test", [18]));
-
-// table_copy.wast:716
-assert_trap(() => call($18, "test", [19]));
-
-// table_copy.wast:717
-assert_trap(() => call($18, "test", [20]));
-
-// table_copy.wast:718
-assert_trap(() => call($18, "test", [21]));
-
-// table_copy.wast:719
-assert_trap(() => call($18, "test", [22]));
-
-// table_copy.wast:720
-assert_return(() => call($18, "test", [23]), 0);
-
-// table_copy.wast:721
-assert_return(() => call($18, "test", [24]), 1);
-
-// table_copy.wast:722
-assert_return(() => call($18, "test", [25]), 2);
-
-// table_copy.wast:723
-assert_return(() => call($18, "test", [26]), 3);
-
-// table_copy.wast:724
-assert_return(() => call($18, "test", [27]), 4);
-
-// table_copy.wast:725
-assert_return(() => call($18, "test", [28]), 5);
-
-// table_copy.wast:726
-assert_return(() => call($18, "test", [29]), 6);
-
-// table_copy.wast:727
-assert_return(() => call($18, "test", [30]), 7);
-
-// table_copy.wast:728
-assert_return(() => call($18, "test", [31]), 8);
-
-// table_copy.wast:730
-let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x18\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:756
-assert_trap(() => call($19, "run", [0, 24, 16]));
-
-// table_copy.wast:758
-assert_return(() => call($19, "test", [0]), 0);
-
-// table_copy.wast:759
-assert_return(() => call($19, "test", [1]), 1);
-
-// table_copy.wast:760
-assert_return(() => call($19, "test", [2]), 2);
-
-// table_copy.wast:761
-assert_return(() => call($19, "test", [3]), 3);
-
-// table_copy.wast:762
-assert_return(() => call($19, "test", [4]), 4);
-
-// table_copy.wast:763
-assert_return(() => call($19, "test", [5]), 5);
-
-// table_copy.wast:764
-assert_return(() => call($19, "test", [6]), 6);
-
-// table_copy.wast:765
-assert_return(() => call($19, "test", [7]), 7);
-
-// table_copy.wast:766
-assert_trap(() => call($19, "test", [8]));
-
-// table_copy.wast:767
-assert_trap(() => call($19, "test", [9]));
-
-// table_copy.wast:768
-assert_trap(() => call($19, "test", [10]));
-
-// table_copy.wast:769
-assert_trap(() => call($19, "test", [11]));
-
-// table_copy.wast:770
-assert_trap(() => call($19, "test", [12]));
-
-// table_copy.wast:771
-assert_trap(() => call($19, "test", [13]));
-
-// table_copy.wast:772
-assert_trap(() => call($19, "test", [14]));
-
-// table_copy.wast:773
-assert_trap(() => call($19, "test", [15]));
-
-// table_copy.wast:774
-assert_trap(() => call($19, "test", [16]));
-
-// table_copy.wast:775
-assert_trap(() => call($19, "test", [17]));
-
-// table_copy.wast:776
-assert_trap(() => call($19, "test", [18]));
-
-// table_copy.wast:777
-assert_trap(() => call($19, "test", [19]));
-
-// table_copy.wast:778
-assert_trap(() => call($19, "test", [20]));
-
-// table_copy.wast:779
-assert_trap(() => call($19, "test", [21]));
-
-// table_copy.wast:780
-assert_trap(() => call($19, "test", [22]));
-
-// table_copy.wast:781
-assert_trap(() => call($19, "test", [23]));
-
-// table_copy.wast:782
-assert_return(() => call($19, "test", [24]), 0);
-
-// table_copy.wast:783
-assert_return(() => call($19, "test", [25]), 1);
-
-// table_copy.wast:784
-assert_return(() => call($19, "test", [26]), 2);
-
-// table_copy.wast:785
-assert_return(() => call($19, "test", [27]), 3);
-
-// table_copy.wast:786
-assert_return(() => call($19, "test", [28]), 4);
-
-// table_copy.wast:787
-assert_return(() => call($19, "test", [29]), 5);
-
-// table_copy.wast:788
-assert_return(() => call($19, "test", [30]), 6);
-
-// table_copy.wast:789
-assert_return(() => call($19, "test", [31]), 7);
-
-// table_copy.wast:791
-let $20 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8f\x80\x80\x80\x00\x01\x00\x41\x17\x0b\x09\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:817
-assert_trap(() => call($20, "run", [0, 23, 15]));
-
-// table_copy.wast:819
-assert_return(() => call($20, "test", [0]), 0);
-
-// table_copy.wast:820
-assert_return(() => call($20, "test", [1]), 1);
-
-// table_copy.wast:821
-assert_return(() => call($20, "test", [2]), 2);
-
-// table_copy.wast:822
-assert_return(() => call($20, "test", [3]), 3);
-
-// table_copy.wast:823
-assert_return(() => call($20, "test", [4]), 4);
-
-// table_copy.wast:824
-assert_return(() => call($20, "test", [5]), 5);
-
-// table_copy.wast:825
-assert_return(() => call($20, "test", [6]), 6);
-
-// table_copy.wast:826
-assert_return(() => call($20, "test", [7]), 7);
-
-// table_copy.wast:827
-assert_return(() => call($20, "test", [8]), 8);
-
-// table_copy.wast:828
-assert_trap(() => call($20, "test", [9]));
-
-// table_copy.wast:829
-assert_trap(() => call($20, "test", [10]));
-
-// table_copy.wast:830
-assert_trap(() => call($20, "test", [11]));
-
-// table_copy.wast:831
-assert_trap(() => call($20, "test", [12]));
-
-// table_copy.wast:832
-assert_trap(() => call($20, "test", [13]));
-
-// table_copy.wast:833
-assert_trap(() => call($20, "test", [14]));
-
-// table_copy.wast:834
-assert_trap(() => call($20, "test", [15]));
-
-// table_copy.wast:835
-assert_trap(() => call($20, "test", [16]));
-
-// table_copy.wast:836
-assert_trap(() => call($20, "test", [17]));
-
-// table_copy.wast:837
-assert_trap(() => call($20, "test", [18]));
-
-// table_copy.wast:838
-assert_trap(() => call($20, "test", [19]));
-
-// table_copy.wast:839
-assert_trap(() => call($20, "test", [20]));
-
-// table_copy.wast:840
-assert_trap(() => call($20, "test", [21]));
-
-// table_copy.wast:841
-assert_trap(() => call($20, "test", [22]));
-
-// table_copy.wast:842
-assert_return(() => call($20, "test", [23]), 0);
-
-// table_copy.wast:843
-assert_return(() => call($20, "test", [24]), 1);
-
-// table_copy.wast:844
-assert_return(() => call($20, "test", [25]), 2);
-
-// table_copy.wast:845
-assert_return(() => call($20, "test", [26]), 3);
-
-// table_copy.wast:846
-assert_return(() => call($20, "test", [27]), 4);
-
-// table_copy.wast:847
-assert_return(() => call($20, "test", [28]), 5);
-
-// table_copy.wast:848
-assert_return(() => call($20, "test", [29]), 6);
-
-// table_copy.wast:849
-assert_return(() => call($20, "test", [30]), 7);
-
-// table_copy.wast:850
-assert_return(() => call($20, "test", [31]), 8);
-
-// table_copy.wast:852
-let $21 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x0b\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:878
-assert_trap(() => call($21, "run", [24, 11, 16]));
-
-// table_copy.wast:880
-assert_trap(() => call($21, "test", [0]));
-
-// table_copy.wast:881
-assert_trap(() => call($21, "test", [1]));
-
-// table_copy.wast:882
-assert_trap(() => call($21, "test", [2]));
-
-// table_copy.wast:883
-assert_trap(() => call($21, "test", [3]));
-
-// table_copy.wast:884
-assert_trap(() => call($21, "test", [4]));
-
-// table_copy.wast:885
-assert_trap(() => call($21, "test", [5]));
-
-// table_copy.wast:886
-assert_trap(() => call($21, "test", [6]));
-
-// table_copy.wast:887
-assert_trap(() => call($21, "test", [7]));
-
-// table_copy.wast:888
-assert_trap(() => call($21, "test", [8]));
-
-// table_copy.wast:889
-assert_trap(() => call($21, "test", [9]));
-
-// table_copy.wast:890
-assert_trap(() => call($21, "test", [10]));
-
-// table_copy.wast:891
-assert_return(() => call($21, "test", [11]), 0);
-
-// table_copy.wast:892
-assert_return(() => call($21, "test", [12]), 1);
-
-// table_copy.wast:893
-assert_return(() => call($21, "test", [13]), 2);
-
-// table_copy.wast:894
-assert_return(() => call($21, "test", [14]), 3);
-
-// table_copy.wast:895
-assert_return(() => call($21, "test", [15]), 4);
-
-// table_copy.wast:896
-assert_return(() => call($21, "test", [16]), 5);
-
-// table_copy.wast:897
-assert_return(() => call($21, "test", [17]), 6);
-
-// table_copy.wast:898
-assert_return(() => call($21, "test", [18]), 7);
-
-// table_copy.wast:899
-assert_trap(() => call($21, "test", [19]));
-
-// table_copy.wast:900
-assert_trap(() => call($21, "test", [20]));
-
-// table_copy.wast:901
-assert_trap(() => call($21, "test", [21]));
-
-// table_copy.wast:902
-assert_trap(() => call($21, "test", [22]));
-
-// table_copy.wast:903
-assert_trap(() => call($21, "test", [23]));
-
-// table_copy.wast:904
-assert_trap(() => call($21, "test", [24]));
-
-// table_copy.wast:905
-assert_trap(() => call($21, "test", [25]));
-
-// table_copy.wast:906
-assert_trap(() => call($21, "test", [26]));
-
-// table_copy.wast:907
-assert_trap(() => call($21, "test", [27]));
-
-// table_copy.wast:908
-assert_trap(() => call($21, "test", [28]));
-
-// table_copy.wast:909
-assert_trap(() => call($21, "test", [29]));
-
-// table_copy.wast:910
-assert_trap(() => call($21, "test", [30]));
-
-// table_copy.wast:911
-assert_trap(() => call($21, "test", [31]));
-
-// table_copy.wast:913
-let $22 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x18\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:939
-assert_trap(() => call($22, "run", [11, 24, 16]));
-
-// table_copy.wast:941
-assert_trap(() => call($22, "test", [0]));
-
-// table_copy.wast:942
-assert_trap(() => call($22, "test", [1]));
-
-// table_copy.wast:943
-assert_trap(() => call($22, "test", [2]));
-
-// table_copy.wast:944
-assert_trap(() => call($22, "test", [3]));
-
-// table_copy.wast:945
-assert_trap(() => call($22, "test", [4]));
-
-// table_copy.wast:946
-assert_trap(() => call($22, "test", [5]));
-
-// table_copy.wast:947
-assert_trap(() => call($22, "test", [6]));
-
-// table_copy.wast:948
-assert_trap(() => call($22, "test", [7]));
-
-// table_copy.wast:949
-assert_trap(() => call($22, "test", [8]));
-
-// table_copy.wast:950
-assert_trap(() => call($22, "test", [9]));
-
-// table_copy.wast:951
-assert_trap(() => call($22, "test", [10]));
-
-// table_copy.wast:952
-assert_return(() => call($22, "test", [11]), 0);
-
-// table_copy.wast:953
-assert_return(() => call($22, "test", [12]), 1);
-
-// table_copy.wast:954
-assert_return(() => call($22, "test", [13]), 2);
-
-// table_copy.wast:955
-assert_return(() => call($22, "test", [14]), 3);
-
-// table_copy.wast:956
-assert_return(() => call($22, "test", [15]), 4);
-
-// table_copy.wast:957
-assert_return(() => call($22, "test", [16]), 5);
-
-// table_copy.wast:958
-assert_return(() => call($22, "test", [17]), 6);
-
-// table_copy.wast:959
-assert_return(() => call($22, "test", [18]), 7);
-
-// table_copy.wast:960
-assert_trap(() => call($22, "test", [19]));
-
-// table_copy.wast:961
-assert_trap(() => call($22, "test", [20]));
-
-// table_copy.wast:962
-assert_trap(() => call($22, "test", [21]));
-
-// table_copy.wast:963
-assert_trap(() => call($22, "test", [22]));
-
-// table_copy.wast:964
-assert_trap(() => call($22, "test", [23]));
-
-// table_copy.wast:965
-assert_return(() => call($22, "test", [24]), 0);
-
-// table_copy.wast:966
-assert_return(() => call($22, "test", [25]), 1);
-
-// table_copy.wast:967
-assert_return(() => call($22, "test", [26]), 2);
-
-// table_copy.wast:968
-assert_return(() => call($22, "test", [27]), 3);
-
-// table_copy.wast:969
-assert_return(() => call($22, "test", [28]), 4);
-
-// table_copy.wast:970
-assert_return(() => call($22, "test", [29]), 5);
-
-// table_copy.wast:971
-assert_return(() => call($22, "test", [30]), 6);
-
-// table_copy.wast:972
-assert_return(() => call($22, "test", [31]), 7);
-
-// table_copy.wast:974
-let $23 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x15\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:1000
-assert_trap(() => call($23, "run", [24, 21, 16]));
-
-// table_copy.wast:1002
-assert_trap(() => call($23, "test", [0]));
-
-// table_copy.wast:1003
-assert_trap(() => call($23, "test", [1]));
-
-// table_copy.wast:1004
-assert_trap(() => call($23, "test", [2]));
-
-// table_copy.wast:1005
-assert_trap(() => call($23, "test", [3]));
-
-// table_copy.wast:1006
-assert_trap(() => call($23, "test", [4]));
-
-// table_copy.wast:1007
-assert_trap(() => call($23, "test", [5]));
-
-// table_copy.wast:1008
-assert_trap(() => call($23, "test", [6]));
-
-// table_copy.wast:1009
-assert_trap(() => call($23, "test", [7]));
-
-// table_copy.wast:1010
-assert_trap(() => call($23, "test", [8]));
-
-// table_copy.wast:1011
-assert_trap(() => call($23, "test", [9]));
-
-// table_copy.wast:1012
-assert_trap(() => call($23, "test", [10]));
-
-// table_copy.wast:1013
-assert_trap(() => call($23, "test", [11]));
-
-// table_copy.wast:1014
-assert_trap(() => call($23, "test", [12]));
-
-// table_copy.wast:1015
-assert_trap(() => call($23, "test", [13]));
-
-// table_copy.wast:1016
-assert_trap(() => call($23, "test", [14]));
-
-// table_copy.wast:1017
-assert_trap(() => call($23, "test", [15]));
-
-// table_copy.wast:1018
-assert_trap(() => call($23, "test", [16]));
-
-// table_copy.wast:1019
-assert_trap(() => call($23, "test", [17]));
-
-// table_copy.wast:1020
-assert_trap(() => call($23, "test", [18]));
-
-// table_copy.wast:1021
-assert_trap(() => call($23, "test", [19]));
-
-// table_copy.wast:1022
-assert_trap(() => call($23, "test", [20]));
-
-// table_copy.wast:1023
-assert_return(() => call($23, "test", [21]), 0);
-
-// table_copy.wast:1024
-assert_return(() => call($23, "test", [22]), 1);
-
-// table_copy.wast:1025
-assert_return(() => call($23, "test", [23]), 2);
-
-// table_copy.wast:1026
-assert_return(() => call($23, "test", [24]), 3);
-
-// table_copy.wast:1027
-assert_return(() => call($23, "test", [25]), 4);
-
-// table_copy.wast:1028
-assert_return(() => call($23, "test", [26]), 5);
-
-// table_copy.wast:1029
-assert_return(() => call($23, "test", [27]), 6);
-
-// table_copy.wast:1030
-assert_return(() => call($23, "test", [28]), 7);
-
-// table_copy.wast:1031
-assert_trap(() => call($23, "test", [29]));
-
-// table_copy.wast:1032
-assert_trap(() => call($23, "test", [30]));
-
-// table_copy.wast:1033
-assert_trap(() => call($23, "test", [31]));
-
-// table_copy.wast:1035
-let $24 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x18\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:1061
-assert_trap(() => call($24, "run", [21, 24, 16]));
-
-// table_copy.wast:1063
-assert_trap(() => call($24, "test", [0]));
-
-// table_copy.wast:1064
-assert_trap(() => call($24, "test", [1]));
-
-// table_copy.wast:1065
-assert_trap(() => call($24, "test", [2]));
-
-// table_copy.wast:1066
-assert_trap(() => call($24, "test", [3]));
-
-// table_copy.wast:1067
-assert_trap(() => call($24, "test", [4]));
-
-// table_copy.wast:1068
-assert_trap(() => call($24, "test", [5]));
-
-// table_copy.wast:1069
-assert_trap(() => call($24, "test", [6]));
-
-// table_copy.wast:1070
-assert_trap(() => call($24, "test", [7]));
-
-// table_copy.wast:1071
-assert_trap(() => call($24, "test", [8]));
-
-// table_copy.wast:1072
-assert_trap(() => call($24, "test", [9]));
-
-// table_copy.wast:1073
-assert_trap(() => call($24, "test", [10]));
-
-// table_copy.wast:1074
-assert_trap(() => call($24, "test", [11]));
-
-// table_copy.wast:1075
-assert_trap(() => call($24, "test", [12]));
-
-// table_copy.wast:1076
-assert_trap(() => call($24, "test", [13]));
-
-// table_copy.wast:1077
-assert_trap(() => call($24, "test", [14]));
-
-// table_copy.wast:1078
-assert_trap(() => call($24, "test", [15]));
-
-// table_copy.wast:1079
-assert_trap(() => call($24, "test", [16]));
-
-// table_copy.wast:1080
-assert_trap(() => call($24, "test", [17]));
-
-// table_copy.wast:1081
-assert_trap(() => call($24, "test", [18]));
-
-// table_copy.wast:1082
-assert_trap(() => call($24, "test", [19]));
-
-// table_copy.wast:1083
-assert_trap(() => call($24, "test", [20]));
-
-// table_copy.wast:1084
-assert_return(() => call($24, "test", [21]), 0);
-
-// table_copy.wast:1085
-assert_return(() => call($24, "test", [22]), 1);
-
-// table_copy.wast:1086
-assert_return(() => call($24, "test", [23]), 2);
-
-// table_copy.wast:1087
-assert_return(() => call($24, "test", [24]), 3);
-
-// table_copy.wast:1088
-assert_return(() => call($24, "test", [25]), 4);
-
-// table_copy.wast:1089
-assert_return(() => call($24, "test", [26]), 5);
-
-// table_copy.wast:1090
-assert_return(() => call($24, "test", [27]), 6);
-
-// table_copy.wast:1091
-assert_return(() => call($24, "test", [28]), 7);
-
-// table_copy.wast:1092
-assert_return(() => call($24, "test", [29]), 5);
-
-// table_copy.wast:1093
-assert_return(() => call($24, "test", [30]), 6);
-
-// table_copy.wast:1094
-assert_return(() => call($24, "test", [31]), 7);
-
-// table_copy.wast:1096
-let $25 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x91\x80\x80\x80\x00\x01\x00\x41\x15\x0b\x0b\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:1122
-assert_trap(() => call($25, "run", [21, 21, 16]));
-
-// table_copy.wast:1124
-assert_trap(() => call($25, "test", [0]));
-
-// table_copy.wast:1125
-assert_trap(() => call($25, "test", [1]));
-
-// table_copy.wast:1126
-assert_trap(() => call($25, "test", [2]));
-
-// table_copy.wast:1127
-assert_trap(() => call($25, "test", [3]));
-
-// table_copy.wast:1128
-assert_trap(() => call($25, "test", [4]));
-
-// table_copy.wast:1129
-assert_trap(() => call($25, "test", [5]));
-
-// table_copy.wast:1130
-assert_trap(() => call($25, "test", [6]));
-
-// table_copy.wast:1131
-assert_trap(() => call($25, "test", [7]));
-
-// table_copy.wast:1132
-assert_trap(() => call($25, "test", [8]));
-
-// table_copy.wast:1133
-assert_trap(() => call($25, "test", [9]));
-
-// table_copy.wast:1134
-assert_trap(() => call($25, "test", [10]));
-
-// table_copy.wast:1135
-assert_trap(() => call($25, "test", [11]));
-
-// table_copy.wast:1136
-assert_trap(() => call($25, "test", [12]));
-
-// table_copy.wast:1137
-assert_trap(() => call($25, "test", [13]));
-
-// table_copy.wast:1138
-assert_trap(() => call($25, "test", [14]));
-
-// table_copy.wast:1139
-assert_trap(() => call($25, "test", [15]));
-
-// table_copy.wast:1140
-assert_trap(() => call($25, "test", [16]));
-
-// table_copy.wast:1141
-assert_trap(() => call($25, "test", [17]));
-
-// table_copy.wast:1142
-assert_trap(() => call($25, "test", [18]));
-
-// table_copy.wast:1143
-assert_trap(() => call($25, "test", [19]));
-
-// table_copy.wast:1144
-assert_trap(() => call($25, "test", [20]));
-
-// table_copy.wast:1145
-assert_return(() => call($25, "test", [21]), 0);
-
-// table_copy.wast:1146
-assert_return(() => call($25, "test", [22]), 1);
-
-// table_copy.wast:1147
-assert_return(() => call($25, "test", [23]), 2);
-
-// table_copy.wast:1148
-assert_return(() => call($25, "test", [24]), 3);
-
-// table_copy.wast:1149
-assert_return(() => call($25, "test", [25]), 4);
-
-// table_copy.wast:1150
-assert_return(() => call($25, "test", [26]), 5);
-
-// table_copy.wast:1151
-assert_return(() => call($25, "test", [27]), 6);
-
-// table_copy.wast:1152
-assert_return(() => call($25, "test", [28]), 7);
-
-// table_copy.wast:1153
-assert_return(() => call($25, "test", [29]), 8);
-
-// table_copy.wast:1154
-assert_return(() => call($25, "test", [30]), 9);
-
-// table_copy.wast:1155
-assert_return(() => call($25, "test", [31]), 10);
-
-// table_copy.wast:1157
-let $26 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x87\x80\x80\x80\x00\x01\x70\x01\x80\x01\x80\x01\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x97\x80\x80\x80\x00\x01\x00\x41\xf0\x00\x0b\x10\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:1183
-assert_trap(() => call($26, "run", [0, 112, -32]));
-
-// table_copy.wast:1185
-assert_return(() => call($26, "test", [0]), 0);
-
-// table_copy.wast:1186
-assert_return(() => call($26, "test", [1]), 1);
-
-// table_copy.wast:1187
-assert_return(() => call($26, "test", [2]), 2);
-
-// table_copy.wast:1188
-assert_return(() => call($26, "test", [3]), 3);
-
-// table_copy.wast:1189
-assert_return(() => call($26, "test", [4]), 4);
-
-// table_copy.wast:1190
-assert_return(() => call($26, "test", [5]), 5);
-
-// table_copy.wast:1191
-assert_return(() => call($26, "test", [6]), 6);
-
-// table_copy.wast:1192
-assert_return(() => call($26, "test", [7]), 7);
-
-// table_copy.wast:1193
-assert_return(() => call($26, "test", [8]), 8);
-
-// table_copy.wast:1194
-assert_return(() => call($26, "test", [9]), 9);
-
-// table_copy.wast:1195
-assert_return(() => call($26, "test", [10]), 10);
-
-// table_copy.wast:1196
-assert_return(() => call($26, "test", [11]), 11);
-
-// table_copy.wast:1197
-assert_return(() => call($26, "test", [12]), 12);
-
-// table_copy.wast:1198
-assert_return(() => call($26, "test", [13]), 13);
-
-// table_copy.wast:1199
-assert_return(() => call($26, "test", [14]), 14);
-
-// table_copy.wast:1200
-assert_return(() => call($26, "test", [15]), 15);
-
-// table_copy.wast:1201
-assert_trap(() => call($26, "test", [16]));
-
-// table_copy.wast:1202
-assert_trap(() => call($26, "test", [17]));
-
-// table_copy.wast:1203
-assert_trap(() => call($26, "test", [18]));
-
-// table_copy.wast:1204
-assert_trap(() => call($26, "test", [19]));
-
-// table_copy.wast:1205
-assert_trap(() => call($26, "test", [20]));
-
-// table_copy.wast:1206
-assert_trap(() => call($26, "test", [21]));
-
-// table_copy.wast:1207
-assert_trap(() => call($26, "test", [22]));
-
-// table_copy.wast:1208
-assert_trap(() => call($26, "test", [23]));
-
-// table_copy.wast:1209
-assert_trap(() => call($26, "test", [24]));
-
-// table_copy.wast:1210
-assert_trap(() => call($26, "test", [25]));
-
-// table_copy.wast:1211
-assert_trap(() => call($26, "test", [26]));
-
-// table_copy.wast:1212
-assert_trap(() => call($26, "test", [27]));
-
-// table_copy.wast:1213
-assert_trap(() => call($26, "test", [28]));
-
-// table_copy.wast:1214
-assert_trap(() => call($26, "test", [29]));
-
-// table_copy.wast:1215
-assert_trap(() => call($26, "test", [30]));
-
-// table_copy.wast:1216
-assert_trap(() => call($26, "test", [31]));
-
-// table_copy.wast:1217
-assert_trap(() => call($26, "test", [32]));
-
-// table_copy.wast:1218
-assert_trap(() => call($26, "test", [33]));
-
-// table_copy.wast:1219
-assert_trap(() => call($26, "test", [34]));
-
-// table_copy.wast:1220
-assert_trap(() => call($26, "test", [35]));
-
-// table_copy.wast:1221
-assert_trap(() => call($26, "test", [36]));
-
-// table_copy.wast:1222
-assert_trap(() => call($26, "test", [37]));
-
-// table_copy.wast:1223
-assert_trap(() => call($26, "test", [38]));
-
-// table_copy.wast:1224
-assert_trap(() => call($26, "test", [39]));
-
-// table_copy.wast:1225
-assert_trap(() => call($26, "test", [40]));
-
-// table_copy.wast:1226
-assert_trap(() => call($26, "test", [41]));
-
-// table_copy.wast:1227
-assert_trap(() => call($26, "test", [42]));
-
-// table_copy.wast:1228
-assert_trap(() => call($26, "test", [43]));
-
-// table_copy.wast:1229
-assert_trap(() => call($26, "test", [44]));
-
-// table_copy.wast:1230
-assert_trap(() => call($26, "test", [45]));
-
-// table_copy.wast:1231
-assert_trap(() => call($26, "test", [46]));
-
-// table_copy.wast:1232
-assert_trap(() => call($26, "test", [47]));
-
-// table_copy.wast:1233
-assert_trap(() => call($26, "test", [48]));
-
-// table_copy.wast:1234
-assert_trap(() => call($26, "test", [49]));
-
-// table_copy.wast:1235
-assert_trap(() => call($26, "test", [50]));
-
-// table_copy.wast:1236
-assert_trap(() => call($26, "test", [51]));
-
-// table_copy.wast:1237
-assert_trap(() => call($26, "test", [52]));
-
-// table_copy.wast:1238
-assert_trap(() => call($26, "test", [53]));
-
-// table_copy.wast:1239
-assert_trap(() => call($26, "test", [54]));
-
-// table_copy.wast:1240
-assert_trap(() => call($26, "test", [55]));
-
-// table_copy.wast:1241
-assert_trap(() => call($26, "test", [56]));
-
-// table_copy.wast:1242
-assert_trap(() => call($26, "test", [57]));
-
-// table_copy.wast:1243
-assert_trap(() => call($26, "test", [58]));
-
-// table_copy.wast:1244
-assert_trap(() => call($26, "test", [59]));
-
-// table_copy.wast:1245
-assert_trap(() => call($26, "test", [60]));
-
-// table_copy.wast:1246
-assert_trap(() => call($26, "test", [61]));
-
-// table_copy.wast:1247
-assert_trap(() => call($26, "test", [62]));
-
-// table_copy.wast:1248
-assert_trap(() => call($26, "test", [63]));
-
-// table_copy.wast:1249
-assert_trap(() => call($26, "test", [64]));
-
-// table_copy.wast:1250
-assert_trap(() => call($26, "test", [65]));
-
-// table_copy.wast:1251
-assert_trap(() => call($26, "test", [66]));
-
-// table_copy.wast:1252
-assert_trap(() => call($26, "test", [67]));
-
-// table_copy.wast:1253
-assert_trap(() => call($26, "test", [68]));
-
-// table_copy.wast:1254
-assert_trap(() => call($26, "test", [69]));
-
-// table_copy.wast:1255
-assert_trap(() => call($26, "test", [70]));
-
-// table_copy.wast:1256
-assert_trap(() => call($26, "test", [71]));
-
-// table_copy.wast:1257
-assert_trap(() => call($26, "test", [72]));
-
-// table_copy.wast:1258
-assert_trap(() => call($26, "test", [73]));
-
-// table_copy.wast:1259
-assert_trap(() => call($26, "test", [74]));
-
-// table_copy.wast:1260
-assert_trap(() => call($26, "test", [75]));
-
-// table_copy.wast:1261
-assert_trap(() => call($26, "test", [76]));
-
-// table_copy.wast:1262
-assert_trap(() => call($26, "test", [77]));
-
-// table_copy.wast:1263
-assert_trap(() => call($26, "test", [78]));
-
-// table_copy.wast:1264
-assert_trap(() => call($26, "test", [79]));
-
-// table_copy.wast:1265
-assert_trap(() => call($26, "test", [80]));
-
-// table_copy.wast:1266
-assert_trap(() => call($26, "test", [81]));
-
-// table_copy.wast:1267
-assert_trap(() => call($26, "test", [82]));
-
-// table_copy.wast:1268
-assert_trap(() => call($26, "test", [83]));
-
-// table_copy.wast:1269
-assert_trap(() => call($26, "test", [84]));
-
-// table_copy.wast:1270
-assert_trap(() => call($26, "test", [85]));
-
-// table_copy.wast:1271
-assert_trap(() => call($26, "test", [86]));
-
-// table_copy.wast:1272
-assert_trap(() => call($26, "test", [87]));
-
-// table_copy.wast:1273
-assert_trap(() => call($26, "test", [88]));
-
-// table_copy.wast:1274
-assert_trap(() => call($26, "test", [89]));
-
-// table_copy.wast:1275
-assert_trap(() => call($26, "test", [90]));
-
-// table_copy.wast:1276
-assert_trap(() => call($26, "test", [91]));
-
-// table_copy.wast:1277
-assert_trap(() => call($26, "test", [92]));
-
-// table_copy.wast:1278
-assert_trap(() => call($26, "test", [93]));
-
-// table_copy.wast:1279
-assert_trap(() => call($26, "test", [94]));
-
-// table_copy.wast:1280
-assert_trap(() => call($26, "test", [95]));
-
-// table_copy.wast:1281
-assert_trap(() => call($26, "test", [96]));
-
-// table_copy.wast:1282
-assert_trap(() => call($26, "test", [97]));
-
-// table_copy.wast:1283
-assert_trap(() => call($26, "test", [98]));
-
-// table_copy.wast:1284
-assert_trap(() => call($26, "test", [99]));
-
-// table_copy.wast:1285
-assert_trap(() => call($26, "test", [100]));
-
-// table_copy.wast:1286
-assert_trap(() => call($26, "test", [101]));
-
-// table_copy.wast:1287
-assert_trap(() => call($26, "test", [102]));
-
-// table_copy.wast:1288
-assert_trap(() => call($26, "test", [103]));
-
-// table_copy.wast:1289
-assert_trap(() => call($26, "test", [104]));
-
-// table_copy.wast:1290
-assert_trap(() => call($26, "test", [105]));
-
-// table_copy.wast:1291
-assert_trap(() => call($26, "test", [106]));
-
-// table_copy.wast:1292
-assert_trap(() => call($26, "test", [107]));
-
-// table_copy.wast:1293
-assert_trap(() => call($26, "test", [108]));
-
-// table_copy.wast:1294
-assert_trap(() => call($26, "test", [109]));
-
-// table_copy.wast:1295
-assert_trap(() => call($26, "test", [110]));
-
-// table_copy.wast:1296
-assert_trap(() => call($26, "test", [111]));
-
-// table_copy.wast:1297
-assert_return(() => call($26, "test", [112]), 0);
-
-// table_copy.wast:1298
-assert_return(() => call($26, "test", [113]), 1);
-
-// table_copy.wast:1299
-assert_return(() => call($26, "test", [114]), 2);
-
-// table_copy.wast:1300
-assert_return(() => call($26, "test", [115]), 3);
-
-// table_copy.wast:1301
-assert_return(() => call($26, "test", [116]), 4);
-
-// table_copy.wast:1302
-assert_return(() => call($26, "test", [117]), 5);
-
-// table_copy.wast:1303
-assert_return(() => call($26, "test", [118]), 6);
-
-// table_copy.wast:1304
-assert_return(() => call($26, "test", [119]), 7);
-
-// table_copy.wast:1305
-assert_return(() => call($26, "test", [120]), 8);
-
-// table_copy.wast:1306
-assert_return(() => call($26, "test", [121]), 9);
-
-// table_copy.wast:1307
-assert_return(() => call($26, "test", [122]), 10);
-
-// table_copy.wast:1308
-assert_return(() => call($26, "test", [123]), 11);
-
-// table_copy.wast:1309
-assert_return(() => call($26, "test", [124]), 12);
-
-// table_copy.wast:1310
-assert_return(() => call($26, "test", [125]), 13);
-
-// table_copy.wast:1311
-assert_return(() => call($26, "test", [126]), 14);
-
-// table_copy.wast:1312
-assert_return(() => call($26, "test", [127]), 15);
-
-// table_copy.wast:1314
-let $27 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x87\x80\x80\x80\x00\x01\x70\x01\x80\x01\x80\x01\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x96\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x10\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
-
-// table_copy.wast:1340
-assert_trap(() => call($27, "run", [112, 0, -32]));
-
-// table_copy.wast:1342
-assert_return(() => call($27, "test", [0]), 0);
-
-// table_copy.wast:1343
-assert_return(() => call($27, "test", [1]), 1);
-
-// table_copy.wast:1344
-assert_return(() => call($27, "test", [2]), 2);
-
-// table_copy.wast:1345
-assert_return(() => call($27, "test", [3]), 3);
-
-// table_copy.wast:1346
-assert_return(() => call($27, "test", [4]), 4);
-
-// table_copy.wast:1347
-assert_return(() => call($27, "test", [5]), 5);
-
-// table_copy.wast:1348
-assert_return(() => call($27, "test", [6]), 6);
-
-// table_copy.wast:1349
-assert_return(() => call($27, "test", [7]), 7);
-
-// table_copy.wast:1350
-assert_return(() => call($27, "test", [8]), 8);
-
-// table_copy.wast:1351
-assert_return(() => call($27, "test", [9]), 9);
-
-// table_copy.wast:1352
-assert_return(() => call($27, "test", [10]), 10);
-
-// table_copy.wast:1353
-assert_return(() => call($27, "test", [11]), 11);
-
-// table_copy.wast:1354
-assert_return(() => call($27, "test", [12]), 12);
-
-// table_copy.wast:1355
-assert_return(() => call($27, "test", [13]), 13);
-
-// table_copy.wast:1356
-assert_return(() => call($27, "test", [14]), 14);
-
-// table_copy.wast:1357
-assert_return(() => call($27, "test", [15]), 15);
-
-// table_copy.wast:1358
-assert_trap(() => call($27, "test", [16]));
-
-// table_copy.wast:1359
-assert_trap(() => call($27, "test", [17]));
-
-// table_copy.wast:1360
-assert_trap(() => call($27, "test", [18]));
-
-// table_copy.wast:1361
-assert_trap(() => call($27, "test", [19]));
-
-// table_copy.wast:1362
-assert_trap(() => call($27, "test", [20]));
-
-// table_copy.wast:1363
-assert_trap(() => call($27, "test", [21]));
-
-// table_copy.wast:1364
-assert_trap(() => call($27, "test", [22]));
-
-// table_copy.wast:1365
-assert_trap(() => call($27, "test", [23]));
-
-// table_copy.wast:1366
-assert_trap(() => call($27, "test", [24]));
-
-// table_copy.wast:1367
-assert_trap(() => call($27, "test", [25]));
-
-// table_copy.wast:1368
-assert_trap(() => call($27, "test", [26]));
-
-// table_copy.wast:1369
-assert_trap(() => call($27, "test", [27]));
-
-// table_copy.wast:1370
-assert_trap(() => call($27, "test", [28]));
-
-// table_copy.wast:1371
-assert_trap(() => call($27, "test", [29]));
-
-// table_copy.wast:1372
-assert_trap(() => call($27, "test", [30]));
-
-// table_copy.wast:1373
-assert_trap(() => call($27, "test", [31]));
-
-// table_copy.wast:1374
-assert_trap(() => call($27, "test", [32]));
-
-// table_copy.wast:1375
-assert_trap(() => call($27, "test", [33]));
-
-// table_copy.wast:1376
-assert_trap(() => call($27, "test", [34]));
-
-// table_copy.wast:1377
-assert_trap(() => call($27, "test", [35]));
-
-// table_copy.wast:1378
-assert_trap(() => call($27, "test", [36]));
-
-// table_copy.wast:1379
-assert_trap(() => call($27, "test", [37]));
-
-// table_copy.wast:1380
-assert_trap(() => call($27, "test", [38]));
-
-// table_copy.wast:1381
-assert_trap(() => call($27, "test", [39]));
-
-// table_copy.wast:1382
-assert_trap(() => call($27, "test", [40]));
-
-// table_copy.wast:1383
-assert_trap(() => call($27, "test", [41]));
-
-// table_copy.wast:1384
-assert_trap(() => call($27, "test", [42]));
-
-// table_copy.wast:1385
-assert_trap(() => call($27, "test", [43]));
-
-// table_copy.wast:1386
-assert_trap(() => call($27, "test", [44]));
-
-// table_copy.wast:1387
-assert_trap(() => call($27, "test", [45]));
-
-// table_copy.wast:1388
-assert_trap(() => call($27, "test", [46]));
-
-// table_copy.wast:1389
-assert_trap(() => call($27, "test", [47]));
-
-// table_copy.wast:1390
-assert_trap(() => call($27, "test", [48]));
-
-// table_copy.wast:1391
-assert_trap(() => call($27, "test", [49]));
-
-// table_copy.wast:1392
-assert_trap(() => call($27, "test", [50]));
-
-// table_copy.wast:1393
-assert_trap(() => call($27, "test", [51]));
-
-// table_copy.wast:1394
-assert_trap(() => call($27, "test", [52]));
-
-// table_copy.wast:1395
-assert_trap(() => call($27, "test", [53]));
-
-// table_copy.wast:1396
-assert_trap(() => call($27, "test", [54]));
-
-// table_copy.wast:1397
-assert_trap(() => call($27, "test", [55]));
-
-// table_copy.wast:1398
-assert_trap(() => call($27, "test", [56]));
-
-// table_copy.wast:1399
-assert_trap(() => call($27, "test", [57]));
-
-// table_copy.wast:1400
-assert_trap(() => call($27, "test", [58]));
-
-// table_copy.wast:1401
-assert_trap(() => call($27, "test", [59]));
-
-// table_copy.wast:1402
-assert_trap(() => call($27, "test", [60]));
-
-// table_copy.wast:1403
-assert_trap(() => call($27, "test", [61]));
-
-// table_copy.wast:1404
-assert_trap(() => call($27, "test", [62]));
-
-// table_copy.wast:1405
-assert_trap(() => call($27, "test", [63]));
-
-// table_copy.wast:1406
-assert_trap(() => call($27, "test", [64]));
-
-// table_copy.wast:1407
-assert_trap(() => call($27, "test", [65]));
-
-// table_copy.wast:1408
-assert_trap(() => call($27, "test", [66]));
-
-// table_copy.wast:1409
-assert_trap(() => call($27, "test", [67]));
-
-// table_copy.wast:1410
-assert_trap(() => call($27, "test", [68]));
-
-// table_copy.wast:1411
-assert_trap(() => call($27, "test", [69]));
-
-// table_copy.wast:1412
-assert_trap(() => call($27, "test", [70]));
-
-// table_copy.wast:1413
-assert_trap(() => call($27, "test", [71]));
-
-// table_copy.wast:1414
-assert_trap(() => call($27, "test", [72]));
-
-// table_copy.wast:1415
-assert_trap(() => call($27, "test", [73]));
-
-// table_copy.wast:1416
-assert_trap(() => call($27, "test", [74]));
-
-// table_copy.wast:1417
-assert_trap(() => call($27, "test", [75]));
-
-// table_copy.wast:1418
-assert_trap(() => call($27, "test", [76]));
-
-// table_copy.wast:1419
-assert_trap(() => call($27, "test", [77]));
-
-// table_copy.wast:1420
-assert_trap(() => call($27, "test", [78]));
-
-// table_copy.wast:1421
-assert_trap(() => call($27, "test", [79]));
-
-// table_copy.wast:1422
-assert_trap(() => call($27, "test", [80]));
-
-// table_copy.wast:1423
-assert_trap(() => call($27, "test", [81]));
-
-// table_copy.wast:1424
-assert_trap(() => call($27, "test", [82]));
-
-// table_copy.wast:1425
-assert_trap(() => call($27, "test", [83]));
-
-// table_copy.wast:1426
-assert_trap(() => call($27, "test", [84]));
-
-// table_copy.wast:1427
-assert_trap(() => call($27, "test", [85]));
-
-// table_copy.wast:1428
-assert_trap(() => call($27, "test", [86]));
-
-// table_copy.wast:1429
-assert_trap(() => call($27, "test", [87]));
-
-// table_copy.wast:1430
-assert_trap(() => call($27, "test", [88]));
-
-// table_copy.wast:1431
-assert_trap(() => call($27, "test", [89]));
-
-// table_copy.wast:1432
-assert_trap(() => call($27, "test", [90]));
-
-// table_copy.wast:1433
-assert_trap(() => call($27, "test", [91]));
-
-// table_copy.wast:1434
-assert_trap(() => call($27, "test", [92]));
-
-// table_copy.wast:1435
-assert_trap(() => call($27, "test", [93]));
-
-// table_copy.wast:1436
-assert_trap(() => call($27, "test", [94]));
-
-// table_copy.wast:1437
-assert_trap(() => call($27, "test", [95]));
-
-// table_copy.wast:1438
-assert_trap(() => call($27, "test", [96]));
-
-// table_copy.wast:1439
-assert_trap(() => call($27, "test", [97]));
-
-// table_copy.wast:1440
-assert_trap(() => call($27, "test", [98]));
-
-// table_copy.wast:1441
-assert_trap(() => call($27, "test", [99]));
-
-// table_copy.wast:1442
-assert_trap(() => call($27, "test", [100]));
-
-// table_copy.wast:1443
-assert_trap(() => call($27, "test", [101]));
-
-// table_copy.wast:1444
-assert_trap(() => call($27, "test", [102]));
-
-// table_copy.wast:1445
-assert_trap(() => call($27, "test", [103]));
-
-// table_copy.wast:1446
-assert_trap(() => call($27, "test", [104]));
-
-// table_copy.wast:1447
-assert_trap(() => call($27, "test", [105]));
-
-// table_copy.wast:1448
-assert_trap(() => call($27, "test", [106]));
-
-// table_copy.wast:1449
-assert_trap(() => call($27, "test", [107]));
-
-// table_copy.wast:1450
-assert_trap(() => call($27, "test", [108]));
-
-// table_copy.wast:1451
-assert_trap(() => call($27, "test", [109]));
-
-// table_copy.wast:1452
-assert_trap(() => call($27, "test", [110]));
-
-// table_copy.wast:1453
-assert_trap(() => call($27, "test", [111]));
-
-// table_copy.wast:1454
-assert_trap(() => call($27, "test", [112]));
-
-// table_copy.wast:1455
-assert_trap(() => call($27, "test", [113]));
-
-// table_copy.wast:1456
-assert_trap(() => call($27, "test", [114]));
-
-// table_copy.wast:1457
-assert_trap(() => call($27, "test", [115]));
-
-// table_copy.wast:1458
-assert_trap(() => call($27, "test", [116]));
-
-// table_copy.wast:1459
-assert_trap(() => call($27, "test", [117]));
-
-// table_copy.wast:1460
-assert_trap(() => call($27, "test", [118]));
-
-// table_copy.wast:1461
-assert_trap(() => call($27, "test", [119]));
-
-// table_copy.wast:1462
-assert_trap(() => call($27, "test", [120]));
-
-// table_copy.wast:1463
-assert_trap(() => call($27, "test", [121]));
-
-// table_copy.wast:1464
-assert_trap(() => call($27, "test", [122]));
-
-// table_copy.wast:1465
-assert_trap(() => call($27, "test", [123]));
-
-// table_copy.wast:1466
-assert_trap(() => call($27, "test", [124]));
-
-// table_copy.wast:1467
-assert_trap(() => call($27, "test", [125]));
-
-// table_copy.wast:1468
-assert_trap(() => call($27, "test", [126]));
-
-// table_copy.wast:1469
-assert_trap(() => call($27, "test", [127]));
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast
deleted file mode 100644
index 11012a317a..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast
+++ /dev/null
@@ -1,1602 +0,0 @@
-;;
-;; Generated by ../meta/generate_table_init.js
-;;
-
-(module
- (func (export "ef0") (result i32) (i32.const 0))
- (func (export "ef1") (result i32) (i32.const 1))
- (func (export "ef2") (result i32) (i32.const 2))
- (func (export "ef3") (result i32) (i32.const 3))
- (func (export "ef4") (result i32) (i32.const 4))
-)
-(register "a")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.init 1 (i32.const 7) (i32.const 0) (i32.const 4)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 7)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 8)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 9)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 10)) (i32.const 8))
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
-(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.init 3 (i32.const 15) (i32.const 1) (i32.const 3)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 9))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 17)) (i32.const 7))
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-
-(module
- (type (func (result i32))) ;; type #0
- (import "a" "ef0" (func (result i32))) ;; index 0
- (import "a" "ef1" (func (result i32)))
- (import "a" "ef2" (func (result i32)))
- (import "a" "ef3" (func (result i32)))
- (import "a" "ef4" (func (result i32))) ;; index 4
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 5)) ;; index 5
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9)) ;; index 9
- (func (export "test")
- (table.init 1 (i32.const 7) (i32.const 0) (i32.const 4))
- (elem.drop 1)
- (table.init 3 (i32.const 15) (i32.const 1) (i32.const 3))
- (elem.drop 3)
- (table.copy (i32.const 20) (i32.const 15) (i32.const 5))
- (table.copy (i32.const 21) (i32.const 29) (i32.const 1))
- (table.copy (i32.const 24) (i32.const 10) (i32.const 1))
- (table.copy (i32.const 13) (i32.const 11) (i32.const 4))
- (table.copy (i32.const 19) (i32.const 20) (i32.const 5)))
- (func (export "check") (param i32) (result i32)
- (call_indirect (type 0) (local.get 0)))
-)
-
-(invoke "test")
-(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
-(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
-(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
-(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 7)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 8)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 9)) (i32.const 1))
-(assert_return (invoke "check" (i32.const 10)) (i32.const 8))
-(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
-(assert_trap (invoke "check" (i32.const 13)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 14)) (i32.const 7))
-(assert_return (invoke "check" (i32.const 15)) (i32.const 5))
-(assert_return (invoke "check" (i32.const 16)) (i32.const 2))
-(assert_return (invoke "check" (i32.const 17)) (i32.const 7))
-(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 19)) (i32.const 9))
-(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 21)) (i32.const 7))
-(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
-(assert_return (invoke "check" (i32.const 23)) (i32.const 8))
-(assert_return (invoke "check" (i32.const 24)) (i32.const 8))
-(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
-(assert_invalid
- (module
- (func (export "test")
- (elem.drop 0)))
- "unknown table 0")
-
-(assert_invalid
- (module
- (func (export "test")
- (table.init 0 (i32.const 12) (i32.const 1) (i32.const 1))))
- "unknown table 0")
-
-(assert_invalid
- (module
- (elem passive funcref 0)
- (func (result i32) (i32.const 0))
- (func (export "test")
- (elem.drop 4)))
- "unknown table 0")
-
-(assert_invalid
- (module
- (elem passive funcref 0)
- (func (result i32) (i32.const 0))
- (func (export "test")
- (table.init 4 (i32.const 12) (i32.const 1) (i32.const 1))))
- "unknown table 0")
-
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (elem.drop 2)
- ))
-(assert_trap (invoke "test") "element segment dropped")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.init 2 (i32.const 12) (i32.const 1) (i32.const 1))
- ))
-(assert_trap (invoke "test") "element segment dropped")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.init 1 (i32.const 12) (i32.const 1) (i32.const 1))
- (table.init 1 (i32.const 21) (i32.const 1) (i32.const 1))))
-(invoke "test")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (elem.drop 1)
- (elem.drop 1)))
-(assert_trap (invoke "test") "element segment dropped")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (elem.drop 1)
- (table.init 1 (i32.const 12) (i32.const 1) (i32.const 1))))
-(assert_trap (invoke "test") "element segment dropped")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.init 1 (i32.const 12) (i32.const 0) (i32.const 5))
- ))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.init 1 (i32.const 12) (i32.const 2) (i32.const 3))
- ))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.init 1 (i32.const 28) (i32.const 1) (i32.const 3))
- ))
-(assert_trap (invoke "test") "out of bounds")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.init 1 (i32.const 12) (i32.const 4) (i32.const 0))
- ))
-(invoke "test")
-
-(module
- (table 30 30 funcref)
- (elem (i32.const 2) 3 1 4 1)
- (elem passive funcref 2 7 1 8)
- (elem (i32.const 12) 7 5 2 3 6)
- (elem passive funcref 5 9 2 7 6)
- (func (result i32) (i32.const 0))
- (func (result i32) (i32.const 1))
- (func (result i32) (i32.const 2))
- (func (result i32) (i32.const 3))
- (func (result i32) (i32.const 4))
- (func (result i32) (i32.const 5))
- (func (result i32) (i32.const 6))
- (func (result i32) (i32.const 7))
- (func (result i32) (i32.const 8))
- (func (result i32) (i32.const 9))
- (func (export "test")
- (table.init 1 (i32.const 30) (i32.const 2) (i32.const 0))
- ))
-(invoke "test")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (i32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (i32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (i32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (f32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (f32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (f32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (f32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (i64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (i64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (i64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (i64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (f64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (f64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (f64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i32.const 1) (f64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (i32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (i32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (i32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (i32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (f32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (f32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (f32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (f32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (i64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (i64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (i64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (i64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (f64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (f64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (f64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f32.const 1) (f64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (i32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (i32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (i32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (i32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (f32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (f32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (f32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (f32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (i64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (i64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (i64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (i64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (f64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (f64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (f64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (i64.const 1) (f64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (i32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (i32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (i32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (i32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (f32.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (f32.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (f32.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (f32.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (i64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (i64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (i64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (i64.const 1) (f64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (f64.const 1) (i32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (f64.const 1) (f32.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (f64.const 1) (i64.const 1))))
- "type mismatch")
-
-(assert_invalid
- (module
- (table 10 funcref)
- (elem passive funcref $f0 $f0 $f0)
- (func $f0)
- (func (export "test")
- (table.init 0 (f64.const 1) (f64.const 1) (f64.const 1))))
- "type mismatch")
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $offs i32) (param $len i32)
- (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-(assert_trap (invoke "run" (i32.const 24) (i32.const 16)) "out of bounds")
-(assert_return (invoke "test" (i32.const 24)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 25)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-
-(module
- (type (func (result i32)))
- (table 32 64 funcref)
- (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $offs i32) (param $len i32)
- (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-(assert_trap (invoke "run" (i32.const 25) (i32.const 16)) "out of bounds")
-(assert_return (invoke "test" (i32.const 25)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 26)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 27)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 28)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 29)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 30)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 31)) (i32.const 6))
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
-
-(module
- (type (func (result i32)))
- (table 160 320 funcref)
- (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $offs i32) (param $len i32)
- (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-(assert_trap (invoke "run" (i32.const 96) (i32.const 32)) "out of bounds")
-(assert_return (invoke "test" (i32.const 96)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 97)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 98)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 99)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 100)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 101)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 102)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 103)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 104)) (i32.const 8))
-(assert_return (invoke "test" (i32.const 105)) (i32.const 9))
-(assert_return (invoke "test" (i32.const 106)) (i32.const 10))
-(assert_return (invoke "test" (i32.const 107)) (i32.const 11))
-(assert_return (invoke "test" (i32.const 108)) (i32.const 12))
-(assert_return (invoke "test" (i32.const 109)) (i32.const 13))
-(assert_return (invoke "test" (i32.const 110)) (i32.const 14))
-(assert_return (invoke "test" (i32.const 111)) (i32.const 15))
-(assert_trap (invoke "test" (i32.const 112)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 113)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 114)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 115)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 116)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 117)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 118)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 119)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 120)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 121)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 122)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 123)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 124)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 125)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 126)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 127)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 128)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 129)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 130)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 131)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 132)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 133)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 134)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 135)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 136)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 137)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 138)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 139)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 140)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 141)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 142)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 143)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 144)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 145)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 146)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 147)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 148)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 149)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 150)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 151)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 152)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 153)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 154)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 155)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 156)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 157)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 158)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 159)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 48)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 49)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 50)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 51)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 52)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 53)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 54)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 55)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 56)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 57)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 58)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 59)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 60)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 61)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 62)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 63)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 64)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 65)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 66)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 67)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 68)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 69)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 70)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 71)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 72)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 73)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 74)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 75)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 76)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 77)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 78)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 79)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 80)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 81)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 82)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 83)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 84)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 85)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 86)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 87)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 88)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 89)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 90)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 91)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 92)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 93)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 94)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 95)) "uninitialized element")
-
-(module
- (type (func (result i32)))
- (table 160 320 funcref)
- (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $offs i32) (param $len i32)
- (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-(assert_trap (invoke "run" (i32.const 97) (i32.const 31)) "out of bounds")
-(assert_return (invoke "test" (i32.const 97)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 98)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 99)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 100)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 101)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 102)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 103)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 104)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 105)) (i32.const 8))
-(assert_return (invoke "test" (i32.const 106)) (i32.const 9))
-(assert_return (invoke "test" (i32.const 107)) (i32.const 10))
-(assert_return (invoke "test" (i32.const 108)) (i32.const 11))
-(assert_return (invoke "test" (i32.const 109)) (i32.const 12))
-(assert_return (invoke "test" (i32.const 110)) (i32.const 13))
-(assert_return (invoke "test" (i32.const 111)) (i32.const 14))
-(assert_return (invoke "test" (i32.const 112)) (i32.const 15))
-(assert_trap (invoke "test" (i32.const 113)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 114)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 115)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 116)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 117)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 118)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 119)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 120)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 121)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 122)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 123)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 124)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 125)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 126)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 127)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 128)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 129)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 130)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 131)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 132)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 133)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 134)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 135)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 136)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 137)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 138)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 139)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 140)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 141)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 142)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 143)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 144)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 145)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 146)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 147)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 148)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 149)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 150)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 151)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 152)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 153)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 154)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 155)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 156)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 157)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 158)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 159)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 48)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 49)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 50)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 51)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 52)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 53)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 54)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 55)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 56)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 57)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 58)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 59)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 60)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 61)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 62)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 63)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 64)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 65)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 66)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 67)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 68)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 69)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 70)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 71)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 72)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 73)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 74)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 75)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 76)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 77)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 78)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 79)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 80)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 81)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 82)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 83)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 84)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 85)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 86)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 87)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 88)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 89)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 90)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 91)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 92)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 93)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 94)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 95)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 96)) "uninitialized element")
-
-(module
- (type (func (result i32)))
- (table 64 64 funcref)
- (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $offs i32) (param $len i32)
- (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
-(assert_trap (invoke "run" (i32.const 48) (i32.const 4294967280)) "out of bounds")
-(assert_return (invoke "test" (i32.const 48)) (i32.const 0))
-(assert_return (invoke "test" (i32.const 49)) (i32.const 1))
-(assert_return (invoke "test" (i32.const 50)) (i32.const 2))
-(assert_return (invoke "test" (i32.const 51)) (i32.const 3))
-(assert_return (invoke "test" (i32.const 52)) (i32.const 4))
-(assert_return (invoke "test" (i32.const 53)) (i32.const 5))
-(assert_return (invoke "test" (i32.const 54)) (i32.const 6))
-(assert_return (invoke "test" (i32.const 55)) (i32.const 7))
-(assert_return (invoke "test" (i32.const 56)) (i32.const 8))
-(assert_return (invoke "test" (i32.const 57)) (i32.const 9))
-(assert_return (invoke "test" (i32.const 58)) (i32.const 10))
-(assert_return (invoke "test" (i32.const 59)) (i32.const 11))
-(assert_return (invoke "test" (i32.const 60)) (i32.const 12))
-(assert_return (invoke "test" (i32.const 61)) (i32.const 13))
-(assert_return (invoke "test" (i32.const 62)) (i32.const 14))
-(assert_return (invoke "test" (i32.const 63)) (i32.const 15))
-(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
-(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
-
-(module
- (type (func (result i32)))
- (table 16 16 funcref)
- (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
- (func $f0 (export "f0") (result i32) (i32.const 0))
- (func $f1 (export "f1") (result i32) (i32.const 1))
- (func $f2 (export "f2") (result i32) (i32.const 2))
- (func $f3 (export "f3") (result i32) (i32.const 3))
- (func $f4 (export "f4") (result i32) (i32.const 4))
- (func $f5 (export "f5") (result i32) (i32.const 5))
- (func $f6 (export "f6") (result i32) (i32.const 6))
- (func $f7 (export "f7") (result i32) (i32.const 7))
- (func $f8 (export "f8") (result i32) (i32.const 8))
- (func $f9 (export "f9") (result i32) (i32.const 9))
- (func $f10 (export "f10") (result i32) (i32.const 10))
- (func $f11 (export "f11") (result i32) (i32.const 11))
- (func $f12 (export "f12") (result i32) (i32.const 12))
- (func $f13 (export "f13") (result i32) (i32.const 13))
- (func $f14 (export "f14") (result i32) (i32.const 14))
- (func $f15 (export "f15") (result i32) (i32.const 15))
- (func (export "test") (param $n i32) (result i32)
- (call_indirect (type 0) (local.get $n)))
- (func (export "run") (param $offs i32) (param $len i32)
- (table.init 0 (local.get $offs) (i32.const 8) (local.get $len))))
-(assert_trap (invoke "run" (i32.const 0) (i32.const 4294967292)) "out of bounds")
-(assert_return (invoke "test" (i32.const 0)) (i32.const 8))
-(assert_return (invoke "test" (i32.const 1)) (i32.const 9))
-(assert_return (invoke "test" (i32.const 2)) (i32.const 10))
-(assert_return (invoke "test" (i32.const 3)) (i32.const 11))
-(assert_return (invoke "test" (i32.const 4)) (i32.const 12))
-(assert_return (invoke "test" (i32.const 5)) (i32.const 13))
-(assert_return (invoke "test" (i32.const 6)) (i32.const 14))
-(assert_return (invoke "test" (i32.const 7)) (i32.const 15))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js
deleted file mode 100644
index bbbc512f8e..0000000000
--- a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js
+++ /dev/null
@@ -1,2096 +0,0 @@
-
-'use strict';
-
-let spectest = {
- print: console.log.bind(console),
- print_i32: console.log.bind(console),
- print_i32_f32: console.log.bind(console),
- print_f64_f64: console.log.bind(console),
- print_f32: console.log.bind(console),
- print_f64: console.log.bind(console),
- global_i32: 666,
- global_f32: 666,
- global_f64: 666,
- table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
- memory: new WebAssembly.Memory({initial: 1, maximum: 2})
-};
-let handler = {
- get(target, prop) {
- return (prop in target) ? target[prop] : {};
- }
-};
-let registry = new Proxy({spectest}, handler);
-
-function register(name, instance) {
- registry[name] = instance.exports;
-}
-
-function module(bytes, valid = true) {
- let buffer = new ArrayBuffer(bytes.length);
- let view = new Uint8Array(buffer);
- for (let i = 0; i < bytes.length; ++i) {
- view[i] = bytes.charCodeAt(i);
- }
- let validated;
- try {
- validated = WebAssembly.validate(buffer);
- } catch (e) {
- throw new Error("Wasm validate throws");
- }
- if (validated !== valid) {
- throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
- }
- return new WebAssembly.Module(buffer);
-}
-
-function instance(bytes, imports = registry) {
- return new WebAssembly.Instance(module(bytes), imports);
-}
-
-function call(instance, name, args) {
- return instance.exports[name](...args);
-}
-
-function get(instance, name) {
- let v = instance.exports[name];
- return (v instanceof WebAssembly.Global) ? v.value : v;
-}
-
-function exports(name, instance) {
- return {[name]: instance.exports};
-}
-
-function run(action) {
- action();
-}
-
-function assert_malformed(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm decoding failure expected");
-}
-
-function assert_invalid(bytes) {
- try { module(bytes, false) } catch (e) {
- if (e instanceof WebAssembly.CompileError) return;
- }
- throw new Error("Wasm validation failure expected");
-}
-
-function assert_unlinkable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.LinkError) return;
- }
- throw new Error("Wasm linking failure expected");
-}
-
-function assert_uninstantiable(bytes) {
- let mod = module(bytes);
- try { new WebAssembly.Instance(mod, registry) } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-function assert_trap(action) {
- try { action() } catch (e) {
- if (e instanceof WebAssembly.RuntimeError) return;
- }
- throw new Error("Wasm trap expected");
-}
-
-let StackOverflow;
-try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
-
-function assert_exhaustion(action) {
- try { action() } catch (e) {
- if (e instanceof StackOverflow) return;
- }
- throw new Error("Wasm resource exhaustion expected");
-}
-
-function assert_return(action, expected) {
- let actual = action();
- if (!Object.is(actual, expected)) {
- throw new Error("Wasm return value " + expected + " expected, got " + actual);
- };
-}
-
-function assert_return_canonical_nan(action) {
- let actual = action();
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test that it's a canonical NaN.
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-function assert_return_arithmetic_nan(action) {
- // Note that JS can't reliably distinguish different NaN values,
- // so there's no good way to test for specific bitpatterns here.
- let actual = action();
- if (!Number.isNaN(actual)) {
- throw new Error("Wasm return value NaN expected, got " + actual);
- };
-}
-
-// table_init.wast:5
-let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x03\x86\x80\x80\x80\x00\x05\x00\x00\x00\x00\x00\x07\x9f\x80\x80\x80\x00\x05\x03\x65\x66\x30\x00\x00\x03\x65\x66\x31\x00\x01\x03\x65\x66\x32\x00\x02\x03\x65\x66\x33\x00\x03\x03\x65\x66\x34\x00\x04\x0a\xae\x80\x80\x80\x00\x05\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b");
-
-// table_init.wast:12
-register("a", $1)
-
-// table_init.wast:14
-let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x07\x41\x00\x41\x04\xfc\x0c\x01\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_init.wast:37
-run(() => call($2, "test", []));
-
-// table_init.wast:38
-assert_trap(() => call($2, "check", [0]));
-
-// table_init.wast:39
-assert_trap(() => call($2, "check", [1]));
-
-// table_init.wast:40
-assert_return(() => call($2, "check", [2]), 3);
-
-// table_init.wast:41
-assert_return(() => call($2, "check", [3]), 1);
-
-// table_init.wast:42
-assert_return(() => call($2, "check", [4]), 4);
-
-// table_init.wast:43
-assert_return(() => call($2, "check", [5]), 1);
-
-// table_init.wast:44
-assert_trap(() => call($2, "check", [6]));
-
-// table_init.wast:45
-assert_return(() => call($2, "check", [7]), 2);
-
-// table_init.wast:46
-assert_return(() => call($2, "check", [8]), 7);
-
-// table_init.wast:47
-assert_return(() => call($2, "check", [9]), 1);
-
-// table_init.wast:48
-assert_return(() => call($2, "check", [10]), 8);
-
-// table_init.wast:49
-assert_trap(() => call($2, "check", [11]));
-
-// table_init.wast:50
-assert_return(() => call($2, "check", [12]), 7);
-
-// table_init.wast:51
-assert_return(() => call($2, "check", [13]), 5);
-
-// table_init.wast:52
-assert_return(() => call($2, "check", [14]), 2);
-
-// table_init.wast:53
-assert_return(() => call($2, "check", [15]), 3);
-
-// table_init.wast:54
-assert_return(() => call($2, "check", [16]), 6);
-
-// table_init.wast:55
-assert_trap(() => call($2, "check", [17]));
-
-// table_init.wast:56
-assert_trap(() => call($2, "check", [18]));
-
-// table_init.wast:57
-assert_trap(() => call($2, "check", [19]));
-
-// table_init.wast:58
-assert_trap(() => call($2, "check", [20]));
-
-// table_init.wast:59
-assert_trap(() => call($2, "check", [21]));
-
-// table_init.wast:60
-assert_trap(() => call($2, "check", [22]));
-
-// table_init.wast:61
-assert_trap(() => call($2, "check", [23]));
-
-// table_init.wast:62
-assert_trap(() => call($2, "check", [24]));
-
-// table_init.wast:63
-assert_trap(() => call($2, "check", [25]));
-
-// table_init.wast:64
-assert_trap(() => call($2, "check", [26]));
-
-// table_init.wast:65
-assert_trap(() => call($2, "check", [27]));
-
-// table_init.wast:66
-assert_trap(() => call($2, "check", [28]));
-
-// table_init.wast:67
-assert_trap(() => call($2, "check", [29]));
-
-// table_init.wast:69
-let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x01\x41\x03\xfc\x0c\x03\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_init.wast:92
-run(() => call($3, "test", []));
-
-// table_init.wast:93
-assert_trap(() => call($3, "check", [0]));
-
-// table_init.wast:94
-assert_trap(() => call($3, "check", [1]));
-
-// table_init.wast:95
-assert_return(() => call($3, "check", [2]), 3);
-
-// table_init.wast:96
-assert_return(() => call($3, "check", [3]), 1);
-
-// table_init.wast:97
-assert_return(() => call($3, "check", [4]), 4);
-
-// table_init.wast:98
-assert_return(() => call($3, "check", [5]), 1);
-
-// table_init.wast:99
-assert_trap(() => call($3, "check", [6]));
-
-// table_init.wast:100
-assert_trap(() => call($3, "check", [7]));
-
-// table_init.wast:101
-assert_trap(() => call($3, "check", [8]));
-
-// table_init.wast:102
-assert_trap(() => call($3, "check", [9]));
-
-// table_init.wast:103
-assert_trap(() => call($3, "check", [10]));
-
-// table_init.wast:104
-assert_trap(() => call($3, "check", [11]));
-
-// table_init.wast:105
-assert_return(() => call($3, "check", [12]), 7);
-
-// table_init.wast:106
-assert_return(() => call($3, "check", [13]), 5);
-
-// table_init.wast:107
-assert_return(() => call($3, "check", [14]), 2);
-
-// table_init.wast:108
-assert_return(() => call($3, "check", [15]), 9);
-
-// table_init.wast:109
-assert_return(() => call($3, "check", [16]), 2);
-
-// table_init.wast:110
-assert_return(() => call($3, "check", [17]), 7);
-
-// table_init.wast:111
-assert_trap(() => call($3, "check", [18]));
-
-// table_init.wast:112
-assert_trap(() => call($3, "check", [19]));
-
-// table_init.wast:113
-assert_trap(() => call($3, "check", [20]));
-
-// table_init.wast:114
-assert_trap(() => call($3, "check", [21]));
-
-// table_init.wast:115
-assert_trap(() => call($3, "check", [22]));
-
-// table_init.wast:116
-assert_trap(() => call($3, "check", [23]));
-
-// table_init.wast:117
-assert_trap(() => call($3, "check", [24]));
-
-// table_init.wast:118
-assert_trap(() => call($3, "check", [25]));
-
-// table_init.wast:119
-assert_trap(() => call($3, "check", [26]));
-
-// table_init.wast:120
-assert_trap(() => call($3, "check", [27]));
-
-// table_init.wast:121
-assert_trap(() => call($3, "check", [28]));
-
-// table_init.wast:122
-assert_trap(() => call($3, "check", [29]));
-
-// table_init.wast:124
-let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\x8d\x81\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\xce\x80\x80\x80\x00\x00\x41\x07\x41\x00\x41\x04\xfc\x0c\x01\x00\xfc\x0d\x01\x41\x0f\x41\x01\x41\x03\xfc\x0c\x03\x00\xfc\x0d\x03\x41\x14\x41\x0f\x41\x05\xfc\x0e\x00\x00\x41\x15\x41\x1d\x41\x01\xfc\x0e\x00\x00\x41\x18\x41\x0a\x41\x01\xfc\x0e\x00\x00\x41\x0d\x41\x0b\x41\x04\xfc\x0e\x00\x00\x41\x13\x41\x14\x41\x05\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
-
-// table_init.wast:155
-run(() => call($4, "test", []));
-
-// table_init.wast:156
-assert_trap(() => call($4, "check", [0]));
-
-// table_init.wast:157
-assert_trap(() => call($4, "check", [1]));
-
-// table_init.wast:158
-assert_return(() => call($4, "check", [2]), 3);
-
-// table_init.wast:159
-assert_return(() => call($4, "check", [3]), 1);
-
-// table_init.wast:160
-assert_return(() => call($4, "check", [4]), 4);
-
-// table_init.wast:161
-assert_return(() => call($4, "check", [5]), 1);
-
-// table_init.wast:162
-assert_trap(() => call($4, "check", [6]));
-
-// table_init.wast:163
-assert_return(() => call($4, "check", [7]), 2);
-
-// table_init.wast:164
-assert_return(() => call($4, "check", [8]), 7);
-
-// table_init.wast:165
-assert_return(() => call($4, "check", [9]), 1);
-
-// table_init.wast:166
-assert_return(() => call($4, "check", [10]), 8);
-
-// table_init.wast:167
-assert_trap(() => call($4, "check", [11]));
-
-// table_init.wast:168
-assert_return(() => call($4, "check", [12]), 7);
-
-// table_init.wast:169
-assert_trap(() => call($4, "check", [13]));
-
-// table_init.wast:170
-assert_return(() => call($4, "check", [14]), 7);
-
-// table_init.wast:171
-assert_return(() => call($4, "check", [15]), 5);
-
-// table_init.wast:172
-assert_return(() => call($4, "check", [16]), 2);
-
-// table_init.wast:173
-assert_return(() => call($4, "check", [17]), 7);
-
-// table_init.wast:174
-assert_trap(() => call($4, "check", [18]));
-
-// table_init.wast:175
-assert_return(() => call($4, "check", [19]), 9);
-
-// table_init.wast:176
-assert_trap(() => call($4, "check", [20]));
-
-// table_init.wast:177
-assert_return(() => call($4, "check", [21]), 7);
-
-// table_init.wast:178
-assert_trap(() => call($4, "check", [22]));
-
-// table_init.wast:179
-assert_return(() => call($4, "check", [23]), 8);
-
-// table_init.wast:180
-assert_return(() => call($4, "check", [24]), 8);
-
-// table_init.wast:181
-assert_trap(() => call($4, "check", [25]));
-
-// table_init.wast:182
-assert_trap(() => call($4, "check", [26]));
-
-// table_init.wast:183
-assert_trap(() => call($4, "check", [27]));
-
-// table_init.wast:184
-assert_trap(() => call($4, "check", [28]));
-
-// table_init.wast:185
-assert_trap(() => call($4, "check", [29]));
-
-// table_init.wast:186
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x8b\x80\x80\x80\x00\x01\x85\x80\x80\x80\x00\x00\xfc\x0d\x00\x0b");
-
-// table_init.wast:192
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x01\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:198
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x87\x80\x80\x80\x00\x01\x01\x70\x01\xd2\x00\x0b\x0a\x94\x80\x80\x80\x00\x02\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x85\x80\x80\x80\x00\x00\xfc\x0d\x04\x0b");
-
-// table_init.wast:206
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x87\x80\x80\x80\x00\x01\x01\x70\x01\xd2\x00\x0b\x0a\x9b\x80\x80\x80\x00\x02\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x01\x41\x01\xfc\x0c\x04\x00\x0b");
-
-// table_init.wast:215
-let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xe5\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x85\x80\x80\x80\x00\x00\xfc\x0d\x02\x0b");
-
-// table_init.wast:234
-assert_trap(() => call($5, "test", []));
-
-// table_init.wast:236
-let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x01\x41\x01\xfc\x0c\x02\x00\x0b");
-
-// table_init.wast:255
-assert_trap(() => call($6, "test", []));
-
-// table_init.wast:257
-let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xf6\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x96\x80\x80\x80\x00\x00\x41\x0c\x41\x01\x41\x01\xfc\x0c\x01\x00\x41\x15\x41\x01\x41\x01\xfc\x0c\x01\x00\x0b");
-
-// table_init.wast:276
-run(() => call($7, "test", []));
-
-// table_init.wast:278
-let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xe8\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x88\x80\x80\x80\x00\x00\xfc\x0d\x01\xfc\x0d\x01\x0b");
-
-// table_init.wast:297
-assert_trap(() => call($8, "test", []));
-
-// table_init.wast:299
-let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xef\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8f\x80\x80\x80\x00\x00\xfc\x0d\x01\x41\x0c\x41\x01\x41\x01\xfc\x0c\x01\x00\x0b");
-
-// table_init.wast:318
-assert_trap(() => call($9, "test", []));
-
-// table_init.wast:320
-let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x00\x41\x05\xfc\x0c\x01\x00\x0b");
-
-// table_init.wast:339
-assert_trap(() => call($10, "test", []));
-
-// table_init.wast:341
-let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x02\x41\x03\xfc\x0c\x01\x00\x0b");
-
-// table_init.wast:360
-assert_trap(() => call($11, "test", []));
-
-// table_init.wast:362
-let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x1c\x41\x01\x41\x03\xfc\x0c\x01\x00\x0b");
-
-// table_init.wast:381
-assert_trap(() => call($12, "test", []));
-
-// table_init.wast:383
-let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x04\x41\x00\xfc\x0c\x01\x00\x0b");
-
-// table_init.wast:402
-run(() => call($13, "test", []));
-
-// table_init.wast:404
-let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x1e\x41\x02\x41\x00\xfc\x0c\x01\x00\x0b");
-
-// table_init.wast:423
-run(() => call($14, "test", []));
-
-// table_init.wast:425
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x41\x01\x41\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:434
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x01\x41\x01\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:443
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x41\x01\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:452
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:461
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:470
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:479
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:488
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x01\x42\x01\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:497
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x41\x01\x42\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:506
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x01\x42\x01\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:515
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x41\x01\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:524
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:533
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:542
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:551
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:560
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:569
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:578
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:587
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:596
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:605
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa2\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x95\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:614
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:623
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa6\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x99\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:632
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:641
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:650
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:659
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:668
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:677
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa6\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x99\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:686
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:695
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xaa\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9d\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:704
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x42\x01\x41\x01\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:713
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x42\x01\x41\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:722
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x42\x01\x41\x01\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:731
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x42\x01\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:740
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:749
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:758
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:767
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:776
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x42\x01\x42\x01\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:785
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x42\x01\x42\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:794
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x42\x01\x42\x01\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:803
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x42\x01\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:812
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:821
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:830
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:839
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:848
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:857
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:866
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:875
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:884
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:893
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa6\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:902
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:911
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xaa\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:920
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:929
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:938
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:947
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:956
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:965
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xaa\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:974
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:983
-assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xae\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\xa1\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:992
-let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:1016
-assert_trap(() => call($15, "run", [24, 16]));
-
-// table_init.wast:1017
-assert_return(() => call($15, "test", [24]), 0);
-
-// table_init.wast:1018
-assert_return(() => call($15, "test", [25]), 1);
-
-// table_init.wast:1019
-assert_return(() => call($15, "test", [26]), 2);
-
-// table_init.wast:1020
-assert_return(() => call($15, "test", [27]), 3);
-
-// table_init.wast:1021
-assert_return(() => call($15, "test", [28]), 4);
-
-// table_init.wast:1022
-assert_return(() => call($15, "test", [29]), 5);
-
-// table_init.wast:1023
-assert_return(() => call($15, "test", [30]), 6);
-
-// table_init.wast:1024
-assert_return(() => call($15, "test", [31]), 7);
-
-// table_init.wast:1025
-assert_trap(() => call($15, "test", [0]));
-
-// table_init.wast:1026
-assert_trap(() => call($15, "test", [1]));
-
-// table_init.wast:1027
-assert_trap(() => call($15, "test", [2]));
-
-// table_init.wast:1028
-assert_trap(() => call($15, "test", [3]));
-
-// table_init.wast:1029
-assert_trap(() => call($15, "test", [4]));
-
-// table_init.wast:1030
-assert_trap(() => call($15, "test", [5]));
-
-// table_init.wast:1031
-assert_trap(() => call($15, "test", [6]));
-
-// table_init.wast:1032
-assert_trap(() => call($15, "test", [7]));
-
-// table_init.wast:1033
-assert_trap(() => call($15, "test", [8]));
-
-// table_init.wast:1034
-assert_trap(() => call($15, "test", [9]));
-
-// table_init.wast:1035
-assert_trap(() => call($15, "test", [10]));
-
-// table_init.wast:1036
-assert_trap(() => call($15, "test", [11]));
-
-// table_init.wast:1037
-assert_trap(() => call($15, "test", [12]));
-
-// table_init.wast:1038
-assert_trap(() => call($15, "test", [13]));
-
-// table_init.wast:1039
-assert_trap(() => call($15, "test", [14]));
-
-// table_init.wast:1040
-assert_trap(() => call($15, "test", [15]));
-
-// table_init.wast:1041
-assert_trap(() => call($15, "test", [16]));
-
-// table_init.wast:1042
-assert_trap(() => call($15, "test", [17]));
-
-// table_init.wast:1043
-assert_trap(() => call($15, "test", [18]));
-
-// table_init.wast:1044
-assert_trap(() => call($15, "test", [19]));
-
-// table_init.wast:1045
-assert_trap(() => call($15, "test", [20]));
-
-// table_init.wast:1046
-assert_trap(() => call($15, "test", [21]));
-
-// table_init.wast:1047
-assert_trap(() => call($15, "test", [22]));
-
-// table_init.wast:1048
-assert_trap(() => call($15, "test", [23]));
-
-// table_init.wast:1050
-let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:1074
-assert_trap(() => call($16, "run", [25, 16]));
-
-// table_init.wast:1075
-assert_return(() => call($16, "test", [25]), 0);
-
-// table_init.wast:1076
-assert_return(() => call($16, "test", [26]), 1);
-
-// table_init.wast:1077
-assert_return(() => call($16, "test", [27]), 2);
-
-// table_init.wast:1078
-assert_return(() => call($16, "test", [28]), 3);
-
-// table_init.wast:1079
-assert_return(() => call($16, "test", [29]), 4);
-
-// table_init.wast:1080
-assert_return(() => call($16, "test", [30]), 5);
-
-// table_init.wast:1081
-assert_return(() => call($16, "test", [31]), 6);
-
-// table_init.wast:1082
-assert_trap(() => call($16, "test", [0]));
-
-// table_init.wast:1083
-assert_trap(() => call($16, "test", [1]));
-
-// table_init.wast:1084
-assert_trap(() => call($16, "test", [2]));
-
-// table_init.wast:1085
-assert_trap(() => call($16, "test", [3]));
-
-// table_init.wast:1086
-assert_trap(() => call($16, "test", [4]));
-
-// table_init.wast:1087
-assert_trap(() => call($16, "test", [5]));
-
-// table_init.wast:1088
-assert_trap(() => call($16, "test", [6]));
-
-// table_init.wast:1089
-assert_trap(() => call($16, "test", [7]));
-
-// table_init.wast:1090
-assert_trap(() => call($16, "test", [8]));
-
-// table_init.wast:1091
-assert_trap(() => call($16, "test", [9]));
-
-// table_init.wast:1092
-assert_trap(() => call($16, "test", [10]));
-
-// table_init.wast:1093
-assert_trap(() => call($16, "test", [11]));
-
-// table_init.wast:1094
-assert_trap(() => call($16, "test", [12]));
-
-// table_init.wast:1095
-assert_trap(() => call($16, "test", [13]));
-
-// table_init.wast:1096
-assert_trap(() => call($16, "test", [14]));
-
-// table_init.wast:1097
-assert_trap(() => call($16, "test", [15]));
-
-// table_init.wast:1098
-assert_trap(() => call($16, "test", [16]));
-
-// table_init.wast:1099
-assert_trap(() => call($16, "test", [17]));
-
-// table_init.wast:1100
-assert_trap(() => call($16, "test", [18]));
-
-// table_init.wast:1101
-assert_trap(() => call($16, "test", [19]));
-
-// table_init.wast:1102
-assert_trap(() => call($16, "test", [20]));
-
-// table_init.wast:1103
-assert_trap(() => call($16, "test", [21]));
-
-// table_init.wast:1104
-assert_trap(() => call($16, "test", [22]));
-
-// table_init.wast:1105
-assert_trap(() => call($16, "test", [23]));
-
-// table_init.wast:1106
-assert_trap(() => call($16, "test", [24]));
-
-// table_init.wast:1108
-let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x87\x80\x80\x80\x00\x01\x70\x01\xa0\x01\xc0\x02\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:1132
-assert_trap(() => call($17, "run", [96, 32]));
-
-// table_init.wast:1133
-assert_return(() => call($17, "test", [96]), 0);
-
-// table_init.wast:1134
-assert_return(() => call($17, "test", [97]), 1);
-
-// table_init.wast:1135
-assert_return(() => call($17, "test", [98]), 2);
-
-// table_init.wast:1136
-assert_return(() => call($17, "test", [99]), 3);
-
-// table_init.wast:1137
-assert_return(() => call($17, "test", [100]), 4);
-
-// table_init.wast:1138
-assert_return(() => call($17, "test", [101]), 5);
-
-// table_init.wast:1139
-assert_return(() => call($17, "test", [102]), 6);
-
-// table_init.wast:1140
-assert_return(() => call($17, "test", [103]), 7);
-
-// table_init.wast:1141
-assert_return(() => call($17, "test", [104]), 8);
-
-// table_init.wast:1142
-assert_return(() => call($17, "test", [105]), 9);
-
-// table_init.wast:1143
-assert_return(() => call($17, "test", [106]), 10);
-
-// table_init.wast:1144
-assert_return(() => call($17, "test", [107]), 11);
-
-// table_init.wast:1145
-assert_return(() => call($17, "test", [108]), 12);
-
-// table_init.wast:1146
-assert_return(() => call($17, "test", [109]), 13);
-
-// table_init.wast:1147
-assert_return(() => call($17, "test", [110]), 14);
-
-// table_init.wast:1148
-assert_return(() => call($17, "test", [111]), 15);
-
-// table_init.wast:1149
-assert_trap(() => call($17, "test", [112]));
-
-// table_init.wast:1150
-assert_trap(() => call($17, "test", [113]));
-
-// table_init.wast:1151
-assert_trap(() => call($17, "test", [114]));
-
-// table_init.wast:1152
-assert_trap(() => call($17, "test", [115]));
-
-// table_init.wast:1153
-assert_trap(() => call($17, "test", [116]));
-
-// table_init.wast:1154
-assert_trap(() => call($17, "test", [117]));
-
-// table_init.wast:1155
-assert_trap(() => call($17, "test", [118]));
-
-// table_init.wast:1156
-assert_trap(() => call($17, "test", [119]));
-
-// table_init.wast:1157
-assert_trap(() => call($17, "test", [120]));
-
-// table_init.wast:1158
-assert_trap(() => call($17, "test", [121]));
-
-// table_init.wast:1159
-assert_trap(() => call($17, "test", [122]));
-
-// table_init.wast:1160
-assert_trap(() => call($17, "test", [123]));
-
-// table_init.wast:1161
-assert_trap(() => call($17, "test", [124]));
-
-// table_init.wast:1162
-assert_trap(() => call($17, "test", [125]));
-
-// table_init.wast:1163
-assert_trap(() => call($17, "test", [126]));
-
-// table_init.wast:1164
-assert_trap(() => call($17, "test", [127]));
-
-// table_init.wast:1165
-assert_trap(() => call($17, "test", [128]));
-
-// table_init.wast:1166
-assert_trap(() => call($17, "test", [129]));
-
-// table_init.wast:1167
-assert_trap(() => call($17, "test", [130]));
-
-// table_init.wast:1168
-assert_trap(() => call($17, "test", [131]));
-
-// table_init.wast:1169
-assert_trap(() => call($17, "test", [132]));
-
-// table_init.wast:1170
-assert_trap(() => call($17, "test", [133]));
-
-// table_init.wast:1171
-assert_trap(() => call($17, "test", [134]));
-
-// table_init.wast:1172
-assert_trap(() => call($17, "test", [135]));
-
-// table_init.wast:1173
-assert_trap(() => call($17, "test", [136]));
-
-// table_init.wast:1174
-assert_trap(() => call($17, "test", [137]));
-
-// table_init.wast:1175
-assert_trap(() => call($17, "test", [138]));
-
-// table_init.wast:1176
-assert_trap(() => call($17, "test", [139]));
-
-// table_init.wast:1177
-assert_trap(() => call($17, "test", [140]));
-
-// table_init.wast:1178
-assert_trap(() => call($17, "test", [141]));
-
-// table_init.wast:1179
-assert_trap(() => call($17, "test", [142]));
-
-// table_init.wast:1180
-assert_trap(() => call($17, "test", [143]));
-
-// table_init.wast:1181
-assert_trap(() => call($17, "test", [144]));
-
-// table_init.wast:1182
-assert_trap(() => call($17, "test", [145]));
-
-// table_init.wast:1183
-assert_trap(() => call($17, "test", [146]));
-
-// table_init.wast:1184
-assert_trap(() => call($17, "test", [147]));
-
-// table_init.wast:1185
-assert_trap(() => call($17, "test", [148]));
-
-// table_init.wast:1186
-assert_trap(() => call($17, "test", [149]));
-
-// table_init.wast:1187
-assert_trap(() => call($17, "test", [150]));
-
-// table_init.wast:1188
-assert_trap(() => call($17, "test", [151]));
-
-// table_init.wast:1189
-assert_trap(() => call($17, "test", [152]));
-
-// table_init.wast:1190
-assert_trap(() => call($17, "test", [153]));
-
-// table_init.wast:1191
-assert_trap(() => call($17, "test", [154]));
-
-// table_init.wast:1192
-assert_trap(() => call($17, "test", [155]));
-
-// table_init.wast:1193
-assert_trap(() => call($17, "test", [156]));
-
-// table_init.wast:1194
-assert_trap(() => call($17, "test", [157]));
-
-// table_init.wast:1195
-assert_trap(() => call($17, "test", [158]));
-
-// table_init.wast:1196
-assert_trap(() => call($17, "test", [159]));
-
-// table_init.wast:1197
-assert_trap(() => call($17, "test", [0]));
-
-// table_init.wast:1198
-assert_trap(() => call($17, "test", [1]));
-
-// table_init.wast:1199
-assert_trap(() => call($17, "test", [2]));
-
-// table_init.wast:1200
-assert_trap(() => call($17, "test", [3]));
-
-// table_init.wast:1201
-assert_trap(() => call($17, "test", [4]));
-
-// table_init.wast:1202
-assert_trap(() => call($17, "test", [5]));
-
-// table_init.wast:1203
-assert_trap(() => call($17, "test", [6]));
-
-// table_init.wast:1204
-assert_trap(() => call($17, "test", [7]));
-
-// table_init.wast:1205
-assert_trap(() => call($17, "test", [8]));
-
-// table_init.wast:1206
-assert_trap(() => call($17, "test", [9]));
-
-// table_init.wast:1207
-assert_trap(() => call($17, "test", [10]));
-
-// table_init.wast:1208
-assert_trap(() => call($17, "test", [11]));
-
-// table_init.wast:1209
-assert_trap(() => call($17, "test", [12]));
-
-// table_init.wast:1210
-assert_trap(() => call($17, "test", [13]));
-
-// table_init.wast:1211
-assert_trap(() => call($17, "test", [14]));
-
-// table_init.wast:1212
-assert_trap(() => call($17, "test", [15]));
-
-// table_init.wast:1213
-assert_trap(() => call($17, "test", [16]));
-
-// table_init.wast:1214
-assert_trap(() => call($17, "test", [17]));
-
-// table_init.wast:1215
-assert_trap(() => call($17, "test", [18]));
-
-// table_init.wast:1216
-assert_trap(() => call($17, "test", [19]));
-
-// table_init.wast:1217
-assert_trap(() => call($17, "test", [20]));
-
-// table_init.wast:1218
-assert_trap(() => call($17, "test", [21]));
-
-// table_init.wast:1219
-assert_trap(() => call($17, "test", [22]));
-
-// table_init.wast:1220
-assert_trap(() => call($17, "test", [23]));
-
-// table_init.wast:1221
-assert_trap(() => call($17, "test", [24]));
-
-// table_init.wast:1222
-assert_trap(() => call($17, "test", [25]));
-
-// table_init.wast:1223
-assert_trap(() => call($17, "test", [26]));
-
-// table_init.wast:1224
-assert_trap(() => call($17, "test", [27]));
-
-// table_init.wast:1225
-assert_trap(() => call($17, "test", [28]));
-
-// table_init.wast:1226
-assert_trap(() => call($17, "test", [29]));
-
-// table_init.wast:1227
-assert_trap(() => call($17, "test", [30]));
-
-// table_init.wast:1228
-assert_trap(() => call($17, "test", [31]));
-
-// table_init.wast:1229
-assert_trap(() => call($17, "test", [32]));
-
-// table_init.wast:1230
-assert_trap(() => call($17, "test", [33]));
-
-// table_init.wast:1231
-assert_trap(() => call($17, "test", [34]));
-
-// table_init.wast:1232
-assert_trap(() => call($17, "test", [35]));
-
-// table_init.wast:1233
-assert_trap(() => call($17, "test", [36]));
-
-// table_init.wast:1234
-assert_trap(() => call($17, "test", [37]));
-
-// table_init.wast:1235
-assert_trap(() => call($17, "test", [38]));
-
-// table_init.wast:1236
-assert_trap(() => call($17, "test", [39]));
-
-// table_init.wast:1237
-assert_trap(() => call($17, "test", [40]));
-
-// table_init.wast:1238
-assert_trap(() => call($17, "test", [41]));
-
-// table_init.wast:1239
-assert_trap(() => call($17, "test", [42]));
-
-// table_init.wast:1240
-assert_trap(() => call($17, "test", [43]));
-
-// table_init.wast:1241
-assert_trap(() => call($17, "test", [44]));
-
-// table_init.wast:1242
-assert_trap(() => call($17, "test", [45]));
-
-// table_init.wast:1243
-assert_trap(() => call($17, "test", [46]));
-
-// table_init.wast:1244
-assert_trap(() => call($17, "test", [47]));
-
-// table_init.wast:1245
-assert_trap(() => call($17, "test", [48]));
-
-// table_init.wast:1246
-assert_trap(() => call($17, "test", [49]));
-
-// table_init.wast:1247
-assert_trap(() => call($17, "test", [50]));
-
-// table_init.wast:1248
-assert_trap(() => call($17, "test", [51]));
-
-// table_init.wast:1249
-assert_trap(() => call($17, "test", [52]));
-
-// table_init.wast:1250
-assert_trap(() => call($17, "test", [53]));
-
-// table_init.wast:1251
-assert_trap(() => call($17, "test", [54]));
-
-// table_init.wast:1252
-assert_trap(() => call($17, "test", [55]));
-
-// table_init.wast:1253
-assert_trap(() => call($17, "test", [56]));
-
-// table_init.wast:1254
-assert_trap(() => call($17, "test", [57]));
-
-// table_init.wast:1255
-assert_trap(() => call($17, "test", [58]));
-
-// table_init.wast:1256
-assert_trap(() => call($17, "test", [59]));
-
-// table_init.wast:1257
-assert_trap(() => call($17, "test", [60]));
-
-// table_init.wast:1258
-assert_trap(() => call($17, "test", [61]));
-
-// table_init.wast:1259
-assert_trap(() => call($17, "test", [62]));
-
-// table_init.wast:1260
-assert_trap(() => call($17, "test", [63]));
-
-// table_init.wast:1261
-assert_trap(() => call($17, "test", [64]));
-
-// table_init.wast:1262
-assert_trap(() => call($17, "test", [65]));
-
-// table_init.wast:1263
-assert_trap(() => call($17, "test", [66]));
-
-// table_init.wast:1264
-assert_trap(() => call($17, "test", [67]));
-
-// table_init.wast:1265
-assert_trap(() => call($17, "test", [68]));
-
-// table_init.wast:1266
-assert_trap(() => call($17, "test", [69]));
-
-// table_init.wast:1267
-assert_trap(() => call($17, "test", [70]));
-
-// table_init.wast:1268
-assert_trap(() => call($17, "test", [71]));
-
-// table_init.wast:1269
-assert_trap(() => call($17, "test", [72]));
-
-// table_init.wast:1270
-assert_trap(() => call($17, "test", [73]));
-
-// table_init.wast:1271
-assert_trap(() => call($17, "test", [74]));
-
-// table_init.wast:1272
-assert_trap(() => call($17, "test", [75]));
-
-// table_init.wast:1273
-assert_trap(() => call($17, "test", [76]));
-
-// table_init.wast:1274
-assert_trap(() => call($17, "test", [77]));
-
-// table_init.wast:1275
-assert_trap(() => call($17, "test", [78]));
-
-// table_init.wast:1276
-assert_trap(() => call($17, "test", [79]));
-
-// table_init.wast:1277
-assert_trap(() => call($17, "test", [80]));
-
-// table_init.wast:1278
-assert_trap(() => call($17, "test", [81]));
-
-// table_init.wast:1279
-assert_trap(() => call($17, "test", [82]));
-
-// table_init.wast:1280
-assert_trap(() => call($17, "test", [83]));
-
-// table_init.wast:1281
-assert_trap(() => call($17, "test", [84]));
-
-// table_init.wast:1282
-assert_trap(() => call($17, "test", [85]));
-
-// table_init.wast:1283
-assert_trap(() => call($17, "test", [86]));
-
-// table_init.wast:1284
-assert_trap(() => call($17, "test", [87]));
-
-// table_init.wast:1285
-assert_trap(() => call($17, "test", [88]));
-
-// table_init.wast:1286
-assert_trap(() => call($17, "test", [89]));
-
-// table_init.wast:1287
-assert_trap(() => call($17, "test", [90]));
-
-// table_init.wast:1288
-assert_trap(() => call($17, "test", [91]));
-
-// table_init.wast:1289
-assert_trap(() => call($17, "test", [92]));
-
-// table_init.wast:1290
-assert_trap(() => call($17, "test", [93]));
-
-// table_init.wast:1291
-assert_trap(() => call($17, "test", [94]));
-
-// table_init.wast:1292
-assert_trap(() => call($17, "test", [95]));
-
-// table_init.wast:1294
-let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x87\x80\x80\x80\x00\x01\x70\x01\xa0\x01\xc0\x02\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:1318
-assert_trap(() => call($18, "run", [97, 31]));
-
-// table_init.wast:1319
-assert_return(() => call($18, "test", [97]), 0);
-
-// table_init.wast:1320
-assert_return(() => call($18, "test", [98]), 1);
-
-// table_init.wast:1321
-assert_return(() => call($18, "test", [99]), 2);
-
-// table_init.wast:1322
-assert_return(() => call($18, "test", [100]), 3);
-
-// table_init.wast:1323
-assert_return(() => call($18, "test", [101]), 4);
-
-// table_init.wast:1324
-assert_return(() => call($18, "test", [102]), 5);
-
-// table_init.wast:1325
-assert_return(() => call($18, "test", [103]), 6);
-
-// table_init.wast:1326
-assert_return(() => call($18, "test", [104]), 7);
-
-// table_init.wast:1327
-assert_return(() => call($18, "test", [105]), 8);
-
-// table_init.wast:1328
-assert_return(() => call($18, "test", [106]), 9);
-
-// table_init.wast:1329
-assert_return(() => call($18, "test", [107]), 10);
-
-// table_init.wast:1330
-assert_return(() => call($18, "test", [108]), 11);
-
-// table_init.wast:1331
-assert_return(() => call($18, "test", [109]), 12);
-
-// table_init.wast:1332
-assert_return(() => call($18, "test", [110]), 13);
-
-// table_init.wast:1333
-assert_return(() => call($18, "test", [111]), 14);
-
-// table_init.wast:1334
-assert_return(() => call($18, "test", [112]), 15);
-
-// table_init.wast:1335
-assert_trap(() => call($18, "test", [113]));
-
-// table_init.wast:1336
-assert_trap(() => call($18, "test", [114]));
-
-// table_init.wast:1337
-assert_trap(() => call($18, "test", [115]));
-
-// table_init.wast:1338
-assert_trap(() => call($18, "test", [116]));
-
-// table_init.wast:1339
-assert_trap(() => call($18, "test", [117]));
-
-// table_init.wast:1340
-assert_trap(() => call($18, "test", [118]));
-
-// table_init.wast:1341
-assert_trap(() => call($18, "test", [119]));
-
-// table_init.wast:1342
-assert_trap(() => call($18, "test", [120]));
-
-// table_init.wast:1343
-assert_trap(() => call($18, "test", [121]));
-
-// table_init.wast:1344
-assert_trap(() => call($18, "test", [122]));
-
-// table_init.wast:1345
-assert_trap(() => call($18, "test", [123]));
-
-// table_init.wast:1346
-assert_trap(() => call($18, "test", [124]));
-
-// table_init.wast:1347
-assert_trap(() => call($18, "test", [125]));
-
-// table_init.wast:1348
-assert_trap(() => call($18, "test", [126]));
-
-// table_init.wast:1349
-assert_trap(() => call($18, "test", [127]));
-
-// table_init.wast:1350
-assert_trap(() => call($18, "test", [128]));
-
-// table_init.wast:1351
-assert_trap(() => call($18, "test", [129]));
-
-// table_init.wast:1352
-assert_trap(() => call($18, "test", [130]));
-
-// table_init.wast:1353
-assert_trap(() => call($18, "test", [131]));
-
-// table_init.wast:1354
-assert_trap(() => call($18, "test", [132]));
-
-// table_init.wast:1355
-assert_trap(() => call($18, "test", [133]));
-
-// table_init.wast:1356
-assert_trap(() => call($18, "test", [134]));
-
-// table_init.wast:1357
-assert_trap(() => call($18, "test", [135]));
-
-// table_init.wast:1358
-assert_trap(() => call($18, "test", [136]));
-
-// table_init.wast:1359
-assert_trap(() => call($18, "test", [137]));
-
-// table_init.wast:1360
-assert_trap(() => call($18, "test", [138]));
-
-// table_init.wast:1361
-assert_trap(() => call($18, "test", [139]));
-
-// table_init.wast:1362
-assert_trap(() => call($18, "test", [140]));
-
-// table_init.wast:1363
-assert_trap(() => call($18, "test", [141]));
-
-// table_init.wast:1364
-assert_trap(() => call($18, "test", [142]));
-
-// table_init.wast:1365
-assert_trap(() => call($18, "test", [143]));
-
-// table_init.wast:1366
-assert_trap(() => call($18, "test", [144]));
-
-// table_init.wast:1367
-assert_trap(() => call($18, "test", [145]));
-
-// table_init.wast:1368
-assert_trap(() => call($18, "test", [146]));
-
-// table_init.wast:1369
-assert_trap(() => call($18, "test", [147]));
-
-// table_init.wast:1370
-assert_trap(() => call($18, "test", [148]));
-
-// table_init.wast:1371
-assert_trap(() => call($18, "test", [149]));
-
-// table_init.wast:1372
-assert_trap(() => call($18, "test", [150]));
-
-// table_init.wast:1373
-assert_trap(() => call($18, "test", [151]));
-
-// table_init.wast:1374
-assert_trap(() => call($18, "test", [152]));
-
-// table_init.wast:1375
-assert_trap(() => call($18, "test", [153]));
-
-// table_init.wast:1376
-assert_trap(() => call($18, "test", [154]));
-
-// table_init.wast:1377
-assert_trap(() => call($18, "test", [155]));
-
-// table_init.wast:1378
-assert_trap(() => call($18, "test", [156]));
-
-// table_init.wast:1379
-assert_trap(() => call($18, "test", [157]));
-
-// table_init.wast:1380
-assert_trap(() => call($18, "test", [158]));
-
-// table_init.wast:1381
-assert_trap(() => call($18, "test", [159]));
-
-// table_init.wast:1382
-assert_trap(() => call($18, "test", [0]));
-
-// table_init.wast:1383
-assert_trap(() => call($18, "test", [1]));
-
-// table_init.wast:1384
-assert_trap(() => call($18, "test", [2]));
-
-// table_init.wast:1385
-assert_trap(() => call($18, "test", [3]));
-
-// table_init.wast:1386
-assert_trap(() => call($18, "test", [4]));
-
-// table_init.wast:1387
-assert_trap(() => call($18, "test", [5]));
-
-// table_init.wast:1388
-assert_trap(() => call($18, "test", [6]));
-
-// table_init.wast:1389
-assert_trap(() => call($18, "test", [7]));
-
-// table_init.wast:1390
-assert_trap(() => call($18, "test", [8]));
-
-// table_init.wast:1391
-assert_trap(() => call($18, "test", [9]));
-
-// table_init.wast:1392
-assert_trap(() => call($18, "test", [10]));
-
-// table_init.wast:1393
-assert_trap(() => call($18, "test", [11]));
-
-// table_init.wast:1394
-assert_trap(() => call($18, "test", [12]));
-
-// table_init.wast:1395
-assert_trap(() => call($18, "test", [13]));
-
-// table_init.wast:1396
-assert_trap(() => call($18, "test", [14]));
-
-// table_init.wast:1397
-assert_trap(() => call($18, "test", [15]));
-
-// table_init.wast:1398
-assert_trap(() => call($18, "test", [16]));
-
-// table_init.wast:1399
-assert_trap(() => call($18, "test", [17]));
-
-// table_init.wast:1400
-assert_trap(() => call($18, "test", [18]));
-
-// table_init.wast:1401
-assert_trap(() => call($18, "test", [19]));
-
-// table_init.wast:1402
-assert_trap(() => call($18, "test", [20]));
-
-// table_init.wast:1403
-assert_trap(() => call($18, "test", [21]));
-
-// table_init.wast:1404
-assert_trap(() => call($18, "test", [22]));
-
-// table_init.wast:1405
-assert_trap(() => call($18, "test", [23]));
-
-// table_init.wast:1406
-assert_trap(() => call($18, "test", [24]));
-
-// table_init.wast:1407
-assert_trap(() => call($18, "test", [25]));
-
-// table_init.wast:1408
-assert_trap(() => call($18, "test", [26]));
-
-// table_init.wast:1409
-assert_trap(() => call($18, "test", [27]));
-
-// table_init.wast:1410
-assert_trap(() => call($18, "test", [28]));
-
-// table_init.wast:1411
-assert_trap(() => call($18, "test", [29]));
-
-// table_init.wast:1412
-assert_trap(() => call($18, "test", [30]));
-
-// table_init.wast:1413
-assert_trap(() => call($18, "test", [31]));
-
-// table_init.wast:1414
-assert_trap(() => call($18, "test", [32]));
-
-// table_init.wast:1415
-assert_trap(() => call($18, "test", [33]));
-
-// table_init.wast:1416
-assert_trap(() => call($18, "test", [34]));
-
-// table_init.wast:1417
-assert_trap(() => call($18, "test", [35]));
-
-// table_init.wast:1418
-assert_trap(() => call($18, "test", [36]));
-
-// table_init.wast:1419
-assert_trap(() => call($18, "test", [37]));
-
-// table_init.wast:1420
-assert_trap(() => call($18, "test", [38]));
-
-// table_init.wast:1421
-assert_trap(() => call($18, "test", [39]));
-
-// table_init.wast:1422
-assert_trap(() => call($18, "test", [40]));
-
-// table_init.wast:1423
-assert_trap(() => call($18, "test", [41]));
-
-// table_init.wast:1424
-assert_trap(() => call($18, "test", [42]));
-
-// table_init.wast:1425
-assert_trap(() => call($18, "test", [43]));
-
-// table_init.wast:1426
-assert_trap(() => call($18, "test", [44]));
-
-// table_init.wast:1427
-assert_trap(() => call($18, "test", [45]));
-
-// table_init.wast:1428
-assert_trap(() => call($18, "test", [46]));
-
-// table_init.wast:1429
-assert_trap(() => call($18, "test", [47]));
-
-// table_init.wast:1430
-assert_trap(() => call($18, "test", [48]));
-
-// table_init.wast:1431
-assert_trap(() => call($18, "test", [49]));
-
-// table_init.wast:1432
-assert_trap(() => call($18, "test", [50]));
-
-// table_init.wast:1433
-assert_trap(() => call($18, "test", [51]));
-
-// table_init.wast:1434
-assert_trap(() => call($18, "test", [52]));
-
-// table_init.wast:1435
-assert_trap(() => call($18, "test", [53]));
-
-// table_init.wast:1436
-assert_trap(() => call($18, "test", [54]));
-
-// table_init.wast:1437
-assert_trap(() => call($18, "test", [55]));
-
-// table_init.wast:1438
-assert_trap(() => call($18, "test", [56]));
-
-// table_init.wast:1439
-assert_trap(() => call($18, "test", [57]));
-
-// table_init.wast:1440
-assert_trap(() => call($18, "test", [58]));
-
-// table_init.wast:1441
-assert_trap(() => call($18, "test", [59]));
-
-// table_init.wast:1442
-assert_trap(() => call($18, "test", [60]));
-
-// table_init.wast:1443
-assert_trap(() => call($18, "test", [61]));
-
-// table_init.wast:1444
-assert_trap(() => call($18, "test", [62]));
-
-// table_init.wast:1445
-assert_trap(() => call($18, "test", [63]));
-
-// table_init.wast:1446
-assert_trap(() => call($18, "test", [64]));
-
-// table_init.wast:1447
-assert_trap(() => call($18, "test", [65]));
-
-// table_init.wast:1448
-assert_trap(() => call($18, "test", [66]));
-
-// table_init.wast:1449
-assert_trap(() => call($18, "test", [67]));
-
-// table_init.wast:1450
-assert_trap(() => call($18, "test", [68]));
-
-// table_init.wast:1451
-assert_trap(() => call($18, "test", [69]));
-
-// table_init.wast:1452
-assert_trap(() => call($18, "test", [70]));
-
-// table_init.wast:1453
-assert_trap(() => call($18, "test", [71]));
-
-// table_init.wast:1454
-assert_trap(() => call($18, "test", [72]));
-
-// table_init.wast:1455
-assert_trap(() => call($18, "test", [73]));
-
-// table_init.wast:1456
-assert_trap(() => call($18, "test", [74]));
-
-// table_init.wast:1457
-assert_trap(() => call($18, "test", [75]));
-
-// table_init.wast:1458
-assert_trap(() => call($18, "test", [76]));
-
-// table_init.wast:1459
-assert_trap(() => call($18, "test", [77]));
-
-// table_init.wast:1460
-assert_trap(() => call($18, "test", [78]));
-
-// table_init.wast:1461
-assert_trap(() => call($18, "test", [79]));
-
-// table_init.wast:1462
-assert_trap(() => call($18, "test", [80]));
-
-// table_init.wast:1463
-assert_trap(() => call($18, "test", [81]));
-
-// table_init.wast:1464
-assert_trap(() => call($18, "test", [82]));
-
-// table_init.wast:1465
-assert_trap(() => call($18, "test", [83]));
-
-// table_init.wast:1466
-assert_trap(() => call($18, "test", [84]));
-
-// table_init.wast:1467
-assert_trap(() => call($18, "test", [85]));
-
-// table_init.wast:1468
-assert_trap(() => call($18, "test", [86]));
-
-// table_init.wast:1469
-assert_trap(() => call($18, "test", [87]));
-
-// table_init.wast:1470
-assert_trap(() => call($18, "test", [88]));
-
-// table_init.wast:1471
-assert_trap(() => call($18, "test", [89]));
-
-// table_init.wast:1472
-assert_trap(() => call($18, "test", [90]));
-
-// table_init.wast:1473
-assert_trap(() => call($18, "test", [91]));
-
-// table_init.wast:1474
-assert_trap(() => call($18, "test", [92]));
-
-// table_init.wast:1475
-assert_trap(() => call($18, "test", [93]));
-
-// table_init.wast:1476
-assert_trap(() => call($18, "test", [94]));
-
-// table_init.wast:1477
-assert_trap(() => call($18, "test", [95]));
-
-// table_init.wast:1478
-assert_trap(() => call($18, "test", [96]));
-
-// table_init.wast:1480
-let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x40\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:1504
-assert_trap(() => call($19, "run", [48, -16]));
-
-// table_init.wast:1505
-assert_return(() => call($19, "test", [48]), 0);
-
-// table_init.wast:1506
-assert_return(() => call($19, "test", [49]), 1);
-
-// table_init.wast:1507
-assert_return(() => call($19, "test", [50]), 2);
-
-// table_init.wast:1508
-assert_return(() => call($19, "test", [51]), 3);
-
-// table_init.wast:1509
-assert_return(() => call($19, "test", [52]), 4);
-
-// table_init.wast:1510
-assert_return(() => call($19, "test", [53]), 5);
-
-// table_init.wast:1511
-assert_return(() => call($19, "test", [54]), 6);
-
-// table_init.wast:1512
-assert_return(() => call($19, "test", [55]), 7);
-
-// table_init.wast:1513
-assert_return(() => call($19, "test", [56]), 8);
-
-// table_init.wast:1514
-assert_return(() => call($19, "test", [57]), 9);
-
-// table_init.wast:1515
-assert_return(() => call($19, "test", [58]), 10);
-
-// table_init.wast:1516
-assert_return(() => call($19, "test", [59]), 11);
-
-// table_init.wast:1517
-assert_return(() => call($19, "test", [60]), 12);
-
-// table_init.wast:1518
-assert_return(() => call($19, "test", [61]), 13);
-
-// table_init.wast:1519
-assert_return(() => call($19, "test", [62]), 14);
-
-// table_init.wast:1520
-assert_return(() => call($19, "test", [63]), 15);
-
-// table_init.wast:1521
-assert_trap(() => call($19, "test", [0]));
-
-// table_init.wast:1522
-assert_trap(() => call($19, "test", [1]));
-
-// table_init.wast:1523
-assert_trap(() => call($19, "test", [2]));
-
-// table_init.wast:1524
-assert_trap(() => call($19, "test", [3]));
-
-// table_init.wast:1525
-assert_trap(() => call($19, "test", [4]));
-
-// table_init.wast:1526
-assert_trap(() => call($19, "test", [5]));
-
-// table_init.wast:1527
-assert_trap(() => call($19, "test", [6]));
-
-// table_init.wast:1528
-assert_trap(() => call($19, "test", [7]));
-
-// table_init.wast:1529
-assert_trap(() => call($19, "test", [8]));
-
-// table_init.wast:1530
-assert_trap(() => call($19, "test", [9]));
-
-// table_init.wast:1531
-assert_trap(() => call($19, "test", [10]));
-
-// table_init.wast:1532
-assert_trap(() => call($19, "test", [11]));
-
-// table_init.wast:1533
-assert_trap(() => call($19, "test", [12]));
-
-// table_init.wast:1534
-assert_trap(() => call($19, "test", [13]));
-
-// table_init.wast:1535
-assert_trap(() => call($19, "test", [14]));
-
-// table_init.wast:1536
-assert_trap(() => call($19, "test", [15]));
-
-// table_init.wast:1537
-assert_trap(() => call($19, "test", [16]));
-
-// table_init.wast:1538
-assert_trap(() => call($19, "test", [17]));
-
-// table_init.wast:1539
-assert_trap(() => call($19, "test", [18]));
-
-// table_init.wast:1540
-assert_trap(() => call($19, "test", [19]));
-
-// table_init.wast:1541
-assert_trap(() => call($19, "test", [20]));
-
-// table_init.wast:1542
-assert_trap(() => call($19, "test", [21]));
-
-// table_init.wast:1543
-assert_trap(() => call($19, "test", [22]));
-
-// table_init.wast:1544
-assert_trap(() => call($19, "test", [23]));
-
-// table_init.wast:1545
-assert_trap(() => call($19, "test", [24]));
-
-// table_init.wast:1546
-assert_trap(() => call($19, "test", [25]));
-
-// table_init.wast:1547
-assert_trap(() => call($19, "test", [26]));
-
-// table_init.wast:1548
-assert_trap(() => call($19, "test", [27]));
-
-// table_init.wast:1549
-assert_trap(() => call($19, "test", [28]));
-
-// table_init.wast:1550
-assert_trap(() => call($19, "test", [29]));
-
-// table_init.wast:1551
-assert_trap(() => call($19, "test", [30]));
-
-// table_init.wast:1552
-assert_trap(() => call($19, "test", [31]));
-
-// table_init.wast:1553
-assert_trap(() => call($19, "test", [32]));
-
-// table_init.wast:1554
-assert_trap(() => call($19, "test", [33]));
-
-// table_init.wast:1555
-assert_trap(() => call($19, "test", [34]));
-
-// table_init.wast:1556
-assert_trap(() => call($19, "test", [35]));
-
-// table_init.wast:1557
-assert_trap(() => call($19, "test", [36]));
-
-// table_init.wast:1558
-assert_trap(() => call($19, "test", [37]));
-
-// table_init.wast:1559
-assert_trap(() => call($19, "test", [38]));
-
-// table_init.wast:1560
-assert_trap(() => call($19, "test", [39]));
-
-// table_init.wast:1561
-assert_trap(() => call($19, "test", [40]));
-
-// table_init.wast:1562
-assert_trap(() => call($19, "test", [41]));
-
-// table_init.wast:1563
-assert_trap(() => call($19, "test", [42]));
-
-// table_init.wast:1564
-assert_trap(() => call($19, "test", [43]));
-
-// table_init.wast:1565
-assert_trap(() => call($19, "test", [44]));
-
-// table_init.wast:1566
-assert_trap(() => call($19, "test", [45]));
-
-// table_init.wast:1567
-assert_trap(() => call($19, "test", [46]));
-
-// table_init.wast:1568
-assert_trap(() => call($19, "test", [47]));
-
-// table_init.wast:1570
-let $20 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x10\x10\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x08\x20\x01\xfc\x0c\x00\x00\x0b");
-
-// table_init.wast:1594
-assert_trap(() => call($20, "run", [0, -4]));
-
-// table_init.wast:1595
-assert_return(() => call($20, "test", [0]), 8);
-
-// table_init.wast:1596
-assert_return(() => call($20, "test", [1]), 9);
-
-// table_init.wast:1597
-assert_return(() => call($20, "test", [2]), 10);
-
-// table_init.wast:1598
-assert_return(() => call($20, "test", [3]), 11);
-
-// table_init.wast:1599
-assert_return(() => call($20, "test", [4]), 12);
-
-// table_init.wast:1600
-assert_return(() => call($20, "test", [5]), 13);
-
-// table_init.wast:1601
-assert_return(() => call($20, "test", [6]), 14);
-
-// table_init.wast:1602
-assert_return(() => call($20, "test", [7]), 15);
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
index 65e7a84c45..a41d69c0af 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
@@ -121,13 +121,13 @@ load("test/mjsunit/wasm/exceptions-utils.js");
})();
// Test throwing/catching an encapsulated exception type value.
-(function TestThrowCatchExceptRef() {
+(function TestThrowCatchExnRef() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_e);
builder.addFunction("throw_catch_param", kSig_e_e)
.addBody([
- kExprTry, kWasmExceptRef,
+ kExprTry, kWasmExnRef,
kExprGetLocal, 0,
kExprThrow, except,
kExprCatch,
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-global.js b/deps/v8/test/mjsunit/wasm/exceptions-global.js
index c48a8e41ef..c3f208ca16 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-global.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-global.js
@@ -9,41 +9,41 @@
load("test/mjsunit/wasm/wasm-module-builder.js");
-// First we just test that "except_ref" global variables are allowed.
-(function TestGlobalExceptRefSupported() {
+// First we just test that "exnref" global variables are allowed.
+(function TestGlobalExnRefSupported() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let g = builder.addGlobal(kWasmExceptRef);
- builder.addFunction("push_and_drop_except_ref", kSig_v_v)
+ let g = builder.addGlobal(kWasmExnRef);
+ builder.addFunction("push_and_drop_exnref", kSig_v_v)
.addBody([
kExprGetGlobal, g.index,
kExprDrop,
]).exportFunc();
let instance = builder.instantiate();
- assertDoesNotThrow(instance.exports.push_and_drop_except_ref);
+ assertDoesNotThrow(instance.exports.push_and_drop_exnref);
})();
-// Test default value that global "except_ref" variables are initialized with.
-(function TestGlobalExceptRefDefaultValue() {
+// Test default value that global "exnref" variables are initialized with.
+(function TestGlobalExnRefDefaultValue() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let g = builder.addGlobal(kWasmExceptRef);
- builder.addFunction('push_and_return_except_ref', kSig_e_v)
+ let g = builder.addGlobal(kWasmExnRef);
+ builder.addFunction('push_and_return_exnref', kSig_e_v)
.addBody([kExprGetGlobal, g.index])
.exportFunc();
let instance = builder.instantiate();
- assertEquals(null, instance.exports.push_and_return_except_ref());
+ assertEquals(null, instance.exports.push_and_return_exnref());
})();
-// Test storing a caught exception into an exported mutable "except_ref" global.
-(function TestGlobalExceptRefSetCaught() {
+// Test storing a caught exception into an exported mutable "exnref" global.
+(function TestGlobalExnRefSetCaught() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
let except = builder.addException(kSig_v_i);
- let g = builder.addGlobal(kWasmExceptRef, true).exportAs("exn");
- builder.addFunction('catch_and_set_except_ref', kSig_v_i)
+ let g = builder.addGlobal(kWasmExnRef, true).exportAs("exn");
+ builder.addFunction('catch_and_set_exnref', kSig_v_i)
.addBody([
kExprTry, kWasmStmt,
kExprGetLocal, 0,
@@ -54,18 +54,18 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
]).exportFunc();
let instance = builder.instantiate();
- assertDoesNotThrow(() => instance.exports.catch_and_set_except_ref(23));
+ assertDoesNotThrow(() => instance.exports.catch_and_set_exnref(23));
let exception = instance.exports.exn.value; // Exported mutable global.
assertInstanceof(exception, WebAssembly.RuntimeError);
assertEquals(except, %GetWasmExceptionId(exception, instance));
})();
-// Test storing a parameter into an exported mutable "except_ref" global.
-(function TestGlobalExceptRefSetParameter() {
+// Test storing a parameter into an exported mutable "exnref" global.
+(function TestGlobalExnRefSetParameter() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let g = builder.addGlobal(kWasmExceptRef, true).exportAs("exn");
- builder.addFunction('set_param_except_ref', kSig_v_e)
+ let g = builder.addGlobal(kWasmExnRef, true).exportAs("exn");
+ builder.addFunction('set_param_exnref', kSig_v_e)
.addBody([
kExprTry, kWasmStmt,
kExprGetLocal, 0,
@@ -77,16 +77,16 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let exception = "my fancy exception";
let instance = builder.instantiate();
- assertDoesNotThrow(() => instance.exports.set_param_except_ref(exception));
+ assertDoesNotThrow(() => instance.exports.set_param_exnref(exception));
assertEquals(exception, instance.exports.exn.value);
})();
-// Test loading an imported "except_ref" global and re-throwing the exception.
-(function TestGlobalExceptRefGetImportedAndRethrow() {
+// Test loading an imported "exnref" global and re-throwing the exception.
+(function TestGlobalExnRefGetImportedAndRethrow() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let g_index = builder.addImportedGlobal("m", "exn", kWasmExceptRef);
- builder.addFunction('rethrow_except_ref', kSig_v_v)
+ let g_index = builder.addImportedGlobal("m", "exn", kWasmExnRef);
+ builder.addFunction('rethrow_exnref', kSig_v_v)
.addBody([
kExprGetGlobal, g_index,
kExprRethrow,
@@ -94,15 +94,15 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let exception = "my fancy exception";
let instance = builder.instantiate({ "m": { "exn": exception }});
- assertThrowsEquals(() => instance.exports.rethrow_except_ref(), exception);
+ assertThrowsEquals(() => instance.exports.rethrow_exnref(), exception);
})();
-// Test loading an exported mutable "except_ref" being changed from the outside.
-(function TestGlobalExceptRefGetExportedMutableAndRethrow() {
+// Test loading an exported mutable "exnref" being changed from the outside.
+(function TestGlobalExnRefGetExportedMutableAndRethrow() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let g = builder.addGlobal(kWasmExceptRef, true).exportAs("exn");
- builder.addFunction('rethrow_except_ref', kSig_v_v)
+ let g = builder.addGlobal(kWasmExnRef, true).exportAs("exn");
+ builder.addFunction('rethrow_exnref', kSig_v_v)
.addBody([
kExprGetGlobal, g.index,
kExprRethrow,
@@ -110,45 +110,45 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let instance = builder.instantiate();
let exception1 = instance.exports.exn.value = "my fancy exception";
- assertThrowsEquals(() => instance.exports.rethrow_except_ref(), exception1);
+ assertThrowsEquals(() => instance.exports.rethrow_exnref(), exception1);
let exception2 = instance.exports.exn.value = "an even fancier exception";
- assertThrowsEquals(() => instance.exports.rethrow_except_ref(), exception2);
+ assertThrowsEquals(() => instance.exports.rethrow_exnref(), exception2);
})();
// TODO(mstarzinger): Add the following test once proposal makes it clear how
// far interaction with the mutable globals proposal is intended to go.
-// Test loading an imported mutable "except_ref" being changed from the outside.
-/*(function TestGlobalExceptRefGetImportedMutableAndRethrow() {
+// Test loading an imported mutable "exnref" being changed from the outside.
+/*(function TestGlobalExnRefGetImportedMutableAndRethrow() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let g_index = builder.addImportedGlobal("m", "exn", kWasmExceptRef, true);
- builder.addFunction('rethrow_except_ref', kSig_v_v)
+ let g_index = builder.addImportedGlobal("m", "exn", kWasmExnRef, true);
+ builder.addFunction('rethrow_exnref', kSig_v_v)
.addBody([
kExprGetGlobal, g_index,
kExprRethrow,
]).exportFunc();
let exception1 = "my fancy exception";
- let desc = { value: 'except_ref', mutable: true };
+ let desc = { value: 'exnref', mutable: true };
let mutable_global = new WebAssembly.Global(desc, exception1);
let instance = builder.instantiate({ "m": { "exn": mutable_global }});
- assertThrowsEquals(() => instance.exports.rethrow_except_ref(), exception1);
+ assertThrowsEquals(() => instance.exports.rethrow_exnref(), exception1);
let exception2 = mutable_global.value = "an even fancier exception";
- assertThrowsEquals(() => instance.exports.rethrow_except_ref(), exception2);
+ assertThrowsEquals(() => instance.exports.rethrow_exnref(), exception2);
})();*/
-// Test custom initialization index for a global "except_ref" variable.
-(function TestGlobalExceptRefInitIndex() {
+// Test custom initialization index for a global "exnref" variable.
+(function TestGlobalExnRefInitIndex() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- let g1_index = builder.addImportedGlobal("m", "exn", kWasmExceptRef);
- let g2 = builder.addGlobal(kWasmExceptRef);
+ let g1_index = builder.addImportedGlobal("m", "exn", kWasmExnRef);
+ let g2 = builder.addGlobal(kWasmExnRef);
g2.init_index = g1_index; // Initialize {g2} to equal {g1}.
- builder.addFunction('push_and_return_except_ref', kSig_e_v)
+ builder.addFunction('push_and_return_exnref', kSig_e_v)
.addBody([kExprGetGlobal, g2.index])
.exportFunc();
let exception = { x: "my fancy exception" };
let instance = builder.instantiate({ "m": { "exn": exception }});
- assertSame(exception, instance.exports.push_and_return_except_ref());
+ assertSame(exception, instance.exports.push_and_return_exnref());
})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index d8a25c3503..7d53037269 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -7,11 +7,11 @@
load("test/mjsunit/wasm/wasm-module-builder.js");
load("test/mjsunit/wasm/exceptions-utils.js");
-// First we just test that "except_ref" local variables are allowed.
-(function TestLocalExceptRef() {
+// First we just test that "exnref" local variables are allowed.
+(function TestLocalExnRef() {
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
- builder.addFunction("push_and_drop_except_ref", kSig_v_v)
+ builder.addFunction("push_and_drop_exnref", kSig_v_v)
.addLocals({except_count: 1})
.addBody([
kExprGetLocal, 0,
@@ -19,7 +19,7 @@ load("test/mjsunit/wasm/exceptions-utils.js");
]).exportFunc();
let instance = builder.instantiate();
- assertDoesNotThrow(instance.exports.push_and_drop_except_ref);
+ assertDoesNotThrow(instance.exports.push_and_drop_exnref);
})();
// The following method doesn't attempt to catch an raised exception.
diff --git a/deps/v8/test/mjsunit/wasm/export-identity.js b/deps/v8/test/mjsunit/wasm/export-identity.js
new file mode 100644
index 0000000000..2b9e9acc88
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/export-identity.js
@@ -0,0 +1,46 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function createExport(fun) {
+ let builder = new WasmModuleBuilder();
+ let fun_index = builder.addImport("m", "fun", kSig_i_v)
+ builder.addExport("fun", fun_index);
+ let instance = builder.instantiate({ m: { fun: fun }});
+ return instance.exports.fun;
+}
+
+// Test that re-exporting a generic JavaScript function changes identity, as
+// the resulting export is an instance of {WebAssembly.Function} instead.
+(function TestReExportOfJS() {
+ print(arguments.callee.name);
+ function fun() { return 7 }
+ let exported = createExport(fun);
+ assertNotSame(exported, fun);
+ assertEquals(7, exported());
+ assertEquals(7, fun());
+})();
+
+// Test that re-exporting and existing {WebAssembly.Function} that represents
+// regular WebAssembly functions preserves identity.
+(function TestReReExportOfWasm() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('fun', kSig_i_v).addBody([kExprI32Const, 9]).exportFunc();
+ let fun = builder.instantiate().exports.fun;
+ let exported = createExport(fun);
+ assertSame(exported, fun);
+ assertEquals(9, fun());
+})();
+
+// Test that re-exporting and existing {WebAssembly.Function} that represents
+// generic JavaScript functions preserves identity.
+(function TestReReExportOfJS() {
+ print(arguments.callee.name);
+ let fun = createExport(() => 11)
+ let exported = createExport(fun);
+ assertSame(exported, fun);
+ assertEquals(11, fun());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js
new file mode 100644
index 0000000000..f9275d7ccc
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table-interpreter.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-anyref --experimental-wasm-return-call
+// Flags: --wasm-interpret-all
+
+// This is just a wrapper for an existing reference types test case that runs
+// with the --wasm-interpret-all flag added. If we ever decide to add a test
+// variant for this, then we can remove this file.
+
+load("test/mjsunit/wasm/indirect-call-non-zero-table.js");
diff --git a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
index b5665d1654..573e1e1d9e 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter-mixed.js
@@ -184,8 +184,8 @@ function redirectToInterpreter(
checkStack(stripPath(e.stack), [
'Error: i=8', // -
/^ at imp \(file:\d+:29\)$/, // -
- ' at plus_one (wasm-function[1]:6)', // -
- ' at plus_two (wasm-function[1]:3)', // -
+ ' at plus_one (wasm-function[1]:0x3b)', // -
+ ' at plus_two (wasm-function[1]:0x3e)', // -
/^ at testStackTraceThroughCWasmEntry \(file:\d+:25\)$/, // -
/^ at file:\d+:3$/
]);
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index 970e71a646..c1c03a4dd0 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -39,7 +39,7 @@ function checkStack(stack, expected_lines) {
checkStack(stripPath(stack), [
'Error: test imported stack', // -
/^ at func \(interpreter.js:\d+:28\)$/, // -
- ' at main (wasm-function[1]:1)', // -
+ ' at main (wasm-function[1]:0x32)', // -
/^ at testCallImported \(interpreter.js:\d+:22\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
@@ -103,8 +103,8 @@ function checkStack(stack, expected_lines) {
assertEquals(interpreted_before + 2, %WasmNumInterpretedCalls(instance));
checkStack(stripPath(stack), [
'RuntimeError: unreachable', // -
- ' at foo (wasm-function[0]:3)', // -
- ' at main (wasm-function[1]:2)', // -
+ ' at foo (wasm-function[0]:0x27)', // -
+ ' at main (wasm-function[1]:0x2c)', // -
/^ at testTrap \(interpreter.js:\d+:24\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
@@ -136,7 +136,7 @@ function checkStack(stack, expected_lines) {
checkStack(stripPath(stack), [
'Error: thrown from imported function', // -
/^ at func \(interpreter.js:\d+:11\)$/, // -
- ' at main (wasm-function[1]:1)', // -
+ ' at main (wasm-function[1]:0x32)', // -
/^ at testThrowFromImport \(interpreter.js:\d+:24\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
@@ -218,10 +218,10 @@ function checkStack(stack, expected_lines) {
for (var e = 0; e < stacks.length; ++e) {
expected = ['Error: reentrant interpreter test #' + e];
expected.push(/^ at func \(interpreter.js:\d+:17\)$/);
- expected.push(' at main (wasm-function[1]:3)');
+ expected.push(' at main (wasm-function[1]:0x36)');
for (var k = e; k > 0; --k) {
expected.push(/^ at func \(interpreter.js:\d+:33\)$/);
- expected.push(' at main (wasm-function[1]:3)');
+ expected.push(' at main (wasm-function[1]:0x36)');
}
expected.push(
/^ at testReentrantInterpreter \(interpreter.js:\d+:22\)$/);
@@ -296,8 +296,8 @@ function checkStack(stack, expected_lines) {
if (!(e instanceof TypeError)) throw e;
checkStack(stripPath(e.stack), [
'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
- ' at direct (wasm-function[1]:1)', // -
- ' at main (wasm-function[3]:3)', // -
+ ' at direct (wasm-function[1]:0x55)', // -
+ ' at main (wasm-function[3]:0x64)', // -
/^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
@@ -309,8 +309,8 @@ function checkStack(stack, expected_lines) {
if (!(e instanceof TypeError)) throw e;
checkStack(stripPath(e.stack), [
'TypeError: ' + kTrapMsgs[kTrapTypeError], // -
- ' at indirect (wasm-function[2]:3)', // -
- ' at main (wasm-function[3]:3)', // -
+ ' at indirect (wasm-function[2]:0x5c)', // -
+ ' at main (wasm-function[3]:0x64)', // -
/^ at testIllegalImports \(interpreter.js:\d+:22\)$/, // -
/^ at interpreter.js:\d+:3$/
]);
@@ -358,8 +358,8 @@ function checkStack(stack, expected_lines) {
if (!(e instanceof RangeError)) throw e;
checkStack(stripPath(e.stack), [
'RangeError: Maximum call stack size exceeded',
- ' at main (wasm-function[0]:0)'
- ].concat(Array(9).fill(' at main (wasm-function[0]:2)')));
+ ' at main (wasm-function[0]:0x20)'
+ ].concat(Array(9).fill(' at main (wasm-function[0]:0x22)')));
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js b/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js
new file mode 100644
index 0000000000..8721d8d066
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-gc-stress.js
@@ -0,0 +1,37 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function AllocMemory(pages, max = pages) {
+ let m =
+ new WebAssembly.Memory({initial : pages, maximum : max, shared : true});
+ let v = new Int32Array(m.buffer);
+ return {memory : m, view : v};
+}
+
+function RunSomeAllocs(total, retained, pages, max = pages) {
+ print(`-------iterations = ${total}, retained = $ { retained } -------`);
+ var array = new Array(retained);
+ for (var i = 0; i < total; i++) {
+ if ((i % 25) == 0)
+ print(`iteration $ { i }`);
+ let pair = AllocMemory(pages, max);
+ // For some iterations, retain the memory, view, or both.
+ switch (i % 3) {
+ case 0:
+ pair.memory = null;
+ break;
+ case 1:
+ pair.view = null;
+ break;
+ case 2:
+ break;
+ }
+ array[i % retained] = pair;
+ }
+}
+
+RunSomeAllocs(10, 1, 1, 1);
+RunSomeAllocs(100, 3, 1, 1);
+RunSomeAllocs(1000, 10, 1, 1);
+RunSomeAllocs(10000, 20, 1, 1);
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js b/deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js
new file mode 100644
index 0000000000..aa9088ecbc
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-worker-explicit-gc-stress.js
@@ -0,0 +1,33 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads --expose-gc
+
+load("test/mjsunit/worker-ping-test.js");
+
+let kDisabledAbort = false; // TODO(9380): enable abort for this test
+
+let config = {
+ numThings: 4, // size of circular buffer
+ numWorkers: 4, // number of workers
+ numMessages: 500, // number of messages sent to each worker
+ allocInterval: 11, // interval for allocating new things per worker
+ traceScript: false, // print the script
+ traceAlloc: true, // print each allocation attempt
+ traceIteration: 10, // print diagnostics every so many iterations
+ abortOnFail: kDisabledAbort, // kill worker if allocation fails
+
+ AllocThing: function AllocThing(id) {
+ let pages = 1, max = 1;
+ return new WebAssembly.Memory({initial : pages, maximum : max, shared : true});
+ },
+ BeforeSend: function BeforeSend(msg) {
+ gc();
+ },
+ BeforeReceive: function BeforeReceive(msg) {
+ gc();
+ }
+}
+
+RunWorkerPingTest(config);
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc-stress.js b/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc-stress.js
new file mode 100644
index 0000000000..6f244cfe62
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc-stress.js
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+load("test/mjsunit/worker-ping-test.js");
+
+let kDisabledAbort = false; // TODO(9380): enable abort for this test
+
+let config = {
+ numThings: 4, // size of circular buffer
+ numWorkers: 4, // number of workers
+ numMessages: 1000, // number of messages sent to each worker
+ allocInterval: 11, // interval for allocating new things per worker
+ traceScript: false, // print the script
+ traceAlloc: true, // print each allocation attempt
+ traceIteration: 10, // print diagnostics every so many iterations
+ abortOnFail: kDisabledAbort, // kill worker if allocation fails
+
+ AllocThing: function AllocThing(id) {
+ let pages = 1, max = 1;
+ return new WebAssembly.Memory({initial : pages, maximum : max, shared : true});
+ },
+}
+
+RunWorkerPingTest(config);
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js b/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js
new file mode 100644
index 0000000000..376917b6ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-worker-gc.js
@@ -0,0 +1,34 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+const kNumMessages = 5000;
+
+function AllocMemory(pages = 1, max = pages) {
+ return new WebAssembly.Memory({initial : pages, maximum : max, shared : true});
+}
+
+(function RunTest() {
+ let worker = new Worker(
+`onmessage =
+ function(msg) {
+ if (msg.memory) postMessage({memory : msg.memory});
+}`, {type : 'string'});
+
+ let time = performance.now();
+
+ for (let i = 0; i < kNumMessages; i++) {
+ let now = performance.now();
+ print(`iteration ${i}, Δ = ${(now - time).toFixed(3)} ms`);
+ time = now;
+
+ let memory = AllocMemory();
+ worker.postMessage({memory : memory});
+ let msg = worker.getMessage();
+ if (msg.memory) {
+ assertInstanceof(msg.memory, WebAssembly.Memory);
+ }
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/shared-memory-worker-stress.js b/deps/v8/test/mjsunit/wasm/shared-memory-worker-stress.js
new file mode 100644
index 0000000000..53ec4a5f4b
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/shared-memory-worker-stress.js
@@ -0,0 +1,27 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-threads
+
+load("test/mjsunit/worker-ping-test.js");
+
+// TODO(v8:9380): increase {numThings} and {numWorkers} when stress-opt mode
+// no longer leaks wasm memories.
+let config = {
+ numThings: 2, // size of circular buffer
+ numWorkers: 2, // number of workers
+ numMessages: 2000, // number of messages sent to each worker
+ allocInterval: 10001, // interval for allocating new things per worker
+ traceScript: false, // print the script
+ traceAlloc: true, // print each allocation attempt
+ traceIteration: 10, // print diagnostics every so many iterations
+ abortOnFail: true, // kill worker if allocation fails
+
+ AllocThing: function AllocThing(id) {
+ let pages = 1, max = 1;
+ return new WebAssembly.Memory({initial : pages, maximum : max, shared : true});
+ },
+}
+
+RunWorkerPingTest(config);
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index 484cee0acd..7e592e7ef2 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -23,7 +23,7 @@ function verifyStack(frames, expected) {
assertContains(exp[4], frames[i].getFileName(), "["+i+"].getFileName()");
var toString;
if (exp[0]) {
- toString = "wasm-function[" + exp[2] + "]:" + exp[3];
+ toString = "wasm-function[" + exp[2] + "]:" + exp[5];
if (exp[1] !== null) toString = exp[1] + " (" + toString + ")";
} else {
toString = exp[4] + ":" + exp[2] + ":";
@@ -70,7 +70,7 @@ var module = builder.instantiate({mod: {func: STACK}});
var expected_string = 'Error\n' +
// The line numbers below will change as this test gains / loses lines..
' at STACK (stack.js:38:11)\n' + // --
- ' at main (wasm-function[1]:1)\n' + // --
+ ' at main (wasm-function[1]:0x86)\n' + // --
' at testSimpleStack (stack.js:77:18)\n' + // --
' at stack.js:79:3'; // --
@@ -88,9 +88,9 @@ Error.prepareStackTrace = function(error, frames) {
module.exports.main();
verifyStack(stack, [
- // isWasm function line pos file
+ // isWasm function line pos file offset
[ false, "STACK", 38, 0, "stack.js"],
- [ true, "main", 1, 1, null],
+ [ true, "main", 1, 1, null, '0x86'],
[ false, "testStackFrames", 88, 0, "stack.js"],
[ false, null, 97, 0, "stack.js"]
]);
@@ -103,8 +103,8 @@ Error.prepareStackTrace = function(error, frames) {
} catch (e) {
assertContains("unreachable", e.message);
verifyStack(e.stack, [
- // isWasm function line pos file
- [ true, "exec_unreachable", 2, 1, null],
+ // isWasm function line pos file offset
+ [ true, "exec_unreachable", 2, 1, null, '0x8b'],
[ false, "testWasmUnreachable", 101, 0, "stack.js"],
[ false, null, 112, 0, "stack.js"]
]);
@@ -118,9 +118,9 @@ Error.prepareStackTrace = function(error, frames) {
} catch (e) {
assertContains("out of bounds", e.message);
verifyStack(e.stack, [
- // isWasm function line pos file
- [ true, null, 3, 3, null],
- [ true, "call_mem_out_of_bounds", 4, 1, null],
+ // isWasm function line pos file offset
+ [ true, null, 3, 3, null, '0x91'],
+ [ true, "call_mem_out_of_bounds", 4, 1, null, '0x97'],
[ false, "testWasmMemOutOfBounds", 116, 0, "stack.js"],
[ false, null, 128, 0, "stack.js"]
]);
@@ -147,11 +147,11 @@ Error.prepareStackTrace = function(error, frames) {
assertEquals("Maximum call stack size exceeded", e.message, "trap reason");
assertTrue(e.stack.length >= 4, "expected at least 4 stack entries");
verifyStack(e.stack.splice(0, 4), [
- // isWasm function line pos file
- [ true, "recursion", 0, 0, null],
- [ true, "recursion", 0, 3, null],
- [ true, "recursion", 0, 3, null],
- [ true, "recursion", 0, 3, null]
+ // isWasm function line pos file offset
+ [ true, "recursion", 0, 0, null, '0x34'],
+ [ true, "recursion", 0, 3, null, '0x37'],
+ [ true, "recursion", 0, 3, null, '0x37'],
+ [ true, "recursion", 0, 3, null, '0x37']
]);
}
})();
@@ -173,11 +173,12 @@ Error.prepareStackTrace = function(error, frames) {
fail('expected wasm exception');
} catch (e) {
assertEquals('unreachable', e.message, 'trap reason');
+ let hexOffset = '0x' + (unreachable_pos + 0x25).toString(16);
verifyStack(e.stack, [
- // isWasm, function, line, pos, file
- [true, 'main', 0, unreachable_pos + 1, null], // -
- [false, 'testBigOffset', 172, 0, 'stack.js'], //-
- [false, null, 183, 0, 'stack.js']
+ // isWasm, function, line, pos, file, offset
+ [true, 'main', 0, unreachable_pos + 1, null, hexOffset], // -
+ [false, 'testBigOffset', 172, 0, 'stack.js'], //-
+ [false, null, 184, 0, 'stack.js']
]);
}
})();
diff --git a/deps/v8/test/mjsunit/wasm/table-access-interpreter.js b/deps/v8/test/mjsunit/wasm/table-access-interpreter.js
new file mode 100644
index 0000000000..561ac5aca4
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-access-interpreter.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-anyref
+// Flags: --wasm-interpret-all
+
+// This is just a wrapper for an existing reference types test case that runs
+// with the --wasm-interpret-all flag added. If we ever decide to add a test
+// variant for this, then we can remove this file.
+
+load("test/mjsunit/wasm/table-access.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-access.js b/deps/v8/test/mjsunit/wasm/table-access.js
index 3203b76d0b..b91934d949 100644
--- a/deps/v8/test/mjsunit/wasm/table-access.js
+++ b/deps/v8/test/mjsunit/wasm/table-access.js
@@ -13,12 +13,12 @@ function addTableWithAccessors(builder, type, size, name) {
builder.addFunction('set_' + name, set_sig)
.addBody([kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprSetTable, table.index])
+ kExprTableSet, table.index])
.exportFunc();
const get_sig = makeSig([kWasmI32], [type]);
builder.addFunction('get_' + name, get_sig)
- .addBody([kExprGetLocal, 0, kExprGetTable, table.index])
+ .addBody([kExprGetLocal, 0, kExprTableGet, table.index])
.exportFunc();
}
@@ -66,7 +66,7 @@ const dummy_func = exports.set_table_func1;
assertTraps(kTrapTableOutOfBounds, () => exports.set_table_ref1(44, dummy_ref));
})();
-(function testSetTable() {
+(function testTableSet() {
print(arguments.callee.name);
assertSame(null, exports.get_table_func1(3));
exports.set_table_func1(3, dummy_func);
@@ -109,10 +109,10 @@ const dummy_func = exports.set_table_func1;
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, value2]);
const f3 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, value3]);
builder.addFunction('get_t1', kSig_a_i)
- .addBody([kExprGetLocal, 0, kExprGetTable, t1])
+ .addBody([kExprGetLocal, 0, kExprTableGet, t1])
.exportFunc();
builder.addFunction('get_t2', kSig_a_i)
- .addBody([kExprGetLocal, 0, kExprGetTable, t2])
+ .addBody([kExprGetLocal, 0, kExprTableGet, t2])
.exportFunc();
const offset1 = 3;
@@ -143,7 +143,7 @@ const dummy_func = exports.set_table_func1;
.addBody([
kExprI32Const, index, // entry index
kExprRefFunc, function_index, // function reference
- kExprSetTable, table_index, // --
+ kExprTableSet, table_index, // --
kExprI32Const, index, // entry index
kExprCallIndirect, sig_index, table_index // --
diff --git a/deps/v8/test/mjsunit/wasm/table-copy-anyref.js b/deps/v8/test/mjsunit/wasm/table-copy-anyref.js
new file mode 100644
index 0000000000..d5cddb3ed6
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-copy-anyref.js
@@ -0,0 +1,73 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-bulk-memory --experimental-wasm-anyref
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let kTableSize = 5;
+
+let table = new WebAssembly.Table(
+ {element: 'anyref', initial: kTableSize, maximum: kTableSize});
+
+let builder = new WasmModuleBuilder();
+builder.addImportedTable('m', 'table', kTableSize, kTableSize, kWasmAnyRef);
+builder.addTable(kWasmAnyFunc, 1000);
+
+builder.addFunction('copy', kSig_v_iii)
+ .addBody([
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2, kNumericPrefix,
+ kExprTableCopy, kTableZero, kTableZero
+ ])
+ .exportFunc();
+
+const instance = builder.instantiate({m: {table: table}});
+
+function resetTable() {
+ table.set(0, 1000);
+ table.set(1, 1001);
+ table.set(2, 1002);
+ table.set(3, 1003);
+ table.set(4, 1004);
+}
+
+function assertTable(values) {
+ for (let i = 0; i < kTableSize; ++i) {
+ assertEquals(table.get(i), values[i]);
+ }
+}
+
+resetTable();
+instance.exports.copy(0, 1, 1);
+assertTable([1001, 1001, 1002, 1003, 1004]);
+
+resetTable();
+instance.exports.copy(0, 1, 2);
+assertTable([1001, 1002, 1002, 1003, 1004]);
+
+resetTable();
+instance.exports.copy(3, 0, 2);
+assertTable([1000, 1001, 1002, 1000, 1001]);
+
+// Non-overlapping, src < dst. Because of src < dst, we copy backwards.
+// Therefore the first access already traps, and the table is not changed.
+resetTable();
+assertTraps(kTrapTableOutOfBounds, () => instance.exports.copy(3, 0, 3));
+assertTable([1000, 1001, 1002, 1003, 1004]);
+
+// Non-overlapping, dst < src.
+resetTable();
+assertTraps(kTrapTableOutOfBounds, () => instance.exports.copy(0, 4, 2));
+assertTable([1004, 1001, 1002, 1003, 1004]);
+
+// Overlapping, src < dst. This is required to copy backward, but the first
+// access will be out-of-bounds, so nothing changes.
+resetTable();
+assertTraps(kTrapTableOutOfBounds, () => instance.exports.copy(3, 0, 99));
+assertTable([1000, 1001, 1002, 1003, 1004]);
+
+// Overlapping, dst < src.
+resetTable();
+assertTraps(kTrapTableOutOfBounds, () => instance.exports.copy(0, 1, 99));
+assertTable([1001, 1002, 1003, 1004, 1004]);
diff --git a/deps/v8/test/mjsunit/wasm/table-fill-interpreter.js b/deps/v8/test/mjsunit/wasm/table-fill-interpreter.js
new file mode 100644
index 0000000000..ed9c48b406
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-fill-interpreter.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-anyref
+// Flags: --wasm-interpret-all
+
+// This is just a wrapper for an existing reference types test case that runs
+// with the --wasm-interpret-all flag added. If we ever decide to add a test
+// variant for this, then we can remove this file.
+
+load("test/mjsunit/wasm/table-fill.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-fill.js b/deps/v8/test/mjsunit/wasm/table-fill.js
index 4f61eee4d5..ed5938f908 100644
--- a/deps/v8/test/mjsunit/wasm/table-fill.js
+++ b/deps/v8/test/mjsunit/wasm/table-fill.js
@@ -38,7 +38,7 @@ for (index of [import_ref, internal_ref]) {
.exportFunc();
builder.addFunction(`get${index}`, kSig_r_i)
- .addBody([kExprGetLocal, 0, kExprGetTable, index])
+ .addBody([kExprGetLocal, 0, kExprTableGet, index])
.exportFunc();
}
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js
new file mode 100644
index 0000000000..15bbc63a21
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm-interpreter.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-anyref
+// Flags: --wasm-interpret-all
+
+// This is just a wrapper for an existing reference types test case that runs
+// with the --wasm-interpret-all flag added. If we ever decide to add a test
+// variant for this, then we can remove this file.
+
+load("test/mjsunit/wasm/table-grow-from-wasm.js");
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
index 7a7d916dea..5b37af32c2 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
@@ -38,7 +38,7 @@ function testGrowInternalAnyRefTable(table_index) {
.exportFunc();
builder.addFunction('get', kSig_r_i)
- .addBody([kExprGetLocal, 0, kExprGetTable, table_index])
+ .addBody([kExprGetLocal, 0, kExprTableGet, table_index])
.exportFunc();
const instance = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js b/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
index adaf8e7185..0b857fb42f 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection-with-anyref.js
@@ -34,3 +34,48 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals(false, type.mutable);
assertEquals(2, Object.getOwnPropertyNames(type).length);
})();
+
+// This is an extension of "type-reflection.js/TestFunctionTableSetAndCall" to
+// multiple table indexes. If --experimental-wasm-anyref is enabled by default
+// this test case can supersede the other one.
+(function TestFunctionMultiTableSetAndCall() {
+ let builder = new WasmModuleBuilder();
+ let v1 = 7; let v2 = 9; let v3 = 0.0;
+ let f1 = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => v1);
+ let f2 = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => v2);
+ let f3 = new WebAssembly.Function({parameters:[], results:["f64"]}, _ => v3);
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 2});
+ let table_index0 = builder.addImportedTable("m", "table", 2);
+ let table_index1 = builder.addTable(kWasmAnyFunc, 1).exportAs("tbl").index;
+ let sig_index = builder.addType(kSig_i_v);
+ table.set(0, f1);
+ builder.addFunction('call0', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index, table_index0
+ ])
+ .exportFunc();
+ builder.addFunction('call1', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index, table_index1
+ ])
+ .exportFunc();
+ let instance = builder.instantiate({ m: { table: table }});
+
+ // Test table #0 first.
+ assertEquals(v1, instance.exports.call0(0));
+ table.set(1, f2);
+ assertEquals(v2, instance.exports.call0(1));
+ table.set(1, f3);
+ assertTraps(kTrapFuncSigMismatch, () => instance.exports.call0(1));
+
+ // Test table #1 next.
+ assertTraps(kTrapFuncSigMismatch, () => instance.exports.call1(0));
+ instance.exports.tbl.set(0, f1);
+ assertEquals(v1, instance.exports.call1(0));
+ instance.exports.tbl.set(0, f2);
+ assertEquals(v2, instance.exports.call1(0));
+ instance.exports.tbl.set(0, f3);
+ assertTraps(kTrapFuncSigMismatch, () => instance.exports.call1(0));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection.js b/deps/v8/test/mjsunit/wasm/type-reflection.js
index 77a58bc261..da9ef83fda 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection.js
@@ -239,6 +239,21 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
assertDoesNotThrow(() => fun());
})();
+(function TestFunctionTypeOfConstructedFunction() {
+ let testcases = [
+ {parameters:[], results:[]},
+ {parameters:["i32"], results:[]},
+ {parameters:["i64"], results:["i32"]},
+ {parameters:["f64", "f64", "i32"], results:[]},
+ {parameters:["f32"], results:["f32"]},
+ ];
+ testcases.forEach(function(expected) {
+ let fun = new WebAssembly.Function(expected, _ => 0);
+ let type = WebAssembly.Function.type(fun);
+ assertEquals(expected, type)
+ });
+})();
+
(function TestFunctionTypeOfExportedFunction() {
let testcases = [
[kSig_v_v, {parameters:[], results:[]}],
@@ -255,3 +270,86 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals(expected, type)
});
})();
+
+(function TestFunctionTableSetAndCall() {
+ let builder = new WasmModuleBuilder();
+ let fun1 = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => 7);
+ let fun2 = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => 9);
+ let fun3 = new WebAssembly.Function({parameters:[], results:["f64"]}, _ => 0);
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 2});
+ let table_index = builder.addImportedTable("m", "table", 2);
+ let sig_index = builder.addType(kSig_i_v);
+ table.set(0, fun1);
+ builder.addFunction('main', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index, table_index
+ ])
+ .exportFunc();
+ let instance = builder.instantiate({ m: { table: table }});
+ assertEquals(7, instance.exports.main(0));
+ table.set(1, fun2);
+ assertEquals(9, instance.exports.main(1));
+ table.set(1, fun3);
+ assertTraps(kTrapFuncSigMismatch, () => instance.exports.main(1));
+})();
+
+(function TestFunctionTableSetIncompatibleSig() {
+ let builder = new WasmModuleBuilder();
+ let fun = new WebAssembly.Function({parameters:[], results:["i64"]}, _ => 0);
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 2});
+ let table_index = builder.addImportedTable("m", "table", 2);
+ let sig_index = builder.addType(kSig_l_v);
+ table.set(0, fun);
+ builder.addFunction('main', kSig_v_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index, table_index,
+ kExprDrop
+ ])
+ .exportFunc();
+ let instance = builder.instantiate({ m: { table: table }});
+ assertThrows(
+ () => instance.exports.main(0), TypeError,
+ /wasm function signature contains illegal type/);
+ assertTraps(kTrapFuncSigMismatch, () => instance.exports.main(1));
+ table.set(1, fun);
+ assertThrows(
+ () => instance.exports.main(1), TypeError,
+ /wasm function signature contains illegal type/);
+})();
+
+(function TestFunctionModuleImportMatchingSig() {
+ let builder = new WasmModuleBuilder();
+ let fun = new WebAssembly.Function({parameters:[], results:["i32"]}, _ => 7);
+ let fun_index = builder.addImport("m", "fun", kSig_i_v)
+ builder.addFunction('main', kSig_i_v)
+ .addBody([
+ kExprCallFunction, fun_index
+ ])
+ .exportFunc();
+ let instance = builder.instantiate({ m: { fun: fun }});
+ assertEquals(7, instance.exports.main());
+})();
+
+(function TestFunctionModuleImportMismatchingSig() {
+ let builder = new WasmModuleBuilder();
+ let fun1 = new WebAssembly.Function({parameters:[], results:[]}, _ => 7);
+ let fun2 = new WebAssembly.Function({parameters:["i32"], results:[]}, _ => 8);
+ let fun3 = new WebAssembly.Function({parameters:[], results:["f32"]}, _ => 9);
+ let fun_index = builder.addImport("m", "fun", kSig_i_v)
+ builder.addFunction('main', kSig_i_v)
+ .addBody([
+ kExprCallFunction, fun_index
+ ])
+ .exportFunc();
+ assertThrows(
+ () => builder.instantiate({ m: { fun: fun1 }}), WebAssembly.LinkError,
+ /imported function does not match the expected type/);
+ assertThrows(
+ () => builder.instantiate({ m: { fun: fun2 }}), WebAssembly.LinkError,
+ /imported function does not match the expected type/);
+ assertThrows(
+ () => builder.instantiate({ m: { fun: fun3 }}), WebAssembly.LinkError,
+ /imported function does not match the expected type/);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 3f2f80ee2f..8e423bd24f 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -100,7 +100,7 @@ let kWasmF64 = 0x7c;
let kWasmS128 = 0x7b;
let kWasmAnyRef = 0x6f;
let kWasmAnyFunc = 0x70;
-let kWasmExceptRef = 0x68;
+let kWasmExnRef = 0x68;
let kExternalFunction = 0;
let kExternalTable = 1;
@@ -150,17 +150,17 @@ let kSig_f_d = makeSig([kWasmF64], [kWasmF32]);
let kSig_d_d = makeSig([kWasmF64], [kWasmF64]);
let kSig_r_r = makeSig([kWasmAnyRef], [kWasmAnyRef]);
let kSig_a_a = makeSig([kWasmAnyFunc], [kWasmAnyFunc]);
-let kSig_e_e = makeSig([kWasmExceptRef], [kWasmExceptRef]);
+let kSig_e_e = makeSig([kWasmExnRef], [kWasmExnRef]);
let kSig_i_r = makeSig([kWasmAnyRef], [kWasmI32]);
let kSig_v_r = makeSig([kWasmAnyRef], []);
let kSig_v_a = makeSig([kWasmAnyFunc], []);
-let kSig_v_e = makeSig([kWasmExceptRef], []);
+let kSig_v_e = makeSig([kWasmExnRef], []);
let kSig_v_rr = makeSig([kWasmAnyRef, kWasmAnyRef], []);
let kSig_v_aa = makeSig([kWasmAnyFunc, kWasmAnyFunc], []);
let kSig_r_v = makeSig([], [kWasmAnyRef]);
let kSig_a_v = makeSig([], [kWasmAnyFunc]);
let kSig_a_i = makeSig([kWasmI32], [kWasmAnyFunc]);
-let kSig_e_v = makeSig([], [kWasmExceptRef]);
+let kSig_e_v = makeSig([], [kWasmExnRef]);
function makeSig(params, results) {
return {params: params, results: results};
@@ -214,8 +214,8 @@ let kExprSetLocal = 0x21;
let kExprTeeLocal = 0x22;
let kExprGetGlobal = 0x23;
let kExprSetGlobal = 0x24;
-let kExprGetTable = 0x25;
-let kExprSetTable = 0x26;
+let kExprTableGet = 0x25;
+let kExprTableSet = 0x26;
let kExprI32LoadMem = 0x28;
let kExprI64LoadMem = 0x29;
let kExprF32LoadMem = 0x2a;
@@ -1087,7 +1087,7 @@ class WasmModuleBuilder {
section.emit_u8(kExprRefNull);
}
break;
- case kWasmExceptRef:
+ case kWasmExnRef:
section.emit_u8(kExprRefNull);
break;
}
@@ -1263,7 +1263,7 @@ class WasmModuleBuilder {
local_decls.push({count: l.anyfunc_count, type: kWasmAnyFunc});
}
if (l.except_count > 0) {
- local_decls.push({count: l.except_count, type: kWasmExceptRef});
+ local_decls.push({count: l.except_count, type: kWasmExnRef});
}
}
diff --git a/deps/v8/test/mjsunit/worker-ping-test.js b/deps/v8/test/mjsunit/worker-ping-test.js
new file mode 100644
index 0000000000..046e217edb
--- /dev/null
+++ b/deps/v8/test/mjsunit/worker-ping-test.js
@@ -0,0 +1,125 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A test utility for pinging objects back and forth among a pool of workers.
+// Use by calling {RunWorkerPingTest} with a {config} object.
+{
+// Reference config object for demonstrating the interface.
+let config = {
+ numThings: 4, // size of circular buffer
+ numWorkers: 4, // number of workers
+ numMessages: 100, // number of messages sent to each worker
+ allocInterval: 11, // interval for allocating new things per worker
+ traceScript: false, // print the script
+ traceAlloc: false, // print each allocation attempt
+ traceIteration: 10, // print diagnostics every so many iterations
+ abortOnFail: false, // kill worker if allocation fails
+
+ // Note that because the functions are appended to a worker script
+ // *as source*, they need to be named properly.
+
+ // The function that allocates things. Required.
+ AllocThing: function AllocThing(id) {
+ return new Array(2);
+ },
+ // Before message send behavior. Optional.
+ BeforeSend: function BeforeSend(msg) { },
+ // Before message reception behavior. Optional.
+ BeforeReceive: function BeforeReceive(msg) { },
+}
+}
+
+function RunWorkerPingTest(config) {
+ let workers = [];
+ let beforeSend = (typeof config.BeforeSend == "function") ?
+ config.BeforeSend :
+ function BeforeSend(msg) { };
+ let beforeReceive = (typeof config.BeforeReceive == "function") ?
+ config.BeforeReceive :
+ function BeforeReceive(msg) { };
+
+ // Each worker has a circular buffer of size {config.numThings}, recording
+ // received things into the buffer and responding with a previous thing.
+ // Every {config.allocInterval}, a worker creates a new thing by
+ // {config.AllocThing}.
+
+ let script =
+`const kNumThings = ${config.numThings};
+ const kAllocInterval = ${config.allocInterval};
+ let index = 0;
+ let total = 0;
+ let id = 0;
+ let things = new Array(kNumThings);
+ for (let i = 0; i < kNumThings; i++) {
+ things[i] = TryAllocThing();
+ }
+
+ function TryAllocThing() {
+ try {
+ let thing = AllocThing(id++);
+ ${config.traceAlloc ? "print(\"alloc success\");" : ""}
+ return thing;
+ } catch(e) {
+ ${config.abortOnFail ? "postMessage({error: e.toString()}); throw e;" : "" }
+ ${config.traceAlloc ? "print(\"alloc fail: \" + e);" : ""}
+ }
+ }
+
+ onmessage = function(msg) {
+ BeforeReceive(msg);
+ if (msg.thing !== undefined) {
+ let reply = things[index];
+ if ((total % kAllocInterval) == 0) {
+ reply = TryAllocThing();
+ }
+ things[index] = msg.thing;
+ postMessage({thing : reply});
+ index = (index + 1) % kNumThings;
+ total++;
+ }
+ }
+ ${config.AllocThing.toString()}
+ ${beforeReceive.toString()}
+ `;
+
+ if (config.traceScript) {
+ print("========== Worker script ==========");
+ print(script);
+ print("===================================");
+ }
+
+ for (let i = 0; i < config.numWorkers; i++) {
+ let worker = new Worker(script, {type : 'string'});
+ workers.push(worker);
+ }
+
+ let time = performance.now();
+
+ // The main thread posts {config.numMessages} messages to {config.numWorkers}
+ // workers, with each message containing a "thing" created by {config.AllocThing}.
+ let thing = config.AllocThing(-1);
+ for (let i = 0; i < config.numMessages; i++) {
+ if ((i % config.traceIteration) == 0) {
+ let now = performance.now();
+ print(`iteration ${i}, Δ = ${(now - time).toFixed(3)} ms`);
+ time = now;
+ }
+
+ for (let worker of workers) {
+ let msg = {thing: thing};
+ beforeSend(msg);
+ worker.postMessage(msg);
+ msg = worker.getMessage();
+ if (msg.thing) {
+ thing = msg.thing;
+ } else if (msg.error) {
+ worker.terminate();
+ throw msg.error;
+ }
+ }
+ }
+ for (let worker of workers) {
+ worker.terminate();
+ }
+}
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 611238d951..103c1334a1 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -110,6 +110,8 @@ static int DumpHeapConstants(const char* argv0) {
{
Isolate::Scope scope(isolate);
i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ i::ReadOnlyHeap* read_only_heap =
+ reinterpret_cast<i::Isolate*>(isolate)->read_only_heap();
i::PrintF("%s", kHeader);
#define DUMP_TYPE(T) i::PrintF(" %d: \"%s\",\n", i::T, #T);
i::PrintF("INSTANCE_TYPES = {\n");
@@ -121,13 +123,13 @@ static int DumpHeapConstants(const char* argv0) {
// Dump the KNOWN_MAP table to the console.
i::PrintF("\n# List of known V8 maps.\n");
i::PrintF("KNOWN_MAPS = {\n");
- i::ReadOnlyHeapIterator ro_iterator(heap->read_only_heap());
+ i::ReadOnlyHeapObjectIterator ro_iterator(read_only_heap);
for (i::HeapObject object = ro_iterator.Next(); !object.is_null();
object = ro_iterator.Next()) {
if (!object.IsMap()) continue;
DumpKnownMap(heap, i::Heap::GetSpaceName(i::RO_SPACE), object);
}
- i::HeapObjectIterator iterator(heap->map_space());
+ i::PagedSpaceObjectIterator iterator(heap->map_space());
for (i::HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
if (!object.IsMap()) continue;
@@ -140,7 +142,7 @@ static int DumpHeapConstants(const char* argv0) {
// Dump the KNOWN_OBJECTS table to the console.
i::PrintF("\n# List of known V8 objects.\n");
i::PrintF("KNOWN_OBJECTS = {\n");
- i::ReadOnlyHeapIterator ro_iterator(heap->read_only_heap());
+ i::ReadOnlyHeapObjectIterator ro_iterator(read_only_heap);
for (i::HeapObject object = ro_iterator.Next(); !object.is_null();
object = ro_iterator.Next()) {
// Skip read-only heap maps, they will be reported elsewhere.
@@ -148,9 +150,9 @@ static int DumpHeapConstants(const char* argv0) {
DumpKnownObject(heap, i::Heap::GetSpaceName(i::RO_SPACE), object);
}
- i::PagedSpaces spit(heap);
- for (i::PagedSpace* s = spit.next(); s != nullptr; s = spit.next()) {
- i::HeapObjectIterator it(s);
+ i::PagedSpaceIterator spit(heap);
+ for (i::PagedSpace* s = spit.Next(); s != nullptr; s = spit.Next()) {
+ i::PagedSpaceObjectIterator it(s);
// Code objects are generally platform-dependent.
if (s->identity() == i::CODE_SPACE || s->identity() == i::MAP_SPACE)
continue;
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 216d962ff3..5a1c89ac9e 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -126,6 +126,10 @@
# no_i18n build has an old ICU data and Georgian is treated as unicameral.
'ecma/String/15.5.4.12-3': [FAIL, ['no_i18n == True', PASS]],
+ # Expecations in this test don't match the spec.
+ # See also https://bugs.chromium.org/p/v8/issues/detail?id=9446
+ 'js1_5/Regress/regress-248444': [FAIL],
+
##################### SKIPPED TESTS #####################
# This test checks that we behave properly in an out-of-memory
diff --git a/deps/v8/test/preparser/OWNERS b/deps/v8/test/preparser/OWNERS
new file mode 100644
index 0000000000..98e641fd88
--- /dev/null
+++ b/deps/v8/test/preparser/OWNERS
@@ -0,0 +1,2 @@
+marja@chromium.org
+verwaest@chromium.org
diff --git a/deps/v8/test/test262/OWNERS b/deps/v8/test/test262/OWNERS
new file mode 100644
index 0000000000..246672d0ff
--- /dev/null
+++ b/deps/v8/test/test262/OWNERS
@@ -0,0 +1,2 @@
+adamk@chromium.org
+gsathya@chromium.org
diff --git a/deps/v8/test/test262/harness-adapt.js b/deps/v8/test/test262/harness-adapt.js
index 3d3ad7e8a0..55d1b445b0 100644
--- a/deps/v8/test/test262/harness-adapt.js
+++ b/deps/v8/test/test262/harness-adapt.js
@@ -98,7 +98,10 @@ function RealmOperators(realm) {
createRealm() {
return RealmOperators(Realm.createAllowCrossRealmAccess());
},
- global: Realm.eval(realm, 'this')
+ global: Realm.eval(realm, 'this'),
+ gc() {
+ v8GC();
+ }
};
$262.global.$262 = $262;
return $262;
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 0a231ef11d..54ba1579cc 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -75,10 +75,17 @@
'intl402/DateTimeFormat/prototype/format/format-function-name': [FAIL],
'intl402/Collator/prototype/compare/compare-function-name': [FAIL],
- # intl tests which require flags. https://bugs.chromium.org/p/v8/issues/detail?id=9154
+ # Intl tests which require flags.
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9154
'intl402/NumberFormat/numbering-system-options': ['--harmony-intl-add-calendar-numbering-system'],
'intl402/DateTimeFormat/numbering-system-calendar-options': ['--harmony-intl-add-calendar-numbering-system'],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9319
+ 'intl402/NumberFormat/prototype/resolvedOptions/order': [FAIL],
+
+ # crbug.com/v8/9483
+ 'intl402/NumberFormat/currencyDisplay-unit': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=9084
'intl402/supportedLocalesOf-consistent-with-resolvedOptions': [FAIL],
'intl402/fallback-locales-are-supported': [FAIL],
@@ -527,6 +534,37 @@
'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-function-expression': [FAIL],
'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-recursive': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8179
+ #
+ # These tests require exception handling support which is currently
+ # blocked on landing https://chromium-review.googlesource.com/c/v8/v8/+/1655655
+ 'built-ins/FinalizationGroup/FinalizationGroupCleanupIteratorPrototype/next-job-not-active-throws': [FAIL],
+ 'built-ins/FinalizationGroup/prototype/cleanupSome/poisoned-callback-throws': [FAIL],
+ 'built-ins/FinalizationGroup/prototype/cleanupSome/poisoned-cleanup-callback-throws': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8179
+ #
+ # The spec should be changed to make target === holding throw and then we should change these tests.
+ 'built-ins/FinalizationGroup/prototype/register/unregisterToken-same-as-holdings-and-target': [FAIL],
+ 'built-ins/FinalizationGroup/prototype/register/holdings-same-as-target': [FAIL],
+ 'built-ins/FinalizationGroup/prototype/register/return-undefined-register-itself': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=8179
+ #
+ # This test has target === holdings which throws, changing holdings to
+ # { y } makes the test pass.
+ 'built-ins/FinalizationGroup/prototype/cleanupSome/cleanup-prevented-with-reference': [FAIL],
+
+ # https://github.com/tc39/test262/issues/2256
+ 'built-ins/FinalizationGroup/prototype/cleanupSome/cleanup-prevented-with-unregister': [FAIL],
+
+ # https://github.com/tc39/test262/issues/2239
+ 'built-ins/WeakRef/prototype/deref/gc-cleanup-not-prevented-with-wr-deref': [FAIL],
+ 'built-ins/FinalizationGroup/prototype/cleanupSome/gc-cleanup-not-prevented-with-wr-deref': [FAIL],
+
+ # https://github.com/tc39/test262/issues/2255
+ 'built-ins/FinalizationGroup/prototype/cleanupSome/iterator-holdings-multiple-values': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# https://bugs.chromium.org/p/v8/issues/detail?id=7833
@@ -892,23 +930,6 @@
'language/expressions/dynamic-import/reuse-namespace-object': [SKIP],
'language/expressions/dynamic-import/reuse-namespace-object-from-import': [SKIP],
'language/expressions/dynamic-import/reuse-namespace-object-from-script': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-10-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-11-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-12-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-13-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-14-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-15-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-16-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-17-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-1-update-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-2-update-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-3-update-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-4-update-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-5-lhs-equals-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-6-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-7-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-8-lhs-assignment-operator-assignment-expression': [SKIP],
- 'language/expressions/dynamic-import/syntax/invalid/invalid-asssignmenttargettype-reference-error-9-lhs-assignment-operator-assignment-expression': [SKIP],
'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-assignment-expr-not-optional': [SKIP],
'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-no-new-call-expression': [SKIP],
'language/expressions/dynamic-import/syntax/invalid/nested-arrow-assignment-expression-no-rest-param': [SKIP],
@@ -1201,13 +1222,6 @@
'built-ins/SharedArrayBuffer/length-is-too-large-throws': [SKIP],
}], # asan == True or msan == True or tsan == True
-['variant == interpreted_regexp', {
- # Call stack exceeded: https://crbug.com/v8/8678
- 'built-ins/RegExp/CharacterClassEscapes/character-class-non-digit-class-escape-plus-quantifier-flags-u': [SKIP],
- 'built-ins/RegExp/CharacterClassEscapes/character-class-non-whitespace-class-escape-plus-quantifier-flags-u': [SKIP],
- 'built-ins/RegExp/CharacterClassEscapes/character-class-non-word-class-escape-plus-quantifier-flags-u': [SKIP],
-}], # variant == interpreted_regexp
-
##############################################################################
['variant == jitless', {
# https://crbug.com/v8/7777
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 6ce0834cb1..ff866a832f 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -49,13 +49,16 @@ FEATURE_FLAGS = {
'Intl.DateTimeFormat-formatRange': '--harmony-intl-date-format-range',
'Intl.NumberFormat-unified': '--harmony-intl-numberformat-unified',
'Intl.Segmenter': '--harmony-intl-segmenter',
+ 'Intl.DateTimeFormat-dayPeriod': '--harmony-intl-dateformat-day-period',
+ 'Intl.DateTimeFormat-quarter': '--harmony-intl-dateformat-quarter',
+ 'Intl.DateTimeFormat-fractionalSecondDigits': '--harmony-intl-dateformat-fractional-second-digits',
'Symbol.prototype.description': '--harmony-symbol-description',
- 'globalThis': '--harmony-global',
'export-star-as-namespace-from-module': '--harmony-namespace-exports',
- 'Object.fromEntries': '--harmony-object-from-entries',
- 'hashbang': '--harmony-hashbang',
'BigInt': '--harmony-intl-bigint',
'Promise.allSettled': '--harmony-promise-all-settled',
+ 'FinalizationGroup': '--harmony-weak-refs',
+ 'WeakRef': '--harmony-weak-refs',
+ 'host-gc-required': '--expose-gc-as=v8GC',
}
SKIPPED_FEATURES = set(['class-methods-private',
diff --git a/deps/v8/test/torque/OWNERS b/deps/v8/test/torque/OWNERS
new file mode 100644
index 0000000000..5f8830d55a
--- /dev/null
+++ b/deps/v8/test/torque/OWNERS
@@ -0,0 +1 @@
+file://src/torque/OWNERS
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 8f6635a459..6d2aee1479 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -11,11 +11,7 @@ namespace test {
}
}
- macro ElementsKindTestHelper2(kind: constexpr ElementsKind): bool {
- return ((kind == UINT8_ELEMENTS) || (kind == UINT16_ELEMENTS));
- }
-
- macro ElementsKindTestHelper3(kind: constexpr ElementsKind): constexpr bool {
+ macro ElementsKindTestHelper2(kind: constexpr ElementsKind): constexpr bool {
return ((kind == UINT8_ELEMENTS) || (kind == UINT16_ELEMENTS));
}
@@ -48,10 +44,10 @@ namespace test {
@export
macro TestConstexprReturn() {
- check(FromConstexpr<bool>(ElementsKindTestHelper3(UINT8_ELEMENTS)));
- check(FromConstexpr<bool>(ElementsKindTestHelper3(UINT16_ELEMENTS)));
- check(!FromConstexpr<bool>(ElementsKindTestHelper3(UINT32_ELEMENTS)));
- check(FromConstexpr<bool>(!ElementsKindTestHelper3(UINT32_ELEMENTS)));
+ check(FromConstexpr<bool>(ElementsKindTestHelper2(UINT8_ELEMENTS)));
+ check(FromConstexpr<bool>(ElementsKindTestHelper2(UINT16_ELEMENTS)));
+ check(!FromConstexpr<bool>(ElementsKindTestHelper2(UINT32_ELEMENTS)));
+ check(FromConstexpr<bool>(!ElementsKindTestHelper2(UINT32_ELEMENTS)));
}
@export
@@ -87,11 +83,11 @@ namespace test {
}
}
- builtin GenericBuiltinTest<T: type>(c: Context, param: T): Object {
+ builtin GenericBuiltinTest<T: type>(_c: Context, _param: T): Object {
return Null;
}
- GenericBuiltinTest<Object>(c: Context, param: Object): Object {
+ GenericBuiltinTest<Object>(_c: Context, param: Object): Object {
return param;
}
@@ -126,8 +122,8 @@ namespace test {
@export
macro TestPartiallyUnusedLabel(): Boolean {
- let r1: bool = CallLabelTestHelper4(true);
- let r2: bool = CallLabelTestHelper4(false);
+ const r1: bool = CallLabelTestHelper4(true);
+ const r2: bool = CallLabelTestHelper4(false);
if (r1 && !r2) {
return True;
@@ -136,7 +132,7 @@ namespace test {
}
}
- macro GenericMacroTest<T: type>(param: T): Object {
+ macro GenericMacroTest<T: type>(_param: T): Object {
return Undefined;
}
@@ -144,8 +140,8 @@ namespace test {
return param2;
}
- macro GenericMacroTestWithLabels<T: type>(param: T): Object
- labels X {
+ macro GenericMacroTestWithLabels<T: type>(_param: T): Object
+ labels _X {
return Undefined;
}
@@ -157,7 +153,7 @@ namespace test {
@export
macro TestMacroSpecialization() {
try {
- const smi0: Smi = 0;
+ const _smi0: Smi = 0;
check(GenericMacroTest<Smi>(0) == Undefined);
check(GenericMacroTest<Smi>(1) == Undefined);
check(GenericMacroTest<Object>(Null) == Null);
@@ -175,10 +171,10 @@ namespace test {
}
}
- builtin TestHelperPlus1(context: Context, x: Smi): Smi {
+ builtin TestHelperPlus1(_context: Context, x: Smi): Smi {
return x + 1;
}
- builtin TestHelperPlus2(context: Context, x: Smi): Smi {
+ builtin TestHelperPlus2(_context: Context, x: Smi): Smi {
return x + 2;
}
@@ -193,21 +189,22 @@ namespace test {
@export
macro TestVariableRedeclaration(implicit context: Context)(): Boolean {
- let var1: int31 = FromConstexpr<bool>(42 == 0) ? 0 : 1;
- let var2: int31 = FromConstexpr<bool>(42 == 0) ? 1 : 0;
+ let _var1: int31 = FromConstexpr<bool>(42 == 0) ? 0 : 1;
+ let _var2: int31 = FromConstexpr<bool>(42 == 0) ? 1 : 0;
return True;
}
@export
macro TestTernaryOperator(x: Smi): Smi {
- let b: bool = x < 0 ? true : false;
+ const b: bool = x < 0 ? true : false;
return b ? x - 10 : x + 100;
}
@export
macro TestFunctionPointerToGeneric(c: Context) {
- let fptr1: builtin(Context, Smi) => Object = GenericBuiltinTest<Smi>;
- let fptr2: builtin(Context, Object) => Object = GenericBuiltinTest<Object>;
+ const fptr1: builtin(Context, Smi) => Object = GenericBuiltinTest<Smi>;
+ const fptr2: builtin(Context, Object) => Object =
+ GenericBuiltinTest<Object>;
check(fptr1(c, 0) == Null);
check(fptr1(c, 1) == Null);
@@ -224,7 +221,7 @@ namespace test {
@export
macro TestUnsafeCast(implicit context: Context)(n: Number): Boolean {
if (TaggedIsSmi(n)) {
- let m: Smi = UnsafeCast<Smi>(n);
+ const m: Smi = UnsafeCast<Smi>(n);
check(TestHelperPlus1(context, m) == 11);
return True;
@@ -240,13 +237,13 @@ namespace test {
@export
macro TestLargeIntegerLiterals(implicit c: Context)() {
- let x: int32 = 0x40000000;
- let y: int32 = 0x7fffffff;
+ let _x: int32 = 0x40000000;
+ let _y: int32 = 0x7fffffff;
}
@export
macro TestMultilineAssert() {
- let someVeryLongVariableNameThatWillCauseLineBreaks: Smi = 5;
+ const someVeryLongVariableNameThatWillCauseLineBreaks: Smi = 5;
check(
someVeryLongVariableNameThatWillCauseLineBreaks > 0 &&
someVeryLongVariableNameThatWillCauseLineBreaks < 10);
@@ -312,14 +309,14 @@ namespace test {
macro TestStruct3(implicit context: Context)(): TestStructA {
let a: TestStructA =
TestStructA{indexes: UnsafeCast<FixedArray>(kEmptyFixedArray), i: 13, k: 5};
- let b: TestStructA = a;
- let c: TestStructA = TestStruct2();
+ let _b: TestStructA = a;
+ const c: TestStructA = TestStruct2();
a.i = TestStruct1(c);
a.k = a.i;
let d: TestStructB;
d.x = a;
d = TestStructB{x: a, y: 7};
- let e: TestStructA = d.x;
+ let _e: TestStructA = d.x;
let f: Smi = TestStructA{
indexes: UnsafeCast<FixedArray>(kEmptyFixedArray),
i: 27,
@@ -343,11 +340,12 @@ namespace test {
Foo(TestStructA) {
goto Foo(TestStruct2());
}
+ @export // Silence unused warning.
macro CallTestStructInLabel(implicit context: Context)() {
try {
TestStructInLabel() otherwise Foo;
}
- label Foo(s: TestStructA) {}
+ label Foo(_s: TestStructA) {}
}
// This macro tests different versions of the for-loop where some parts
@@ -447,14 +445,14 @@ namespace test {
}
// Test if we can handle uninitialized values on the stack.
- let i: Smi;
+ let _i: Smi;
for (let j: Smi = 0; j < 10; ++j) {
}
}
@export
macro TestSubtyping(x: Smi) {
- const foo: Object = x;
+ const _foo: Object = x;
}
macro IncrementIfSmi<A: type>(x: A): A {
@@ -473,7 +471,7 @@ namespace test {
int32 {
let result: int32 = 0;
typeswitch (IncrementIfSmi(x)) {
- case (x: FixedArray): {
+ case (_x: FixedArray): {
result = result + 1;
}
case (Number): {
@@ -490,7 +488,7 @@ namespace test {
case (a: FixedArray): {
result = result + Convert<int32>(a.length);
}
- case (x: HeapNumber): {
+ case (_x: HeapNumber): {
result = result + 7;
}
}
@@ -509,13 +507,13 @@ namespace test {
@export
macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object) {
typeswitch (obj) {
- case (o: Smi): {
+ case (_o: Smi): {
}
- case (o: JSTypedArray): {
+ case (_o: JSTypedArray): {
}
- case (o: JSReceiver): {
+ case (_o: JSReceiver): {
}
- case (o: HeapObject): {
+ case (_o: HeapObject): {
}
}
}
@@ -689,7 +687,7 @@ namespace test {
@export
macro TestQualifiedAccess(implicit context: Context)() {
- let s: Smi = 0;
+ const s: Smi = 0;
check(!array::IsJSArray(s));
}
@@ -698,7 +696,7 @@ namespace test {
let r: Smi = 0;
try {
ThrowTypeError(kInvalidArrayLength);
- } catch (e) {
+ } catch (_e) {
r = 1;
return r;
}
@@ -714,7 +712,7 @@ namespace test {
let r: Smi = 0;
try {
TestCatch2Wrapper();
- } catch (e) {
+ } catch (_e) {
r = 2;
return r;
}
@@ -722,7 +720,7 @@ namespace test {
@export
macro TestCatch3WrapperWithLabel(implicit context: Context)():
- never labels Abort {
+ never labels _Abort {
ThrowTypeError(kInvalidArrayLength);
}
@@ -735,7 +733,7 @@ namespace test {
label Abort {
return -1;
}
- catch (e) {
+ catch (_e) {
r = 2;
return r;
}
@@ -746,18 +744,18 @@ namespace test {
// IteratorBuiltinsAssembler match the signatures provided in
// iterator.tq.
@export
- macro TestIterator(implicit context: Context)(o: Object, map: Map) {
+ macro TestIterator(implicit context: Context)(o: JSReceiver, map: Map) {
try {
const t1: Object = iterator::GetIteratorMethod(o);
const t2: iterator::IteratorRecord = iterator::GetIterator(o);
- const t3: Object = iterator::IteratorStep(t2) otherwise Fail;
- const t4: Object = iterator::IteratorStep(t2, map) otherwise Fail;
+ const _t3: Object = iterator::IteratorStep(t2) otherwise Fail;
+ const _t4: Object = iterator::IteratorStep(t2, map) otherwise Fail;
- const t5: Object = iterator::IteratorValue(t4);
- const t6: Object = iterator::IteratorValue(t4, map);
+ const t5: Object = iterator::IteratorValue(o);
+ const _t6: Object = iterator::IteratorValue(o, map);
- const t7: JSArray = iterator::IterableToList(t1, t1);
+ const _t7: JSArray = iterator::IterableToList(t1, t1);
iterator::IteratorCloseOnException(t2, t5);
}
@@ -772,13 +770,13 @@ namespace test {
assert(frameType == STUB_FRAME);
assert(f.caller == LoadParentFramePointer());
typeswitch (f) {
- case (f: StandardFrame): {
+ case (_f: StandardFrame): {
unreachable;
}
- case (f: ArgumentsAdaptorFrame): {
+ case (_f: ArgumentsAdaptorFrame): {
unreachable;
}
- case (f: StubFrame): {
+ case (_f: StubFrame): {
}
}
}
@@ -786,7 +784,7 @@ namespace test {
@export
macro TestNew(implicit context: Context)() {
const f: JSArray = NewJSArray();
- assert(f.IsEmpty());
+ check(f.IsEmpty());
f.length = 0;
}
@@ -811,18 +809,18 @@ namespace test {
macro TestStructConstructor(implicit context: Context)() {
// Test default constructor
let a: TestOuter = TestOuter{a: 5, b: TestInner{x: 6, y: 7}, c: 8};
- assert(a.a == 5);
- assert(a.b.x == 6);
- assert(a.b.y == 7);
- assert(a.c == 8);
+ check(a.a == 5);
+ check(a.b.x == 6);
+ check(a.b.y == 7);
+ check(a.c == 8);
a.b.x = 1;
- assert(a.b.x == 1);
+ check(a.b.x == 1);
a.b.SetX(2);
- assert(a.b.x == 2);
- assert(a.b.GetX() == 2);
+ check(a.b.x == 2);
+ check(a.b.GetX() == 2);
}
- class InternalClass {
+ class InternalClass extends Struct {
Flip() labels NotASmi {
const tmp = Cast<Smi>(this.b) otherwise NotASmi;
this.b = this.a;
@@ -860,24 +858,27 @@ namespace test {
const x = StructWithConst{a: Null, b: 1};
let y = StructWithConst{a: Null, b: 1};
y.a = Undefined;
- const copy = x;
+ const _copy = x;
+
+ check(x.TestMethod1() == 1);
+ check(x.TestMethod2() == Null);
}
struct TestIterator {
Next(): Object labels NoMore {
if (this.count-- == 0) goto NoMore;
- return Hole;
+ return TheHole;
}
count: Smi;
}
@export
macro TestNewFixedArrayFromSpread(implicit context: Context)(): Object {
- const i = TestIterator{count: 5};
+ let i = TestIterator{count: 5};
return new FixedArray{map: kFixedArrayMap, length: 5, objects: ...i};
}
- class SmiPair {
+ class SmiPair extends Struct {
GetA():&Smi {
return & this.a;
}
@@ -907,4 +908,103 @@ namespace test {
StaticAssert(1 + 2 == 3);
}
+ class SmiBox extends Struct {
+ value: Smi;
+ unrelated: Smi;
+ }
+
+ builtin NewSmiBox(implicit context: Context)(value: Smi): SmiBox {
+ return new SmiBox{value, unrelated: 0};
+ }
+
+ @export
+ macro TestLoadEliminationFixed(implicit context: Context)() {
+ const box = NewSmiBox(123);
+ const v1 = box.value;
+ box.unrelated = 999;
+ const v2 = (box.unrelated == 0) ? box.value : box.value;
+ StaticAssert(WordEqual(v1, v2));
+
+ box.value = 11;
+ const v3 = box.value;
+ const eleven: Smi = 11;
+ StaticAssert(WordEqual(v3, eleven));
+ }
+
+ @export
+ macro TestLoadEliminationVariable(implicit context: Context)() {
+ const a = UnsafeCast<FixedArray>(kEmptyFixedArray);
+ const box = NewSmiBox(1);
+ const v1 = a.objects[box.value];
+ const u1 = a.objects[box.value + 2];
+ const v2 = a.objects[box.value];
+ const u2 = a.objects[box.value + 2];
+ StaticAssert(WordEqual(v1, v2));
+ StaticAssert(WordEqual(u1, u2));
+ }
+
+ @export
+ macro TestRedundantArrayElementCheck(implicit context: Context)(): Smi {
+ const a = kEmptyFixedArray;
+ for (let i: Smi = 0; i < a.length; i++) {
+ if (a.objects[i] == TheHole) {
+ if (a.objects[i] == TheHole) {
+ return -1;
+ } else {
+ StaticAssert(false);
+ }
+ }
+ }
+ return 1;
+ }
+
+ @export
+ macro TestRedundantSmiCheck(implicit context: Context)(): Smi {
+ const a = kEmptyFixedArray;
+ const x = a.objects[1];
+ typeswitch (x) {
+ case (Smi): {
+ Cast<Smi>(x) otherwise VerifiedUnreachable();
+ return -1;
+ }
+ case (Object): {
+ }
+ }
+ return 1;
+ }
+
+ struct SBox<T: type> {
+ value: T;
+ }
+
+ @export
+ macro TestGenericStruct1(): intptr {
+ const i: intptr = 123;
+ let box = SBox<intptr>{value: i};
+ let boxbox = SBox<SBox<intptr>>{value: box};
+ check(box.value == 123);
+ boxbox.value.value *= 2;
+ check(boxbox.value.value == 246);
+ return boxbox.value.value;
+ }
+
+ struct TestTuple<T1: type, T2: type> {
+ const fst: T1;
+ const snd: T2;
+ }
+
+ macro Swap<T1: type, T2: type>(tuple: TestTuple<T1, T2>):
+ TestTuple<T2, T1> {
+ return TestTuple<T2, T1>{fst: tuple.snd, snd: tuple.fst};
+ }
+
+ @export
+ macro TestGenericStruct2(): TestTuple<Smi, intptr> {
+ const intptrAndSmi = TestTuple<intptr, Smi>{fst: 1, snd: 2};
+ const smiAndIntptr = Swap<intptr, Smi>(intptrAndSmi);
+ check(intptrAndSmi.fst == smiAndIntptr.snd);
+ check(intptrAndSmi.snd == smiAndIntptr.fst);
+ return smiAndIntptr;
+ }
+
}
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 39af3fbc06..87013f9fbc 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -50,6 +50,7 @@ v8_source_set("unittests_sources") {
"api/interceptor-unittest.cc",
"api/isolate-unittest.cc",
"api/remote-object-unittest.cc",
+ "api/resource-constraints-unittest.cc",
"api/v8-object-unittest.cc",
"asmjs/asm-scanner-unittest.cc",
"asmjs/asm-types-unittest.cc",
@@ -76,6 +77,7 @@ v8_source_set("unittests_sources") {
"base/template-utils-unittest.cc",
"base/threaded-list-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
+ "base/vlq-base64-unittest.cc",
"codegen/code-stub-assembler-unittest.cc",
"codegen/code-stub-assembler-unittest.h",
"codegen/register-configuration-unittest.cc",
@@ -223,6 +225,7 @@ v8_source_set("unittests_sources") {
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
+ "wasm/wasm-text-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
@@ -299,6 +302,7 @@ v8_source_set("unittests_sources") {
"../..:v8_libbase",
"../..:v8_libplatform",
"../../third_party/inspector_protocol:encoding_test",
+ "../../third_party/inspector_protocol:bindings_test",
"//build/win:default_exe_manifest",
"//testing/gmock",
"//testing/gtest",
diff --git a/deps/v8/test/unittests/api/resource-constraints-unittest.cc b/deps/v8/test/unittests/api/resource-constraints-unittest.cc
new file mode 100644
index 0000000000..4c9b7f33dd
--- /dev/null
+++ b/deps/v8/test/unittests/api/resource-constraints-unittest.cc
@@ -0,0 +1,56 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "src/heap/heap.h"
+
+namespace v8 {
+
+TEST(ResourceConstraints, ConfigureDefaultsFromHeapSizeSmall) {
+ const size_t KB = static_cast<size_t>(i::KB);
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+ v8::ResourceConstraints constraints;
+ constraints.ConfigureDefaultsFromHeapSize(1 * MB, 1 * MB);
+ ASSERT_EQ(i::Heap::MinOldGenerationSize(),
+ constraints.max_old_generation_size_in_bytes());
+ ASSERT_EQ(3 * 512 * pm * KB,
+ constraints.max_young_generation_size_in_bytes());
+ ASSERT_EQ(0u, constraints.initial_old_generation_size_in_bytes());
+ ASSERT_EQ(0u, constraints.initial_young_generation_size_in_bytes());
+}
+
+TEST(ResourceConstraints, ConfigureDefaultsFromHeapSizeLarge) {
+ const size_t KB = static_cast<size_t>(i::KB);
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+ v8::ResourceConstraints constraints;
+ constraints.ConfigureDefaultsFromHeapSize(100u * MB, 3000u * MB);
+ ASSERT_EQ(3000u * MB - 3 * 8192 * pm * KB,
+ constraints.max_old_generation_size_in_bytes());
+ ASSERT_EQ(3 * 8192 * pm * KB,
+ constraints.max_young_generation_size_in_bytes());
+ ASSERT_EQ(100u * MB - 3 * 512 * pm * KB,
+ constraints.initial_old_generation_size_in_bytes());
+ ASSERT_EQ(3 * 512 * pm * KB,
+ constraints.initial_young_generation_size_in_bytes());
+}
+
+TEST(ResourceConstraints, ConfigureDefaults) {
+ const size_t KB = static_cast<size_t>(i::KB);
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+ v8::ResourceConstraints constraints;
+ constraints.ConfigureDefaults(2048u * MB, 0u);
+ ASSERT_EQ(512u * pm * MB, constraints.max_old_generation_size_in_bytes());
+ ASSERT_EQ(3 * 4096 * pm * KB,
+ constraints.max_young_generation_size_in_bytes());
+ ASSERT_EQ(0u, constraints.initial_old_generation_size_in_bytes());
+ ASSERT_EQ(0u, constraints.initial_young_generation_size_in_bytes());
+}
+
+} // namespace v8
diff --git a/deps/v8/test/unittests/api/v8-object-unittest.cc b/deps/v8/test/unittests/api/v8-object-unittest.cc
index 6e5c9131fd..eb72d45263 100644
--- a/deps/v8/test/unittests/api/v8-object-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-object-unittest.cc
@@ -155,6 +155,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPlatformObject) {
caller_context->Global()->Set(caller_context, object_key, object).ToChecked();
const char script[] =
"function f() { object.property; object.property = 0; } "
+ "%PrepareFunctionForOptimization(f);"
"f(); f(); "
"%OptimizeFunctionOnNextCall(f); "
"f();";
@@ -210,6 +211,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnInterface) {
.ToChecked();
const char script[] =
"function f() { Interface.property; Interface.property = 0; } "
+ "%PrepareFunctionForOptimization(f);"
"f(); f(); "
"%OptimizeFunctionOnNextCall(f); "
"f();";
diff --git a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
index 6099cd5a59..420b236432 100644
--- a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
+++ b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
@@ -37,8 +37,9 @@ static void CheckSlowSample(const std::vector<uint64_t>& sample, uint64_t max,
}
}
-static void TestNextSample(RandomNumberGenerator& rng, uint64_t max,
- size_t size, bool slow = false) {
+static void TestNextSample(
+ RandomNumberGenerator& rng, // NOLINT(runtime/references)
+ uint64_t max, size_t size, bool slow = false) {
std::vector<uint64_t> sample =
slow ? rng.NextSampleSlow(max, size) : rng.NextSample(max, size);
diff --git a/deps/v8/test/unittests/base/vlq-base64-unittest.cc b/deps/v8/test/unittests/base/vlq-base64-unittest.cc
new file mode 100644
index 0000000000..8abec9b626
--- /dev/null
+++ b/deps/v8/test/unittests/base/vlq-base64-unittest.cc
@@ -0,0 +1,137 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+#include <initializer_list>
+#include <limits>
+
+#include "src/base/vlq-base64.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+TEST(VLQBASE64, charToDigit) {
+ char kSyms[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+ for (int i = 0; i < 256; ++i) {
+ char* pos = strchr(kSyms, static_cast<char>(i));
+ int8_t expected = i == 0 || pos == nullptr ? -1 : pos - kSyms;
+ EXPECT_EQ(expected, charToDigitDecodeForTesting(static_cast<uint8_t>(i)));
+ }
+}
+
+struct ExpectedVLQBase64Result {
+ size_t pos;
+ int32_t result;
+};
+
+void TestVLQBase64Decode(
+ const char* str,
+ std::initializer_list<ExpectedVLQBase64Result> expected_results) {
+ size_t pos = 0;
+ for (const auto& expect : expected_results) {
+ int32_t result = VLQBase64Decode(str, strlen(str), &pos);
+ EXPECT_EQ(expect.result, result);
+ EXPECT_EQ(expect.pos, pos);
+ }
+}
+
+TEST(VLQBASE64, DecodeOneSegment) {
+ TestVLQBase64Decode("", {{0, std::numeric_limits<int32_t>::min()}});
+
+ // Unsupported symbol.
+ TestVLQBase64Decode("*", {{0, std::numeric_limits<int32_t>::min()}});
+ TestVLQBase64Decode("&", {{0, std::numeric_limits<int32_t>::min()}});
+ TestVLQBase64Decode("kt:", {{2, std::numeric_limits<int32_t>::min()}});
+ TestVLQBase64Decode("k^C", {{1, std::numeric_limits<int32_t>::min()}});
+
+ // Incomplete string.
+ TestVLQBase64Decode("kth4yp", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // Interpretable strings.
+ TestVLQBase64Decode("A", {{1, 0}});
+ TestVLQBase64Decode("C", {{1, 1}});
+ TestVLQBase64Decode("Y", {{1, 12}});
+ TestVLQBase64Decode("2H", {{2, 123}});
+ TestVLQBase64Decode("ktC", {{3, 1234}});
+ TestVLQBase64Decode("yjY", {{3, 12345}});
+ TestVLQBase64Decode("gkxH", {{4, 123456}});
+ TestVLQBase64Decode("uorrC", {{5, 1234567}});
+ TestVLQBase64Decode("80wxX", {{5, 12345678}});
+ TestVLQBase64Decode("qxmvrH", {{6, 123456789}});
+ TestVLQBase64Decode("kth4ypC", {{7, 1234567890}});
+ TestVLQBase64Decode("+/////D", {{7, std::numeric_limits<int32_t>::max()}});
+ TestVLQBase64Decode("D", {{1, -1}});
+ TestVLQBase64Decode("Z", {{1, -12}});
+ TestVLQBase64Decode("3H", {{2, -123}});
+ TestVLQBase64Decode("ltC", {{3, -1234}});
+ TestVLQBase64Decode("zjY", {{3, -12345}});
+ TestVLQBase64Decode("hkxH", {{4, -123456}});
+ TestVLQBase64Decode("vorrC", {{5, -1234567}});
+ TestVLQBase64Decode("90wxX", {{5, -12345678}});
+ TestVLQBase64Decode("rxmvrH", {{6, -123456789}});
+ TestVLQBase64Decode("lth4ypC", {{7, -1234567890}});
+ TestVLQBase64Decode("//////D", {{7, -std::numeric_limits<int32_t>::max()}});
+
+ // An overflowed value 12345678901 (0x2DFDC1C35).
+ TestVLQBase64Decode("qjuw7/2A", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // An overflowed value 123456789012 (0x1CBE991A14).
+ TestVLQBase64Decode("ohtkz+lH", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // An overflowed value 4294967296 (0x100000000).
+ TestVLQBase64Decode("ggggggE", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // An overflowed value -12345678901, |value| = (0x2DFDC1C35).
+ TestVLQBase64Decode("rjuw7/2A", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // An overflowed value -123456789012, |value| = (0x1CBE991A14).
+ TestVLQBase64Decode("phtkz+lH", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // An overflowed value -4294967296, |value| = (0x100000000).
+ TestVLQBase64Decode("hgggggE", {{6, std::numeric_limits<int32_t>::min()}});
+}
+
+TEST(VLQBASE64, DecodeTwoSegment) {
+ TestVLQBase64Decode("AA", {{1, 0}, {2, 0}});
+ TestVLQBase64Decode("KA", {{1, 5}, {2, 0}});
+ TestVLQBase64Decode("AQ", {{1, 0}, {2, 8}});
+ TestVLQBase64Decode("MG", {{1, 6}, {2, 3}});
+ TestVLQBase64Decode("a4E", {{1, 13}, {3, 76}});
+ TestVLQBase64Decode("4GyO", {{2, 108}, {4, 233}});
+ TestVLQBase64Decode("ggEqnD", {{3, 2048}, {6, 1653}});
+ TestVLQBase64Decode("g2/D0ilF", {{4, 65376}, {8, 84522}});
+ TestVLQBase64Decode("ss6gBy0m3B", {{5, 537798}, {10, 904521}});
+ TestVLQBase64Decode("LA", {{1, -5}, {2, 0}});
+ TestVLQBase64Decode("AR", {{1, 0}, {2, -8}});
+ TestVLQBase64Decode("NH", {{1, -6}, {2, -3}});
+ TestVLQBase64Decode("b5E", {{1, -13}, {3, -76}});
+ TestVLQBase64Decode("5GzO", {{2, -108}, {4, -233}});
+ TestVLQBase64Decode("hgErnD", {{3, -2048}, {6, -1653}});
+ TestVLQBase64Decode("h2/D1ilF", {{4, -65376}, {8, -84522}});
+ TestVLQBase64Decode("ts6gBz0m3B", {{5, -537798}, {10, -904521}});
+ TestVLQBase64Decode("4GzO", {{2, 108}, {4, -233}});
+ TestVLQBase64Decode("ggErnD", {{3, 2048}, {6, -1653}});
+ TestVLQBase64Decode("g2/D1ilF", {{4, 65376}, {8, -84522}});
+ TestVLQBase64Decode("ss6gBz0m3B", {{5, 537798}, {10, -904521}});
+ TestVLQBase64Decode("5GyO", {{2, -108}, {4, 233}});
+ TestVLQBase64Decode("hgEqnD", {{3, -2048}, {6, 1653}});
+ TestVLQBase64Decode("h2/D0ilF", {{4, -65376}, {8, 84522}});
+ TestVLQBase64Decode("ts6gBy0m3B", {{5, -537798}, {10, 904521}});
+}
+
+TEST(VLQBASE64, DecodeFourSegment) {
+ TestVLQBase64Decode("AAAA", {{1, 0}, {2, 0}, {3, 0}, {4, 0}});
+ TestVLQBase64Decode("QADA", {{1, 8}, {2, 0}, {3, -1}, {4, 0}});
+ TestVLQBase64Decode("ECQY", {{1, 2}, {2, 1}, {3, 8}, {4, 12}});
+ TestVLQBase64Decode("goGguCioPk9I",
+ {{3, 3200}, {6, 1248}, {9, 7809}, {12, 4562}});
+ TestVLQBase64Decode("6/BACA", {{3, 1021}, {4, 0}, {5, 1}, {6, 0}});
+ TestVLQBase64Decode("urCAQA", {{3, 1207}, {4, 0}, {5, 8}, {6, 0}});
+ TestVLQBase64Decode("sDACA", {{2, 54}, {3, 0}, {4, 1}, {5, 0}});
+}
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index fb6dc163b2..bc74e6fe19 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -94,7 +94,7 @@ class CompilerDispatcherTest : public TestWithNativeContext {
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
- shared->FunctionLiteralId(isolate), nullptr);
+ shared->function_literal_id(), nullptr);
return dispatcher->Enqueue(outer_parse_info.get(), function_name,
function_literal);
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 78663c52a5..867f89abfd 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -41,8 +41,9 @@ std::ostream& operator<<(std::ostream& os, const Shift& shift) {
// Helper to build Int32Constant or Int64Constant depending on the given
// machine type.
-Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
- int64_t value) {
+Node* BuildConstant(
+ InstructionSelectorTest::StreamBuilder& m, // NOLINT(runtime/references)
+ MachineType type, int64_t value) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Constant(static_cast<int32_t>(value));
@@ -58,7 +59,6 @@ Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
return NULL;
}
-
// ARM64 logical instructions.
const MachInst2 kLogicalInstructions[] = {
{&RawMachineAssembler::Word32And, "Word32And", kArm64And32,
@@ -1277,7 +1277,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1LL << bit;
+ uint64_t mask = uint64_t{1} << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(mask)), &a, &b);
@@ -1298,7 +1298,7 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1LL << bit;
+ uint64_t mask = uint64_t{1} << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64And(m.Int64Constant(mask), m.Parameter(0)), &a, &b);
@@ -2196,7 +2196,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
// x * (2^k + 1) -> x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
- m.Return(m.Int64Mul(m.Parameter(0), m.Int64Constant((1L << k) + 1)));
+ m.Return(
+ m.Int64Mul(m.Parameter(0), m.Int64Constant((int64_t{1} << k) + 1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2209,7 +2210,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
// (2^k + 1) * x -> x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
- m.Return(m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(0)));
+ m.Return(
+ m.Int64Mul(m.Int64Constant((int64_t{1} << k) + 1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2223,9 +2225,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Add(m.Int64Mul(m.Parameter(0), m.Int64Constant((1L << k) + 1)),
- m.Parameter(1)));
+ m.Return(m.Int64Add(
+ m.Int64Mul(m.Parameter(0), m.Int64Constant((int64_t{1} << k) + 1)),
+ m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2240,9 +2242,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Add(m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(0)),
- m.Parameter(1)));
+ m.Return(m.Int64Add(
+ m.Int64Mul(m.Int64Constant((int64_t{1} << k) + 1), m.Parameter(0)),
+ m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2257,9 +2259,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Add(m.Parameter(0),
- m.Int64Mul(m.Parameter(1), m.Int64Constant((1L << k) + 1))));
+ m.Return(m.Int64Add(
+ m.Parameter(0),
+ m.Int64Mul(m.Parameter(1), m.Int64Constant((int64_t{1} << k) + 1))));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2274,9 +2276,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Add(m.Parameter(0),
- m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(1))));
+ m.Return(m.Int64Add(
+ m.Parameter(0),
+ m.Int64Mul(m.Int64Constant((int64_t{1} << k) + 1), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2291,9 +2293,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Sub(m.Parameter(0),
- m.Int64Mul(m.Parameter(1), m.Int64Constant((1L << k) + 1))));
+ m.Return(m.Int64Sub(
+ m.Parameter(0),
+ m.Int64Mul(m.Parameter(1), m.Int64Constant((int64_t{1} << k) + 1))));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2308,9 +2310,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Sub(m.Parameter(0),
- m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(1))));
+ m.Return(m.Int64Sub(
+ m.Parameter(0),
+ m.Int64Mul(m.Int64Constant((int64_t{1} << k) + 1), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2842,6 +2844,65 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
+static const WriteBarrierKind kWriteBarrierKinds[] = {
+ kMapWriteBarrier, kPointerWriteBarrier, kEphemeronKeyWriteBarrier,
+ kFullWriteBarrier};
+
+const int32_t kStoreWithBarrierImmediates[] = {
+ -256, -255, -3, -2, -1, 0, 1, 2, 3, 255,
+ 256, 264, 4096, 4104, 8192, 8200, 16384, 16392, 32752, 32760};
+
+using InstructionSelectorStoreWithBarrierTest =
+ InstructionSelectorTestWithParam<WriteBarrierKind>;
+
+TEST_P(InstructionSelectorStoreWithBarrierTest,
+ StoreWithWriteBarrierParameters) {
+ const WriteBarrierKind barrier_kind = GetParam();
+ StreamBuilder m(this, MachineType::Int32(),
+ MachineType::TypeCompressedTaggedPointer(),
+ MachineType::Int32(), MachineType::TypeCompressedTagged());
+ m.Store(MachineType::RepCompressedTagged(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), barrier_kind);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build(kAllExceptNopInstructions);
+ // We have two instructions that are not nops: Store and Return.
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchStoreWithWriteBarrier, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+TEST_P(InstructionSelectorStoreWithBarrierTest,
+ StoreWithWriteBarrierImmediate) {
+ const WriteBarrierKind barrier_kind = GetParam();
+ TRACED_FOREACH(int32_t, index, kStoreWithBarrierImmediates) {
+ StreamBuilder m(this, MachineType::Int32(),
+ MachineType::TypeCompressedTaggedPointer(),
+ MachineType::TypeCompressedTagged());
+ m.Store(MachineType::RepCompressedTagged(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), barrier_kind);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build(kAllExceptNopInstructions);
+ // We have two instructions that are not nops: Store and Return.
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchStoreWithWriteBarrier, s[0]->arch_opcode());
+ // With compressed pointers, a store with barrier is a 32-bit str which has
+ // a smaller immediate range.
+ if (COMPRESS_POINTERS_BOOL && (index > 16380)) {
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ } else {
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ }
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorStoreWithBarrierTest,
+ ::testing::ValuesIn(kWriteBarrierKinds));
+
// -----------------------------------------------------------------------------
// Comparison instructions.
@@ -4601,9 +4662,11 @@ namespace {
// Builds a call with the specified signature and nodes as arguments.
// Then checks that the correct number of kArm64Poke and kArm64PokePair were
// generated.
-void TestPokePair(InstructionSelectorTest::StreamBuilder& m, Zone* zone,
- MachineSignature::Builder& builder, Node* nodes[],
- int num_nodes, int expected_poke_pair, int expected_poke) {
+void TestPokePair(
+ InstructionSelectorTest::StreamBuilder& m, // NOLINT(runtime/references)
+ Zone* zone,
+ MachineSignature::Builder& builder, // NOLINT(runtime/references)
+ Node* nodes[], int num_nodes, int expected_poke_pair, int expected_poke) {
auto call_descriptor =
InstructionSelectorTest::StreamBuilder::MakeSimpleCallDescriptor(
zone, builder.Build());
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index ecc1712e3d..a48ad1b359 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -5,6 +5,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
@@ -38,11 +39,12 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
InstructionSequence sequence(test_->isolate(), test_->zone(),
instruction_blocks);
SourcePositionTable source_position_table(graph());
+ TickCounter tick_counter;
InstructionSelector selector(
test_->zone(), node_count, &linkage, &sequence, schedule,
&source_position_table, nullptr,
- InstructionSelector::kEnableSwitchJumpTable, source_position_mode,
- features, InstructionSelector::kDisableScheduling,
+ InstructionSelector::kEnableSwitchJumpTable, &tick_counter,
+ source_position_mode, features, InstructionSelector::kDisableScheduling,
InstructionSelector::kEnableRootsRelativeAddressing,
PoisoningMitigationLevel::kPoisonAll);
selector.SelectInstructions();
diff --git a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
index 9fcaedc0f8..34fb84957c 100644
--- a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/branch-elimination.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
@@ -27,7 +28,7 @@ class BranchEliminationTest : public GraphTest {
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
machine());
- GraphReducer graph_reducer(zone(), graph(), jsgraph.Dead());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter(), jsgraph.Dead());
BranchElimination branch_condition_elimination(&graph_reducer, &jsgraph,
zone());
graph_reducer.AddReducer(&branch_condition_elimination);
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index fa779891ac..d3c81344f2 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -59,8 +59,7 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
Handle<BytecodeArray> bytecode,
const std::vector<std::pair<std::string, std::string>>&
expected_liveness) {
- BytecodeAnalysis analysis(bytecode, zone(), true);
- analysis.Analyze(BailoutId::None());
+ BytecodeAnalysis analysis(bytecode, zone(), BailoutId::None(), true);
interpreter::BytecodeArrayIterator iterator(bytecode);
for (auto liveness : expected_liveness) {
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index c97bb96b49..690701cf56 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -29,7 +29,7 @@ class CommonOperatorReducerTest : public GraphTest {
Reduction Reduce(
AdvancedReducer::Editor* editor, Node* node,
MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags) {
- JSHeapBroker broker(isolate(), zone());
+ JSHeapBroker broker(isolate(), zone(), FLAG_trace_heap_broker);
MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
flags);
CommonOperatorReducer reducer(editor, graph(), &broker, common(), &machine,
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index d30449daa7..135e8e307f 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -63,7 +63,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
public:
ConstantFoldingReducerTest()
: TypedGraphTest(3),
- broker_(isolate(), zone()),
+ broker_(isolate(), zone(), FLAG_trace_heap_broker),
simplified_(zone()),
deps_(&broker_, zone()) {}
~ConstantFoldingReducerTest() override = default;
@@ -75,7 +75,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
ConstantFoldingReducer reducer(&graph_reducer, &jsgraph, broker());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
index 992ddcc55b..bda547b3f0 100644
--- a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -25,7 +25,8 @@ class ControlFlowOptimizerTest : public GraphTest {
protected:
void Optimize() {
- ControlFlowOptimizer optimizer(graph(), common(), machine(), zone());
+ ControlFlowOptimizer optimizer(graph(), common(), machine(), tick_counter(),
+ zone());
optimizer.Optimize();
}
diff --git a/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc b/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc
index 65bdb4c46e..fa938f76da 100644
--- a/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc
@@ -3,11 +3,14 @@
// found in the LICENSE file.
#include "src/compiler/decompression-elimination.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+using testing::_;
using testing::StrictMock;
namespace v8 {
@@ -24,12 +27,69 @@ class DecompressionEliminationTest : public GraphTest {
~DecompressionEliminationTest() override = default;
protected:
- Reduction Reduce(Node* node) {
- StrictMock<MockAdvancedReducerEditor> editor;
- DecompressionElimination decompression_elimination(&editor, graph(),
+ Reduction Reduce(StrictMock<MockAdvancedReducerEditor>* editor, Node* node) {
+ DecompressionElimination decompression_elimination(editor, graph(),
machine(), common());
return decompression_elimination.Reduce(node);
}
+ Reduction Reduce(Node* node) {
+ StrictMock<MockAdvancedReducerEditor> editor;
+ return Reduce(&editor, node);
+ }
+ Node* GetUniqueValueUse(Node* node) {
+ Node* value_use = nullptr;
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsValueEdge(edge)) {
+ if (value_use) {
+ return nullptr;
+ } else {
+ value_use = edge.from();
+ }
+ }
+ }
+ // Return the value use of node after the reduction, if there is exactly one
+ return value_use;
+ }
+
+ const Operator* DecompressionOpFromAccess(const ElementAccess access) {
+ switch (access.machine_type.representation()) {
+ case MachineRepresentation::kCompressed:
+ return machine()->ChangeCompressedToTagged();
+ case MachineRepresentation::kCompressedSigned:
+ return machine()->ChangeCompressedSignedToTaggedSigned();
+ case MachineRepresentation::kCompressedPointer:
+ return machine()->ChangeCompressedPointerToTaggedPointer();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ const Operator* CompressionOpFromAccess(const ElementAccess access) {
+ switch (access.machine_type.representation()) {
+ case MachineRepresentation::kCompressed:
+ return machine()->ChangeTaggedToCompressed();
+ case MachineRepresentation::kCompressedSigned:
+ return machine()->ChangeTaggedSignedToCompressedSigned();
+ case MachineRepresentation::kCompressedPointer:
+ return machine()->ChangeTaggedPointerToCompressedPointer();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // 'Global' accesses used to simplify the tests.
+ ElementAccess const any_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::AnyCompressed(),
+ kNoWriteBarrier};
+ ElementAccess const signed_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::CompressedSigned(),
+ kNoWriteBarrier};
+ ElementAccess const pointer_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::CompressedPointer(),
+ kNoWriteBarrier};
+ const ElementAccess element_accesses[3] = {any_access, signed_access,
+ pointer_access};
+
MachineOperatorBuilder* machine() { return &machine_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
@@ -39,222 +99,90 @@ class DecompressionEliminationTest : public GraphTest {
};
// -----------------------------------------------------------------------------
-// Direct Decompression & Compression
+// Direct Decompression & Compression.
TEST_F(DecompressionEliminationTest, BasicDecompressionCompression) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
- index, effect, control);
- Node* changeToTagged =
- graph()->NewNode(machine()->ChangeCompressedToTagged(), load);
- Node* changeToCompressed =
- graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
- changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
-}
-TEST_F(DecompressionEliminationTest, BasicDecompressionCompressionSigned) {
- // Skip test if pointer compression is not enabled
- if (!COMPRESS_POINTERS_BOOL) {
- return;
- }
-
- // Define variables
- Node* const control = graph()->start();
- Node* object = Parameter(Type::Any(), 0);
- Node* effect = graph()->start();
- Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedSigned(), kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
- index, effect, control);
- Node* changeToTagged =
- graph()->NewNode(machine()->ChangeCompressedSignedToTaggedSigned(), load);
- Node* changeToCompressed = graph()->NewNode(
- machine()->ChangeTaggedSignedToCompressedSigned(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
- changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
-}
+ // Pairs of <load, store> accesses
+ const std::pair<ElementAccess, ElementAccess> accesses[] = {
+ {any_access, any_access}, {signed_access, any_access},
+ {pointer_access, any_access}, {any_access, signed_access},
+ {signed_access, signed_access}, {any_access, pointer_access},
+ {pointer_access, pointer_access}};
-TEST_F(DecompressionEliminationTest, BasicDecompressionCompressionPointer) {
- // Skip test if pointer compression is not enabled
- if (!COMPRESS_POINTERS_BOOL) {
- return;
+ for (size_t i = 0; i < arraysize(accesses); ++i) {
+ // Create the graph.
+ Node* load = graph()->NewNode(simplified()->LoadElement(accesses[i].first),
+ object, index, effect, control);
+ Node* change_to_tagged =
+ graph()->NewNode(DecompressionOpFromAccess(accesses[i].first), load);
+ Node* change_to_compressed = graph()->NewNode(
+ CompressionOpFromAccess(accesses[i].second), change_to_tagged);
+ effect =
+ graph()->NewNode(simplified()->StoreElement(accesses[i].second), object,
+ index, change_to_compressed, effect, control);
+
+ // Reduce.
+ Reduction r = Reduce(change_to_compressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
}
-
- // Define variables
- Node* const control = graph()->start();
- Node* object = Parameter(Type::Any(), 0);
- Node* effect = graph()->start();
- Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedPointer(), kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
- index, effect, control);
- Node* changeToTagged = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load);
- Node* changeToCompressed = graph()->NewNode(
- machine()->ChangeTaggedPointerToCompressedPointer(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
- changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
}
// -----------------------------------------------------------------------------
-// Direct Decompression & Compression - border cases
-
-// For example, if we are lowering a CheckedCompressedToTaggedPointer in the
-// effect linearization phase we will change that to
-// ChangeCompressedPointerToTaggedPointer. Then, we might end up with a chain of
-// Parent <- ChangeCompressedPointerToTaggedPointer <- ChangeTaggedToCompressed
-// <- Child.
-// Similarly, we have cases with Signed instead of pointer.
-// The following border case tests will test that the functionality is robust
-// enough to handle that.
-
-TEST_F(DecompressionEliminationTest,
- BasicDecompressionCompressionBorderCaseSigned) {
- // Skip test if pointer compression is not enabled
- if (!COMPRESS_POINTERS_BOOL) {
- return;
- }
+// Direct Compression & Decompression
- // Define variables
- Node* const control = graph()->start();
- Node* object = Parameter(Type::Any(), 0);
- Node* effect = graph()->start();
- Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
- ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedSigned(),
- kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
- index, effect, control);
- Node* changeToTagged =
- graph()->NewNode(machine()->ChangeCompressedSignedToTaggedSigned(), load);
- Node* changeToCompressed =
- graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
- index, changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
-}
-
-TEST_F(DecompressionEliminationTest,
- BasicDecompressionCompressionBorderCasePointer) {
- // Skip test if pointer compression is not enabled
+TEST_F(DecompressionEliminationTest, BasicCompressionDecompression) {
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
- ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedPointer(),
- kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
- index, effect, control);
- Node* changeToTagged = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load);
- Node* changeToCompressed =
- graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
- index, changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
-}
-// We also have cases of ChangeCompressedToTagged <-
-// ChangeTaggedPointerToCompressedPointer, where the
-// ChangeTaggedPointerToCompressedPointer was introduced while lowering a
-// NewConsString on effect control linearizer
+ // Pairs of <load, store> accesses
+ const std::pair<ElementAccess, ElementAccess> accesses[] = {
+ {any_access, any_access}, {signed_access, any_access},
+ {pointer_access, any_access}, {any_access, signed_access},
+ {signed_access, signed_access}, {any_access, pointer_access},
+ {pointer_access, pointer_access}};
-TEST_F(DecompressionEliminationTest,
- BasicDecompressionCompressionBorderCasePointerDecompression) {
- // Skip test if pointer compression is not enabled
- if (!COMPRESS_POINTERS_BOOL) {
- return;
+ for (size_t i = 0; i < arraysize(accesses); ++i) {
+ // Create the graph.
+ Node* load = graph()->NewNode(simplified()->LoadElement(accesses[i].first),
+ object, index, effect, control);
+ Node* change_to_compressed =
+ graph()->NewNode(CompressionOpFromAccess(accesses[i].first), load);
+ Node* change_to_tagged = graph()->NewNode(
+ DecompressionOpFromAccess(accesses[i].second), change_to_compressed);
+ effect = graph()->NewNode(simplified()->StoreElement(accesses[i].second),
+ object, index, change_to_tagged, effect, control);
+
+ // Reduce.
+ Reduction r = Reduce(change_to_tagged);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
}
-
- // Define variables
- Node* const control = graph()->start();
- Node* object = Parameter(Type::Any(), 0);
- Node* effect = graph()->start();
- Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedPointer(),
- kNoWriteBarrier};
- ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
- index, effect, control);
- Node* changeToTagged = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load);
- Node* changeToCompressed =
- graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
- index, changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
}
// -----------------------------------------------------------------------------
-// Compress after constant
+// Compress after constant.
-TEST_F(DecompressionEliminationTest,
- DecompressionConstantStoreElementInt64Constant) {
+TEST_F(DecompressionEliminationTest, CompressionAfterInt64Constant) {
// Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
@@ -266,21 +194,6 @@ TEST_F(DecompressionEliminationTest,
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
-
- const Operator* compression_ops[] = {
- machine()->ChangeTaggedToCompressed(),
- machine()->ChangeTaggedSignedToCompressedSigned(),
- machine()->ChangeTaggedPointerToCompressedPointer()};
-
- ASSERT_EQ(arraysize(compression_ops), arraysize(element_accesses));
-
const int64_t constants[] = {static_cast<int64_t>(0x0000000000000000),
static_cast<int64_t>(0x0000000000000001),
static_cast<int64_t>(0x0000FFFFFFFF0000),
@@ -291,31 +204,26 @@ TEST_F(DecompressionEliminationTest,
static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
- // For every compression.
- for (size_t i = 0; i < arraysize(compression_ops); ++i) {
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
// For every Int64Constant.
for (size_t j = 0; j < arraysize(constants); ++j) {
// Create the graph.
Node* constant = graph()->NewNode(common()->Int64Constant(constants[j]));
- Node* changeToCompressed = graph()->NewNode(compression_ops[i], constant);
- effect =
- graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
- object, index, changeToCompressed, effect, control);
+ Node* change_to_compressed = graph()->NewNode(
+ CompressionOpFromAccess(element_accesses[i]), constant);
+ effect = graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
+ object, index, change_to_compressed, effect,
+ control);
// Reduce.
- Reduction r = Reduce(changeToCompressed);
+ Reduction r = Reduce(change_to_compressed);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kInt32Constant);
}
}
}
-TEST_F(DecompressionEliminationTest,
- DecompressionConstantStoreElementHeapConstant) {
- // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
- // exists, since it breaks with verify CSA on.
- if (COMPRESS_POINTERS_BOOL) {
- return;
- }
+TEST_F(DecompressionEliminationTest, CompressionAfterHeapConstant) {
// Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
@@ -327,21 +235,6 @@ TEST_F(DecompressionEliminationTest,
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
-
- const Operator* compression_ops[] = {
- machine()->ChangeTaggedToCompressed(),
- machine()->ChangeTaggedSignedToCompressedSigned(),
- machine()->ChangeTaggedPointerToCompressedPointer()};
-
- ASSERT_EQ(arraysize(compression_ops), arraysize(element_accesses));
-
const Handle<HeapNumber> heap_constants[] = {
factory()->NewHeapNumber(0.0),
factory()->NewHeapNumber(-0.0),
@@ -359,119 +252,83 @@ TEST_F(DecompressionEliminationTest,
factory()->NewHeapNumber(0x8FFFFFFFFFFFFFFF),
factory()->NewHeapNumber(0xFFFFFFFFFFFFFFFF)};
- // For every compression.
- for (size_t i = 0; i < arraysize(compression_ops); ++i) {
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
// For every HeapNumber.
for (size_t j = 0; j < arraysize(heap_constants); ++j) {
// Create the graph.
Node* constant =
graph()->NewNode(common()->HeapConstant(heap_constants[j]));
- Node* changeToCompressed = graph()->NewNode(compression_ops[i], constant);
- effect =
- graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
- object, index, changeToCompressed, effect, control);
+ Node* change_to_compressed = graph()->NewNode(
+ CompressionOpFromAccess(element_accesses[i]), constant);
+ effect = graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
+ object, index, change_to_compressed, effect,
+ control);
// Reduce.
- Reduction r = Reduce(changeToCompressed);
+ Reduction r = Reduce(change_to_compressed);
ASSERT_TRUE(r.Changed());
- // TODO(v8:8977): Change the IrOpcode here to kCompressedHeapConstant when
- // that is in place.
- EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kHeapConstant);
+ EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kCompressedHeapConstant);
}
}
}
// -----------------------------------------------------------------------------
-// Phi
+// Phi.
TEST_F(DecompressionEliminationTest, PhiOneDecompress) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 1;
- const Operator* decompression_ops[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- const IrOpcode::Value opcodes[] = {
- IrOpcode::kChangeCompressedToTagged,
- IrOpcode::kChangeCompressedSignedToTaggedSigned,
- IrOpcode::kChangeCompressedPointerToTaggedPointer};
-
- ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
- ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
-
- // For every access
+ // For every access.
for (size_t i = 0; i < arraysize(element_accesses); ++i) {
- // Create the graph
+ // Create the graph.
Node* load =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* change_to_tagged = graph()->NewNode(decompression_ops[i], load);
+ Node* change_to_tagged =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
change_to_tagged, control);
- // Reduce
- Reduction r = Reduce(phi);
+ // Reduce.
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, ReplaceWithValue(phi, _, _, _));
+ Reduction r = Reduce(&editor, phi);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(opcodes[i], r.replacement()->opcode());
+
+ // Get the actual decompress after the Phi, and check against the expected
+ // one.
+ Node* decompress = GetUniqueValueUse(phi);
+ EXPECT_EQ(DecompressionOpFromAccess(element_accesses[i]), decompress->op());
}
}
TEST_F(DecompressionEliminationTest, PhiThreeDecompressSameRepresentation) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 3;
- const Operator* decompression_ops[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
-
- const IrOpcode::Value opcodes[] = {
- IrOpcode::kChangeCompressedToTagged,
- IrOpcode::kChangeCompressedSignedToTaggedSigned,
- IrOpcode::kChangeCompressedPointerToTaggedPointer};
-
- ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
- ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
-
- // For every access
+ // For every access.
for (size_t i = 0; i < arraysize(element_accesses); ++i) {
- // Create the graph
+ // Create the graph.
Node* load1 =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
@@ -481,115 +338,98 @@ TEST_F(DecompressionEliminationTest, PhiThreeDecompressSameRepresentation) {
Node* load3 =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
- Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
- Node* change_to_tagged3 = graph()->NewNode(decompression_ops[i], load3);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load1);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load2);
+ Node* change_to_tagged_3 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load3);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
- change_to_tagged1, change_to_tagged2, change_to_tagged3, control);
+ change_to_tagged_1, change_to_tagged_2, change_to_tagged_3, control);
- // Reduce
- Reduction r = Reduce(phi);
+ // Reduce.
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, ReplaceWithValue(phi, _, _, _));
+ Reduction r = Reduce(&editor, phi);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(opcodes[i], r.replacement()->opcode());
+
+ // Get the actual decompress after the Phi, and check against the expected
+ // one.
+ Node* decompress = GetUniqueValueUse(phi);
+ EXPECT_EQ(DecompressionOpFromAccess(element_accesses[i]), decompress->op());
}
}
TEST_F(DecompressionEliminationTest, PhiThreeDecompressOneAnyRepresentation) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 3;
- const Operator* decompression_ops[] = {
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
+ // Signed and Pointer (and not Any) accesses.
+ const ElementAccess not_any_accesses[] = {signed_access, pointer_access};
- const ElementAccess any_access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyCompressed(),
- kNoWriteBarrier};
-
- ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
-
- // For every access
- for (size_t i = 0; i < arraysize(element_accesses); ++i) {
- // Create the graph
+ // For every access.
+ for (size_t i = 0; i < arraysize(not_any_accesses); ++i) {
+ // Create the graph.
Node* load1 =
- graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(not_any_accesses[i]), object,
index, effect, control);
Node* load2 =
- graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(not_any_accesses[i]), object,
index, effect, control);
- // Note that load3 loads a CompressedAny instead of element_accesses[i]
+ // Note that load3 loads a CompressedAny instead of not_any_accesses[i]
Node* load3 = graph()->NewNode(simplified()->LoadElement(any_access),
object, index, effect, control);
- Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
- Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
- Node* change_to_tagged3 =
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(not_any_accesses[i]), load1);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(not_any_accesses[i]), load2);
+ Node* change_to_tagged_3 =
graph()->NewNode(machine()->ChangeCompressedToTagged(), load3);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
- change_to_tagged1, change_to_tagged2, change_to_tagged3, control);
+ change_to_tagged_1, change_to_tagged_2, change_to_tagged_3, control);
- // Reduce
- Reduction r = Reduce(phi);
+ // Reduce.
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, ReplaceWithValue(phi, _, _, _));
+ Reduction r = Reduce(&editor, phi);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(IrOpcode::kChangeCompressedToTagged, r.replacement()->opcode());
+
+ // Get the actual decompress after the Phi, and check against the expected
+ // one.
+ Node* decompress = GetUniqueValueUse(phi);
+ EXPECT_EQ(machine()->ChangeCompressedToTagged(), decompress->op());
}
}
TEST_F(DecompressionEliminationTest, PhiThreeInputsOneNotDecompressed) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 3;
- const Operator* decompression_ops[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
-
- const IrOpcode::Value opcodes[] = {
- IrOpcode::kChangeCompressedToTagged,
- IrOpcode::kChangeCompressedSignedToTaggedSigned,
- IrOpcode::kChangeCompressedPointerToTaggedPointer};
-
- ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
- ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
-
- // For every access
+ // For every access.
for (size_t i = 0; i < arraysize(element_accesses); ++i) {
- // Create the graph
+ // Create the graph.
Node* load1 =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
@@ -599,14 +439,16 @@ TEST_F(DecompressionEliminationTest, PhiThreeInputsOneNotDecompressed) {
Node* load3 =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
- Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load1);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load2);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
- change_to_tagged1, change_to_tagged2, load3, control);
+ change_to_tagged_1, change_to_tagged_2, load3, control);
- // Reduce
+ // Reduce.
Reduction r = Reduce(phi);
ASSERT_FALSE(r.Changed());
}
@@ -615,287 +457,251 @@ TEST_F(DecompressionEliminationTest, PhiThreeInputsOneNotDecompressed) {
// In the case of having one decompress Signed and one Pointer, we have to
// generate the conservative decompress any after the Phi.
TEST_F(DecompressionEliminationTest, PhiTwoDecompressesOneSignedOnePointer) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 2;
- const ElementAccess signed_access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::CompressedSigned(),
- kNoWriteBarrier};
- const ElementAccess pointer_access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::CompressedPointer(),
- kNoWriteBarrier};
- // Create the graph
+ // Create the graph.
Node* load1 = graph()->NewNode(simplified()->LoadElement(signed_access),
object, index, effect, control);
Node* load2 = graph()->NewNode(simplified()->LoadElement(pointer_access),
object, index, effect, control);
- Node* change_to_tagged1 = graph()->NewNode(
- machine()->ChangeCompressedSignedToTaggedSigned(), load1);
- Node* change_to_tagged2 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load2);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(signed_access), load1);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(pointer_access), load2);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
- change_to_tagged1, change_to_tagged2, control);
+ change_to_tagged_1, change_to_tagged_2, control);
- // Reduce
- Reduction r = Reduce(phi);
+ // Reduce.
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, ReplaceWithValue(phi, _, _, _));
+ Reduction r = Reduce(&editor, phi);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(IrOpcode::kChangeCompressedToTagged, r.replacement()->opcode());
+
+ // Get the actual decompress after the Phi, and check against the expected
+ // one.
+ Node* decompress = GetUniqueValueUse(phi);
+ EXPECT_EQ(machine()->ChangeCompressedToTagged(), decompress->op());
}
// -----------------------------------------------------------------------------
-// TypedStateValues
+// TypedStateValues.
TEST_F(DecompressionEliminationTest, TypedStateValuesOneDecompress) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const int numberOfInputs = 1;
+ const int number_of_inputs = 1;
const ZoneVector<MachineType>* types =
new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
- ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ ZoneVector<MachineType>(number_of_inputs, graph()->zone());
SparseInputMask dense = SparseInputMask::Dense();
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- // For every access
- for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]),
- object, index, effect, control);
- Node* changeToTagged = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load);
- Node* typedStateValuesOneDecompress = graph()->NewNode(
- common()->TypedStateValues(types, dense), changeToTagged);
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* change_to_tagged =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load);
+ Node* typedStateValues = graph()->NewNode(
+ common()->TypedStateValues(types, dense), change_to_tagged);
- // Reduce
+ // Reduce.
StrictMock<MockAdvancedReducerEditor> editor;
DecompressionElimination decompression_elimination(&editor, graph(),
machine(), common());
- Reduction r =
- decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ Reduction r = decompression_elimination.Reduce(typedStateValues);
ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->InputAt(0), load);
}
}
TEST_F(DecompressionEliminationTest, TypedStateValuesTwoDecompresses) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const int numberOfInputs = 3;
+ const int number_of_inputs = 3;
const ZoneVector<MachineType>* types =
new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
- ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ ZoneVector<MachineType>(number_of_inputs, graph()->zone());
SparseInputMask dense = SparseInputMask::Dense();
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- // For every access
- for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
- // Create the graph
+
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
Node* load1 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged1 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load1);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load1);
Node* load2 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged2 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load2);
- Node* typedStateValuesOneDecompress =
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load2);
+ Node* typedStateValues =
graph()->NewNode(common()->TypedStateValues(types, dense),
- changeToTagged1, load1, changeToTagged2);
+ change_to_tagged_1, load1, change_to_tagged_2);
- // Reduce
+ // Reduce.
StrictMock<MockAdvancedReducerEditor> editor;
DecompressionElimination decompression_elimination(&editor, graph(),
machine(), common());
- Reduction r =
- decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ Reduction r = decompression_elimination.Reduce(typedStateValues);
ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->InputAt(0), load1);
+ // Note that the input at index 1 didn't change.
+ EXPECT_EQ(r.replacement()->InputAt(1), load1);
+ EXPECT_EQ(r.replacement()->InputAt(2), load2);
}
}
TEST_F(DecompressionEliminationTest, TypedStateValuesAllDecompresses) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const int numberOfInputs = 3;
+ const int number_of_inputs = 3;
const ZoneVector<MachineType>* types =
new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
- ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ ZoneVector<MachineType>(number_of_inputs, graph()->zone());
SparseInputMask dense = SparseInputMask::Dense();
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- // For every access
- for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
- // Create the graph
+
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
Node* load1 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged1 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load1);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load1);
Node* load2 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged2 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load2);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load2);
Node* load3 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged3 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load3);
- Node* typedStateValuesOneDecompress =
- graph()->NewNode(common()->TypedStateValues(types, dense),
- changeToTagged1, changeToTagged2, changeToTagged3);
+ Node* change_to_tagged_3 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load3);
+ Node* typedStateValues = graph()->NewNode(
+ common()->TypedStateValues(types, dense), change_to_tagged_1,
+ change_to_tagged_2, change_to_tagged_3);
- // Reduce
+ // Reduce.
StrictMock<MockAdvancedReducerEditor> editor;
DecompressionElimination decompression_elimination(&editor, graph(),
machine(), common());
- Reduction r =
- decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ Reduction r = decompression_elimination.Reduce(typedStateValues);
ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->InputAt(0), load1);
+ EXPECT_EQ(r.replacement()->InputAt(1), load2);
+ EXPECT_EQ(r.replacement()->InputAt(2), load3);
}
}
TEST_F(DecompressionEliminationTest, TypedStateValuesNoDecompresses) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const int numberOfInputs = 3;
+ const int number_of_inputs = 3;
const ZoneVector<MachineType>* types =
new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
- ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ ZoneVector<MachineType>(number_of_inputs, graph()->zone());
SparseInputMask dense = SparseInputMask::Dense();
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- // For every access
- for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]),
- object, index, effect, control);
- Node* typedStateValuesOneDecompress = graph()->NewNode(
+
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* typedStateValues = graph()->NewNode(
common()->TypedStateValues(types, dense), load, load, load);
- // Reduce
+ // Reduce.
StrictMock<MockAdvancedReducerEditor> editor;
DecompressionElimination decompression_elimination(&editor, graph(),
machine(), common());
- Reduction r =
- decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ Reduction r = decompression_elimination.Reduce(typedStateValues);
ASSERT_FALSE(r.Changed());
}
}
// -----------------------------------------------------------------------------
-// Word64Equal comparison of two decompressions
+// Word64Equal comparison of two decompressions.
TEST_F(DecompressionEliminationTest, TwoDecompressionWord64Equal) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const Operator* DecompressionOps[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
-
- // For every decompression (lhs)
- for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
+ // For every decompression (lhs).
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
// For every decompression (rhs)
- for (size_t k = 0; k < arraysize(DecompressionOps); ++k) {
- // Create the graph
+ for (size_t j = 0; j < arraysize(element_accesses); ++j) {
+ // Create the graph.
Node* load1 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]),
object, index, effect, control);
- Node* changeToTagged1 = graph()->NewNode(DecompressionOps[j], load1);
+ Node* change_to_tagged_1 = graph()->NewNode(
+ DecompressionOpFromAccess(element_accesses[i]), load1);
Node* load2 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[k]),
+ graph()->NewNode(simplified()->LoadElement(element_accesses[j]),
object, index, effect, control);
- Node* changeToTagged2 = graph()->NewNode(DecompressionOps[j], load2);
- Node* comparison = graph()->NewNode(machine()->Word64Equal(),
- changeToTagged1, changeToTagged2);
- // Reduce
+ Node* change_to_tagged_2 = graph()->NewNode(
+ DecompressionOpFromAccess(element_accesses[i]), load2);
+ Node* comparison = graph()->NewNode(
+ machine()->Word64Equal(), change_to_tagged_1, change_to_tagged_2);
+ // Reduce.
Reduction r = Reduce(comparison);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
@@ -904,44 +710,31 @@ TEST_F(DecompressionEliminationTest, TwoDecompressionWord64Equal) {
}
// -----------------------------------------------------------------------------
-// Word64Equal comparison of two decompressions, where lhs == rhs
+// Word64Equal comparison of two decompressions, where lhs == rhs.
TEST_F(DecompressionEliminationTest, TwoDecompressionWord64EqualSameInput) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const Operator* DecompressionOps[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
-
- // For every decompression (same for lhs and rhs)
- for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
- object, index, effect, control);
- Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+ // For every access. (same for lhs and rhs)
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* change_to_tagged =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load);
Node* comparison = graph()->NewNode(machine()->Word64Equal(),
- changeToTagged, changeToTagged);
- // Reduce
+ change_to_tagged, change_to_tagged);
+ // Reduce.
Reduction r = Reduce(comparison);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
@@ -949,35 +742,20 @@ TEST_F(DecompressionEliminationTest, TwoDecompressionWord64EqualSameInput) {
}
// -----------------------------------------------------------------------------
-// Word64Equal comparison of decompress and a constant
+// Word64Equal comparison of decompress and a constant.
TEST_F(DecompressionEliminationTest, DecompressionConstantWord64Equal) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const Operator* DecompressionOps[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
-
const int64_t constants[] = {static_cast<int64_t>(0x0000000000000000),
static_cast<int64_t>(0x0000000000000001),
static_cast<int64_t>(0x0000FFFFFFFF0000),
@@ -988,24 +766,25 @@ TEST_F(DecompressionEliminationTest, DecompressionConstantWord64Equal) {
static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
- // For every decompression (lhs)
- for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
- // For every constant (rhs)
- for (size_t k = 0; k < arraysize(constants); ++k) {
- // Test with both (lhs, rhs) combinations
- for (bool lhsIsDecompression : {false, true}) {
- // Create the graph
+ // For every decompression (lhs).
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // For every constant (rhs).
+ for (size_t j = 0; j < arraysize(constants); ++j) {
+ // Test with both (lhs, rhs) combinations.
+ for (bool lhs_is_decompression : {false, true}) {
+ // Create the graph.
Node* load =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]),
object, index, effect, control);
- Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+ Node* change_to_tagged = graph()->NewNode(
+ DecompressionOpFromAccess(element_accesses[i]), load);
Node* constant =
- graph()->NewNode(common()->Int64Constant(constants[k]));
+ graph()->NewNode(common()->Int64Constant(constants[j]));
- Node* lhs = lhsIsDecompression ? changeToTagged : constant;
- Node* rhs = lhsIsDecompression ? constant : changeToTagged;
+ Node* lhs = lhs_is_decompression ? change_to_tagged : constant;
+ Node* rhs = lhs_is_decompression ? constant : change_to_tagged;
Node* comparison = graph()->NewNode(machine()->Word64Equal(), lhs, rhs);
- // Reduce
+ // Reduce.
Reduction r = Reduce(comparison);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
@@ -1015,38 +794,18 @@ TEST_F(DecompressionEliminationTest, DecompressionConstantWord64Equal) {
}
TEST_F(DecompressionEliminationTest, DecompressionHeapConstantWord64Equal) {
- // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
- // exists, since it breaks with verify CSA on.
- if (COMPRESS_POINTERS_BOOL) {
- return;
- }
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const Operator* DecompressionOps[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
-
- const Handle<HeapNumber> heapConstants[] = {
+ const Handle<HeapNumber> heap_constants[] = {
factory()->NewHeapNumber(0.0),
factory()->NewHeapNumber(-0.0),
factory()->NewHeapNumber(11.2),
@@ -1063,24 +822,25 @@ TEST_F(DecompressionEliminationTest, DecompressionHeapConstantWord64Equal) {
factory()->NewHeapNumber(0x8FFFFFFFFFFFFFFF),
factory()->NewHeapNumber(0xFFFFFFFFFFFFFFFF)};
- // For every decompression (lhs)
- for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
- // For every constant (rhs)
- for (size_t k = 0; k < arraysize(heapConstants); ++k) {
- // Test with both (lhs, rhs) combinations
- for (bool lhsIsDecompression : {false, true}) {
- // Create the graph
+ // For every decompression (lhs).
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // For every constant (rhs).
+ for (size_t j = 0; j < arraysize(heap_constants); ++j) {
+ // Test with both (lhs, rhs) combinations.
+ for (bool lhs_is_decompression : {false, true}) {
+ // Create the graph.
Node* load =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]),
object, index, effect, control);
- Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+ Node* change_to_tagged = graph()->NewNode(
+ DecompressionOpFromAccess(element_accesses[i]), load);
Node* constant =
- graph()->NewNode(common()->HeapConstant(heapConstants[k]));
+ graph()->NewNode(common()->HeapConstant(heap_constants[j]));
- Node* lhs = lhsIsDecompression ? changeToTagged : constant;
- Node* rhs = lhsIsDecompression ? constant : changeToTagged;
+ Node* lhs = lhs_is_decompression ? change_to_tagged : constant;
+ Node* rhs = lhs_is_decompression ? constant : change_to_tagged;
Node* comparison = graph()->NewNode(machine()->Word64Equal(), lhs, rhs);
- // Reduce
+ // Reduce.
Reduction r = Reduce(comparison);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index f506502610..1f952cc1a6 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/test-utils.h"
using testing::_;
@@ -237,9 +238,11 @@ class AdvancedReducerTest : public TestWithZone {
protected:
Graph* graph() { return &graph_; }
+ TickCounter* tick_counter() { return &tick_counter_; }
private:
Graph graph_;
+ TickCounter tick_counter_;
};
@@ -368,7 +371,7 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse2) {
Node* exception = graph()->NewNode(common.IfException(), effect, node);
Node* use_control = graph()->NewNode(common.Merge(1), success);
Node* replacement = graph()->NewNode(&kMockOperator);
- GraphReducer graph_reducer(zone(), graph(), dead);
+ GraphReducer graph_reducer(zone(), graph(), tick_counter(), dead);
ReplaceWithValueReducer r(&graph_reducer);
r.ReplaceWithValue(node, replacement);
EXPECT_EQ(start, use_control->InputAt(0));
@@ -392,7 +395,7 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse3) {
Node* exception = graph()->NewNode(common.IfException(), effect, node);
Node* use_control = graph()->NewNode(common.Merge(1), success);
Node* replacement = graph()->NewNode(&kMockOperator);
- GraphReducer graph_reducer(zone(), graph(), dead);
+ GraphReducer graph_reducer(zone(), graph(), tick_counter(), dead);
ReplaceWithValueReducer r(&graph_reducer);
r.ReplaceWithValue(node, replacement);
EXPECT_EQ(start, use_control->InputAt(0));
@@ -422,20 +425,20 @@ class GraphReducerTest : public TestWithZone {
protected:
void ReduceNode(Node* node, Reducer* r) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r);
reducer.ReduceNode(node);
}
void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.ReduceNode(node);
}
void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.AddReducer(r3);
@@ -443,20 +446,20 @@ class GraphReducerTest : public TestWithZone {
}
void ReduceGraph(Reducer* r1) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.ReduceGraph();
}
void ReduceGraph(Reducer* r1, Reducer* r2) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.ReduceGraph();
}
void ReduceGraph(Reducer* r1, Reducer* r2, Reducer* r3) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.AddReducer(r3);
@@ -464,9 +467,11 @@ class GraphReducerTest : public TestWithZone {
}
Graph* graph() { return &graph_; }
+ TickCounter* tick_counter() { return &tick_counter_; }
private:
Graph graph_;
+ TickCounter tick_counter_;
};
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index f433dda42e..485df8e401 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -18,7 +18,7 @@ GraphTest::GraphTest(int num_parameters)
: canonical_(isolate()),
common_(zone()),
graph_(zone()),
- broker_(isolate(), zone()),
+ broker_(isolate(), zone(), FLAG_trace_heap_broker),
source_positions_(&graph_),
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
@@ -116,7 +116,8 @@ Matcher<Node*> GraphTest::IsUndefinedConstant() {
}
TypedGraphTest::TypedGraphTest(int num_parameters)
- : GraphTest(num_parameters), typer_(broker(), Typer::kNoFlags, graph()) {}
+ : GraphTest(num_parameters),
+ typer_(broker(), Typer::kNoFlags, graph(), tick_counter()) {}
TypedGraphTest::~TypedGraphTest() = default;
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index fa42294a65..60d425b911 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -5,9 +5,11 @@
#ifndef V8_UNITTESTS_COMPILER_GRAPH_UNITTEST_H_
#define V8_UNITTESTS_COMPILER_GRAPH_UNITTEST_H_
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/typer.h"
#include "src/handles/handles.h"
@@ -62,6 +64,7 @@ class GraphTest : public TestWithNativeContextAndZone {
SourcePositionTable* source_positions() { return &source_positions_; }
NodeOriginTable* node_origins() { return &node_origins_; }
JSHeapBroker* broker() { return &broker_; }
+ TickCounter* tick_counter() { return &tick_counter_; }
private:
CanonicalHandleScope canonical_;
@@ -70,6 +73,7 @@ class GraphTest : public TestWithNativeContextAndZone {
JSHeapBroker broker_;
SourcePositionTable source_positions_;
NodeOriginTable node_origins_;
+ TickCounter tick_counter_;
};
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index b9f3ff8056..3d4e16ac68 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -4,6 +4,7 @@
#include <cctype>
+#include "src/codegen/tick-counter.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-graph.h"
@@ -33,7 +34,7 @@ class JSCallReducerTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
JSCallReducer reducer(&graph_reducer, &jsgraph, broker(),
JSCallReducer::kNoFlags, &deps_);
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 5a0d54e861..95c03e543f 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/js-create-lowering.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
@@ -43,7 +44,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, broker(),
zone());
return reducer.Reduce(node);
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index d6e9876e64..3510cd4b74 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -36,7 +36,7 @@ class JSIntrinsicLoweringTest : public GraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
JSIntrinsicLowering reducer(&graph_reducer, &jsgraph);
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 765a79db40..0d7bb946e3 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -47,7 +47,7 @@ class JSTypedLoweringTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
JSTypedLowering reducer(&graph_reducer, &jsgraph, broker(), zone());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index 07013aa52c..e084b495f9 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -66,7 +66,7 @@ class LoopPeelingTest : public GraphTest {
StdoutStream{} << AsRPO(*graph());
}
Zone zone(isolate()->allocator(), ZONE_NAME);
- return LoopFinder::BuildLoopTree(graph(), &zone);
+ return LoopFinder::BuildLoopTree(graph(), tick_counter(), &zone);
}
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index e85bc09e1e..7f3a613994 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -33,7 +33,7 @@ class MachineOperatorReducerTest : public GraphTest {
javascript_(zone()),
jsgraph_(isolate(), graph(), &common_, &javascript_, nullptr,
&machine_),
- graph_reducer_(zone(), graph(), jsgraph_.Dead()) {}
+ graph_reducer_(zone(), graph(), tick_counter(), jsgraph_.Dead()) {}
protected:
Reduction Reduce(Node* node) {
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 8ffdaf27d0..fc6f1d5500 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -1095,9 +1095,11 @@ class IsStoreElementMatcher final : public TestNodeMatcher {
if (NodeProperties::FirstControlIndex(node) < node->InputCount()) { \
control_node = NodeProperties::GetControlInput(node); \
} \
+ LoadRepresentation rep = IrOpcode::kLoadFromObject == node->opcode() \
+ ? ObjectAccessOf(node->op()).machine_type \
+ : LoadRepresentationOf(node->op()); \
return (TestNodeMatcher::MatchAndExplain(node, listener) && \
- PrintMatchAndExplain(LoadRepresentationOf(node->op()), "rep", \
- rep_matcher_, listener) && \
+ PrintMatchAndExplain(rep, "rep", rep_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
"base", base_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
@@ -1119,6 +1121,7 @@ class IsStoreElementMatcher final : public TestNodeMatcher {
LOAD_MATCHER(Load)
LOAD_MATCHER(UnalignedLoad)
LOAD_MATCHER(PoisonedLoad)
+LOAD_MATCHER(LoadFromObject)
#define STORE_MATCHER(kStore) \
class Is##kStore##Matcher final : public TestNodeMatcher { \
@@ -2037,6 +2040,16 @@ Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
control_matcher));
}
+Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsLoadFromObjectMatcher(rep_matcher, base_matcher,
+ index_matcher, effect_matcher,
+ control_matcher));
+}
+
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index a71f05964f..be8d67cb35 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -333,6 +333,11 @@ Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index 067b7c95ec..76fbc4a368 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/redundancy-elimination.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -299,6 +300,36 @@ TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt64) {
}
// -----------------------------------------------------------------------------
+// CheckedInt32ToCompressedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedInt32ToCompressedSigned) {
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedInt32ToCompressedSigned(feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedInt32ToCompressedSigned(feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
// CheckedInt32ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
@@ -938,7 +969,7 @@ TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -974,7 +1005,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -1013,7 +1044,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1049,7 +1080,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -1088,7 +1119,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1124,7 +1155,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -1163,7 +1194,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberAddWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1189,7 +1220,7 @@ TEST_F(RedundancyEliminationTest,
}
TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1219,7 +1250,7 @@ TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1247,7 +1278,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1278,7 +1309,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1306,7 +1337,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1337,7 +1368,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1365,7 +1396,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1396,7 +1427,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeToNumberWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
@@ -1424,7 +1455,7 @@ TEST_F(RedundancyEliminationTest,
}
TEST_F(RedundancyEliminationTest, SpeculativeToNumberWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
diff --git a/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
index a71a5315f1..82c55d9cf8 100644
--- a/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
@@ -56,13 +56,13 @@ class TestRangeBuilder {
LifetimePosition start = LifetimePosition::FromInt(pair.first);
LifetimePosition end = LifetimePosition::FromInt(pair.second);
CHECK(start < end);
- range->AddUseInterval(start, end, zone_);
+ range->AddUseInterval(start, end, zone_, FLAG_trace_turbo_alloc);
}
for (int pos : uses_) {
UsePosition* use_position =
new (zone_) UsePosition(LifetimePosition::FromInt(pos), nullptr,
nullptr, UsePositionHintType::kNone);
- range->AddUsePosition(use_position);
+ range->AddUsePosition(use_position, FLAG_trace_turbo_alloc);
}
pairs_.clear();
@@ -129,10 +129,10 @@ TEST_F(LiveRangeUnitTest, InvalidConstruction) {
// Build a range manually, because the builder guards against empty cases.
TopLevelLiveRange* range =
new (zone()) TopLevelLiveRange(1, MachineRepresentation::kTagged);
- V8_ASSERT_DEBUG_DEATH(
- range->AddUseInterval(LifetimePosition::FromInt(0),
- LifetimePosition::FromInt(0), zone()),
- ".*");
+ V8_ASSERT_DEBUG_DEATH(range->AddUseInterval(LifetimePosition::FromInt(0),
+ LifetimePosition::FromInt(0),
+ zone(), FLAG_trace_turbo_alloc),
+ ".*");
}
TEST_F(LiveRangeUnitTest, SplitInvalidStart) {
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index e153738544..ee1c7997b3 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/scheduler.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -38,8 +39,8 @@ class SchedulerTest : public TestWithIsolateAndZone {
StdoutStream{} << AsJSON(*graph(), &table, &table2);
}
- Schedule* schedule =
- Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kSplitNodes);
+ Schedule* schedule = Scheduler::ComputeSchedule(
+ zone(), graph(), Scheduler::kSplitNodes, tick_counter());
if (FLAG_trace_turbo_scheduler) {
StdoutStream{} << *schedule << std::endl;
@@ -62,8 +63,10 @@ class SchedulerTest : public TestWithIsolateAndZone {
CommonOperatorBuilder* common() { return &common_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
JSOperatorBuilder* js() { return &js_; }
+ TickCounter* tick_counter() { return &tick_counter_; }
private:
+ TickCounter tick_counter_;
Graph graph_;
CommonOperatorBuilder common_;
SimplifiedOperatorBuilder simplified_;
@@ -88,7 +91,8 @@ const Operator kMockTailCall(IrOpcode::kTailCall, Operator::kNoProperties,
TEST_F(SchedulerTest, BuildScheduleEmpty) {
graph()->SetStart(graph()->NewNode(common()->Start(0)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
- USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags,
+ tick_counter()));
}
@@ -102,7 +106,8 @@ TEST_F(SchedulerTest, BuildScheduleOneParameter) {
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
- USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags,
+ tick_counter()));
}
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 722384da5b..8a5a9eda91 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/simplified-lowering.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/simplified-operator.h"
@@ -42,13 +43,13 @@ class SimplifiedLoweringTest : public GraphTest {
{
// Simplified lowering needs to run w/o the typer decorator so make sure
// the object is not live at the same time.
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
typer.Run();
}
- SimplifiedLowering lowering(jsgraph(), broker(), zone(), source_positions(),
- node_origins(),
- PoisoningMitigationLevel::kDontPoison);
+ SimplifiedLowering lowering(
+ jsgraph(), broker(), zone(), source_positions(), node_origins(),
+ PoisoningMitigationLevel::kDontPoison, tick_counter());
lowering.LowerAllNodes();
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 1f44eb088b..b198592ddd 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/simplified-operator-reducer.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
@@ -29,12 +30,12 @@ class SimplifiedOperatorReducerTest : public GraphTest {
protected:
Reduction Reduce(Node* node) {
- JSHeapBroker broker(isolate(), zone());
+ JSHeapBroker broker(isolate(), zone(), FLAG_trace_heap_broker);
MachineOperatorBuilder machine(zone());
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph, &broker);
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index 1b416628fc..9939c70c75 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -53,6 +53,7 @@ const PureOperator kPureOperators[] = {
PURE(NumberShiftRightLogical, Operator::kNoProperties, 2),
PURE(NumberToInt32, Operator::kNoProperties, 1),
PURE(NumberToUint32, Operator::kNoProperties, 1),
+ PURE(ChangeCompressedSignedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index 70c0b69047..fb40386378 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -37,7 +37,7 @@ class TypedOptimizationTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
TypedOptimization reducer(&graph_reducer, &deps_, &jsgraph, broker());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 2eaa379f30..ec68993213 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -22,7 +22,7 @@ class TyperTest : public TypedGraphTest {
public:
TyperTest()
: TypedGraphTest(3),
- broker_(isolate(), zone()),
+ broker_(isolate(), zone(), FLAG_trace_heap_broker),
operation_typer_(&broker_, zone()),
types_(zone(), isolate(), random_number_generator()),
javascript_(zone()),
diff --git a/deps/v8/test/unittests/heap/heap-controller-unittest.cc b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
index 445c49052c..db330608f2 100644
--- a/deps/v8/test/unittests/heap/heap-controller-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
@@ -53,15 +53,14 @@ TEST_F(MemoryControllerTest, HeapGrowingFactor) {
}
TEST_F(MemoryControllerTest, MaxHeapGrowingFactor) {
- CheckEqualRounded(1.3,
- V8Controller::MaxGrowingFactor(V8HeapTrait::kMinSize * MB));
- CheckEqualRounded(
- 1.600, V8Controller::MaxGrowingFactor(V8HeapTrait::kMaxSize / 2 * MB));
- CheckEqualRounded(
- 1.999, V8Controller::MaxGrowingFactor(
- (V8HeapTrait::kMaxSize - Heap::kPointerMultiplier) * MB));
+ CheckEqualRounded(1.3, V8Controller::MaxGrowingFactor(V8HeapTrait::kMinSize));
+ CheckEqualRounded(1.600,
+ V8Controller::MaxGrowingFactor(V8HeapTrait::kMaxSize / 2));
+ CheckEqualRounded(2.0,
+ V8Controller::MaxGrowingFactor(
+ (V8HeapTrait::kMaxSize - Heap::kPointerMultiplier)));
CheckEqualRounded(4.0, V8Controller::MaxGrowingFactor(
- static_cast<size_t>(V8HeapTrait::kMaxSize) * MB));
+ static_cast<size_t>(V8HeapTrait::kMaxSize)));
}
TEST_F(MemoryControllerTest, OldGenerationAllocationLimit) {
@@ -77,42 +76,36 @@ TEST_F(MemoryControllerTest, OldGenerationAllocationLimit) {
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
V8Controller::CalculateAllocationLimit(
- heap, old_gen_size, max_old_generation_size, new_space_capacity,
- factor, Heap::HeapGrowingMode::kDefault));
+ heap, old_gen_size, 0u, max_old_generation_size,
+ new_space_capacity, factor, Heap::HeapGrowingMode::kDefault));
factor = Min(factor, V8HeapTrait::kConservativeGrowingFactor);
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
V8Controller::CalculateAllocationLimit(
- heap, old_gen_size, max_old_generation_size, new_space_capacity,
- factor, Heap::HeapGrowingMode::kSlow));
+ heap, old_gen_size, 0u, max_old_generation_size,
+ new_space_capacity, factor, Heap::HeapGrowingMode::kSlow));
factor = Min(factor, V8HeapTrait::kConservativeGrowingFactor);
- EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- V8Controller::CalculateAllocationLimit(
- heap, old_gen_size, max_old_generation_size, new_space_capacity,
- factor, Heap::HeapGrowingMode::kConservative));
+ EXPECT_EQ(
+ static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ V8Controller::CalculateAllocationLimit(
+ heap, old_gen_size, 0u, max_old_generation_size, new_space_capacity,
+ factor, Heap::HeapGrowingMode::kConservative));
factor = V8HeapTrait::kMinGrowingFactor;
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
V8Controller::CalculateAllocationLimit(
- heap, old_gen_size, max_old_generation_size, new_space_capacity,
- factor, Heap::HeapGrowingMode::kMinimal));
-}
+ heap, old_gen_size, 0u, max_old_generation_size,
+ new_space_capacity, factor, Heap::HeapGrowingMode::kMinimal));
-TEST_F(MemoryControllerTest, MaxOldGenerationSize) {
- uint64_t configurations[][2] = {
- {0, V8HeapTrait::kMinSize},
- {512, V8HeapTrait::kMinSize},
- {1 * GB, 256 * Heap::kPointerMultiplier},
- {2 * static_cast<uint64_t>(GB), 512 * Heap::kPointerMultiplier},
- {4 * static_cast<uint64_t>(GB), V8HeapTrait::kMaxSize},
- {8 * static_cast<uint64_t>(GB), V8HeapTrait::kMaxSize}};
-
- for (auto configuration : configurations) {
- ASSERT_EQ(configuration[1],
- static_cast<uint64_t>(
- Heap::ComputeMaxOldGenerationSize(configuration[0])));
- }
+ factor = V8HeapTrait::kMinGrowingFactor;
+ size_t min_old_generation_size =
+ 2 * static_cast<size_t>(old_gen_size * factor + new_space_capacity);
+ EXPECT_EQ(
+ min_old_generation_size,
+ V8Controller::CalculateAllocationLimit(
+ heap, old_gen_size, min_old_generation_size, max_old_generation_size,
+ new_space_capacity, factor, Heap::HeapGrowingMode::kMinimal));
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index fbc384ef1d..048ff5d0a6 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -19,14 +19,75 @@ namespace internal {
using HeapTest = TestWithIsolate;
using HeapWithPointerCompressionTest = TestWithIsolateAndPointerCompression;
-TEST(Heap, SemiSpaceSize) {
+TEST(Heap, YoungGenerationSizeFromOldGenerationSize) {
const size_t MB = static_cast<size_t>(i::MB);
+ const size_t KB = static_cast<size_t>(i::KB);
const size_t pm = i::Heap::kPointerMultiplier;
- ASSERT_EQ(512u * pm, i::Heap::ComputeMaxSemiSpaceSize(0u));
- ASSERT_EQ(512u * pm, i::Heap::ComputeMaxSemiSpaceSize(512u * MB));
- ASSERT_EQ(2048u * pm, i::Heap::ComputeMaxSemiSpaceSize(1024u * MB));
- ASSERT_EQ(5120u * pm, i::Heap::ComputeMaxSemiSpaceSize(2024u * MB));
- ASSERT_EQ(8192u * pm, i::Heap::ComputeMaxSemiSpaceSize(4095u * MB));
+ ASSERT_EQ(3 * 512u * pm * KB,
+ i::Heap::YoungGenerationSizeFromOldGenerationSize(128u * pm * MB));
+ ASSERT_EQ(3 * 2048u * pm * KB,
+ i::Heap::YoungGenerationSizeFromOldGenerationSize(256u * pm * MB));
+ ASSERT_EQ(3 * 4096u * pm * KB,
+ i::Heap::YoungGenerationSizeFromOldGenerationSize(512u * pm * MB));
+ ASSERT_EQ(3 * 8192u * pm * KB,
+ i::Heap::YoungGenerationSizeFromOldGenerationSize(1024u * pm * MB));
+}
+
+TEST(Heap, GenerationSizesFromHeapSize) {
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t KB = static_cast<size_t>(i::KB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+ size_t old, young;
+
+ i::Heap::GenerationSizesFromHeapSize(1 * KB, &young, &old);
+ ASSERT_EQ(0u, old);
+ ASSERT_EQ(0u, young);
+
+ i::Heap::GenerationSizesFromHeapSize(1 * KB + 3 * 512u * pm * KB, &young,
+ &old);
+ ASSERT_EQ(1 * KB, old);
+ ASSERT_EQ(3 * 512u * pm * KB, young);
+
+ i::Heap::GenerationSizesFromHeapSize(128 * pm * MB + 3 * 512 * pm * KB,
+ &young, &old);
+ ASSERT_EQ(128u * pm * MB, old);
+ ASSERT_EQ(3 * 512u * pm * KB, young);
+
+ i::Heap::GenerationSizesFromHeapSize(256u * pm * MB + 3 * 2048 * pm * KB,
+ &young, &old);
+ ASSERT_EQ(256u * pm * MB, old);
+ ASSERT_EQ(3 * 2048u * pm * KB, young);
+
+ i::Heap::GenerationSizesFromHeapSize(512u * pm * MB + 3 * 4096 * pm * KB,
+ &young, &old);
+ ASSERT_EQ(512u * pm * MB, old);
+ ASSERT_EQ(3 * 4096u * pm * KB, young);
+
+ i::Heap::GenerationSizesFromHeapSize(1024u * pm * MB + 3 * 8192 * pm * KB,
+ &young, &old);
+ ASSERT_EQ(1024u * pm * MB, old);
+ ASSERT_EQ(3 * 8192u * pm * KB, young);
+}
+
+TEST(Heap, HeapSizeFromPhysicalMemory) {
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+
+ // The expected value is old_generation_size + 3 * semi_space_size.
+ ASSERT_EQ(128 * pm * MB + 3 * 512 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(0u));
+ ASSERT_EQ(128 * pm * MB + 3 * 512 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(512u * MB));
+ ASSERT_EQ(256 * pm * MB + 3 * 2048 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(1024u * MB));
+ ASSERT_EQ(512 * pm * MB + 3 * 4096 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(2048u * MB));
+ ASSERT_EQ(
+ 1024 * pm * MB + 3 * 8192 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(static_cast<uint64_t>(4096u) * MB));
+ ASSERT_EQ(
+ 1024 * pm * MB + 3 * 8192 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(static_cast<uint64_t>(8192u) * MB));
}
TEST_F(HeapTest, ASLR) {
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
index 7c88f58521..be3ca20938 100644
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -28,7 +28,7 @@ class SimpleTask : public ItemParallelJob::Task {
SimpleTask(Isolate* isolate, bool* did_run)
: ItemParallelJob::Task(isolate), did_run_(did_run) {}
- void RunInParallel() override {
+ void RunInParallel(Runner runner) override {
ItemParallelJob::Item* item = nullptr;
while ((item = GetItem<ItemParallelJob::Item>()) != nullptr) {
item->MarkFinished();
@@ -58,7 +58,7 @@ class EagerTask : public ItemParallelJob::Task {
public:
explicit EagerTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
- void RunInParallel() override {
+ void RunInParallel(Runner runner) override {
SimpleItem* item = nullptr;
while ((item = GetItem<SimpleItem>()) != nullptr) {
item->Process();
@@ -120,7 +120,7 @@ class TaskProcessingOneItem : public ItemParallelJob::Task {
wait_when_done_(wait_when_done),
did_process_an_item_(did_process_an_item) {}
- void RunInParallel() override {
+ void RunInParallel(Runner runner) override {
SimpleItem* item = GetItem<SimpleItem>();
if (did_process_an_item_) {
@@ -164,7 +164,7 @@ class TaskForDifferentItems : public ItemParallelJob::Task {
processed_b_(processed_b) {}
~TaskForDifferentItems() override = default;
- void RunInParallel() override {
+ void RunInParallel(Runner runner) override {
BaseItem* item = nullptr;
while ((item = GetItem<BaseItem>()) != nullptr) {
item->ProcessItem(this);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index c3aa7de234..3d02db7413 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -650,7 +650,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance));
iterator.Advance();
@@ -658,7 +658,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance - 5));
iterator.Advance();
@@ -666,7 +666,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance - 10));
iterator.Advance();
@@ -674,7 +674,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrueConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance - 15));
iterator.Advance();
@@ -683,7 +683,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(),
Bytecode::kJumpIfToBooleanFalseConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance - 20));
iterator.Advance();
}
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index a8907ba62a..b2eedd9807 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -81,7 +81,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
@@ -98,7 +99,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 0f6f0e99b0..466062b563 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -184,7 +184,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), 0);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_0);
ASSERT_TRUE(iterator.IsValid());
}
@@ -331,7 +332,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_1);
ASSERT_TRUE(iterator.IsValid());
iterator.GoToIndex(18);
@@ -488,7 +490,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_0);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
@@ -507,7 +510,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
@@ -968,7 +972,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -987,7 +992,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_0);
ASSERT_TRUE(iterator.IsValid());
--iterator;
ASSERT_FALSE(iterator.IsValid());
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 1286591752..cb9a83997e 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -63,6 +63,15 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadFromObject(
+ const Matcher<c::LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
+ CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
+ return ::i::compiler::IsLoadFromObject(rep_matcher, base_matcher,
+ index_matcher, _, _);
+}
+
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
const Matcher<c::StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
@@ -436,7 +445,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* load_constant = m.LoadConstantPoolEntry(index);
#ifdef V8_COMPRESS_POINTERS
Matcher<Node*> constant_pool_matcher =
- IsChangeCompressedToTagged(m.IsLoad(
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
MachineType::AnyCompressed(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -448,7 +457,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
kHeapObjectTag),
LoadSensitivity::kCritical)));
#else
- Matcher<Node*> constant_pool_matcher = m.IsLoad(
+ Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
MachineType::AnyTagged(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -466,7 +475,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* load_constant = m.LoadConstantPoolEntry(index);
#if V8_COMPRESS_POINTERS
Matcher<Node*> constant_pool_matcher =
- IsChangeCompressedToTagged(m.IsLoad(
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
MachineType::AnyCompressed(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -480,7 +489,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
LoadSensitivity::kCritical)));
#else
- Matcher<Node*> constant_pool_matcher = m.IsLoad(
+ Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
MachineType::AnyTagged(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -506,13 +515,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
int offset = 16;
Node* load_field = m.LoadObjectField(object, offset);
#ifdef V8_COMPRESS_POINTERS
- EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoad(
+ EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoadFromObject(
MachineType::AnyCompressed(), object,
c::IsIntPtrConstant(offset - kHeapObjectTag))));
#else
- EXPECT_THAT(load_field,
- m.IsLoad(MachineType::AnyTagged(), object,
- c::IsIntPtrConstant(offset - kHeapObjectTag)));
+ EXPECT_THAT(load_field, m.IsLoadFromObject(
+ MachineType::AnyTagged(), object,
+ c::IsIntPtrConstant(offset - kHeapObjectTag)));
#endif
}
}
@@ -593,21 +602,21 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
kSystemPointerSize)));
#ifdef V8_COMPRESS_POINTERS
Matcher<Node*> load_vector_cell_matcher = IsChangeCompressedToTagged(
- m.IsLoad(MachineType::AnyCompressed(), load_function_matcher,
- c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
- kHeapObjectTag)));
+ m.IsLoadFromObject(MachineType::AnyCompressed(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
+ kHeapObjectTag)));
EXPECT_THAT(load_feedback_vector,
- IsChangeCompressedToTagged(m.IsLoad(
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
MachineType::AnyCompressed(), load_vector_cell_matcher,
c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag))));
#else
- Matcher<Node*> load_vector_cell_matcher = m.IsLoad(
+ Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
MachineType::AnyTagged(), load_function_matcher,
c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
- EXPECT_THAT(
- load_feedback_vector,
- m.IsLoad(MachineType::AnyTagged(), load_vector_cell_matcher,
- c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
+ EXPECT_THAT(load_feedback_vector,
+ m.IsLoadFromObject(
+ MachineType::AnyTagged(), load_vector_cell_matcher,
+ c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
#endif
}
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index cec661b468..45ec5b4b7f 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -44,6 +44,10 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<compiler::Node*>& base_matcher,
const Matcher<compiler::Node*>& index_matcher,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Matcher<compiler::Node*> IsLoadFromObject(
+ const Matcher<compiler::LoadRepresentation>& rep_matcher,
+ const Matcher<compiler::Node*>& base_matcher,
+ const Matcher<compiler::Node*>& index_matcher);
Matcher<compiler::Node*> IsStore(
const Matcher<compiler::StoreRepresentation>& rep_matcher,
const Matcher<compiler::Node*>& base_matcher,
diff --git a/deps/v8/test/unittests/logging/counters-unittest.cc b/deps/v8/test/unittests/logging/counters-unittest.cc
index 3dfb0ff92f..dd38d80ee4 100644
--- a/deps/v8/test/unittests/logging/counters-unittest.cc
+++ b/deps/v8/test/unittests/logging/counters-unittest.cc
@@ -88,9 +88,6 @@ class RuntimeCallStatsTest : public TestWithNativeContext {
return isolate()->counters()->runtime_call_stats();
}
- // Print current RuntimeCallStats table. For debugging purposes.
- void PrintStats() { stats()->Print(); }
-
RuntimeCallCounterId counter_id() {
return RuntimeCallCounterId::kTestCounter1;
}
@@ -655,6 +652,8 @@ static void CustomCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
} // namespace
TEST_F(RuntimeCallStatsTest, CallbackFunction) {
+ FLAG_allow_natives_syntax = true;
+
RuntimeCallCounter* callback_counter =
stats()->GetCounter(RuntimeCallCounterId::kFunctionCallback);
@@ -710,9 +709,29 @@ TEST_F(RuntimeCallStatsTest, CallbackFunction) {
EXPECT_EQ(0, callback_counter->time().InMicroseconds());
EXPECT_EQ(100, counter()->time().InMicroseconds());
EXPECT_EQ(kCustomCallbackTime * 4010, counter2()->time().InMicroseconds());
+
+ // Check that the FunctionCallback tracing also works properly
+ // when the `callback` is called from optimized code.
+ RunJS(
+ "function wrap(o) { return o.callback(); };\n"
+ "%PrepareFunctionForOptimization(wrap);\n"
+ "wrap(custom_object);\n"
+ "wrap(custom_object);\n"
+ "%OptimizeFunctionOnNextCall(wrap);\n"
+ "wrap(custom_object);\n");
+ EXPECT_EQ(4, js_counter()->count());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(4013, callback_counter->count());
+ EXPECT_EQ(4013, counter2()->count());
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(kCustomCallbackTime * 4013, counter2()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, ApiGetter) {
+ FLAG_allow_natives_syntax = true;
+
RuntimeCallCounter* callback_counter =
stats()->GetCounter(RuntimeCallCounterId::kFunctionCallback);
current_test = this;
@@ -740,7 +759,6 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
Sleep(100);
RunJS("custom_object.apiGetter;");
}
- PrintStats();
EXPECT_EQ(1, js_counter()->count());
EXPECT_EQ(1, counter()->count());
@@ -754,7 +772,6 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
EXPECT_EQ(kCustomCallbackTime, counter2()->time().InMicroseconds());
RunJS("for (let i = 0; i < 9; i++) { custom_object.apiGetter };");
- PrintStats();
EXPECT_EQ(2, js_counter()->count());
EXPECT_EQ(1, counter()->count());
@@ -767,7 +784,6 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
EXPECT_EQ(kCustomCallbackTime * 10, counter2()->time().InMicroseconds());
RunJS("for (let i = 0; i < 4000; i++) { custom_object.apiGetter };");
- PrintStats();
EXPECT_EQ(3, js_counter()->count());
EXPECT_EQ(1, counter()->count());
@@ -779,7 +795,25 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
EXPECT_EQ(0, callback_counter->time().InMicroseconds());
EXPECT_EQ(kCustomCallbackTime * 4010, counter2()->time().InMicroseconds());
- PrintStats();
+ // Check that the FunctionCallback tracing also works properly
+ // when the `apiGetter` is called from optimized code.
+ RunJS(
+ "function wrap(o) { return o.apiGetter; };\n"
+ "%PrepareFunctionForOptimization(wrap);\n"
+ "wrap(custom_object);\n"
+ "wrap(custom_object);\n"
+ "%OptimizeFunctionOnNextCall(wrap);\n"
+ "wrap(custom_object);\n");
+
+ EXPECT_EQ(4, js_counter()->count());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(4013, callback_counter->count());
+ EXPECT_EQ(4013, counter2()->count());
+
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
+ EXPECT_EQ(kCustomCallbackTime * 4013, counter2()->time().InMicroseconds());
}
TEST_F(SnapshotNativeCounterTest, StringAddNative) {
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 38aae33809..a3a6fb22a7 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -2885,5 +2885,68 @@ TEST_F(ValueSerializerTestWithLimitedMemory, FailIfNoMemoryInWriteHostObject) {
EXPECT_TRUE(EvaluateScriptForInput("gotA")->IsFalse());
}
+// We only have basic tests and tests for .stack here, because we have more
+// comprehensive tests as web platform tests.
+TEST_F(ValueSerializerTest, RoundTripError) {
+ Local<Value> value = RoundTripTest("Error('hello')");
+ ASSERT_TRUE(value->IsObject());
+ Local<Object> error = value.As<Object>();
+
+ Local<Value> name;
+ Local<Value> message;
+
+ {
+ Context::Scope scope(deserialization_context());
+ EXPECT_EQ(error->GetPrototype(), Exception::Error(String::Empty(isolate()))
+ .As<Object>()
+ ->GetPrototype());
+ }
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("name"))
+ .ToLocal(&name));
+ ASSERT_TRUE(name->IsString());
+ EXPECT_EQ(Utf8Value(name), "Error");
+
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("message"))
+ .ToLocal(&message));
+ ASSERT_TRUE(message->IsString());
+ EXPECT_EQ(Utf8Value(message), "hello");
+}
+
+TEST_F(ValueSerializerTest, DefaultErrorStack) {
+ Local<Value> value =
+ RoundTripTest("function hkalkcow() { return Error(); } hkalkcow();");
+ ASSERT_TRUE(value->IsObject());
+ Local<Object> error = value.As<Object>();
+
+ Local<Value> stack;
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("stack"))
+ .ToLocal(&stack));
+ ASSERT_TRUE(stack->IsString());
+ EXPECT_NE(Utf8Value(stack).find("hkalkcow"), std::string::npos);
+}
+
+TEST_F(ValueSerializerTest, ModifiedErrorStack) {
+ Local<Value> value = RoundTripTest("let e = Error(); e.stack = 'hello'; e");
+ ASSERT_TRUE(value->IsObject());
+ Local<Object> error = value.As<Object>();
+
+ Local<Value> stack;
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("stack"))
+ .ToLocal(&stack));
+ ASSERT_TRUE(stack->IsString());
+ EXPECT_EQ(Utf8Value(stack), "hello");
+}
+
+TEST_F(ValueSerializerTest, NonStringErrorStack) {
+ Local<Value> value = RoundTripTest("let e = Error(); e.stack = 17; e");
+ ASSERT_TRUE(value->IsObject());
+ Local<Object> error = value.As<Object>();
+
+ Local<Value> stack;
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("stack"))
+ .ToLocal(&stack));
+ EXPECT_TRUE(stack->IsUndefined());
+}
+
} // namespace
} // namespace v8
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index f85b3bf128..8b425542c1 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -73,7 +73,7 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
- shared->FunctionLiteralId(isolate), nullptr);
+ shared->function_literal_id(), nullptr);
return new BackgroundCompileTask(
allocator(), outer_parse_info.get(), function_name, function_literal,
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index 94209b8b10..614ddba4f5 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -42,11 +42,11 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
isolate->factory()->NewStringFromAsciiChecked("f"),
Builtins::kCompileLazy);
int function_literal_id = 1;
+ shared->set_function_literal_id(function_literal_id);
// Ensure that the function can be compiled lazily.
shared->set_uncompiled_data(
*isolate->factory()->NewUncompiledDataWithoutPreparseData(
- ReadOnlyRoots(isolate).empty_string_handle(), 0, source->length(),
- function_literal_id));
+ ReadOnlyRoots(isolate).empty_string_handle(), 0, source->length()));
// Make sure we have an outer scope info, even though it's empty
shared->set_raw_outer_scope_info_or_feedback_metadata(
ScopeInfo::Empty(isolate));
diff --git a/deps/v8/test/unittests/torque/earley-parser-unittest.cc b/deps/v8/test/unittests/torque/earley-parser-unittest.cc
index 9718a404c9..f44a49b047 100644
--- a/deps/v8/test/unittests/torque/earley-parser-unittest.cc
+++ b/deps/v8/test/unittests/torque/earley-parser-unittest.cc
@@ -68,7 +68,7 @@ struct SimpleArithmeticGrammar : Grammar {
TEST(EarleyParser, SimpleArithmetic) {
SimpleArithmeticGrammar grammar;
- SourceFileMap::Scope source_file_map;
+ SourceFileMap::Scope source_file_map("");
CurrentSourceFile::Scope current_source_file{
SourceFileMap::AddSource("dummy_filename")};
std::string result1 =
diff --git a/deps/v8/test/unittests/torque/ls-message-unittest.cc b/deps/v8/test/unittests/torque/ls-message-unittest.cc
index 06346d32bb..c6779f978d 100644
--- a/deps/v8/test/unittests/torque/ls-message-unittest.cc
+++ b/deps/v8/test/unittests/torque/ls-message-unittest.cc
@@ -20,15 +20,19 @@ TEST(LanguageServerMessage, InitializeRequest) {
request.set_method("initialize");
request.params();
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- InitializeResponse response(raw_response);
+ bool writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ InitializeResponse response(std::move(raw_response));
// Check that the response id matches up with the request id, and that
// the language server signals its support for definitions.
EXPECT_EQ(response.id(), 5);
EXPECT_TRUE(response.result().capabilities().definitionProvider());
EXPECT_TRUE(response.result().capabilities().documentSymbolProvider());
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage,
@@ -36,8 +40,10 @@ TEST(LanguageServerMessage,
Request<bool> notification;
notification.set_method("initialized");
- HandleMessage(notification.GetJsonValue(), [](JsonValue& raw_request) {
- RegistrationRequest request(raw_request);
+ bool writer_called = false;
+ HandleMessage(std::move(notification.GetJsonValue()), [&](JsonValue
+ raw_request) {
+ RegistrationRequest request(std::move(raw_request));
ASSERT_EQ(request.method(), "client/registerCapability");
ASSERT_EQ(request.params().registrations_size(), (size_t)1);
@@ -49,26 +55,33 @@ TEST(LanguageServerMessage,
registration
.registerOptions<DidChangeWatchedFilesRegistrationOptions>();
ASSERT_EQ(options.watchers_size(), (size_t)1);
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, GotoDefinitionUnkownFile) {
- SourceFileMap::Scope source_file_map_scope;
+ SourceFileMap::Scope source_file_map_scope("");
GotoDefinitionRequest request;
request.set_id(42);
request.set_method("textDocument/definition");
request.params().textDocument().set_uri("file:///unknown.tq");
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- GotoDefinitionResponse response(raw_response);
+ bool writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ GotoDefinitionResponse response(std::move(raw_response));
EXPECT_EQ(response.id(), 42);
EXPECT_TRUE(response.IsNull("result"));
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, GotoDefinition) {
- SourceFileMap::Scope source_file_map_scope;
+ SourceFileMap::Scope source_file_map_scope("");
SourceId test_id = SourceFileMap::AddSource("file://test.tq");
SourceId definition_id = SourceFileMap::AddSource("file://base.tq");
@@ -84,11 +97,15 @@ TEST(LanguageServerMessage, GotoDefinition) {
request.params().position().set_line(2);
request.params().position().set_character(0);
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- GotoDefinitionResponse response(raw_response);
+ bool writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ GotoDefinitionResponse response(std::move(raw_response));
EXPECT_EQ(response.id(), 42);
EXPECT_TRUE(response.IsNull("result"));
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
// Second, check a known defintion.
request = GotoDefinitionRequest();
@@ -98,8 +115,9 @@ TEST(LanguageServerMessage, GotoDefinition) {
request.params().position().set_line(1);
request.params().position().set_character(5);
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- GotoDefinitionResponse response(raw_response);
+ writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ GotoDefinitionResponse response(std::move(raw_response));
EXPECT_EQ(response.id(), 43);
ASSERT_FALSE(response.IsNull("result"));
@@ -109,22 +127,26 @@ TEST(LanguageServerMessage, GotoDefinition) {
EXPECT_EQ(location.range().start().character(), 1);
EXPECT_EQ(location.range().end().line(), 4);
EXPECT_EQ(location.range().end().character(), 5);
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, CompilationErrorSendsDiagnostics) {
DiagnosticsFiles::Scope diagnostic_files_scope;
LanguageServerData::Scope server_data_scope;
TorqueMessages::Scope messages_scope;
- SourceFileMap::Scope source_file_map_scope;
+ SourceFileMap::Scope source_file_map_scope("");
TorqueCompilerResult result;
{ Error("compilation failed somehow"); }
result.messages = std::move(TorqueMessages::Get());
result.source_file_map = SourceFileMap::Get();
- CompilationFinished(std::move(result), [](JsonValue& raw_response) {
- PublishDiagnosticsNotification notification(raw_response);
+ bool writer_called = false;
+ CompilationFinished(std::move(result), [&](JsonValue raw_response) {
+ PublishDiagnosticsNotification notification(std::move(raw_response));
EXPECT_EQ(notification.method(), "textDocument/publishDiagnostics");
ASSERT_FALSE(notification.IsNull("params"));
@@ -134,15 +156,18 @@ TEST(LanguageServerMessage, CompilationErrorSendsDiagnostics) {
Diagnostic diagnostic = notification.params().diagnostics(0);
EXPECT_EQ(diagnostic.severity(), Diagnostic::kError);
EXPECT_EQ(diagnostic.message(), "compilation failed somehow");
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, LintErrorSendsDiagnostics) {
DiagnosticsFiles::Scope diagnostic_files_scope;
TorqueMessages::Scope messages_scope;
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope sourc_file_map_scope;
- SourceId test_id = SourceFileMap::AddSource("test.tq");
+ SourceFileMap::Scope sourc_file_map_scope("");
+ SourceId test_id = SourceFileMap::AddSource("file://test.tq");
// No compilation errors but two lint warnings.
{
@@ -156,12 +181,13 @@ TEST(LanguageServerMessage, LintErrorSendsDiagnostics) {
result.messages = std::move(TorqueMessages::Get());
result.source_file_map = SourceFileMap::Get();
- CompilationFinished(std::move(result), [](JsonValue& raw_response) {
- PublishDiagnosticsNotification notification(raw_response);
+ bool writer_called = false;
+ CompilationFinished(std::move(result), [&](JsonValue raw_response) {
+ PublishDiagnosticsNotification notification(std::move(raw_response));
EXPECT_EQ(notification.method(), "textDocument/publishDiagnostics");
ASSERT_FALSE(notification.IsNull("params"));
- EXPECT_EQ(notification.params().uri(), "test.tq");
+ EXPECT_EQ(notification.params().uri(), "file://test.tq");
ASSERT_EQ(notification.params().diagnostics_size(), static_cast<size_t>(2));
Diagnostic diagnostic1 = notification.params().diagnostics(0);
@@ -171,35 +197,42 @@ TEST(LanguageServerMessage, LintErrorSendsDiagnostics) {
Diagnostic diagnostic2 = notification.params().diagnostics(1);
EXPECT_EQ(diagnostic2.severity(), Diagnostic::kWarning);
EXPECT_EQ(diagnostic2.message(), "lint error 2");
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, CleanCompileSendsNoDiagnostics) {
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope sourc_file_map_scope;
+ SourceFileMap::Scope sourc_file_map_scope("");
TorqueCompilerResult result;
result.source_file_map = SourceFileMap::Get();
- CompilationFinished(std::move(result), [](JsonValue& raw_response) {
+ CompilationFinished(std::move(result), [](JsonValue raw_response) {
FAIL() << "Sending unexpected response!";
});
}
TEST(LanguageServerMessage, NoSymbolsSendsEmptyResponse) {
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope sourc_file_map_scope;
+ SourceFileMap::Scope sourc_file_map_scope("");
DocumentSymbolRequest request;
request.set_id(42);
request.set_method("textDocument/documentSymbol");
- request.params().textDocument().set_uri("test.tq");
+ request.params().textDocument().set_uri("file://test.tq");
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- DocumentSymbolResponse response(raw_response);
+ bool writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ DocumentSymbolResponse response(std::move(raw_response));
EXPECT_EQ(response.id(), 42);
EXPECT_EQ(response.result_size(), static_cast<size_t>(0));
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
} // namespace ls
diff --git a/deps/v8/test/unittests/torque/ls-server-data-unittest.cc b/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
index ad67bf0f21..7e23f6dcf4 100644
--- a/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
+++ b/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
@@ -13,7 +13,7 @@ namespace torque {
namespace {
struct TestCompiler {
- SourceFileMap::Scope file_map_scope;
+ SourceFileMap::Scope file_map_scope{""};
LanguageServerData::Scope server_data_scope;
void Compile(const std::string& source) {
@@ -23,7 +23,7 @@ struct TestCompiler {
options.force_assert_statements = true;
TorqueCompilerResult result = CompileTorque(source, options);
- SourceFileMap::Get() = result.source_file_map;
+ SourceFileMap::Get() = *result.source_file_map;
LanguageServerData::Get() = std::move(result.language_server_data);
}
};
@@ -42,7 +42,7 @@ TEST(LanguageServer, GotoTypeDefinition) {
compiler.Compile(source);
// Find the definition for type 'T1' of argument 'a' on line 4.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {4, 19});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 7}}));
@@ -64,7 +64,7 @@ TEST(LanguageServer, GotoTypeDefinitionExtends) {
compiler.Compile(source);
// Find the definition for 'T1' of the extends clause on line 3.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {3, 16});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 7}}));
@@ -72,7 +72,7 @@ TEST(LanguageServer, GotoTypeDefinitionExtends) {
TEST(LanguageServer, GotoTypeDefinitionNoDataForFile) {
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope file_scope;
+ SourceFileMap::Scope file_scope("");
SourceId test_id = SourceFileMap::AddSource("test.tq");
// Regression test, this step should not crash.
@@ -94,7 +94,7 @@ TEST(LanguageServer, GotoLabelDefinitionInSignature) {
compiler.Compile(source);
// Find the definition for 'Bailout' of the otherwise clause on line 6.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {6, 18});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {5, 19}, {5, 26}}));
@@ -116,7 +116,7 @@ TEST(LanguageServer, GotoLabelDefinitionInTryBlock) {
compiler.Compile(source);
// Find the definition for 'Bailout' of the otherwise clause on line 6.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {6, 25});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {7, 8}, {7, 15}}));
@@ -133,7 +133,7 @@ TEST(LanguageServer, GotoDefinitionClassSuperType) {
compiler.Compile(source);
// Find the definition for 'Tagged' of the 'extends' on line 3.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {3, 33});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 11}}));
@@ -151,7 +151,7 @@ TEST(LanguageServer, GotoLabelDefinitionInSignatureGotoStmt) {
compiler.Compile(source);
// Find the definition for 'Fail' of the goto statement on line 3.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {3, 7});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 26}, {2, 30}}));
@@ -170,7 +170,7 @@ TEST(LanguageServer, GotoLabelDefinitionInTryBlockGoto) {
compiler.Compile(source);
// Find the definition for 'Bailout' of the goto statement on line 3.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {3, 13});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {4, 8}, {4, 15}}));
@@ -192,7 +192,7 @@ TEST(LanguageServer, GotoLabelDefinitionGotoInOtherwise) {
compiler.Compile(source);
// Find the definition for 'Bailout' of the otherwise clause on line 6.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {6, 30});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {7, 8}, {7, 15}}));
@@ -214,7 +214,7 @@ TEST(LanguageServer, SymbolsArePopulated) {
TestCompiler compiler;
compiler.Compile(source);
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
const auto& symbols = LanguageServerData::SymbolsForSourceId(id);
ASSERT_FALSE(symbols.empty());
}
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index 9a82498ee4..1366f86ce7 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -64,6 +64,7 @@ TorqueCompilerResult TestCompileTorque(std::string source) {
options.output_directory = "";
options.collect_language_server_data = false;
options.force_assert_statements = false;
+ options.v8_root = ".";
source = kTestTorquePrelude + source;
return CompileTorque(source, options);
@@ -128,6 +129,7 @@ TEST(Torque, ClassDefinition) {
i: uintptr;
}
+ @export
macro TestClassWithAllTypesLoadsAndStores(
t: TestClassWithAllTypes, r: RawPtr, v1: int8, v2: uint8, v3: int16,
v4: uint16, v5: int32, v6: uint32, v7: intptr, v8: uintptr) {
@@ -158,12 +160,10 @@ TEST(Torque, TypeDeclarationOrder) {
type Baztype = Foo | FooType;
@abstract
- @noVerifier
extern class Foo extends HeapObject {
fooField: FooType;
}
- @noVerifier
extern class Bar extends Foo {
barField: Bartype;
bazfield: Baztype;
@@ -179,7 +179,6 @@ TEST(Torque, ConditionalFields) {
// This class should throw alignment errors if @if decorators aren't
// working.
ExpectSuccessfulCompilation(R"(
- @noVerifier
extern class PreprocessingTest extends HeapObject {
@if(FALSE_FOR_TESTING) a: int8;
@if(TRUE_FOR_TESTING) a: int16;
@@ -192,7 +191,6 @@ TEST(Torque, ConditionalFields) {
}
)");
ExpectFailingCompilation(R"(
- @noVerifier
extern class PreprocessingTest extends HeapObject {
@if(TRUE_FOR_TESTING) a: int8;
@if(FALSE_FOR_TESTING) a: int16;
@@ -209,10 +207,110 @@ TEST(Torque, ConditionalFields) {
TEST(Torque, ConstexprLetBindingDoesNotCrash) {
ExpectFailingCompilation(
- R"(macro FooBar() { let foo = 0; check(foo >= 0); })",
+ R"(@export macro FooBar() { let foo = 0; check(foo >= 0); })",
HasSubstr("Use 'const' instead of 'let' for variable 'foo'"));
}
+TEST(Torque, DoubleUnderScorePrefixIllegalForIdentifiers) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo() {
+ let __x;
+ }
+ )",
+ HasSubstr("Lexer Error"));
+}
+
+TEST(Torque, UnusedLetBindingLintError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo(y: Smi) {
+ let x: Smi = y;
+ }
+ )",
+ HasSubstr("Variable 'x' is never used."));
+}
+
+TEST(Torque, UnderscorePrefixSilencesUnusedWarning) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo(y: Smi) {
+ let _x: Smi = y;
+ }
+ )");
+}
+
+TEST(Torque, UsingUnderscorePrefixedIdentifierError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo(y: Smi) {
+ let _x: Smi = y;
+ check(_x == y);
+ }
+ )",
+ HasSubstr("Trying to reference '_x'"));
+}
+
+TEST(Torque, UnusedArgumentLintError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo(x: Smi) {}
+ )",
+ HasSubstr("Variable 'x' is never used."));
+}
+
+TEST(Torque, UsingUnderscorePrefixedArgumentSilencesWarning) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo(_y: Smi) {}
+ )");
+}
+
+TEST(Torque, UnusedLabelLintError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo() labels Bar {}
+ )",
+ HasSubstr("Label 'Bar' is never used."));
+}
+
+TEST(Torque, UsingUnderScorePrefixLabelSilencesWarning) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo() labels _Bar {}
+ )");
+}
+
+TEST(Torque, NoUnusedWarningForImplicitArguments) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo(implicit c: Context, r: JSReceiver)() {}
+ )");
+}
+
+TEST(Torque, NoUnusedWarningForVariablesOnlyUsedInAsserts) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo(x: bool) {
+ assert(x);
+ }
+ )");
+}
+
+TEST(Torque, ImportNonExistentFile) {
+ ExpectFailingCompilation(R"(import "foo/bar.tq")",
+ HasSubstr("File 'foo/bar.tq' not found."));
+}
+
+TEST(Torque, LetShouldBeConstLintError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo(y: Smi): Smi {
+ let x: Smi = y;
+ return x;
+ })",
+ HasSubstr("Variable 'x' is never assigned to."));
+}
+
+TEST(Torque, LetShouldBeConstIsSkippedForStructs) {
+ ExpectSuccessfulCompilation(R"(
+ struct Foo{ a: Smi; }
+ @export macro Bar(x: Smi): Foo {
+ let foo = Foo{a: x};
+ return foo;
+ }
+ )");
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
index 938956f07d..29cb176197 100644
--- a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -87,7 +87,8 @@ class ControlTransferTest : public TestWithZone {
}
void CheckNoOtherTargets(
- const byte* start, const byte* end, ControlTransferMap& map,
+ const byte* start, const byte* end,
+ ControlTransferMap& map, // NOLINT(runtime/references)
std::initializer_list<ExpectedControlTransfer> targets) {
// Check there are no other control targets.
for (pc_t pc = 0; start + pc < end; pc++) {
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index aaf6215a8a..725f7f4a59 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -254,7 +254,7 @@ class TestModuleBuilder {
byte AddTable(ValueType type, uint32_t initial_size, bool has_maximum_size,
uint32_t maximum_size) {
- CHECK(type == kWasmAnyRef || type == kWasmAnyFunc);
+ CHECK(type == kWasmAnyRef || type == kWasmFuncRef);
mod.tables.emplace_back();
WasmTable& table = mod.tables.back();
table.type = type;
@@ -274,6 +274,11 @@ class TestModuleBuilder {
byte AddPassiveElementSegment() {
mod.elem_segments.emplace_back();
+ auto& init = mod.elem_segments.back();
+ // Add 5 empty elements.
+ for (uint32_t j = 0; j < 5; j++) {
+ init.entries.push_back(WasmElemSegment::kNullIndex);
+ }
return static_cast<byte>(mod.elem_segments.size() - 1);
}
@@ -353,7 +358,7 @@ TEST_F(FunctionBodyDecoderTest, Float32Const) {
byte code[] = {kExprF32Const, 0, 0, 0, 0};
Address ptr = reinterpret_cast<Address>(code + 1);
for (int i = 0; i < 30; i++) {
- WriteLittleEndianValue<float>(ptr, i * -7.75f);
+ base::WriteLittleEndianValue<float>(ptr, i * -7.75f);
ExpectValidates(sigs.f_ff(), code);
}
}
@@ -362,7 +367,7 @@ TEST_F(FunctionBodyDecoderTest, Float64Const) {
byte code[] = {kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0};
Address ptr = reinterpret_cast<Address>(code + 1);
for (int i = 0; i < 30; i++) {
- WriteLittleEndianValue<double>(ptr, i * 33.45);
+ base::WriteLittleEndianValue<double>(ptr, i * 33.45);
ExpectValidates(sigs.d_dd(), code);
}
}
@@ -1637,7 +1642,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleIndirectReturnCalls) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyFunc, 20, true, 30);
+ builder.AddTable(kWasmFuncRef, 20, true, 30);
module = builder.module();
byte f0 = builder.AddSignature(sigs.i_v());
@@ -1656,7 +1661,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsOutOfBounds) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyFunc, 20, false, 20);
+ builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
ExpectFailure(sig, {WASM_RETURN_CALL_INDIRECT0(0, WASM_ZERO)});
@@ -1779,7 +1784,7 @@ TEST_F(FunctionBodyDecoderTest, MultiReturnType) {
TEST_F(FunctionBodyDecoderTest, SimpleIndirectCalls) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyFunc, 20, false, 20);
+ builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
byte f0 = builder.AddSignature(sigs.i_v());
@@ -1795,7 +1800,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleIndirectCalls) {
TEST_F(FunctionBodyDecoderTest, IndirectCallsOutOfBounds) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyFunc, 20, false, 20);
+ builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
ExpectFailure(sig, {WASM_CALL_INDIRECT0(0, WASM_ZERO)});
@@ -2031,97 +2036,97 @@ TEST_F(FunctionBodyDecoderTest, AllSetGlobalCombinations) {
}
}
-TEST_F(FunctionBodyDecoderTest, SetTable) {
+TEST_F(FunctionBodyDecoderTest, TableSet) {
WASM_FEATURE_SCOPE(anyref);
TestModuleBuilder builder;
module = builder.module();
byte tab_ref1 = builder.AddTable(kWasmAnyRef, 10, true, 20);
- byte tab_func1 = builder.AddTable(kWasmAnyFunc, 20, true, 30);
- byte tab_func2 = builder.AddTable(kWasmAnyFunc, 10, false, 20);
+ byte tab_func1 = builder.AddTable(kWasmFuncRef, 20, true, 30);
+ byte tab_func2 = builder.AddTable(kWasmFuncRef, 10, false, 20);
byte tab_ref2 = builder.AddTable(kWasmAnyRef, 10, false, 20);
- ValueType sig_types[]{kWasmAnyRef, kWasmAnyFunc, kWasmI32};
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32};
FunctionSig sig(0, 3, sig_types);
byte local_ref = 0;
byte local_func = 1;
byte local_int = 2;
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(6),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_ref1, WASM_I32V(6),
WASM_GET_LOCAL(local_ref))});
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(5),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_func1, WASM_I32V(5),
WASM_GET_LOCAL(local_func))});
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_func2, WASM_I32V(7),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_func2, WASM_I32V(7),
WASM_GET_LOCAL(local_func))});
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref2, WASM_I32V(8),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_ref2, WASM_I32V(8),
WASM_GET_LOCAL(local_ref))});
- // We can store anyfunc values as anyref, but not the other way around.
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(4),
+ // We can store funcref values as anyref, but not the other way around.
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_ref1, WASM_I32V(4),
WASM_GET_LOCAL(local_func))});
- ExpectFailure(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(9),
+ ExpectFailure(&sig, {WASM_TABLE_SET(tab_func1, WASM_I32V(9),
WASM_GET_LOCAL(local_ref))});
- ExpectFailure(&sig, {WASM_SET_TABLE(tab_func2, WASM_I32V(3),
+ ExpectFailure(&sig, {WASM_TABLE_SET(tab_func2, WASM_I32V(3),
WASM_GET_LOCAL(local_ref))});
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref2, WASM_I32V(2),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_ref2, WASM_I32V(2),
WASM_GET_LOCAL(local_func))});
- ExpectFailure(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(9),
+ ExpectFailure(&sig, {WASM_TABLE_SET(tab_ref1, WASM_I32V(9),
WASM_GET_LOCAL(local_int))});
- ExpectFailure(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(3),
+ ExpectFailure(&sig, {WASM_TABLE_SET(tab_func1, WASM_I32V(3),
WASM_GET_LOCAL(local_int))});
// Out-of-bounds table index should fail.
byte oob_tab = 37;
ExpectFailure(
- &sig, {WASM_SET_TABLE(oob_tab, WASM_I32V(9), WASM_GET_LOCAL(local_ref))});
- ExpectFailure(&sig, {WASM_SET_TABLE(oob_tab, WASM_I32V(3),
+ &sig, {WASM_TABLE_SET(oob_tab, WASM_I32V(9), WASM_GET_LOCAL(local_ref))});
+ ExpectFailure(&sig, {WASM_TABLE_SET(oob_tab, WASM_I32V(3),
WASM_GET_LOCAL(local_func))});
}
-TEST_F(FunctionBodyDecoderTest, GetTable) {
+TEST_F(FunctionBodyDecoderTest, TableGet) {
WASM_FEATURE_SCOPE(anyref);
TestModuleBuilder builder;
module = builder.module();
byte tab_ref1 = builder.AddTable(kWasmAnyRef, 10, true, 20);
- byte tab_func1 = builder.AddTable(kWasmAnyFunc, 20, true, 30);
- byte tab_func2 = builder.AddTable(kWasmAnyFunc, 10, false, 20);
+ byte tab_func1 = builder.AddTable(kWasmFuncRef, 20, true, 30);
+ byte tab_func2 = builder.AddTable(kWasmFuncRef, 10, false, 20);
byte tab_ref2 = builder.AddTable(kWasmAnyRef, 10, false, 20);
- ValueType sig_types[]{kWasmAnyRef, kWasmAnyFunc, kWasmI32};
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32};
FunctionSig sig(0, 3, sig_types);
byte local_ref = 0;
byte local_func = 1;
byte local_int = 2;
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_ref1, WASM_I32V(6)))});
+ {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(tab_ref1, WASM_I32V(6)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_ref2, WASM_I32V(8)))});
+ {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(tab_ref2, WASM_I32V(8)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_func, WASM_GET_TABLE(tab_func1, WASM_I32V(5)))});
+ {WASM_SET_LOCAL(local_func, WASM_TABLE_GET(tab_func1, WASM_I32V(5)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_func, WASM_GET_TABLE(tab_func2, WASM_I32V(7)))});
+ {WASM_SET_LOCAL(local_func, WASM_TABLE_GET(tab_func2, WASM_I32V(7)))});
- // We can store anyfunc values as anyref, but not the other way around.
+ // We can store funcref values as anyref, but not the other way around.
ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
- WASM_GET_TABLE(tab_ref1, WASM_I32V(4)))});
+ WASM_TABLE_GET(tab_ref1, WASM_I32V(4)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_func1, WASM_I32V(9)))});
+ {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(tab_func1, WASM_I32V(9)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_func2, WASM_I32V(3)))});
+ {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(tab_func2, WASM_I32V(3)))});
ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
- WASM_GET_TABLE(tab_ref2, WASM_I32V(2)))});
+ WASM_TABLE_GET(tab_ref2, WASM_I32V(2)))});
ExpectFailure(&sig, {WASM_SET_LOCAL(local_int,
- WASM_GET_TABLE(tab_ref1, WASM_I32V(9)))});
+ WASM_TABLE_GET(tab_ref1, WASM_I32V(9)))});
ExpectFailure(&sig, {WASM_SET_LOCAL(
- local_int, WASM_GET_TABLE(tab_func1, WASM_I32V(3)))});
+ local_int, WASM_TABLE_GET(tab_func1, WASM_I32V(3)))});
// Out-of-bounds table index should fail.
byte oob_tab = 37;
ExpectFailure(
- &sig, {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(oob_tab, WASM_I32V(9)))});
+ &sig, {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(oob_tab, WASM_I32V(9)))});
ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
- WASM_GET_TABLE(oob_tab, WASM_I32V(3)))});
+ WASM_TABLE_GET(oob_tab, WASM_I32V(3)))});
}
TEST_F(FunctionBodyDecoderTest, MultiTableCallIndirect) {
@@ -2129,13 +2134,13 @@ TEST_F(FunctionBodyDecoderTest, MultiTableCallIndirect) {
TestModuleBuilder builder;
module = builder.module();
byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
- byte tab_func = builder.AddTable(kWasmAnyFunc, 20, true, 30);
+ byte tab_func = builder.AddTable(kWasmFuncRef, 20, true, 30);
- ValueType sig_types[]{kWasmAnyRef, kWasmAnyFunc, kWasmI32};
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32};
FunctionSig sig(0, 3, sig_types);
byte sig_index = builder.AddSignature(sigs.i_v());
- // We can store anyfunc values as anyref, but not the other way around.
+ // We can store funcref values as anyref, but not the other way around.
ExpectValidates(sigs.i_v(),
{kExprI32Const, 0, kExprCallIndirect, sig_index, tab_func});
@@ -2154,7 +2159,7 @@ TEST_F(FunctionBodyDecoderTest, WasmMemoryGrow) {
}
TEST_F(FunctionBodyDecoderTest, AsmJsMemoryGrow) {
- TestModuleBuilder builder(kAsmJsOrigin);
+ TestModuleBuilder builder(kAsmJsSloppyOrigin);
module = builder.module();
builder.InitializeMemory();
@@ -2186,7 +2191,7 @@ TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
};
{
- TestModuleBuilder builder(kAsmJsOrigin);
+ TestModuleBuilder builder(kAsmJsSloppyOrigin);
module = builder.module();
builder.InitializeMemory();
for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
@@ -2234,7 +2239,7 @@ TEST_F(FunctionBodyDecoderTest, AsmJsUnOpsCheckOrigin) {
{kExprI32AsmjsSConvertF64, sigs.i_d()},
{kExprI32AsmjsUConvertF64, sigs.i_d()}};
{
- TestModuleBuilder builder(kAsmJsOrigin);
+ TestModuleBuilder builder(kAsmJsSloppyOrigin);
module = builder.module();
builder.InitializeMemory();
for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
@@ -3104,7 +3109,7 @@ TEST_F(FunctionBodyDecoderTest, MemoryInit) {
ExpectValidates(sigs.v_v(),
{WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_INIT(0, 1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, MemoryInitInvalid) {
@@ -3174,12 +3179,12 @@ TEST_F(FunctionBodyDecoderTest, TableInit) {
module = builder.module();
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
- {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_INIT(0, 1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
@@ -3189,7 +3194,8 @@ TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
module = builder.module();
WASM_FEATURE_SCOPE(bulk_memory);
- byte code[] = {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO), WASM_END};
+ byte code[] = {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
+ WASM_END};
for (size_t i = 0; i <= arraysize(code); ++i) {
Validate(i == arraysize(code), sigs.v_v(), VectorOf(code, i), kOmitEnd);
}
@@ -3212,15 +3218,16 @@ TEST_F(FunctionBodyDecoderTest, TableCopy) {
builder.InitializeTable();
module = builder.module();
- ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
- {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, TableGrow) {
TestModuleBuilder builder;
- byte tab_func = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+ byte tab_func = builder.AddTable(kWasmFuncRef, 10, true, 20);
byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
module = builder.module();
@@ -3232,10 +3239,10 @@ TEST_F(FunctionBodyDecoderTest, TableGrow) {
{WASM_TABLE_GROW(tab_func, WASM_REF_NULL, WASM_ONE)});
ExpectValidates(sigs.i_r(),
{WASM_TABLE_GROW(tab_ref, WASM_REF_NULL, WASM_ONE)});
- // Anyfunc table cannot be initialized with an anyref value.
+ // FuncRef table cannot be initialized with an anyref value.
ExpectFailure(sigs.i_r(),
{WASM_TABLE_GROW(tab_func, WASM_GET_LOCAL(0), WASM_ONE)});
- // Anyref table can be initialized with an anyfunc value.
+ // Anyref table can be initialized with an funcref value.
ExpectValidates(sigs.i_a(),
{WASM_TABLE_GROW(tab_ref, WASM_GET_LOCAL(0), WASM_ONE)});
// Check that the table index gets verified.
@@ -3245,7 +3252,7 @@ TEST_F(FunctionBodyDecoderTest, TableGrow) {
TEST_F(FunctionBodyDecoderTest, TableSize) {
TestModuleBuilder builder;
- int tab = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+ int tab = builder.AddTable(kWasmFuncRef, 10, true, 20);
module = builder.module();
@@ -3257,7 +3264,7 @@ TEST_F(FunctionBodyDecoderTest, TableSize) {
TEST_F(FunctionBodyDecoderTest, TableFill) {
TestModuleBuilder builder;
- byte tab_func = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+ byte tab_func = builder.AddTable(kWasmFuncRef, 10, true, 20);
byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
module = builder.module();
@@ -3269,10 +3276,10 @@ TEST_F(FunctionBodyDecoderTest, TableFill) {
WASM_REF_NULL, WASM_ONE)});
ExpectValidates(sigs.v_r(), {WASM_TABLE_FILL(tab_ref, WASM_ONE, WASM_REF_NULL,
WASM_ONE)});
- // Anyfunc table cannot be initialized with an anyref value.
+ // FuncRef table cannot be initialized with an anyref value.
ExpectFailure(sigs.v_r(), {WASM_TABLE_FILL(tab_func, WASM_ONE,
WASM_GET_LOCAL(0), WASM_ONE)});
- // Anyref table can be initialized with an anyfunc value.
+ // Anyref table can be initialized with an funcref value.
ExpectValidates(sigs.v_a(), {WASM_TABLE_FILL(tab_ref, WASM_ONE,
WASM_GET_LOCAL(0), WASM_ONE)});
// Check that the table index gets verified.
@@ -3282,7 +3289,7 @@ TEST_F(FunctionBodyDecoderTest, TableFill) {
TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyRef, 10, true, 20);
+ module = builder.module();
{
WASM_FEATURE_SCOPE(anyref);
ExpectFailure(sigs.i_v(), {WASM_TABLE_GROW(0, WASM_REF_NULL, WASM_ONE)});
@@ -3294,10 +3301,93 @@ TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
WASM_FEATURE_SCOPE(bulk_memory);
builder.AddPassiveElementSegment();
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
+ {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ }
+}
+
+TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+ // We added one table, therefore table.copy on table 0 should work.
+ int table_src = 0;
+ int table_dst = 0;
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO)});
+ // There is only one table, so table.copy on table 1 should fail.
+ table_src = 0;
+ table_dst = 1;
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ table_src = 1;
+ table_dst = 0;
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ }
+ {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+ // We added two tables, therefore table.copy on table 0 should work.
+ int table_src = 0;
+ int table_dst = 0;
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO)});
+ // Also table.copy on table 1 should work now.
+ table_src = 1;
+ table_dst = 0;
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO)});
+ table_src = 0;
+ table_dst = 1;
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO)});
+ }
+}
+
+TEST_F(FunctionBodyDecoderTest, TableInitMultiTable) {
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+ // We added one table, therefore table.init on table 0 should work.
+ int table_index = 0;
+ ExpectValidates(sigs.v_v(), {WASM_TABLE_INIT(table_index, 0, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ // There is only one table, so table.init on table 1 should fail.
+ table_index = 1;
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_INIT(table_index, 0, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ }
+ {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+ // We added two tables, therefore table.init on table 0 should work.
+ int table_index = 0;
+ ExpectValidates(sigs.v_v(), {WASM_TABLE_INIT(table_index, 0, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ // Also table.init on table 1 should work now.
+ table_index = 1;
+ ExpectValidates(sigs.v_v(), {WASM_TABLE_INIT(table_index, 0, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
}
}
@@ -3628,9 +3718,9 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
pos = ExpectRun(map, pos, kWasmI64, 212);
}
-TEST_F(LocalDeclDecoderTest, ExceptRef) {
+TEST_F(LocalDeclDecoderTest, ExnRef) {
WASM_FEATURE_SCOPE(eh);
- ValueType type = kWasmExceptRef;
+ ValueType type = kWasmExnRef;
const byte data[] = {1, 1,
static_cast<byte>(ValueTypes::ValueTypeCodeFor(type))};
BodyLocalDecls decls(zone());
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index d63819ba70..4493fcf1dd 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -167,7 +167,7 @@ struct ValueTypePair {
{kLocalI64, kWasmI64}, // --
{kLocalF32, kWasmF32}, // --
{kLocalF64, kWasmF64}, // --
- {kLocalAnyFunc, kWasmAnyFunc}, // --
+ {kLocalFuncRef, kWasmFuncRef}, // --
{kLocalAnyRef, kWasmAnyRef} // --
};
@@ -306,7 +306,7 @@ TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
}
}
-TEST_F(WasmModuleVerifyTest, AnyFuncGlobal) {
+TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
@@ -315,10 +315,10 @@ TEST_F(WasmModuleVerifyTest, AnyFuncGlobal) {
TWO_EMPTY_FUNCTIONS(SIG_INDEX(0)),
SECTION(Global, // --
ENTRY_COUNT(2), // --
- kLocalAnyFunc, // local type
+ kLocalFuncRef, // local type
0, // immutable
WASM_INIT_EXPR_REF_NULL, // init
- kLocalAnyFunc, // local type
+ kLocalFuncRef, // local type
0, // immutable
WASM_INIT_EXPR_REF_FUNC(1)), // init
TWO_EMPTY_BODIES};
@@ -331,12 +331,12 @@ TEST_F(WasmModuleVerifyTest, AnyFuncGlobal) {
EXPECT_EQ(0u, result.value()->data_segments.size());
const WasmGlobal* global = &result.value()->globals[0];
- EXPECT_EQ(kWasmAnyFunc, global->type);
+ EXPECT_EQ(kWasmFuncRef, global->type);
EXPECT_FALSE(global->mutability);
EXPECT_EQ(WasmInitExpr::kRefNullConst, global->init.kind);
global = &result.value()->globals[1];
- EXPECT_EQ(kWasmAnyFunc, global->type);
+ EXPECT_EQ(kWasmFuncRef, global->type);
EXPECT_FALSE(global->mutability);
EXPECT_EQ(WasmInitExpr::kRefFuncConst, global->init.kind);
EXPECT_EQ(uint32_t{1}, global->init.val.function_index);
@@ -439,7 +439,9 @@ TEST_F(WasmModuleVerifyTest, ExportMutableGlobal) {
}
}
-static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
+static void AppendUint32v(
+ std::vector<byte>& buffer, // NOLINT(runtime/references)
+ uint32_t val) {
while (true) {
uint32_t next = val >> 7;
uint32_t out = val & 0x7F;
@@ -920,7 +922,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// code ----------------------------------------------------------------
ONE_EMPTY_BODY};
@@ -937,7 +939,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
TEST_F(WasmModuleVerifyTest, ElementSectionWithInternalTable) {
static const byte data[] = {
// table ---------------------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// elements ------------------------------------------------------------
SECTION(Element, ENTRY_COUNT(0))};
@@ -951,7 +953,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionWithImportedTable) {
ADD_COUNT('m'), // module name
ADD_COUNT('t'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
1), // initial size
// elements ------------------------------------------------------------
@@ -982,7 +984,7 @@ TEST_F(WasmModuleVerifyTest, Regression_735887) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(1), // entry count
@@ -1001,7 +1003,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(1), // entry count
@@ -1029,7 +1031,7 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
// funcs ------------------------------------------------------
FOUR_EMPTY_FUNCTIONS(SIG_INDEX(0)),
// table declaration -------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 8),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 8),
// table elements ----------------------------------------------
SECTION(Element,
ENTRY_COUNT(1), // entry count
@@ -1059,8 +1061,8 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 5, // table 0
- kLocalAnyFunc, 0, 9), // table 1
+ kLocalFuncRef, 0, 5, // table 0
+ kLocalFuncRef, 0, 9), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(2), // entry count
@@ -1092,21 +1094,21 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
ADD_COUNT('m'), // module name
ADD_COUNT('t'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
5, // initial size
ADD_COUNT('m'), // module name
ADD_COUNT('s'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
10), // initial size
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19), // table 1
+ kLocalFuncRef, 0, 15, // table 0
+ kLocalFuncRef, 0, 19), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
4, // entry count
@@ -1146,8 +1148,8 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 5, // table 0
- kLocalAnyFunc, 0, 9), // table 1
+ kLocalFuncRef, 0, 5, // table 0
+ kLocalFuncRef, 0, 9), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(3), // entry count
@@ -1183,21 +1185,21 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
ADD_COUNT('m'), // module name
ADD_COUNT('t'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
5, // initial size
ADD_COUNT('m'), // module name
ADD_COUNT('s'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
10), // initial size
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19), // table 1
+ kLocalFuncRef, 0, 15, // table 0
+ kLocalFuncRef, 0, 19), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
4, // entry count
@@ -1225,7 +1227,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
EXPECT_VERIFIES(data);
}
-TEST_F(WasmModuleVerifyTest, ElementSectionInitAnyRefTableWithAnyFunc) {
+TEST_F(WasmModuleVerifyTest, ElementSectionInitAnyRefTableWithFuncRef) {
WASM_FEATURE_SCOPE(anyref);
WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
@@ -1236,7 +1238,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionInitAnyRefTableWithAnyFunc) {
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
kLocalAnyRef, 0, 5, // table 0
- kLocalAnyFunc, 0, 9), // table 1
+ kLocalFuncRef, 0, 9), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(2), // entry count
@@ -1269,7 +1271,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefImportedTable) {
ADD_COUNT('m'), // module name
ADD_COUNT('t'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
5, // initial size
ADD_COUNT('m'), // module name
@@ -1282,8 +1284,8 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefImportedTable) {
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19), // table 1
+ kLocalFuncRef, 0, 15, // table 0
+ kLocalFuncRef, 0, 19), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(4), // entry count
@@ -1327,10 +1329,10 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithoutFlag) {
static const byte data[] = {
SECTION(Table, // table section
ENTRY_COUNT(2), // 2 tables
- kLocalAnyFunc, // table 1: type
+ kLocalFuncRef, // table 1: type
0, // table 1: no maximum
10, // table 1: minimum size
- kLocalAnyFunc, // table 2: type
+ kLocalFuncRef, // table 2: type
0, // table 2: no maximum
10), // table 2: minimum size
};
@@ -1342,7 +1344,7 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
static const byte data[] = {
SECTION(Table, // table section
ENTRY_COUNT(2), // 2 tables
- kLocalAnyFunc, // table 1: type
+ kLocalFuncRef, // table 1: type
0, // table 1: no maximum
10, // table 1: minimum size
kLocalAnyRef, // table 2: type
@@ -1356,7 +1358,7 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
EXPECT_EQ(2u, result.value()->tables.size());
EXPECT_EQ(10u, result.value()->tables[0].initial_size);
- EXPECT_EQ(kWasmAnyFunc, result.value()->tables[0].type);
+ EXPECT_EQ(kWasmFuncRef, result.value()->tables[0].type);
EXPECT_EQ(11u, result.value()->tables[1].initial_size);
EXPECT_EQ(kWasmAnyRef, result.value()->tables[1].type);
@@ -1544,7 +1546,7 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
TEST_F(WasmSignatureDecodeTest, Fail_anyref_without_flag) {
// Disable AnyRef support and check that decoding fails.
WASM_FEATURE_SCOPE_VAL(anyref, false);
- byte ref_types[] = {kLocalAnyFunc, kLocalAnyRef};
+ byte ref_types[] = {kLocalFuncRef, kLocalAnyRef};
for (byte invalid_type : ref_types) {
for (size_t i = 0;; i++) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
@@ -2350,9 +2352,9 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
// funcs -----------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration -----------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// element segments -----------------------------------------------------
- SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalAnyFunc, U32V_1(3),
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalFuncRef, U32V_1(3),
REF_FUNC_ELEMENT(0), REF_FUNC_ELEMENT(0), REF_NULL_ELEMENT),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
@@ -2369,7 +2371,7 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegmentAnyRef) {
// funcs -----------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration -----------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// element segments -----------------------------------------------------
SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalAnyRef, U32V_1(0)),
// code ------------------------------------------------------------------
diff --git a/deps/v8/test/unittests/wasm/wasm-text-unittest.cc b/deps/v8/test/unittests/wasm/wasm-text-unittest.cc
new file mode 100644
index 0000000000..e960b730a5
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-text-unittest.cc
@@ -0,0 +1,121 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+
+#include "test/unittests/test-utils.h"
+
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-text.h"
+#include "test/common/wasm/test-signatures.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmTextTest : public TestWithIsolateAndZone {
+ public:
+ TestSignatures sigs;
+ WasmFeatures enabled_features_;
+
+ void TestInstruction(const byte* func_start, size_t func_size) {
+ WasmModuleBuilder mb(zone());
+ auto* fb = mb.AddFunction(sigs.v_v());
+ fb->EmitCode(func_start, static_cast<uint32_t>(func_size));
+ fb->Emit(kExprEnd);
+
+ ZoneBuffer buffer(zone());
+ mb.WriteTo(&buffer);
+
+ ModuleWireBytes wire_bytes(
+ Vector<const byte>(buffer.begin(), buffer.size()));
+
+ ModuleResult result = DecodeWasmModule(
+ enabled_features_, buffer.begin(), buffer.end(), false, kWasmOrigin,
+ isolate()->counters(), isolate()->wasm_engine()->allocator());
+ EXPECT_TRUE(result.ok());
+
+ std::stringstream ss;
+ PrintWasmText(result.value().get(), wire_bytes, 0, ss, nullptr);
+ }
+};
+
+TEST_F(WasmTextTest, EveryOpcodeCanBeDecoded) {
+ static const struct {
+ WasmOpcode opcode;
+ const char* debug_name;
+ } kValues[] = {
+#define DECLARE_ELEMENT(name, opcode, sig) {kExpr##name, "kExpr" #name},
+ FOREACH_OPCODE(DECLARE_ELEMENT)};
+#undef DECLARE_ELEMENT
+
+ for (const auto& value : kValues) {
+ // Pad with 0 for any immediate values. If they're not needed, they'll be
+ // interpreted as unreachable.
+ byte data[20] = {0};
+
+ printf("%s\n", value.debug_name);
+ switch (value.opcode) {
+ // Instructions that have a special case because they affect the control
+ // depth.
+ case kExprBlock:
+ case kExprLoop:
+ case kExprIf:
+ case kExprTry:
+ data[0] = value.opcode;
+ data[1] = kLocalVoid;
+ data[2] = kExprEnd;
+ break;
+ case kExprElse:
+ data[0] = kExprIf;
+ data[1] = value.opcode;
+ data[2] = kExprEnd;
+ break;
+ case kExprCatch:
+ data[0] = kExprTry;
+ data[1] = value.opcode;
+ data[2] = kExprEnd;
+ break;
+ case kExprEnd:
+ break;
+
+ // Instructions with special requirements for immediates.
+ case kExprSelectWithType:
+ data[0] = kExprSelectWithType;
+ data[1] = 1;
+ data[2] = kLocalI32;
+ break;
+
+ default: {
+ if (value.opcode >= 0x100) {
+ data[0] = value.opcode >> 8; // Prefix byte.
+ byte opcode = value.opcode & 0xff; // Actual opcode.
+ if (opcode >= 0x80) {
+ // Opcode with prefix, and needs to be LEB encoded (3 bytes).
+ // For now, this can only be in the range [0x80, 0xff], which means
+ // that the third byte is always 1.
+ data[1] = (opcode & 0x7f) | 0x80;
+ data[2] = 1;
+ } else {
+ // Opcode with prefix (2 bytes).
+ data[1] = opcode;
+ }
+ } else {
+ // Single-byte opcode.
+ data[0] = value.opcode;
+ }
+ break;
+ }
+ }
+
+ TestInstruction(data, arraysize(data));
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/BUILD.gn b/deps/v8/test/wasm-api-tests/BUILD.gn
index c411cb0eb2..d0e2c01ac9 100644
--- a/deps/v8/test/wasm-api-tests/BUILD.gn
+++ b/deps/v8/test/wasm-api-tests/BUILD.gn
@@ -30,6 +30,15 @@ v8_executable("wasm_api_tests") {
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
"callbacks.cc",
+ "finalize.cc",
+ "globals.cc",
+ "memory.cc",
+ "reflect.cc",
"run-all-wasm-api-tests.cc",
+ "serialize.cc",
+ "table.cc",
+ "threads.cc",
+ "traps.cc",
+ "wasm-api-test.h",
]
}
diff --git a/deps/v8/test/wasm-api-tests/callbacks.cc b/deps/v8/test/wasm-api-tests/callbacks.cc
index 1c91d9ca54..960fa726dd 100644
--- a/deps/v8/test/wasm-api-tests/callbacks.cc
+++ b/deps/v8/test/wasm-api-tests/callbacks.cc
@@ -2,29 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "test/wasm-api-tests/wasm-api-test.h"
+
#include "src/execution/isolate.h"
#include "src/heap/heap.h"
#include "src/wasm/c-api.h"
-#include "src/wasm/wasm-module-builder.h"
-#include "src/wasm/wasm-opcodes.h"
-#include "src/zone/accounting-allocator.h"
-#include "src/zone/zone.h"
-#include "test/common/wasm/wasm-macro-gen.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/wasm-api/wasm.hh"
-
-namespace wasm {
-
-// TODO(jkummerow): Drop these from the API.
-#ifdef DEBUG
-template <class T>
-void vec<T>::make_data() {}
-
-template <class T>
-void vec<T>::free_data() {}
-#endif
-
-} // namespace wasm
namespace v8 {
namespace internal {
@@ -32,113 +14,17 @@ namespace wasm {
namespace {
-using ::wasm::Engine;
-using ::wasm::Extern;
-using ::wasm::Func;
-using ::wasm::FuncType;
-using ::wasm::Instance;
-using ::wasm::Module;
-using ::wasm::own;
-using ::wasm::Store;
-using ::wasm::Trap;
-using ::wasm::Val;
-using ::wasm::ValType;
-using ::wasm::vec;
-
-own<Trap*> Stage2(void* env, const Val args[], Val results[]);
-
-class WasmCapiTest : public ::testing::Test {
- public:
- WasmCapiTest()
- : Test(),
- zone_(&allocator_, ZONE_NAME),
- builder_(&zone_),
- exports_(vec<Extern*>::make()),
- wasm_sig_(1, 1, wasm_sig_types_) {
- engine_ = Engine::make();
- store_ = Store::make(engine_.get());
-
- // Build the following function:
- // int32 stage1(int32 arg0) { return stage2(arg0); }
- uint32_t stage2_index =
- builder_.AddImport(ArrayVector("stage2"), wasm_sig());
- byte code[] = {WASM_CALL_FUNCTION(stage2_index, WASM_GET_LOCAL(0))};
- AddExportedFunction(CStrVector("stage1"), code, sizeof(code));
-
- cpp_sig_ = FuncType::make(vec<ValType*>::make(ValType::make(::wasm::I32)),
- vec<ValType*>::make(ValType::make(::wasm::I32)));
- stage2_ = Func::make(store(), cpp_sig_.get(), Stage2, this);
- }
-
- void Compile() {
- ZoneBuffer buffer(&zone_);
- builder_.WriteTo(buffer);
- size_t size = buffer.end() - buffer.begin();
- vec<byte_t> binary = vec<byte_t>::make(
- size, reinterpret_cast<byte_t*>(const_cast<byte*>(buffer.begin())));
-
- module_ = Module::make(store_.get(), binary);
- DCHECK_NE(module_.get(), nullptr);
- }
-
- own<Trap*> Run(Extern* imports[], Val args[], Val results[]) {
- instance_ = Instance::make(store_.get(), module_.get(), imports);
- DCHECK_NE(instance_.get(), nullptr);
- exports_ = instance_->exports();
- Func* entry = GetExportedFunction(0);
- return entry->call(args, results);
- }
-
- void AddExportedFunction(Vector<const char> name, byte code[],
- size_t code_size) {
- WasmFunctionBuilder* fun = builder()->AddFunction(wasm_sig());
- fun->EmitCode(code, static_cast<uint32_t>(code_size));
- fun->Emit(kExprEnd);
- builder()->AddExport(name, fun);
- }
-
- Func* GetExportedFunction(size_t index) {
- DCHECK_GT(exports_.size(), index);
- Extern* exported = exports_[index];
- DCHECK_EQ(exported->kind(), ::wasm::EXTERN_FUNC);
- Func* func = exported->func();
- DCHECK_NE(func, nullptr);
- return func;
- }
-
- WasmModuleBuilder* builder() { return &builder_; }
- Store* store() { return store_.get(); }
- Func* stage2() { return stage2_.get(); }
-
- FunctionSig* wasm_sig() { return &wasm_sig_; }
- FuncType* cpp_sig() { return cpp_sig_.get(); }
-
- private:
- AccountingAllocator allocator_;
- Zone zone_;
- WasmModuleBuilder builder_;
- own<Engine*> engine_;
- own<Store*> store_;
- own<Module*> module_;
- own<Instance*> instance_;
- vec<Extern*> exports_;
- own<Func*> stage2_;
- own<FuncType*> cpp_sig_;
- ValueType wasm_sig_types_[2] = {kWasmI32, kWasmI32};
- FunctionSig wasm_sig_;
-};
-
own<Trap*> Stage2(void* env, const Val args[], Val results[]) {
printf("Stage2...\n");
WasmCapiTest* self = reinterpret_cast<WasmCapiTest*>(env);
Func* stage3 = self->GetExportedFunction(1);
- own<Trap*> result = stage3->call(args, results);
- if (result) {
- printf("Stage2: got exception: %s\n", result->message().get());
+ own<Trap*> trap = stage3->call(args, results);
+ if (trap) {
+ printf("Stage2: got exception: %s\n", trap->message().get());
} else {
printf("Stage2: call successful\n");
}
- return result;
+ return trap;
}
own<Trap*> Stage4_GC(void* env, const Val args[], Val results[]) {
@@ -151,45 +37,192 @@ own<Trap*> Stage4_GC(void* env, const Val args[], Val results[]) {
return nullptr;
}
+class WasmCapiCallbacksTest : public WasmCapiTest {
+ public:
+ WasmCapiCallbacksTest() : WasmCapiTest() {
+ // Build the following function:
+ // int32 stage1(int32 arg0) { return stage2(arg0); }
+ uint32_t stage2_index =
+ builder()->AddImport(CStrVector("stage2"), wasm_i_i_sig());
+ byte code[] = {WASM_CALL_FUNCTION(stage2_index, WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("stage1"), code, sizeof(code));
+
+ stage2_ = Func::make(store(), cpp_i_i_sig(), Stage2, this);
+ }
+
+ Func* stage2() { return stage2_.get(); }
+ void AddExportedFunction(Vector<const char> name, byte code[],
+ size_t code_size) {
+ WasmCapiTest::AddExportedFunction(name, code, code_size, wasm_i_i_sig());
+ }
+
+ private:
+ own<Func*> stage2_;
+};
+
} // namespace
-TEST_F(WasmCapiTest, Trap) {
+TEST_F(WasmCapiCallbacksTest, Trap) {
// Build the following function:
// int32 stage3_trap(int32 arg0) { unreachable(); }
byte code[] = {WASM_UNREACHABLE};
AddExportedFunction(CStrVector("stage3_trap"), code, sizeof(code));
- Compile();
Extern* imports[] = {stage2()};
+ Instantiate(imports);
Val args[] = {Val::i32(42)};
Val results[1];
- own<Trap*> result = Run(imports, args, results);
- EXPECT_NE(result, nullptr);
- printf("Stage0: Got trap as expected: %s\n", result->message().get());
+ own<Trap*> trap = GetExportedFunction(0)->call(args, results);
+ EXPECT_NE(trap, nullptr);
+ printf("Stage0: Got trap as expected: %s\n", trap->message().get());
}
-TEST_F(WasmCapiTest, GC) {
+TEST_F(WasmCapiCallbacksTest, GC) {
// Build the following function:
// int32 stage3_to4(int32 arg0) { return stage4(arg0); }
uint32_t stage4_index =
- builder()->AddImport(ArrayVector("stage4"), wasm_sig());
+ builder()->AddImport(CStrVector("stage4"), wasm_i_i_sig());
byte code[] = {WASM_CALL_FUNCTION(stage4_index, WASM_GET_LOCAL(0))};
AddExportedFunction(CStrVector("stage3_to4"), code, sizeof(code));
- Compile();
i::Isolate* isolate =
reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate();
- own<Func*> stage4 = Func::make(store(), cpp_sig(), Stage4_GC, isolate);
- EXPECT_EQ(cpp_sig()->params().size(), stage4->type()->params().size());
- EXPECT_EQ(cpp_sig()->results().size(), stage4->type()->results().size());
+ own<Func*> stage4 = Func::make(store(), cpp_i_i_sig(), Stage4_GC, isolate);
+ EXPECT_EQ(cpp_i_i_sig()->params().size(), stage4->type()->params().size());
+ EXPECT_EQ(cpp_i_i_sig()->results().size(), stage4->type()->results().size());
Extern* imports[] = {stage2(), stage4.get()};
+ Instantiate(imports);
Val args[] = {Val::i32(42)};
Val results[1];
- own<Trap*> result = Run(imports, args, results);
- EXPECT_EQ(result, nullptr);
+ own<Trap*> trap = GetExportedFunction(0)->call(args, results);
+ EXPECT_EQ(trap, nullptr);
EXPECT_EQ(43, results[0].i32());
}
+namespace {
+
+own<Trap*> FibonacciC(void* env, const Val args[], Val results[]) {
+ int32_t x = args[0].i32();
+ if (x == 0 || x == 1) {
+ results[0] = Val::i32(x);
+ return nullptr;
+ }
+ WasmCapiTest* self = reinterpret_cast<WasmCapiTest*>(env);
+ Func* fibo_wasm = self->GetExportedFunction(0);
+ // Aggressively re-use existing arrays. That's maybe not great coding
+ // style, but this test intentionally ensures that it works if someone
+ // insists on doing it.
+ Val recursive_args[] = {Val::i32(x - 1)};
+ own<Trap*> trap = fibo_wasm->call(recursive_args, results);
+ DCHECK_NULL(trap);
+ int32_t x1 = results[0].i32();
+ recursive_args[0] = Val::i32(x - 2);
+ trap = fibo_wasm->call(recursive_args, results);
+ DCHECK_NULL(trap);
+ int32_t x2 = results[0].i32();
+ results[0] = Val::i32(x1 + x2);
+ return nullptr;
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, Recursion) {
+ // Build the following function:
+ // int32 fibonacci_wasm(int32 arg0) {
+ // if (arg0 == 0) return 0;
+ // if (arg0 == 1) return 1;
+ // return fibonacci_c(arg0 - 1) + fibonacci_c(arg0 - 2);
+ // }
+ uint32_t fibo_c_index =
+ builder()->AddImport(CStrVector("fibonacci_c"), wasm_i_i_sig());
+ byte code_fibo[] = {
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_ZERO),
+ WASM_RETURN1(WASM_ZERO)),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_ONE), WASM_RETURN1(WASM_ONE)),
+ // Muck with the parameter to ensure callers don't depend on its value.
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_ONE)),
+ WASM_RETURN1(WASM_I32_ADD(
+ WASM_CALL_FUNCTION(fibo_c_index, WASM_GET_LOCAL(0)),
+ WASM_CALL_FUNCTION(fibo_c_index,
+ WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_ONE))))};
+ AddExportedFunction(CStrVector("fibonacci_wasm"), code_fibo,
+ sizeof(code_fibo), wasm_i_i_sig());
+
+ own<Func*> fibonacci = Func::make(store(), cpp_i_i_sig(), FibonacciC, this);
+ Extern* imports[] = {fibonacci.get()};
+ Instantiate(imports);
+ // Enough iterations to make it interesting, few enough to keep it fast.
+ Val args[] = {Val::i32(15)};
+ Val results[1];
+ own<Trap*> result = GetExportedFunction(0)->call(args, results);
+ EXPECT_EQ(result, nullptr);
+ EXPECT_EQ(610, results[0].i32());
+}
+
+namespace {
+
+own<Trap*> PlusOne(const Val args[], Val results[]) {
+ int32_t a0 = args[0].i32();
+ results[0] = Val::i32(a0 + 1);
+ int64_t a1 = args[1].i64();
+ results[1] = Val::i64(a1 + 1);
+ float a2 = args[2].f32();
+ results[2] = Val::f32(a2 + 1);
+ double a3 = args[3].f64();
+ results[3] = Val::f64(a3 + 1);
+ results[4] = Val::ref(args[4].ref()->copy()); // No +1 for Refs.
+ return nullptr;
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, DirectCallCapiFunction) {
+ own<FuncType*> cpp_sig =
+ FuncType::make(vec<ValType*>::make(
+ ValType::make(::wasm::I32), ValType::make(::wasm::I64),
+ ValType::make(::wasm::F32), ValType::make(::wasm::F64),
+ ValType::make(::wasm::ANYREF)),
+ vec<ValType*>::make(
+ ValType::make(::wasm::I32), ValType::make(::wasm::I64),
+ ValType::make(::wasm::F32), ValType::make(::wasm::F64),
+ ValType::make(::wasm::ANYREF)));
+ own<Func*> func = Func::make(store(), cpp_sig.get(), PlusOne);
+ Extern* imports[] = {func.get()};
+ ValueType wasm_types[] = {kWasmI32, kWasmI64, kWasmF32, kWasmF64,
+ kWasmAnyRef, kWasmI32, kWasmI64, kWasmF32,
+ kWasmF64, kWasmAnyRef};
+ FunctionSig wasm_sig(5, 5, wasm_types);
+ int func_index = builder()->AddImport(CStrVector("func"), &wasm_sig);
+ builder()->ExportImportedFunction(CStrVector("func"), func_index);
+ Instantiate(imports);
+ int32_t a0 = 42;
+ int64_t a1 = 0x1234c0ffee;
+ float a2 = 1234.5;
+ double a3 = 123.45;
+ Val args[] = {Val::i32(a0), Val::i64(a1), Val::f32(a2), Val::f64(a3),
+ Val::ref(func->copy())};
+ Val results[5];
+ // Test that {func} can be called directly.
+ own<Trap*> trap = func->call(args, results);
+ EXPECT_EQ(nullptr, trap);
+ EXPECT_EQ(a0 + 1, results[0].i32());
+ EXPECT_EQ(a1 + 1, results[1].i64());
+ EXPECT_EQ(a2 + 1, results[2].f32());
+ EXPECT_EQ(a3 + 1, results[3].f64());
+ // TODO(jkummerow): Check that func == results[4] when we have a way
+ // to do so.
+
+ // Test that {func} can be called after import/export round-tripping.
+ trap = GetExportedFunction(0)->call(args, results);
+ EXPECT_EQ(nullptr, trap);
+ EXPECT_EQ(a0 + 1, results[0].i32());
+ EXPECT_EQ(a1 + 1, results[1].i64());
+ EXPECT_EQ(a2 + 1, results[2].f32());
+ EXPECT_EQ(a3 + 1, results[3].f64());
+ // TODO(jkummerow): Check that func == results[4] when we have a way
+ // to do so.
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/finalize.cc b/deps/v8/test/wasm-api-tests/finalize.cc
new file mode 100644
index 0000000000..7b3b976ca7
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/finalize.cc
@@ -0,0 +1,77 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+
+int g_instances_finalized = 0;
+int g_functions_finalized = 0;
+int g_foreigns_finalized = 0;
+int g_modules_finalized = 0;
+
+void FinalizeInstance(void* data) {
+ int iteration = static_cast<int>(reinterpret_cast<intptr_t>(data));
+ g_instances_finalized += iteration;
+}
+
+void FinalizeFunction(void* data) {
+ int iteration = static_cast<int>(reinterpret_cast<intptr_t>(data));
+ g_functions_finalized += iteration;
+}
+
+void FinalizeForeign(void* data) {
+ int iteration = static_cast<int>(reinterpret_cast<intptr_t>(data));
+ g_foreigns_finalized += iteration;
+}
+
+void FinalizeModule(void* data) {
+ g_modules_finalized += static_cast<int>(reinterpret_cast<intptr_t>(data));
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, InstanceFinalization) {
+ // Add a dummy function: f(x) { return x; }
+ byte code[] = {WASM_RETURN1(WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("f"), code, sizeof(code), wasm_i_i_sig());
+ Compile();
+ g_instances_finalized = 0;
+ g_functions_finalized = 0;
+ g_foreigns_finalized = 0;
+ g_modules_finalized = 0;
+ module()->set_host_info(reinterpret_cast<void*>(42), &FinalizeModule);
+ static const int kIterations = 10;
+ for (int iteration = 0; iteration < kIterations; iteration++) {
+ void* finalizer_data = reinterpret_cast<void*>(iteration);
+ own<Instance*> instance = Instance::make(store(), module(), nullptr);
+ EXPECT_NE(nullptr, instance.get());
+ instance->set_host_info(finalizer_data, &FinalizeInstance);
+
+ own<Func*> func = instance->exports()[0]->func()->copy();
+ ASSERT_NE(func, nullptr);
+ func->set_host_info(finalizer_data, &FinalizeFunction);
+
+ own<Foreign*> foreign = Foreign::make(store());
+ foreign->set_host_info(finalizer_data, &FinalizeForeign);
+ }
+ Shutdown();
+ // Verify that (1) all finalizers were called, and (2) they passed the
+ // correct host data: the loop above sets {i} as data, and the finalizer
+ // callbacks add them all up, so the expected value is
+ // sum([0, 1, ..., kIterations - 1]), which per Gauss's formula is:
+ static const int kExpected = (kIterations * (kIterations - 1)) / 2;
+ EXPECT_EQ(g_instances_finalized, kExpected);
+ EXPECT_EQ(g_functions_finalized, kExpected);
+ EXPECT_EQ(g_foreigns_finalized, kExpected);
+ EXPECT_EQ(g_modules_finalized, 42);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/globals.cc b/deps/v8/test/wasm-api-tests/globals.cc
new file mode 100644
index 0000000000..d47f326cc4
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/globals.cc
@@ -0,0 +1,208 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::GlobalType;
+
+TEST_F(WasmCapiTest, Globals) {
+ const bool kMutable = true;
+ const bool kImmutable = false;
+
+ // Define imported and exported globals in the module.
+ const uint32_t cfi_index =
+ builder()->AddGlobalImport(CStrVector("const f32"), kWasmF32, kImmutable);
+ const uint32_t cii_index =
+ builder()->AddGlobalImport(CStrVector("const i64"), kWasmI64, kImmutable);
+ const uint32_t vfi_index =
+ builder()->AddGlobalImport(CStrVector("var f32"), kWasmF32, kMutable);
+ const uint32_t vii_index =
+ builder()->AddGlobalImport(CStrVector("var i64"), kWasmI64, kMutable);
+ const int kNumImported = 4;
+
+ const uint32_t cfe_index =
+ kNumImported + builder()->AddExportedGlobal(kWasmF32, kImmutable,
+ WasmInitExpr(5.f),
+ CStrVector("const f32"));
+ const uint32_t cie_index =
+ kNumImported + builder()->AddExportedGlobal(kWasmI64, kImmutable,
+ WasmInitExpr(int64_t{6}),
+ CStrVector("const i64"));
+ const uint32_t vfe_index =
+ kNumImported + builder()->AddExportedGlobal(kWasmF32, kMutable,
+ WasmInitExpr(7.f),
+ CStrVector("var f32"));
+ const uint32_t vie_index =
+ kNumImported + builder()->AddExportedGlobal(kWasmI64, kMutable,
+ WasmInitExpr(int64_t{8}),
+ CStrVector("var i64"));
+
+ // Define functions for inspecting globals.
+ ValueType f32_type[] = {kWasmF32};
+ ValueType i64_type[] = {kWasmI64};
+ FunctionSig return_f32(1, 0, f32_type);
+ FunctionSig return_i64(1, 0, i64_type);
+ byte gcfi[] = {WASM_GET_GLOBAL(cfi_index)};
+ AddExportedFunction(CStrVector("get const f32 import"), gcfi, sizeof(gcfi),
+ &return_f32);
+ byte gcii[] = {WASM_GET_GLOBAL(cii_index)};
+ AddExportedFunction(CStrVector("get const i64 import"), gcii, sizeof(gcii),
+ &return_i64);
+ byte gvfi[] = {WASM_GET_GLOBAL(vfi_index)};
+ AddExportedFunction(CStrVector("get var f32 import"), gvfi, sizeof(gvfi),
+ &return_f32);
+ byte gvii[] = {WASM_GET_GLOBAL(vii_index)};
+ AddExportedFunction(CStrVector("get var i64 import"), gvii, sizeof(gvii),
+ &return_i64);
+
+ byte gcfe[] = {WASM_GET_GLOBAL(cfe_index)};
+ AddExportedFunction(CStrVector("get const f32 export"), gcfe, sizeof(gcfe),
+ &return_f32);
+ byte gcie[] = {WASM_GET_GLOBAL(cie_index)};
+ AddExportedFunction(CStrVector("get const i64 export"), gcie, sizeof(gcie),
+ &return_i64);
+ byte gvfe[] = {WASM_GET_GLOBAL(vfe_index)};
+ AddExportedFunction(CStrVector("get var f32 export"), gvfe, sizeof(gvfe),
+ &return_f32);
+ byte gvie[] = {WASM_GET_GLOBAL(vie_index)};
+ AddExportedFunction(CStrVector("get var i64 export"), gvie, sizeof(gvie),
+ &return_i64);
+
+ // Define functions for manipulating globals.
+ FunctionSig param_f32(0, 1, f32_type);
+ FunctionSig param_i64(0, 1, i64_type);
+ byte svfi[] = {WASM_SET_GLOBAL(vfi_index, WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("set var f32 import"), svfi, sizeof(svfi),
+ &param_f32);
+ byte svii[] = {WASM_SET_GLOBAL(vii_index, WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("set var i64 import"), svii, sizeof(svii),
+ &param_i64);
+ byte svfe[] = {WASM_SET_GLOBAL(vfe_index, WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("set var f32 export"), svfe, sizeof(svfe),
+ &param_f32);
+ byte svie[] = {WASM_SET_GLOBAL(vie_index, WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("set var i64 export"), svie, sizeof(svie),
+ &param_i64);
+
+ // Create imported globals.
+ own<GlobalType*> const_f32_type =
+ GlobalType::make(ValType::make(::wasm::F32), ::wasm::CONST);
+ own<GlobalType*> const_i64_type =
+ GlobalType::make(ValType::make(::wasm::I64), ::wasm::CONST);
+ own<GlobalType*> var_f32_type =
+ GlobalType::make(ValType::make(::wasm::F32), ::wasm::VAR);
+ own<GlobalType*> var_i64_type =
+ GlobalType::make(ValType::make(::wasm::I64), ::wasm::VAR);
+ own<Global*> const_f32_import =
+ Global::make(store(), const_f32_type.get(), Val::f32(1));
+ own<Global*> const_i64_import =
+ Global::make(store(), const_i64_type.get(), Val::i64(2));
+ own<Global*> var_f32_import =
+ Global::make(store(), var_f32_type.get(), Val::f32(3));
+ own<Global*> var_i64_import =
+ Global::make(store(), var_i64_type.get(), Val::i64(4));
+ Extern* imports[] = {const_f32_import.get(), const_i64_import.get(),
+ var_f32_import.get(), var_i64_import.get()};
+
+ Instantiate(imports);
+
+ // Extract exports.
+ size_t i = 0;
+ Global* const_f32_export = GetExportedGlobal(i++);
+ Global* const_i64_export = GetExportedGlobal(i++);
+ Global* var_f32_export = GetExportedGlobal(i++);
+ Global* var_i64_export = GetExportedGlobal(i++);
+ Func* get_const_f32_import = GetExportedFunction(i++);
+ Func* get_const_i64_import = GetExportedFunction(i++);
+ Func* get_var_f32_import = GetExportedFunction(i++);
+ Func* get_var_i64_import = GetExportedFunction(i++);
+ Func* get_const_f32_export = GetExportedFunction(i++);
+ Func* get_const_i64_export = GetExportedFunction(i++);
+ Func* get_var_f32_export = GetExportedFunction(i++);
+ Func* get_var_i64_export = GetExportedFunction(i++);
+ Func* set_var_f32_import = GetExportedFunction(i++);
+ Func* set_var_i64_import = GetExportedFunction(i++);
+ Func* set_var_f32_export = GetExportedFunction(i++);
+ Func* set_var_i64_export = GetExportedFunction(i++);
+
+ // Check initial values.
+ EXPECT_EQ(1.f, const_f32_import->get().f32());
+ EXPECT_EQ(2, const_i64_import->get().i64());
+ EXPECT_EQ(3.f, var_f32_import->get().f32());
+ EXPECT_EQ(4, var_i64_import->get().i64());
+ EXPECT_EQ(5.f, const_f32_export->get().f32());
+ EXPECT_EQ(6, const_i64_export->get().i64());
+ EXPECT_EQ(7.f, var_f32_export->get().f32());
+ EXPECT_EQ(8, var_i64_export->get().i64());
+ Val result[1];
+ get_const_f32_import->call(nullptr, result);
+ EXPECT_EQ(1.f, result[0].f32());
+ get_const_i64_import->call(nullptr, result);
+ EXPECT_EQ(2, result[0].i64());
+ get_var_f32_import->call(nullptr, result);
+ EXPECT_EQ(3.f, result[0].f32());
+ get_var_i64_import->call(nullptr, result);
+ EXPECT_EQ(4, result[0].i64());
+ get_const_f32_export->call(nullptr, result);
+ EXPECT_EQ(5.f, result[0].f32());
+ get_const_i64_export->call(nullptr, result);
+ EXPECT_EQ(6, result[0].i64());
+ get_var_f32_export->call(nullptr, result);
+ EXPECT_EQ(7.f, result[0].f32());
+ get_var_i64_export->call(nullptr, result);
+ EXPECT_EQ(8, result[0].i64());
+
+ // Modify variables through the API and check again.
+ var_f32_import->set(Val::f32(33));
+ var_i64_import->set(Val::i64(34));
+ var_f32_export->set(Val::f32(35));
+ var_i64_export->set(Val::i64(36));
+
+ EXPECT_EQ(33.f, var_f32_import->get().f32());
+ EXPECT_EQ(34, var_i64_import->get().i64());
+ EXPECT_EQ(35.f, var_f32_export->get().f32());
+ EXPECT_EQ(36, var_i64_export->get().i64());
+
+ get_var_f32_import->call(nullptr, result);
+ EXPECT_EQ(33.f, result[0].f32());
+ get_var_i64_import->call(nullptr, result);
+ EXPECT_EQ(34, result[0].i64());
+ get_var_f32_export->call(nullptr, result);
+ EXPECT_EQ(35.f, result[0].f32());
+ get_var_i64_export->call(nullptr, result);
+ EXPECT_EQ(36, result[0].i64());
+
+ // Modify variables through calls and check again.
+ Val args[1];
+ args[0] = Val::f32(73);
+ set_var_f32_import->call(args, nullptr);
+ args[0] = Val::i64(74);
+ set_var_i64_import->call(args, nullptr);
+ args[0] = Val::f32(75);
+ set_var_f32_export->call(args, nullptr);
+ args[0] = Val::i64(76);
+ set_var_i64_export->call(args, nullptr);
+
+ EXPECT_EQ(73.f, var_f32_import->get().f32());
+ EXPECT_EQ(74, var_i64_import->get().i64());
+ EXPECT_EQ(75.f, var_f32_export->get().f32());
+ EXPECT_EQ(76, var_i64_export->get().i64());
+
+ get_var_f32_import->call(nullptr, result);
+ EXPECT_EQ(73.f, result[0].f32());
+ get_var_i64_import->call(nullptr, result);
+ EXPECT_EQ(74, result[0].i64());
+ get_var_f32_export->call(nullptr, result);
+ EXPECT_EQ(75.f, result[0].f32());
+ get_var_i64_export->call(nullptr, result);
+ EXPECT_EQ(76, result[0].i64());
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/memory.cc b/deps/v8/test/wasm-api-tests/memory.cc
new file mode 100644
index 0000000000..aec4cf8ccc
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/memory.cc
@@ -0,0 +1,123 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::Limits;
+using ::wasm::MemoryType;
+
+TEST_F(WasmCapiTest, Memory) {
+ builder()->SetMinMemorySize(2);
+ builder()->SetMaxMemorySize(3);
+ builder()->AddExport(CStrVector("memory"), kExternalMemory, 0);
+
+ ValueType i32_type[] = {kWasmI32, kWasmI32};
+ FunctionSig return_i32(1, 0, i32_type);
+ FunctionSig param_i32_return_i32(1, 1, i32_type);
+ FunctionSig param_i32_i32(0, 2, i32_type);
+ byte size_code[] = {WASM_MEMORY_SIZE};
+ AddExportedFunction(CStrVector("size"), size_code, sizeof(size_code),
+ &return_i32);
+ byte load_code[] = {WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("load"), load_code, sizeof(load_code),
+ &param_i32_return_i32);
+ byte store_code[] = {WASM_STORE_MEM(MachineType::Int8(), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1))};
+ AddExportedFunction(CStrVector("store"), store_code, sizeof(store_code),
+ &param_i32_i32);
+
+ byte data[] = {0x1, 0x2, 0x3, 0x4};
+ builder()->AddDataSegment(data, sizeof(data), 0x1000);
+
+ Instantiate(nullptr);
+
+ Memory* memory = GetExportedMemory(0);
+ Func* size_func = GetExportedFunction(1);
+ Func* load_func = GetExportedFunction(2);
+ Func* store_func = GetExportedFunction(3);
+
+ // Check initial state.
+ EXPECT_EQ(2u, memory->size());
+ EXPECT_EQ(0x20000u, memory->data_size());
+ EXPECT_EQ(0, memory->data()[0]);
+ EXPECT_EQ(1, memory->data()[0x1000]);
+ EXPECT_EQ(4, memory->data()[0x1003]);
+ Val args[2];
+ Val result[1];
+ // size == 2
+ size_func->call(nullptr, result);
+ EXPECT_EQ(2, result[0].i32());
+ // load(0) == 0
+ args[0] = Val::i32(0x0);
+ load_func->call(args, result);
+ EXPECT_EQ(0, result[0].i32());
+ // load(0x1000) == 1
+ args[0] = Val::i32(0x1000);
+ load_func->call(args, result);
+ EXPECT_EQ(1, result[0].i32());
+ // load(0x1003) == 4
+ args[0] = Val::i32(0x1003);
+ load_func->call(args, result);
+ EXPECT_EQ(4, result[0].i32());
+ // load(0x1FFFF) == 0
+ args[0] = Val::i32(0x1FFFF);
+ load_func->call(args, result);
+ EXPECT_EQ(0, result[0].i32());
+ // load(0x20000) -> trap
+ args[0] = Val::i32(0x20000);
+ own<Trap*> trap = load_func->call(args, result);
+ EXPECT_NE(nullptr, trap.get());
+
+ // Mutate memory.
+ memory->data()[0x1003] = 5;
+ args[0] = Val::i32(0x1002);
+ args[1] = Val::i32(6);
+ trap = store_func->call(args, nullptr);
+ EXPECT_EQ(nullptr, trap.get());
+ args[0] = Val::i32(0x20000);
+ trap = store_func->call(args, nullptr);
+ EXPECT_NE(nullptr, trap.get());
+ EXPECT_EQ(6, memory->data()[0x1002]);
+ EXPECT_EQ(5, memory->data()[0x1003]);
+ args[0] = Val::i32(0x1002);
+ load_func->call(args, result);
+ EXPECT_EQ(6, result[0].i32());
+ args[0] = Val::i32(0x1003);
+ load_func->call(args, result);
+ EXPECT_EQ(5, result[0].i32());
+
+ // Grow memory.
+ EXPECT_EQ(true, memory->grow(1));
+ EXPECT_EQ(3u, memory->size());
+ EXPECT_EQ(0x30000u, memory->data_size());
+ args[0] = Val::i32(0x20000);
+ trap = load_func->call(args, result);
+ EXPECT_EQ(nullptr, trap.get());
+ EXPECT_EQ(0, result[0].i32());
+ trap = store_func->call(args, nullptr);
+ EXPECT_EQ(nullptr, trap.get());
+ args[0] = Val::i32(0x30000);
+ trap = load_func->call(args, result);
+ EXPECT_NE(nullptr, trap.get());
+ trap = store_func->call(args, nullptr);
+ EXPECT_NE(nullptr, trap.get());
+ EXPECT_EQ(false, memory->grow(1));
+ EXPECT_EQ(true, memory->grow(0));
+
+ // Create standalone memory.
+ // TODO(wasm): Once Wasm allows multiple memories, turn this into an import.
+ own<MemoryType*> mem_type = MemoryType::make(Limits(5, 5));
+ own<Memory*> memory2 = Memory::make(store(), mem_type.get());
+ EXPECT_EQ(5u, memory2->size());
+ EXPECT_EQ(false, memory2->grow(1));
+ EXPECT_EQ(true, memory2->grow(0));
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/reflect.cc b/deps/v8/test/wasm-api-tests/reflect.cc
new file mode 100644
index 0000000000..a7def627db
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/reflect.cc
@@ -0,0 +1,109 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+#include "src/execution/isolate.h"
+#include "src/heap/heap.h"
+#include "src/wasm/c-api.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::ExportType;
+using ::wasm::GlobalType;
+using ::wasm::MemoryType;
+using ::wasm::TableType;
+
+namespace {
+
+const char* kFuncName = "func1";
+const char* kGlobalName = "global2";
+const char* kTableName = "table3";
+const char* kMemoryName = "memory4";
+
+void ExpectName(const char* expected, const ::wasm::Name& name) {
+ size_t len = strlen(expected);
+ EXPECT_EQ(len, name.size());
+ EXPECT_EQ(0, strncmp(expected, name.get(), len));
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, Reflect) {
+ // Create a module exporting a function, a global, a table, and a memory.
+ byte code[] = {WASM_UNREACHABLE};
+ ValueType types[] = {kWasmI32, kWasmAnyRef, kWasmI32,
+ kWasmI64, kWasmF32, kWasmF64};
+ FunctionSig sig(2, 4, types);
+ AddExportedFunction(CStrVector(kFuncName), code, sizeof(code), &sig);
+
+ builder()->AddExportedGlobal(kWasmF64, false, WasmInitExpr(0.0),
+ CStrVector(kGlobalName));
+
+ builder()->AllocateIndirectFunctions(12);
+ builder()->AddExport(CStrVector(kTableName), kExternalTable, 0);
+
+ builder()->SetMinMemorySize(1);
+ builder()->AddExport(CStrVector(kMemoryName), kExternalMemory, 0);
+
+ Instantiate(nullptr);
+
+ vec<ExportType*> export_types = module()->exports();
+ const vec<Extern*>& exports = this->exports();
+ EXPECT_EQ(exports.size(), export_types.size());
+ EXPECT_EQ(4u, exports.size());
+ for (size_t i = 0; i < exports.size(); i++) {
+ ::wasm::ExternKind kind = exports[i]->kind();
+ const ::wasm::ExternType* extern_type = export_types[i]->type();
+ EXPECT_EQ(kind, extern_type->kind());
+ if (kind == ::wasm::EXTERN_FUNC) {
+ ExpectName(kFuncName, export_types[i]->name());
+ const FuncType* type = extern_type->func();
+ const vec<ValType*>& params = type->params();
+ EXPECT_EQ(4u, params.size());
+ EXPECT_EQ(::wasm::I32, params[0]->kind());
+ EXPECT_EQ(::wasm::I64, params[1]->kind());
+ EXPECT_EQ(::wasm::F32, params[2]->kind());
+ EXPECT_EQ(::wasm::F64, params[3]->kind());
+ const vec<ValType*>& results = type->results();
+ EXPECT_EQ(2u, results.size());
+ EXPECT_EQ(::wasm::I32, results[0]->kind());
+ EXPECT_EQ(::wasm::ANYREF, results[1]->kind());
+
+ const Func* func = exports[i]->func();
+ EXPECT_EQ(4u, func->param_arity());
+ EXPECT_EQ(2u, func->result_arity());
+
+ } else if (kind == ::wasm::EXTERN_GLOBAL) {
+ ExpectName(kGlobalName, export_types[i]->name());
+ const GlobalType* type = extern_type->global();
+ EXPECT_EQ(::wasm::F64, type->content()->kind());
+ EXPECT_EQ(::wasm::CONST, type->mutability());
+
+ } else if (kind == ::wasm::EXTERN_TABLE) {
+ ExpectName(kTableName, export_types[i]->name());
+ const TableType* type = extern_type->table();
+ EXPECT_EQ(::wasm::FUNCREF, type->element()->kind());
+ ::wasm::Limits limits = type->limits();
+ EXPECT_EQ(12u, limits.min);
+ EXPECT_EQ(12u, limits.max);
+
+ } else if (kind == ::wasm::EXTERN_MEMORY) {
+ ExpectName(kMemoryName, export_types[i]->name());
+ const MemoryType* type = extern_type->memory();
+ ::wasm::Limits limits = type->limits();
+ EXPECT_EQ(1u, limits.min);
+ EXPECT_EQ(std::numeric_limits<uint32_t>::max(), limits.max);
+
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/serialize.cc b/deps/v8/test/wasm-api-tests/serialize.cc
new file mode 100644
index 0000000000..5f10980cf2
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/serialize.cc
@@ -0,0 +1,48 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+
+bool g_callback_called;
+
+own<Trap*> Callback(const Val args[], Val results[]) {
+ g_callback_called = true;
+ return nullptr;
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, Serialize) {
+ FunctionSig sig(0, 0, nullptr);
+ uint32_t callback_index = builder()->AddImport(CStrVector("callback"), &sig);
+ byte code[] = {WASM_CALL_FUNCTION0(callback_index)};
+ AddExportedFunction(CStrVector("run"), code, sizeof(code), &sig);
+ Compile();
+
+ vec<byte_t> serialized = module()->serialize();
+ own<Module*> deserialized = Module::deserialize(store(), serialized);
+
+ own<FuncType*> callback_type =
+ FuncType::make(vec<ValType*>::make(), vec<ValType*>::make());
+ own<Func*> callback = Func::make(store(), callback_type.get(), Callback);
+ Extern* imports[] = {callback.get()};
+
+ own<Instance*> instance =
+ Instance::make(store(), deserialized.get(), imports);
+ vec<Extern*> exports = instance->exports();
+ Func* run = exports[0]->func();
+ g_callback_called = false;
+ run->call();
+ EXPECT_TRUE(g_callback_called);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/table.cc b/deps/v8/test/wasm-api-tests/table.cc
new file mode 100644
index 0000000000..17ddfa2f0b
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/table.cc
@@ -0,0 +1,116 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::FUNCREF;
+using ::wasm::Limits;
+using ::wasm::TableType;
+
+namespace {
+
+own<Trap*> Negate(const Val args[], Val results[]) {
+ results[0] = Val(-args[0].i32());
+ return nullptr;
+}
+
+void ExpectTrap(const Func* func, int arg1, int arg2) {
+ Val args[2] = {Val::i32(arg1), Val::i32(arg2)};
+ Val results[1];
+ own<Trap*> trap = func->call(args, results);
+ EXPECT_NE(nullptr, trap);
+}
+
+void ExpectResult(int expected, const Func* func, int arg1, int arg2) {
+ Val args[2] = {Val::i32(arg1), Val::i32(arg2)};
+ Val results[1];
+ own<Trap*> trap = func->call(args, results);
+ EXPECT_EQ(nullptr, trap);
+ EXPECT_EQ(expected, results[0].i32());
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, Table) {
+ builder()->AllocateIndirectFunctions(2);
+ builder()->SetMaxTableSize(10);
+ builder()->AddExport(CStrVector("table"), kExternalTable, 0);
+ const uint32_t sig_i_i_index = builder()->AddSignature(wasm_i_i_sig());
+ ValueType reps[] = {kWasmI32, kWasmI32, kWasmI32};
+ FunctionSig call_sig(1, 2, reps);
+ byte call_code[] = {
+ WASM_CALL_INDIRECT1(sig_i_i_index, WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("call_indirect"), call_code, sizeof(call_code),
+ &call_sig);
+ byte f_code[] = {WASM_GET_LOCAL(0)};
+ AddExportedFunction(CStrVector("f"), f_code, sizeof(f_code), wasm_i_i_sig());
+ byte g_code[] = {WASM_I32V_1(42)};
+ AddExportedFunction(CStrVector("g"), g_code, sizeof(g_code), wasm_i_i_sig());
+ // Set table[1] to {f}, which has function index 1.
+ builder()->SetIndirectFunction(1, 1);
+
+ Instantiate(nullptr);
+
+ Table* table = GetExportedTable(0);
+ Func* call_indirect = GetExportedFunction(1);
+ Func* f = GetExportedFunction(2);
+ Func* g = GetExportedFunction(3);
+ own<Func*> h = Func::make(store(), cpp_i_i_sig(), Negate);
+
+ // Check initial table state.
+ EXPECT_EQ(2u, table->size());
+ EXPECT_EQ(nullptr, table->get(0));
+ EXPECT_NE(nullptr, table->get(1));
+ ExpectTrap(call_indirect, 0, 0);
+ ExpectResult(7, call_indirect, 7, 1);
+ ExpectTrap(call_indirect, 0, 2);
+
+ // Mutate table.
+ EXPECT_TRUE(table->set(0, g));
+ EXPECT_TRUE(table->set(1, nullptr));
+ EXPECT_FALSE(table->set(2, f));
+ EXPECT_NE(nullptr, table->get(0));
+ EXPECT_EQ(nullptr, table->get(1));
+ ExpectResult(42, call_indirect, 7, 0);
+ ExpectTrap(call_indirect, 0, 1);
+ ExpectTrap(call_indirect, 0, 2);
+
+ // Grow table.
+ EXPECT_TRUE(table->grow(3));
+ EXPECT_EQ(5u, table->size());
+ EXPECT_TRUE(table->set(2, f));
+ EXPECT_TRUE(table->set(3, h.get()));
+ EXPECT_FALSE(table->set(5, nullptr));
+ EXPECT_NE(nullptr, table->get(2));
+ EXPECT_NE(nullptr, table->get(3));
+ EXPECT_EQ(nullptr, table->get(4));
+ ExpectResult(5, call_indirect, 5, 2);
+ ExpectResult(-6, call_indirect, 6, 3);
+ ExpectTrap(call_indirect, 0, 4);
+ ExpectTrap(call_indirect, 0, 5);
+ EXPECT_TRUE(table->grow(2, f));
+ EXPECT_EQ(7u, table->size());
+ EXPECT_NE(nullptr, table->get(5));
+ EXPECT_NE(nullptr, table->get(6));
+ EXPECT_FALSE(table->grow(5));
+ EXPECT_TRUE(table->grow(3));
+ EXPECT_TRUE(table->grow(0));
+
+ // Create standalone table.
+ // TODO(wasm+): Once Wasm allows multiple tables, turn this into an import.
+ own<TableType*> tabletype =
+ TableType::make(ValType::make(FUNCREF), Limits(5, 5));
+ own<Table*> table2 = Table::make(store(), tabletype.get());
+ EXPECT_EQ(5u, table2->size());
+ EXPECT_FALSE(table2->grow(1));
+ EXPECT_TRUE(table2->grow(0));
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/threads.cc b/deps/v8/test/wasm-api-tests/threads.cc
new file mode 100644
index 0000000000..c93afc4a89
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/threads.cc
@@ -0,0 +1,105 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+#include <mutex>
+#include <thread>
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::Shared;
+
+namespace {
+
+const int kNumThreads = 10;
+const int kIterationsPerThread = 3;
+int g_traces;
+
+own<Trap*> Callback(void* env, const Val args[], Val results[]) {
+ std::lock_guard<std::mutex> lock(*reinterpret_cast<std::mutex*>(env));
+ g_traces += args[0].i32();
+ return nullptr;
+}
+
+void Main(Engine* engine, Shared<Module>* shared, std::mutex* mutex, int id) {
+ own<Store*> store = Store::make(engine);
+ own<Module*> module = Module::obtain(store.get(), shared);
+ EXPECT_NE(nullptr, module.get());
+ for (int i = 0; i < kIterationsPerThread; i++) {
+ std::this_thread::sleep_for(std::chrono::microseconds(100));
+
+ // Create imports.
+ own<FuncType*> func_type = FuncType::make(
+ vec<ValType*>::make(ValType::make(::wasm::I32)), vec<ValType*>::make());
+ own<Func*> func = Func::make(store.get(), func_type.get(), Callback, mutex);
+ own<::wasm::GlobalType*> global_type =
+ ::wasm::GlobalType::make(ValType::make(::wasm::I32), ::wasm::CONST);
+ own<Global*> global =
+ Global::make(store.get(), global_type.get(), Val::i32(id));
+
+ // Instantiate and run.
+ // With the current implementation of the WasmModuleBuilder, global
+ // imports always come before function imports, regardless of the
+ // order of builder()->Add*Import() calls below.
+ Extern* imports[] = {global.get(), func.get()};
+ own<Instance*> instance =
+ Instance::make(store.get(), module.get(), imports);
+ vec<Extern*> exports = instance->exports();
+ Func* run_func = exports[0]->func();
+ run_func->call();
+ }
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, Threads) {
+ // Create module.
+ ValueType i32_type[] = {kWasmI32};
+ FunctionSig param_i32(0, 1, i32_type);
+ uint32_t callback_index =
+ builder()->AddImport(CStrVector("callback"), &param_i32);
+ uint32_t global_index =
+ builder()->AddGlobalImport(CStrVector("id"), kWasmI32, false);
+
+ byte code[] = {
+ WASM_CALL_FUNCTION(callback_index, WASM_GET_GLOBAL(global_index))};
+ FunctionSig empty_sig(0, 0, nullptr);
+ AddExportedFunction(CStrVector("run"), code, sizeof(code), &empty_sig);
+ Compile();
+ own<Shared<Module>*> shared = module()->share();
+
+ // Spawn threads.
+ g_traces = 0;
+ std::mutex mutex;
+ std::thread threads[kNumThreads];
+ for (int i = 0; i < kNumThreads; i++) {
+ threads[i] = std::thread(Main, engine(), shared.get(), &mutex, i);
+ }
+ for (int i = 0; i < kNumThreads; i++) {
+ threads[i].join();
+ }
+ // Each thread in each iteration adds its ID to {g_traces}, so in the end
+ // we expect kIterationsPerThread * sum([0, ..., kNumThreads-1]).
+ // Per Gauss:
+ const int kExpected =
+ kIterationsPerThread * (kNumThreads - 1) * kNumThreads / 2;
+ EXPECT_EQ(kExpected, g_traces);
+}
+
+TEST_F(WasmCapiTest, MultiStoresOneThread) {
+ // These Stores intentionally have overlapping, but non-nested lifetimes.
+ own<Store*> store1 = Store::make(engine());
+ own<Store*> store2 = Store::make(engine());
+ own<Store*> store3 = Store::make(engine());
+ store1.reset();
+ store2.reset();
+ store3.reset();
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/traps.cc b/deps/v8/test/wasm-api-tests/traps.cc
new file mode 100644
index 0000000000..b049d09330
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/traps.cc
@@ -0,0 +1,60 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/wasm-api-tests/wasm-api-test.h"
+
+#include <iostream>
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::Message;
+
+namespace {
+
+own<Trap*> FailCallback(void* env, const Val args[], Val results[]) {
+ Store* store = reinterpret_cast<Store*>(env);
+ Message message = Message::make(std::string("callback abort"));
+ return Trap::make(store, message);
+}
+
+void ExpectMessage(const char* expected, const Message& message) {
+ size_t len = strlen(expected);
+ EXPECT_EQ(len, message.size());
+ EXPECT_EQ(0, strncmp(expected, message.get(), len));
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, Traps) {
+ ValueType i32_type[] = {kWasmI32};
+ FunctionSig sig(1, 0, i32_type);
+ uint32_t callback_index = builder()->AddImport(CStrVector("callback"), &sig);
+ byte code[] = {WASM_CALL_FUNCTION0(callback_index)};
+ AddExportedFunction(CStrVector("callback"), code, sizeof(code), &sig);
+ byte code2[] = {WASM_UNREACHABLE, WASM_I32V_1(1)};
+ AddExportedFunction(CStrVector("unreachable"), code2, sizeof(code2), &sig);
+
+ own<FuncType*> func_type = FuncType::make(
+ vec<ValType*>::make(), vec<ValType*>::make(ValType::make(::wasm::I32)));
+ own<Func*> cpp_callback = Func::make(store(), func_type.get(), FailCallback,
+ reinterpret_cast<void*>(store()));
+ Extern* imports[] = {cpp_callback.get()};
+ Instantiate(imports);
+
+ Func* cpp_trapping_func = GetExportedFunction(0);
+ own<Trap*> cpp_trap = cpp_trapping_func->call();
+ EXPECT_NE(nullptr, cpp_trap.get());
+ ExpectMessage("Uncaught Error: callback abort", cpp_trap->message());
+
+ Func* wasm_trapping_func = GetExportedFunction(1);
+ own<Trap*> wasm_trap = wasm_trapping_func->call();
+ EXPECT_NE(nullptr, wasm_trap.get());
+ ExpectMessage("Uncaught RuntimeError: unreachable", wasm_trap->message());
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-test.h b/deps/v8/test/wasm-api-tests/wasm-api-test.h
new file mode 100644
index 0000000000..cb1d9301e1
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/wasm-api-test.h
@@ -0,0 +1,162 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TEST_WASM_API_TESTS_WASM_API_TEST_H_
+#define TEST_WASM_API_TESTS_WASM_API_TEST_H_
+
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/wasm-api/wasm.hh"
+
+namespace wasm {
+
+// TODO(jkummerow): Drop these from the API.
+#ifdef DEBUG
+template <class T>
+void vec<T>::make_data() {}
+
+template <class T>
+void vec<T>::free_data() {}
+#endif
+
+} // namespace wasm
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+using ::wasm::Engine;
+using ::wasm::Extern;
+using ::wasm::Foreign;
+using ::wasm::Func;
+using ::wasm::FuncType;
+using ::wasm::Global;
+using ::wasm::Instance;
+using ::wasm::Memory;
+using ::wasm::Module;
+using ::wasm::own;
+using ::wasm::Ref;
+using ::wasm::Store;
+using ::wasm::Table;
+using ::wasm::Trap;
+using ::wasm::Val;
+using ::wasm::ValType;
+using ::wasm::vec;
+
+class WasmCapiTest : public ::testing::Test {
+ public:
+ WasmCapiTest()
+ : Test(),
+ zone_(&allocator_, ZONE_NAME),
+ builder_(&zone_),
+ exports_(vec<Extern*>::make()),
+ wasm_i_i_sig_(1, 1, wasm_i_i_sig_types_) {
+ engine_ = Engine::make();
+ store_ = Store::make(engine_.get());
+ cpp_i_i_sig_ =
+ FuncType::make(vec<ValType*>::make(ValType::make(::wasm::I32)),
+ vec<ValType*>::make(ValType::make(::wasm::I32)));
+ }
+
+ void Compile() {
+ ZoneBuffer buffer(&zone_);
+ builder_.WriteTo(&buffer);
+ size_t size = buffer.end() - buffer.begin();
+ vec<byte_t> binary = vec<byte_t>::make(
+ size, reinterpret_cast<byte_t*>(const_cast<byte*>(buffer.begin())));
+
+ module_ = Module::make(store_.get(), binary);
+ DCHECK_NE(module_.get(), nullptr);
+ }
+
+ void Instantiate(Extern* imports[]) {
+ Compile();
+ instance_ = Instance::make(store_.get(), module_.get(), imports);
+ DCHECK_NE(instance_.get(), nullptr);
+ exports_ = instance_->exports();
+ }
+
+ void AddExportedFunction(Vector<const char> name, byte code[],
+ size_t code_size, FunctionSig* sig) {
+ WasmFunctionBuilder* fun = builder()->AddFunction(sig);
+ fun->EmitCode(code, static_cast<uint32_t>(code_size));
+ fun->Emit(kExprEnd);
+ builder()->AddExport(name, fun);
+ }
+
+ Func* GetExportedFunction(size_t index) {
+ DCHECK_GT(exports_.size(), index);
+ Extern* exported = exports_[index];
+ DCHECK_EQ(exported->kind(), ::wasm::EXTERN_FUNC);
+ Func* func = exported->func();
+ DCHECK_NE(func, nullptr);
+ return func;
+ }
+
+ Global* GetExportedGlobal(size_t index) {
+ DCHECK_GT(exports_.size(), index);
+ Extern* exported = exports_[index];
+ DCHECK_EQ(exported->kind(), ::wasm::EXTERN_GLOBAL);
+ Global* global = exported->global();
+ DCHECK_NE(global, nullptr);
+ return global;
+ }
+
+ Memory* GetExportedMemory(size_t index) {
+ DCHECK_GT(exports_.size(), index);
+ Extern* exported = exports_[index];
+ DCHECK_EQ(exported->kind(), ::wasm::EXTERN_MEMORY);
+ Memory* memory = exported->memory();
+ DCHECK_NE(memory, nullptr);
+ return memory;
+ }
+
+ Table* GetExportedTable(size_t index) {
+ DCHECK_GT(exports_.size(), index);
+ Extern* exported = exports_[index];
+ DCHECK_EQ(exported->kind(), ::wasm::EXTERN_TABLE);
+ Table* table = exported->table();
+ DCHECK_NE(table, nullptr);
+ return table;
+ }
+
+ void Shutdown() {
+ instance_.reset();
+ module_.reset();
+ store_.reset();
+ engine_.reset();
+ }
+
+ WasmModuleBuilder* builder() { return &builder_; }
+ Engine* engine() { return engine_.get(); }
+ Store* store() { return store_.get(); }
+ Module* module() { return module_.get(); }
+ const vec<Extern*>& exports() { return exports_; }
+
+ FunctionSig* wasm_i_i_sig() { return &wasm_i_i_sig_; }
+ FuncType* cpp_i_i_sig() { return cpp_i_i_sig_.get(); }
+
+ private:
+ AccountingAllocator allocator_;
+ Zone zone_;
+ WasmModuleBuilder builder_;
+ own<Engine*> engine_;
+ own<Store*> store_;
+ own<Module*> module_;
+ own<Instance*> instance_;
+ vec<Extern*> exports_;
+ own<FuncType*> cpp_i_i_sig_;
+ ValueType wasm_i_i_sig_types_[2] = {kWasmI32, kWasmI32};
+ FunctionSig wasm_i_i_sig_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // TEST_WASM_API_TESTS_WASM_API_TEST_H_
diff --git a/deps/v8/test/wasm-js/OWNERS b/deps/v8/test/wasm-js/OWNERS
new file mode 100644
index 0000000000..4b6b34d24a
--- /dev/null
+++ b/deps/v8/test/wasm-js/OWNERS
@@ -0,0 +1 @@
+file://src/wasm/OWNERS
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 8d67366e30..3f3c67ac6a 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -16,8 +16,8 @@ META_TIMEOUT_REGEXP = re.compile(r"META:\s*timeout=(.*)")
class TestLoader(testsuite.JSTestLoader):
@property
- def extension(self):
- return ANY_JS
+ def extensions(self):
+ return [ANY_JS]
class TestSuite(testsuite.TestSuite):
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
index b849b63dca..34230b07d0 100644
--- a/deps/v8/test/wasm-spec-tests/testcfg.py
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -7,6 +7,15 @@ import os
from testrunner.local import testsuite
from testrunner.objects import testcase
+proposal_flags = [{
+ 'name': 'reference-types',
+ 'flags': ['--experimental-wasm-anyref',
+ '--no-experimental-wasm-bulk-memory']
+ },
+ {
+ 'name': 'bulk-memory-operations',
+ 'flags': ['--experimental-wasm-bulk-memory']
+ }]
class TestLoader(testsuite.JSTestLoader):
pass
@@ -23,6 +32,12 @@ class TestCase(testcase.D8TestCase):
def _get_files_params(self):
return [os.path.join(self.suite.root, self.path + self._get_suffix())]
+ def _get_source_flags(self):
+ for proposal in proposal_flags:
+ if os.sep.join(['proposals', proposal['name']]) in self.path:
+ return proposal['flags']
+ return []
+
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index f663c38443..0b068afe0a 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-e8bdb558198b944ff8a0df43301f1ff4eb3a91fa \ No newline at end of file
+b02f00e24b28ad76537a10a788a8be966c3577bd \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index e8678f4bec..e61f5ceb26 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -10,6 +10,14 @@
# the bulk-memory proposal. Since we've enabled bulk-memory by default, we
# need to update to use its testsuite.
'tests/linking': [FAIL],
+
+ # TODO(ahaas): Needs investigation, I disable the test for now.
+ 'tests/conversions': [PASS, ['system == windows and arch == ia32', FAIL]],
+
+ # TODO(ahaas): Incorporate recent changes to the bulk-memory-operations
+ # proposal.
+ 'tests/proposals/bulk-memory-operations/elem': [FAIL],
+ 'tests/proposals/bulk-memory-operations/data': [FAIL],
}], # ALWAYS
['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
@@ -39,6 +47,8 @@
'tests/f64': [SKIP],
# This test fails because ppc float to double doesn't convert sNaN to qNaN.
'tests/conversions': [SKIP],
+ 'tests/proposals/reference-types/conversions': [SKIP],
+ 'tests/proposals/bulk-memory-operations/conversions': [SKIP],
}], # 'arch == ppc or arch == ppc64'
['arch == s390 or arch == s390x', {
diff --git a/deps/v8/test/webkit/class-syntax-declaration-expected.txt b/deps/v8/test/webkit/class-syntax-declaration-expected.txt
index c198f26914..4a9e60978a 100644
--- a/deps/v8/test/webkit/class-syntax-declaration-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-declaration-expected.txt
@@ -16,10 +16,10 @@ PASS setterValue = undefined; (new A).someSetter = 789; setterValue is 789
PASS (new A).__proto__ is A.prototype
PASS A.prototype.constructor is A
PASS class threw exception SyntaxError: Unexpected end of input.
-PASS class [ threw exception SyntaxError: Unexpected token [.
-PASS class { threw exception SyntaxError: Unexpected token {.
+PASS class [ threw exception SyntaxError: Unexpected token '['.
+PASS class { threw exception SyntaxError: Unexpected token '{'.
PASS class X { threw exception SyntaxError: Unexpected end of input.
-PASS class X { ( } threw exception SyntaxError: Unexpected token (.
+PASS class X { ( } threw exception SyntaxError: Unexpected token '('.
PASS class X {} did not throw exception.
PASS class X { constructor() {} constructor() {} } threw exception SyntaxError: A class may only have one constructor.
PASS class X { get constructor() {} } threw exception SyntaxError: Class constructor may not be an accessor.
diff --git a/deps/v8/test/webkit/class-syntax-declaration.js b/deps/v8/test/webkit/class-syntax-declaration.js
index bc6c31d312..dcefccac87 100644
--- a/deps/v8/test/webkit/class-syntax-declaration.js
+++ b/deps/v8/test/webkit/class-syntax-declaration.js
@@ -52,10 +52,10 @@ shouldBe("(new A).__proto__", "A.prototype");
shouldBe("A.prototype.constructor", "A");
shouldThrow("class", "'SyntaxError: Unexpected end of input'");
-shouldThrow("class [", "'SyntaxError: Unexpected token ['");
-shouldThrow("class {", "'SyntaxError: Unexpected token {'");
+shouldThrow("class [", '"SyntaxError: Unexpected token \'[\'"');
+shouldThrow("class {", '"SyntaxError: Unexpected token \'{\'"');
shouldThrow("class X {", "'SyntaxError: Unexpected end of input'");
-shouldThrow("class X { ( }", "'SyntaxError: Unexpected token ('");
+shouldThrow("class X { ( }", '"SyntaxError: Unexpected token \'(\'"');
shouldNotThrow("class X {}");
shouldThrow("class X { constructor() {} constructor() {} }", "'SyntaxError: A class may only have one constructor'");
diff --git a/deps/v8/test/webkit/class-syntax-expression-expected.txt b/deps/v8/test/webkit/class-syntax-expression-expected.txt
index acda6272f0..9d51576bdd 100644
--- a/deps/v8/test/webkit/class-syntax-expression-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-expression-expected.txt
@@ -17,7 +17,7 @@ PASS (new A).__proto__ is A.prototype
PASS A.prototype.constructor is A
PASS x = class threw exception SyntaxError: Unexpected end of input.
PASS x = class { threw exception SyntaxError: Unexpected end of input.
-PASS x = class { ( } threw exception SyntaxError: Unexpected token (.
+PASS x = class { ( } threw exception SyntaxError: Unexpected token '('.
PASS x = class {} did not throw exception.
PASS x = class { constructor() {} constructor() {} } threw exception SyntaxError: A class may only have one constructor.
PASS x = class { get constructor() {} } threw exception SyntaxError: Class constructor may not be an accessor.
diff --git a/deps/v8/test/webkit/class-syntax-expression.js b/deps/v8/test/webkit/class-syntax-expression.js
index 0cea5d70e4..e57ac72555 100644
--- a/deps/v8/test/webkit/class-syntax-expression.js
+++ b/deps/v8/test/webkit/class-syntax-expression.js
@@ -53,7 +53,7 @@ shouldBe("A.prototype.constructor", "A");
shouldThrow("x = class", "'SyntaxError: Unexpected end of input'");
shouldThrow("x = class {", "'SyntaxError: Unexpected end of input'");
-shouldThrow("x = class { ( }", "'SyntaxError: Unexpected token ('");
+shouldThrow("x = class { ( }", '"SyntaxError: Unexpected token \'(\'"');
shouldNotThrow("x = class {}");
shouldThrow("x = class { constructor() {} constructor() {} }", "'SyntaxError: A class may only have one constructor'");
diff --git a/deps/v8/test/webkit/class-syntax-extends-expected.txt b/deps/v8/test/webkit/class-syntax-extends-expected.txt
index 73ac66fa97..8f4b0323ac 100644
--- a/deps/v8/test/webkit/class-syntax-extends-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-extends-expected.txt
@@ -30,8 +30,8 @@ PASS x = class extends baseWithBadPrototype { constructor() { } } threw exceptio
PASS baseWithBadPrototype.prototype = "abc" did not throw exception.
PASS x = class extends baseWithBadPrototype { constructor() { } } threw exception TypeError: Class extends value does not have valid prototype property abc.
PASS baseWithBadPrototype.prototype = null; x = class extends baseWithBadPrototype { constructor() { } } did not throw exception.
-PASS x = 1; c = class extends ++x { constructor() { } }; threw exception SyntaxError: Unexpected token ++.
-PASS x = 1; c = class extends x++ { constructor() { } }; threw exception SyntaxError: Unexpected token ++.
+PASS x = 1; c = class extends ++x { constructor() { } }; threw exception SyntaxError: Unexpected token '++'.
+PASS x = 1; c = class extends x++ { constructor() { } }; threw exception SyntaxError: Unexpected token '++'.
PASS x = 1; c = class extends (++x) { constructor() { } }; threw exception TypeError: Class extends value 2 is not a constructor or null.
PASS x = 1; c = class extends (x++) { constructor() { } }; threw exception TypeError: Class extends value 1 is not a constructor or null.
PASS x = 1; try { c = class extends (++x) { constructor() { } } } catch (e) { }; x is 2
@@ -45,7 +45,7 @@ PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = cl
PASS namespace = {}; namespace.A = class { constructor() { } }; function getClassA() { return namespace.A }; namespace.B = class extends getClassA() { constructor() { } } did not throw exception.
PASS namespace = {}; namespace.A = class { constructor() { } }; function getClass(prop) { return namespace[prop] }; namespace.B = class extends getClass("A") { constructor() { } } did not throw exception.
PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends (false||null||namespace.A) { constructor() { } } did not throw exception.
-PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends false||null||namespace.A { constructor() { } } threw exception SyntaxError: Unexpected token ||.
+PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends false||null||namespace.A { constructor() { } } threw exception SyntaxError: Unexpected token '||'.
PASS x = 1; namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends (x++, namespace.A) { constructor() { } }; did not throw exception.
PASS x = 1; namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends (namespace.A, x++) { constructor() { } }; threw exception TypeError: Class extends value 1 is not a constructor or null.
PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends new namespace.A { constructor() { } } threw exception TypeError: Class extends value [object Object] is not a constructor or null.
diff --git a/deps/v8/test/webkit/class-syntax-name-expected.txt b/deps/v8/test/webkit/class-syntax-name-expected.txt
index 27ae3088c2..dce382b0af 100644
--- a/deps/v8/test/webkit/class-syntax-name-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-name-expected.txt
@@ -6,10 +6,10 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
Class statement
PASS A threw exception ReferenceError: A is not defined.
PASS 'use strict'; A threw exception ReferenceError: A is not defined.
-PASS class {} threw exception SyntaxError: Unexpected token {.
-PASS 'use strict'; class {} threw exception SyntaxError: Unexpected token {.
-PASS class { constructor() {} } threw exception SyntaxError: Unexpected token {.
-PASS 'use strict'; class { constructor() {} } threw exception SyntaxError: Unexpected token {.
+PASS class {} threw exception SyntaxError: Unexpected token '{'.
+PASS 'use strict'; class {} threw exception SyntaxError: Unexpected token '{'.
+PASS class { constructor() {} } threw exception SyntaxError: Unexpected token '{'.
+PASS 'use strict'; class { constructor() {} } threw exception SyntaxError: Unexpected token '{'.
PASS class A { constructor() {} } did not throw exception.
PASS 'use strict'; class A { constructor() {} } did not throw exception.
PASS class A { constructor() {} }; A.toString() is 'class A { constructor() {} }'
diff --git a/deps/v8/test/webkit/class-syntax-semicolon-expected.txt b/deps/v8/test/webkit/class-syntax-semicolon-expected.txt
index c45eabac9c..4611bf7cae 100644
--- a/deps/v8/test/webkit/class-syntax-semicolon-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-semicolon-expected.txt
@@ -3,11 +3,11 @@ Tests for ES6 class syntax containing semicolon in the class body
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS class A { foo() ; { } } threw exception SyntaxError: Unexpected token ;.
-PASS class A { get foo;() { } } threw exception SyntaxError: Unexpected token ;.
-PASS class A { get foo() ; { } } threw exception SyntaxError: Unexpected token ;.
-PASS class A { set foo;(x) { } } threw exception SyntaxError: Unexpected token ;.
-PASS class A { set foo(x) ; { } } threw exception SyntaxError: Unexpected token ;.
+PASS class A { foo() ; { } } threw exception SyntaxError: Unexpected token ';'.
+PASS class A { get foo;() { } } threw exception SyntaxError: Unexpected token ';'.
+PASS class A { get foo() ; { } } threw exception SyntaxError: Unexpected token ';'.
+PASS class A { set foo;(x) { } } threw exception SyntaxError: Unexpected token ';'.
+PASS class A { set foo(x) ; { } } threw exception SyntaxError: Unexpected token ';'.
PASS class A { ; } did not throw exception.
PASS class A { foo() { } ; } did not throw exception.
PASS class A { get foo() { } ; } did not throw exception.
diff --git a/deps/v8/test/webkit/class-syntax-semicolon.js b/deps/v8/test/webkit/class-syntax-semicolon.js
index 88609dcbc7..9daeddb9c0 100644
--- a/deps/v8/test/webkit/class-syntax-semicolon.js
+++ b/deps/v8/test/webkit/class-syntax-semicolon.js
@@ -23,11 +23,11 @@
description('Tests for ES6 class syntax containing semicolon in the class body');
-shouldThrow("class A { foo() ; { } }", "'SyntaxError: Unexpected token ;'");
-shouldThrow("class A { get foo;() { } }", "'SyntaxError: Unexpected token ;'");
-shouldThrow("class A { get foo() ; { } }", "'SyntaxError: Unexpected token ;'");
-shouldThrow("class A { set foo;(x) { } }", "'SyntaxError: Unexpected token ;'");
-shouldThrow("class A { set foo(x) ; { } }", "'SyntaxError: Unexpected token ;'");
+shouldThrow("class A { foo() ; { } }", '"SyntaxError: Unexpected token \';\'"');
+shouldThrow("class A { get foo;() { } }", '"SyntaxError: Unexpected token \';\'"');
+shouldThrow("class A { get foo() ; { } }", '"SyntaxError: Unexpected token \';\'"');
+shouldThrow("class A { set foo;(x) { } }", '"SyntaxError: Unexpected token \';\'"');
+shouldThrow("class A { set foo(x) ; { } }", '"SyntaxError: Unexpected token \';\'"');
shouldNotThrow("class A { ; }");
shouldNotThrow("class A { foo() { } ; }");
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
index 55c6edf808..8d31cf32de 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
@@ -153,22 +153,22 @@ PASS (function (){'use strict'; function f() { --arguments }}) threw exception S
PASS 'use strict'; function f() { arguments-- } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (){'use strict'; function f() { arguments-- }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS global.eval('"use strict"; if (0) ++arguments; true;') threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS 'use strict'; ++(1, eval) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS (function (){'use strict'; ++(1, eval)}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS 'use strict'; (1, eval)++ threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS (function (){'use strict'; (1, eval)++}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS 'use strict'; --(1, eval) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS (function (){'use strict'; --(1, eval)}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS 'use strict'; (1, eval)-- threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS (function (){'use strict'; (1, eval)--}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS 'use strict'; function f() { ++(1, arguments) } threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS (function (){'use strict'; function f() { ++(1, arguments) }}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS 'use strict'; function f() { (1, arguments)++ } threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS (function (){'use strict'; function f() { (1, arguments)++ }}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS 'use strict'; function f() { --(1, arguments) } threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS (function (){'use strict'; function f() { --(1, arguments) }}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS 'use strict'; function f() { (1, arguments)-- } threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS (function (){'use strict'; function f() { (1, arguments)-- }}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS 'use strict'; ++(1, eval) threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
+PASS (function (){'use strict'; ++(1, eval)}) threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
+PASS 'use strict'; (1, eval)++ threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS (function (){'use strict'; (1, eval)++}) threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS 'use strict'; --(1, eval) threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
+PASS (function (){'use strict'; --(1, eval)}) threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
+PASS 'use strict'; (1, eval)-- threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS (function (){'use strict'; (1, eval)--}) threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS 'use strict'; function f() { ++(1, arguments) } threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
+PASS (function (){'use strict'; function f() { ++(1, arguments) }}) threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
+PASS 'use strict'; function f() { (1, arguments)++ } threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS (function (){'use strict'; function f() { (1, arguments)++ }}) threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS 'use strict'; function f() { --(1, arguments) } threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
+PASS (function (){'use strict'; function f() { --(1, arguments) }}) threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
+PASS 'use strict'; function f() { (1, arguments)-- } threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS (function (){'use strict'; function f() { (1, arguments)-- }}) threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
FAIL 'use strict'; undefined; if (0) delete +a.b should throw an exception. Was undefined.
FAIL (function (){'use strict'; undefined; if (0) delete +a.b}) should throw an exception. Was function (){'use strict'; undefined; if (0) delete +a.b}.
FAIL 'use strict'; undefined; if (0) delete ++a.b should throw an exception. Was undefined.
diff --git a/deps/v8/test/webkit/fast/js/function-constructor-error-expected.txt b/deps/v8/test/webkit/fast/js/function-constructor-error-expected.txt
index e42d01e650..e7897b098b 100644
--- a/deps/v8/test/webkit/fast/js/function-constructor-error-expected.txt
+++ b/deps/v8/test/webkit/fast/js/function-constructor-error-expected.txt
@@ -26,8 +26,8 @@ This test checks that the Function constructor detects some syntax errors correc
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-FAIL Function('(i + (j)') should throw SyntaxError: Expected token ')'. Threw exception SyntaxError: Unexpected token }.
-FAIL Function('return (i + (j)') should throw SyntaxError: Expected token ')'. Threw exception SyntaxError: Unexpected token }.
+FAIL Function('(i + (j)') should throw SyntaxError: Expected token ')'. Threw exception SyntaxError: Unexpected token '}'.
+FAIL Function('return (i + (j)') should throw SyntaxError: Expected token ')'. Threw exception SyntaxError: Unexpected token '}'.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt b/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
index 1d0f081b36..da0073e528 100644
--- a/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
+++ b/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
@@ -234,145 +234,145 @@ PASS compileAndSerialize('a || b || c') is 'a || b || c'
PASS compileAndSerialize('(a || b) || c') is '(a || b) || c'
PASS compileAndSerialize('a || (b || c)') is 'a || (b || c)'
PASS compileAndSerialize('a = b = c') is 'a = b = c'
-FAIL compileAndSerialize('(a = b) = c') should be (a = b) = c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) = c') should be (a = b) = c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b = c)') is 'a = (b = c)'
PASS compileAndSerialize('a = b + c') is 'a = b + c'
PASS compileAndSerialize('(a = b) + c') is '(a = b) + c'
PASS compileAndSerialize('a = (b + c)') is 'a = (b + c)'
-PASS compileAndSerialize('a + b = c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) = c') should be (a + b) = c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b = c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) = c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b = c)') is 'a + (b = c)'
PASS compileAndSerialize('a *= b *= c') is 'a *= b *= c'
-FAIL compileAndSerialize('(a *= b) *= c') should be (a *= b) *= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a *= b) *= c') should be (a *= b) *= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a *= (b *= c)') is 'a *= (b *= c)'
PASS compileAndSerialize('a = b *= c') is 'a = b *= c'
-FAIL compileAndSerialize('(a = b) *= c') should be (a = b) *= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) *= c') should be (a = b) *= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b *= c)') is 'a = (b *= c)'
PASS compileAndSerialize('a *= b + c') is 'a *= b + c'
PASS compileAndSerialize('(a *= b) + c') is '(a *= b) + c'
PASS compileAndSerialize('a *= (b + c)') is 'a *= (b + c)'
-PASS compileAndSerialize('a + b *= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) *= c') should be (a + b) *= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b *= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) *= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b *= c)') is 'a + (b *= c)'
PASS compileAndSerialize('a /= b /= c') is 'a /= b /= c'
-FAIL compileAndSerialize('(a /= b) /= c') should be (a /= b) /= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a /= b) /= c') should be (a /= b) /= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a /= (b /= c)') is 'a /= (b /= c)'
PASS compileAndSerialize('a = b /= c') is 'a = b /= c'
-FAIL compileAndSerialize('(a = b) /= c') should be (a = b) /= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) /= c') should be (a = b) /= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b /= c)') is 'a = (b /= c)'
PASS compileAndSerialize('a /= b + c') is 'a /= b + c'
PASS compileAndSerialize('(a /= b) + c') is '(a /= b) + c'
PASS compileAndSerialize('a /= (b + c)') is 'a /= (b + c)'
-PASS compileAndSerialize('a + b /= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) /= c') should be (a + b) /= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b /= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) /= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b /= c)') is 'a + (b /= c)'
PASS compileAndSerialize('a %= b %= c') is 'a %= b %= c'
-FAIL compileAndSerialize('(a %= b) %= c') should be (a %= b) %= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a %= b) %= c') should be (a %= b) %= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a %= (b %= c)') is 'a %= (b %= c)'
PASS compileAndSerialize('a = b %= c') is 'a = b %= c'
-FAIL compileAndSerialize('(a = b) %= c') should be (a = b) %= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) %= c') should be (a = b) %= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b %= c)') is 'a = (b %= c)'
PASS compileAndSerialize('a %= b + c') is 'a %= b + c'
PASS compileAndSerialize('(a %= b) + c') is '(a %= b) + c'
PASS compileAndSerialize('a %= (b + c)') is 'a %= (b + c)'
-PASS compileAndSerialize('a + b %= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) %= c') should be (a + b) %= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b %= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) %= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b %= c)') is 'a + (b %= c)'
PASS compileAndSerialize('a += b += c') is 'a += b += c'
-FAIL compileAndSerialize('(a += b) += c') should be (a += b) += c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a += b) += c') should be (a += b) += c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a += (b += c)') is 'a += (b += c)'
PASS compileAndSerialize('a = b += c') is 'a = b += c'
-FAIL compileAndSerialize('(a = b) += c') should be (a = b) += c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) += c') should be (a = b) += c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b += c)') is 'a = (b += c)'
PASS compileAndSerialize('a += b + c') is 'a += b + c'
PASS compileAndSerialize('(a += b) + c') is '(a += b) + c'
PASS compileAndSerialize('a += (b + c)') is 'a += (b + c)'
-PASS compileAndSerialize('a + b += c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) += c') should be (a + b) += c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b += c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) += c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b += c)') is 'a + (b += c)'
PASS compileAndSerialize('a -= b -= c') is 'a -= b -= c'
-FAIL compileAndSerialize('(a -= b) -= c') should be (a -= b) -= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a -= b) -= c') should be (a -= b) -= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a -= (b -= c)') is 'a -= (b -= c)'
PASS compileAndSerialize('a = b -= c') is 'a = b -= c'
-FAIL compileAndSerialize('(a = b) -= c') should be (a = b) -= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) -= c') should be (a = b) -= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b -= c)') is 'a = (b -= c)'
PASS compileAndSerialize('a -= b + c') is 'a -= b + c'
PASS compileAndSerialize('(a -= b) + c') is '(a -= b) + c'
PASS compileAndSerialize('a -= (b + c)') is 'a -= (b + c)'
-PASS compileAndSerialize('a + b -= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) -= c') should be (a + b) -= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b -= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) -= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b -= c)') is 'a + (b -= c)'
PASS compileAndSerialize('a <<= b <<= c') is 'a <<= b <<= c'
-FAIL compileAndSerialize('(a <<= b) <<= c') should be (a <<= b) <<= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a <<= b) <<= c') should be (a <<= b) <<= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a <<= (b <<= c)') is 'a <<= (b <<= c)'
PASS compileAndSerialize('a = b <<= c') is 'a = b <<= c'
-FAIL compileAndSerialize('(a = b) <<= c') should be (a = b) <<= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) <<= c') should be (a = b) <<= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b <<= c)') is 'a = (b <<= c)'
PASS compileAndSerialize('a <<= b + c') is 'a <<= b + c'
PASS compileAndSerialize('(a <<= b) + c') is '(a <<= b) + c'
PASS compileAndSerialize('a <<= (b + c)') is 'a <<= (b + c)'
-PASS compileAndSerialize('a + b <<= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) <<= c') should be (a + b) <<= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b <<= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) <<= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b <<= c)') is 'a + (b <<= c)'
PASS compileAndSerialize('a >>= b >>= c') is 'a >>= b >>= c'
-FAIL compileAndSerialize('(a >>= b) >>= c') should be (a >>= b) >>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a >>= b) >>= c') should be (a >>= b) >>= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a >>= (b >>= c)') is 'a >>= (b >>= c)'
PASS compileAndSerialize('a = b >>= c') is 'a = b >>= c'
-FAIL compileAndSerialize('(a = b) >>= c') should be (a = b) >>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) >>= c') should be (a = b) >>= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b >>= c)') is 'a = (b >>= c)'
PASS compileAndSerialize('a >>= b + c') is 'a >>= b + c'
PASS compileAndSerialize('(a >>= b) + c') is '(a >>= b) + c'
PASS compileAndSerialize('a >>= (b + c)') is 'a >>= (b + c)'
-PASS compileAndSerialize('a + b >>= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) >>= c') should be (a + b) >>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b >>= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) >>= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b >>= c)') is 'a + (b >>= c)'
PASS compileAndSerialize('a >>>= b >>>= c') is 'a >>>= b >>>= c'
-FAIL compileAndSerialize('(a >>>= b) >>>= c') should be (a >>>= b) >>>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a >>>= b) >>>= c') should be (a >>>= b) >>>= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a >>>= (b >>>= c)') is 'a >>>= (b >>>= c)'
PASS compileAndSerialize('a = b >>>= c') is 'a = b >>>= c'
-FAIL compileAndSerialize('(a = b) >>>= c') should be (a = b) >>>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) >>>= c') should be (a = b) >>>= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b >>>= c)') is 'a = (b >>>= c)'
PASS compileAndSerialize('a >>>= b + c') is 'a >>>= b + c'
PASS compileAndSerialize('(a >>>= b) + c') is '(a >>>= b) + c'
PASS compileAndSerialize('a >>>= (b + c)') is 'a >>>= (b + c)'
-PASS compileAndSerialize('a + b >>>= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) >>>= c') should be (a + b) >>>= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b >>>= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) >>>= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b >>>= c)') is 'a + (b >>>= c)'
PASS compileAndSerialize('a &= b &= c') is 'a &= b &= c'
-FAIL compileAndSerialize('(a &= b) &= c') should be (a &= b) &= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a &= b) &= c') should be (a &= b) &= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a &= (b &= c)') is 'a &= (b &= c)'
PASS compileAndSerialize('a = b &= c') is 'a = b &= c'
-FAIL compileAndSerialize('(a = b) &= c') should be (a = b) &= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) &= c') should be (a = b) &= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b &= c)') is 'a = (b &= c)'
PASS compileAndSerialize('a &= b + c') is 'a &= b + c'
PASS compileAndSerialize('(a &= b) + c') is '(a &= b) + c'
PASS compileAndSerialize('a &= (b + c)') is 'a &= (b + c)'
-PASS compileAndSerialize('a + b &= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) &= c') should be (a + b) &= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b &= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) &= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b &= c)') is 'a + (b &= c)'
PASS compileAndSerialize('a ^= b ^= c') is 'a ^= b ^= c'
-FAIL compileAndSerialize('(a ^= b) ^= c') should be (a ^= b) ^= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a ^= b) ^= c') should be (a ^= b) ^= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a ^= (b ^= c)') is 'a ^= (b ^= c)'
PASS compileAndSerialize('a = b ^= c') is 'a = b ^= c'
-FAIL compileAndSerialize('(a = b) ^= c') should be (a = b) ^= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) ^= c') should be (a = b) ^= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b ^= c)') is 'a = (b ^= c)'
PASS compileAndSerialize('a ^= b + c') is 'a ^= b + c'
PASS compileAndSerialize('(a ^= b) + c') is '(a ^= b) + c'
PASS compileAndSerialize('a ^= (b + c)') is 'a ^= (b + c)'
-PASS compileAndSerialize('a + b ^= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) ^= c') should be (a + b) ^= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b ^= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) ^= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b ^= c)') is 'a + (b ^= c)'
PASS compileAndSerialize('a |= b |= c') is 'a |= b |= c'
-FAIL compileAndSerialize('(a |= b) |= c') should be (a |= b) |= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a |= b) |= c') should be (a |= b) |= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a |= (b |= c)') is 'a |= (b |= c)'
PASS compileAndSerialize('a = b |= c') is 'a = b |= c'
-FAIL compileAndSerialize('(a = b) |= c') should be (a = b) |= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+FAIL compileAndSerialize('(a = b) |= c') should be (a = b) |= c. Threw exception SyntaxError: Invalid left-hand side in assignment
PASS compileAndSerialize('a = (b |= c)') is 'a = (b |= c)'
PASS compileAndSerialize('a |= b + c') is 'a |= b + c'
PASS compileAndSerialize('(a |= b) + c') is '(a |= b) + c'
PASS compileAndSerialize('a |= (b + c)') is 'a |= (b + c)'
-PASS compileAndSerialize('a + b |= c') threw exception ReferenceError: Invalid left-hand side in assignment.
-FAIL compileAndSerialize('(a + b) |= c') should be (a + b) |= c. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('a + b |= c') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(a + b) |= c') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerialize('a + (b |= c)') is 'a + (b |= c)'
PASS compileAndSerialize('delete a + b') is 'delete a + b'
PASS compileAndSerialize('(delete a) + b') is '(delete a) + b'
@@ -391,12 +391,12 @@ PASS compileAndSerialize('!typeof a') is '!typeof a'
PASS compileAndSerialize('!(typeof a)') is '!(typeof a)'
PASS compileAndSerialize('++a + b') is '++a + b'
PASS compileAndSerialize('(++a) + b') is '(++a) + b'
-FAIL compileAndSerialize('++(a + b)') should be ++(a + b). Threw exception ReferenceError: Invalid left-hand side expression in prefix operation
+PASS compileAndSerialize('++(a + b)') threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
PASS compileAndSerialize('!++a') is '!++a'
PASS compileAndSerialize('!(++a)') is '!(++a)'
PASS compileAndSerialize('--a + b') is '--a + b'
PASS compileAndSerialize('(--a) + b') is '(--a) + b'
-FAIL compileAndSerialize('--(a + b)') should be --(a + b). Threw exception ReferenceError: Invalid left-hand side expression in prefix operation
+PASS compileAndSerialize('--(a + b)') threw exception SyntaxError: Invalid left-hand side expression in prefix operation.
PASS compileAndSerialize('!--a') is '!--a'
PASS compileAndSerialize('!(--a)') is '!(--a)'
PASS compileAndSerialize('+ a + b') is '+ a + b'
@@ -421,10 +421,10 @@ PASS compileAndSerialize('!!a') is '!!a'
PASS compileAndSerialize('!(!a)') is '!(!a)'
PASS compileAndSerialize('!a++') is '!a++'
PASS compileAndSerialize('!(a++)') is '!(a++)'
-FAIL compileAndSerialize('(!a)++') should be (!a)++. Threw exception ReferenceError: Invalid left-hand side expression in postfix operation
+PASS compileAndSerialize('(!a)++') threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
PASS compileAndSerialize('!a--') is '!a--'
PASS compileAndSerialize('!(a--)') is '!(a--)'
-FAIL compileAndSerialize('(!a)--') should be (!a)--. Threw exception ReferenceError: Invalid left-hand side expression in postfix operation
+PASS compileAndSerialize('(!a)--') threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
PASS compileAndSerialize('(-1)[a]') is '(-1)[a]'
PASS compileAndSerialize('(-1)[a] = b') is '(-1)[a] = b'
PASS compileAndSerialize('(-1)[a] += b') is '(-1)[a] += b'
@@ -464,42 +464,42 @@ PASS compileAndSerialize('(1).a += b') is '(1).a += b'
PASS compileAndSerialize('(1).a++') is '(1).a++'
PASS compileAndSerialize('++(1).a') is '++(1).a'
PASS compileAndSerialize('(1).a()') is '(1).a()'
-FAIL compileAndSerialize('(-1) = a') should be (-1) = a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) = a') should be (- 0) = a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 = a') should be 1 = a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) *= a') should be (-1) *= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) *= a') should be (- 0) *= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 *= a') should be 1 *= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) /= a') should be (-1) /= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) /= a') should be (- 0) /= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 /= a') should be 1 /= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) %= a') should be (-1) %= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) %= a') should be (- 0) %= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 %= a') should be 1 %= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) += a') should be (-1) += a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) += a') should be (- 0) += a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 += a') should be 1 += a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) -= a') should be (-1) -= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) -= a') should be (- 0) -= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 -= a') should be 1 -= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) <<= a') should be (-1) <<= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) <<= a') should be (- 0) <<= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 <<= a') should be 1 <<= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) >>= a') should be (-1) >>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) >>= a') should be (- 0) >>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 >>= a') should be 1 >>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) >>>= a') should be (-1) >>>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) >>>= a') should be (- 0) >>>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 >>>= a') should be 1 >>>= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) &= a') should be (-1) &= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) &= a') should be (- 0) &= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 &= a') should be 1 &= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) ^= a') should be (-1) ^= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) ^= a') should be (- 0) ^= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 ^= a') should be 1 ^= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(-1) |= a') should be (-1) |= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('(- 0) |= a') should be (- 0) |= a. Threw exception ReferenceError: Invalid left-hand side in assignment
-FAIL compileAndSerialize('1 |= a') should be 1 |= a. Threw exception ReferenceError: Invalid left-hand side in assignment
+PASS compileAndSerialize('(-1) = a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) = a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 = a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) *= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) *= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 *= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) /= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) /= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 /= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) %= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) %= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 %= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) += a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) += a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 += a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) -= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) -= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 -= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) <<= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) <<= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 <<= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) >>= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) >>= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 >>= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) >>>= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) >>>= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 >>>= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) &= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) &= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 &= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) ^= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) ^= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 ^= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(-1) |= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('(- 0) |= a') threw exception SyntaxError: Invalid left-hand side in assignment.
+PASS compileAndSerialize('1 |= a') threw exception SyntaxError: Invalid left-hand side in assignment.
PASS compileAndSerializeLeftmostTest('({ }).x') is '({ }).x'
PASS compileAndSerializeLeftmostTest('x = { }') is 'x = { }'
PASS compileAndSerializeLeftmostTest('(function () { })()') is '(function () { })()'
diff --git a/deps/v8/test/webkit/fast/js/function-toString-parentheses.js b/deps/v8/test/webkit/fast/js/function-toString-parentheses.js
index 6bc62ab811..af831976b6 100644
--- a/deps/v8/test/webkit/fast/js/function-toString-parentheses.js
+++ b/deps/v8/test/webkit/fast/js/function-toString-parentheses.js
@@ -127,7 +127,7 @@ for (i = 0; i < assignmentOperators.length; ++i) {
testRightAssociativeSame("=", op);
testLowerFirst(op, "+");
shouldThrow("compileAndSerialize('a + b " + op + " c')");
- testKeepParentheses("(a + b) " + op + " c");
+ shouldThrow("compileAndSerialize('(a + b) " + op + " c')");
testKeepParentheses("a + (b " + op + " c)");
}
@@ -138,7 +138,10 @@ for (i = 0; i < prefixOperators.length; ++i) {
var op = prefixOperators[i] + prefixOperatorSpace[i];
testKeepParentheses("" + op + "a + b");
testOptionalParentheses("(" + op + "a) + b");
- testKeepParentheses("" + op + "(a + b)");
+ if (prefixOperators[i] !== "++" && prefixOperators[i] !== "--")
+ testKeepParentheses("" + op + "(a + b)");
+ else
+ shouldThrow("compileAndSerialize('" + op + "(a + b)')");
testKeepParentheses("!" + op + "a");
testOptionalParentheses("!(" + op + "a)");
}
@@ -146,11 +149,11 @@ for (i = 0; i < prefixOperators.length; ++i) {
testKeepParentheses("!a++");
testOptionalParentheses("!(a++)");
-testKeepParentheses("(!a)++");
+shouldThrow("compileAndSerialize('(!a)++')");
testKeepParentheses("!a--");
testOptionalParentheses("!(a--)");
-testKeepParentheses("(!a)--");
+shouldThrow("compileAndSerialize('(!a)--')");
testKeepParentheses("(-1)[a]");
testKeepParentheses("(-1)[a] = b");
@@ -205,9 +208,9 @@ testKeepParentheses("(1).a()");
for (i = 0; i < assignmentOperators.length; ++i) {
var op = assignmentOperators[i];
- testKeepParentheses("(-1) " + op + " a");
- testKeepParentheses("(- 0) " + op + " a");
- testKeepParentheses("1 " + op + " a");
+ shouldThrow("compileAndSerialize('(-1) " + op + " a')");
+ shouldThrow("compileAndSerialize('(- 0) " + op + " a')");
+ shouldThrow("compileAndSerialize('1 " + op + " a')");
}
shouldBe("compileAndSerializeLeftmostTest('({ }).x')", "'({ }).x'");
diff --git a/deps/v8/test/webkit/fast/js/object-extra-comma-expected.txt b/deps/v8/test/webkit/fast/js/object-extra-comma-expected.txt
index 22511af833..87041c37a2 100644
--- a/deps/v8/test/webkit/fast/js/object-extra-comma-expected.txt
+++ b/deps/v8/test/webkit/fast/js/object-extra-comma-expected.txt
@@ -29,8 +29,8 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS var foo = { 'bar' : 'YES' }; foo.bar is 'YES'
PASS var foo = { 'bar' : 'YES', }; foo.bar is 'YES'
PASS var foo = { 'bar' : 'YES' , }; foo.bar is 'YES'
-PASS var foo = { , 'bar' : 'YES' }; foo.bar threw exception SyntaxError: Unexpected token ,.
-PASS var foo = { 'bar' : 'YES',, }; foo.bar threw exception SyntaxError: Unexpected token ,.
+PASS var foo = { , 'bar' : 'YES' }; foo.bar threw exception SyntaxError: Unexpected token ','.
+PASS var foo = { 'bar' : 'YES',, }; foo.bar threw exception SyntaxError: Unexpected token ','.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
index f6dbe354e7..8bab89ad11 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
@@ -47,32 +47,32 @@ PASS Invalid: "new -a"
PASS Invalid: "function f() { new -a }"
PASS Valid: "new (-1)"
PASS Valid: "function f() { new (-1) }"
-PASS Valid: "a: b: c: new f(x++)++"
-PASS Valid: "function f() { a: b: c: new f(x++)++ }"
+PASS Invalid: "a: b: c: new f(x++)++"
+PASS Invalid: "function f() { a: b: c: new f(x++)++ }"
PASS Valid: "(a)++"
PASS Valid: "function f() { (a)++ }"
-PASS Valid: "(1--).x"
-PASS Valid: "function f() { (1--).x }"
+PASS Invalid: "(1--).x"
+PASS Invalid: "function f() { (1--).x }"
PASS Invalid: "a-- ++"
PASS Invalid: "function f() { a-- ++ }"
PASS Invalid: "(a:) --b"
PASS Invalid: "function f() { (a:) --b }"
-PASS Valid: "++ -- ++ a"
-PASS Valid: "function f() { ++ -- ++ a }"
-PASS Valid: "++ new new a ++"
-PASS Valid: "function f() { ++ new new a ++ }"
+PASS Invalid: "++ -- ++ a"
+PASS Invalid: "function f() { ++ -- ++ a }"
+PASS Invalid: "++ new new a ++"
+PASS Invalid: "function f() { ++ new new a ++ }"
PASS Valid: "delete void 0"
PASS Valid: "function f() { delete void 0 }"
PASS Invalid: "delete the void"
PASS Invalid: "function f() { delete the void }"
PASS Invalid: "(a++"
PASS Invalid: "function f() { (a++ }"
-PASS Valid: "++a--"
-PASS Valid: "function f() { ++a-- }"
-PASS Valid: "++((a))--"
-PASS Valid: "function f() { ++((a))-- }"
-PASS Valid: "(a.x++)++"
-PASS Valid: "function f() { (a.x++)++ }"
+PASS Invalid: "++a--"
+PASS Invalid: "function f() { ++a-- }"
+PASS Invalid: "++((a))--"
+PASS Invalid: "function f() { ++((a))-- }"
+PASS Invalid: "(a.x++)++"
+PASS Invalid: "function f() { (a.x++)++ }"
PASS Invalid: "1: null"
PASS Invalid: "function f() { 1: null }"
PASS Invalid: "+-!~"
@@ -114,18 +114,18 @@ PASS Invalid: "a in instanceof b.l"
PASS Invalid: "function f() { a in instanceof b.l }"
PASS Valid: "- - true % 5"
PASS Valid: "function f() { - - true % 5 }"
-FAIL Invalid: "- false = 3" should throw undefined
-FAIL Invalid: "function f() { - false = 3 }" should throw undefined
-PASS Valid: "a: b: c: (1 + null) = 3"
-PASS Valid: "function f() { a: b: c: (1 + null) = 3 }"
+PASS Invalid: "- false = 3"
+PASS Invalid: "function f() { - false = 3 }"
+PASS Invalid: "a: b: c: (1 + null) = 3"
+PASS Invalid: "function f() { a: b: c: (1 + null) = 3 }"
PASS Valid: "a[2] = b.l += c /= 4 * 7 ^ !6"
PASS Valid: "function f() { a[2] = b.l += c /= 4 * 7 ^ !6 }"
-FAIL Invalid: "a + typeof b += c in d" should throw undefined
-FAIL Invalid: "function f() { a + typeof b += c in d }" should throw undefined
-FAIL Invalid: "typeof a &= typeof b" should throw undefined
-FAIL Invalid: "function f() { typeof a &= typeof b }" should throw undefined
-PASS Valid: "a: ((typeof (a))) >>>= a || b.l && c"
-PASS Valid: "function f() { a: ((typeof (a))) >>>= a || b.l && c }"
+PASS Invalid: "a + typeof b += c in d"
+PASS Invalid: "function f() { a + typeof b += c in d }"
+PASS Invalid: "typeof a &= typeof b"
+PASS Invalid: "function f() { typeof a &= typeof b }"
+PASS Invalid: "a: ((typeof (a))) >>>= a || b.l && c"
+PASS Invalid: "function f() { a: ((typeof (a))) >>>= a || b.l && c }"
PASS Valid: "a: b: c[a /= f[a %= b]].l[c[x] = 7] -= a ? b <<= f : g"
PASS Valid: "function f() { a: b: c[a /= f[a %= b]].l[c[x] = 7] -= a ? b <<= f : g }"
PASS Valid: "-void+x['y'].l == x.l != 5 - f[7]"
@@ -133,8 +133,8 @@ PASS Valid: "function f() { -void+x['y'].l == x.l != 5 - f[7] }"
Function calls (and new with arguments)
PASS Valid: "a()()()"
PASS Valid: "function f() { a()()() }"
-PASS Valid: "s: l: a[2](4 == 6, 5 = 6)(f[4], 6)"
-PASS Valid: "function f() { s: l: a[2](4 == 6, 5 = 6)(f[4], 6) }"
+PASS Invalid: "s: l: a[2](4 == 6, 5 = 6)(f[4], 6)"
+PASS Invalid: "function f() { s: l: a[2](4 == 6, 5 = 6)(f[4], 6) }"
PASS Valid: "s: eval(a.apply(), b.call(c[5] - f[7]))"
PASS Valid: "function f() { s: eval(a.apply(), b.call(c[5] - f[7])) }"
PASS Invalid: "a("
@@ -143,8 +143,8 @@ PASS Invalid: "a(5"
PASS Invalid: "function f() { a(5 }"
PASS Invalid: "a(5,"
PASS Invalid: "function f() { a(5, }"
-FAIL Invalid: "a(5,)" should throw undefined
-FAIL Invalid: "function f() { a(5,) }" should throw undefined
+PASS Valid: "a(5,)"
+PASS Valid: "function f() { a(5,) }"
PASS Invalid: "a(5,6"
PASS Invalid: "function f() { a(5,6 }"
PASS Valid: "a(b[7], c <d> e.l, new a() > b)"
@@ -178,8 +178,8 @@ PASS Invalid: "function () {}"
PASS Invalid: "function f() { function () {} }"
PASS Invalid: "function f(a b) {}"
PASS Invalid: "function f() { function f(a b) {} }"
-FAIL Invalid: "function f(a,) {}" should throw undefined
-FAIL Invalid: "function f() { function f(a,) {} }" should throw undefined
+PASS Valid: "function f(a,) {}"
+PASS Valid: "function f() { function f(a,) {} }"
PASS Invalid: "function f(a,"
PASS Invalid: "function f() { function f(a, }"
PASS Invalid: "function f(a, 1) {}"
@@ -341,8 +341,8 @@ PASS Invalid: "var a = if (b) { c }"
PASS Invalid: "function f() { var a = if (b) { c } }"
PASS Invalid: "var a = var b"
PASS Invalid: "function f() { var a = var b }"
-FAIL Valid: "const a = b += c, a, a, a = (b - f())" should NOT throw
-FAIL Valid: "function f() { const a = b += c, a, a, a = (b - f()) }" should NOT throw
+PASS Invalid: "const a = b += c, a, a, a = (b - f())"
+PASS Invalid: "function f() { const a = b += c, a, a, a = (b - f()) }"
PASS Invalid: "var a %= b | 5"
PASS Invalid: "function f() { var a %= b | 5 }"
PASS Invalid: "var (a) = 5"
@@ -355,10 +355,10 @@ PASS Invalid: "var var = 3"
PASS Invalid: "function f() { var var = 3 }"
PASS Valid: "var varr = 3 in 1"
PASS Valid: "function f() { var varr = 3 in 1 }"
-FAIL Valid: "const a, a, a = void 7 - typeof 8, a = 8" should NOT throw
-FAIL Valid: "function f() { const a, a, a = void 7 - typeof 8, a = 8 }" should NOT throw
-PASS Valid: "const x_x = 6 /= 7 ? e : f"
-PASS Valid: "function f() { const x_x = 6 /= 7 ? e : f }"
+PASS Invalid: "const a, a, a = void 7 - typeof 8, a = 8"
+PASS Invalid: "function f() { const a, a, a = void 7 - typeof 8, a = 8 }"
+PASS Invalid: "const x_x = 6 /= 7 ? e : f"
+PASS Invalid: "function f() { const x_x = 6 /= 7 ? e : f }"
PASS Invalid: "var a = ?"
PASS Invalid: "function f() { var a = ? }"
PASS Invalid: "const a = *7"
@@ -452,8 +452,8 @@ PASS Invalid: "for ((a ? b : c) in c) break"
PASS Invalid: "function f() { for ((a ? b : c) in c) break }"
PASS Valid: "for (var a in b in c) break"
PASS Valid: "function f() { for (var a in b in c) break }"
-PASS Valid: "for (var a = 5 += 6 in b) break"
-PASS Valid: "function f() { for (var a = 5 += 6 in b) break }"
+PASS Invalid: "for (var a = 5 += 6 in b) break"
+PASS Invalid: "function f() { for (var a = 5 += 6 in b) break }"
PASS Invalid: "for (var a += 5 in b) break"
PASS Invalid: "function f() { for (var a += 5 in b) break }"
PASS Invalid: "for (var a = in b) break"
@@ -470,8 +470,8 @@ PASS Invalid: "for (var a = (b in c in d) break"
PASS Invalid: "function f() { for (var a = (b in c in d) break }"
PASS Invalid: "for (var (a) in b) { }"
PASS Invalid: "function f() { for (var (a) in b) { } }"
-PASS Valid: "for (var a = 7, b = c < d >= d ; f()[6]++ ; --i()[1]++ ) {}"
-PASS Valid: "function f() { for (var a = 7, b = c < d >= d ; f()[6]++ ; --i()[1]++ ) {} }"
+PASS Invalid: "for (var a = 7, b = c < d >= d ; f()[6]++ ; --i()[1]++ ) {}"
+PASS Invalid: "function f() { for (var a = 7, b = c < d >= d ; f()[6]++ ; --i()[1]++ ) {} }"
try statement
PASS Valid: "try {} finally { c++ }"
PASS Valid: "function f() { try {} finally { c++ } }"
@@ -582,8 +582,8 @@ PASS Valid: "if (0) obj.foo\u03bb; "
PASS Valid: "function f() { if (0) obj.foo\u03bb; }"
PASS Valid: "if (0) new a(b+c).d = 5"
PASS Valid: "function f() { if (0) new a(b+c).d = 5 }"
-PASS Valid: "if (0) new a(b+c) = 5"
-PASS Valid: "function f() { if (0) new a(b+c) = 5 }"
+PASS Invalid: "if (0) new a(b+c) = 5"
+PASS Invalid: "function f() { if (0) new a(b+c) = 5 }"
PASS Valid: "([1 || 1].a = 1)"
PASS Valid: "function f() { ([1 || 1].a = 1) }"
PASS Valid: "({a: 1 || 1}.a = 1)"
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check.js b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
index 15be4b048b..7dbd5583b2 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check.js
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
@@ -85,19 +85,19 @@ invalid("a.'l'");
valid ("a: +~!new a");
invalid("new -a");
valid ("new (-1)")
-valid ("a: b: c: new f(x++)++")
+invalid("a: b: c: new f(x++)++")
valid ("(a)++");
-valid ("(1--).x");
+invalid("(1--).x");
invalid("a-- ++");
invalid("(a:) --b");
-valid ("++ -- ++ a");
-valid ("++ new new a ++");
+invalid("++ -- ++ a");
+invalid("++ new new a ++");
valid ("delete void 0");
invalid("delete the void");
invalid("(a++");
-valid ("++a--");
-valid ("++((a))--");
-valid ("(a.x++)++");
+invalid("++a--");
+invalid("++((a))--");
+invalid("(a.x++)++");
invalid("1: null");
invalid("+-!~");
invalid("+-!~((");
@@ -122,23 +122,23 @@ valid ("a in b instanceof delete -c");
invalid("a in instanceof b.l");
valid ("- - true % 5");
invalid("- false = 3");
-valid ("a: b: c: (1 + null) = 3");
+invalid("a: b: c: (1 + null) = 3");
valid ("a[2] = b.l += c /= 4 * 7 ^ !6");
invalid("a + typeof b += c in d");
invalid("typeof a &= typeof b");
-valid ("a: ((typeof (a))) >>>= a || b.l && c");
+invalid("a: ((typeof (a))) >>>= a || b.l && c");
valid ("a: b: c[a /= f[a %= b]].l[c[x] = 7] -= a ? b <<= f : g");
valid ("-void+x['y'].l == x.l != 5 - f[7]");
debug ("Function calls (and new with arguments)");
valid ("a()()()");
-valid ("s: l: a[2](4 == 6, 5 = 6)(f[4], 6)");
+invalid("s: l: a[2](4 == 6, 5 = 6)(f[4], 6)");
valid ("s: eval(a.apply(), b.call(c[5] - f[7]))");
invalid("a(");
invalid("a(5");
invalid("a(5,");
-invalid("a(5,)");
+valid("a(5,)");
invalid("a(5,6");
valid ("a(b[7], c <d> e.l, new a() > b)");
invalid("a(b[5)");
@@ -158,7 +158,7 @@ valid ("function f() {}");
valid ("function f(a,b) {}");
invalid("function () {}");
invalid("function f(a b) {}");
-invalid("function f(a,) {}");
+valid("function f(a,) {}");
invalid("function f(a,");
invalid("function f(a, 1) {}");
valid ("function g(arguments, eval) {}");
@@ -248,15 +248,15 @@ invalid("var 5 = 6");
valid ("while (0) var a, b, c=6, d, e, f=5*6, g=f*h, h");
invalid("var a = if (b) { c }");
invalid("var a = var b");
-valid ("const a = b += c, a, a, a = (b - f())");
+invalid("const a = b += c, a, a, a = (b - f())");
invalid("var a %= b | 5");
invalid("var (a) = 5");
invalid("var a = (4, b = 6");
invalid("const 'l' = 3");
invalid("var var = 3");
valid ("var varr = 3 in 1");
-valid ("const a, a, a = void 7 - typeof 8, a = 8");
-valid ("const x_x = 6 /= 7 ? e : f");
+invalid("const a, a, a = void 7 - typeof 8, a = 8");
+invalid("const x_x = 6 /= 7 ? e : f");
invalid("var a = ?");
invalid("const a = *7");
invalid("var a = :)");
@@ -306,7 +306,7 @@ invalid("for ((a, b) in c) break");
invalid("for (a ? b : c in c) break");
invalid("for ((a ? b : c) in c) break");
valid ("for (var a in b in c) break");
-valid ("for (var a = 5 += 6 in b) break");
+invalid("for (var a = 5 += 6 in b) break");
invalid("for (var a += 5 in b) break");
invalid("for (var a = in b) break");
invalid("for (var a, b in b) break");
@@ -315,7 +315,7 @@ invalid("for (var a, b = 8 in b) break");
valid ("for (var a = (b in c) in d) break");
invalid("for (var a = (b in c in d) break");
invalid("for (var (a) in b) { }");
-valid ("for (var a = 7, b = c < d >= d ; f()[6]++ ; --i()[1]++ ) {}");
+invalid("for (var a = 7, b = c < d >= d ; f()[6]++ ; --i()[1]++ ) {}");
debug ("try statement");
@@ -379,7 +379,7 @@ valid("if (0) obj.foo$; ")
valid("if (0) obj.foo_; ")
valid("if (0) obj.foo\\u03bb; ")
valid("if (0) new a(b+c).d = 5");
-valid("if (0) new a(b+c) = 5");
+invalid("if (0) new a(b+c) = 5");
valid("([1 || 1].a = 1)");
valid("({a: 1 || 1}.a = 1)");
diff --git a/deps/v8/test/webkit/fast/regex/toString-expected.txt b/deps/v8/test/webkit/fast/regex/toString-expected.txt
index 745e9ee6f8..3d6c5322a7 100644
--- a/deps/v8/test/webkit/fast/regex/toString-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/toString-expected.txt
@@ -51,7 +51,7 @@ PASS testLineTerminator("\u2028"); is false
PASS testLineTerminator("\\u2028"); is false
PASS testLineTerminator("\u2029"); is false
PASS testLineTerminator("\\u2029"); is false
-FAIL RegExp('[/]').source should be [/]. Was [\/].
+PASS RegExp('[/]').source is '[/]'
PASS RegExp('\\[/]').source is '\\[\\/]'
PASS var o = new RegExp(); o.toString() === '/'+o.source+'/' && eval(o.toString()+'.exec(String())') is [""]
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/function-toString-object-literals-expected.txt b/deps/v8/test/webkit/function-toString-object-literals-expected.txt
index 1b37162a34..410e5ce151 100644
--- a/deps/v8/test/webkit/function-toString-object-literals-expected.txt
+++ b/deps/v8/test/webkit/function-toString-object-literals-expected.txt
@@ -42,11 +42,11 @@ PASS compileAndSerialize('a = { "\'": null }') is 'a = { "\'": null }'
PASS compileAndSerialize('a = { "\\"": null }') is 'a = { "\\"": null }'
PASS compileAndSerialize('a = { get x() { } }') is 'a = { get x() { } }'
PASS compileAndSerialize('a = { set x(y) { } }') is 'a = { set x(y) { } }'
-PASS compileAndSerialize('a = { --1: null }') threw exception SyntaxError: Unexpected token --.
-PASS compileAndSerialize('a = { -NaN: null }') threw exception SyntaxError: Unexpected token -.
-PASS compileAndSerialize('a = { -0: null }') threw exception SyntaxError: Unexpected token -.
-PASS compileAndSerialize('a = { -0.0: null }') threw exception SyntaxError: Unexpected token -.
-PASS compileAndSerialize('a = { -Infinity: null }') threw exception SyntaxError: Unexpected token -.
+PASS compileAndSerialize('a = { --1: null }') threw exception SyntaxError: Unexpected token '--'.
+PASS compileAndSerialize('a = { -NaN: null }') threw exception SyntaxError: Unexpected token '-'.
+PASS compileAndSerialize('a = { -0: null }') threw exception SyntaxError: Unexpected token '-'.
+PASS compileAndSerialize('a = { -0.0: null }') threw exception SyntaxError: Unexpected token '-'.
+PASS compileAndSerialize('a = { -Infinity: null }') threw exception SyntaxError: Unexpected token '-'.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/parser-xml-close-comment-expected.txt b/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
index b6aebe3d06..34a9f637f9 100644
--- a/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
+++ b/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
@@ -26,9 +26,9 @@ Test to ensure correct handling of --> as a single line comment when at the begi
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS 'should be a syntax error' --> threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS /**/ 1 --> threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS 1 /**/ --> threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS 'should be a syntax error' --> threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS /**/ 1 --> threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
+PASS 1 /**/ --> threw exception SyntaxError: Invalid left-hand side expression in postfix operation.
PASS 1/*
*/--> is 1
PASS --> is undefined.
diff --git a/deps/v8/infra/testing/OWNERS b/deps/v8/testing/OWNERS
index 50b5741785..bdb1d555a4 100644
--- a/deps/v8/infra/testing/OWNERS
+++ b/deps/v8/testing/OWNERS
@@ -1,3 +1 @@
-set noparent
-
file://INFRA_OWNERS
diff --git a/deps/v8/third_party/binutils/OWNERS b/deps/v8/third_party/binutils/OWNERS
new file mode 100644
index 0000000000..bdb1d555a4
--- /dev/null
+++ b/deps/v8/third_party/binutils/OWNERS
@@ -0,0 +1 @@
+file://INFRA_OWNERS
diff --git a/deps/v8/third_party/colorama/OWNERS b/deps/v8/third_party/colorama/OWNERS
new file mode 100644
index 0000000000..bdb1d555a4
--- /dev/null
+++ b/deps/v8/third_party/colorama/OWNERS
@@ -0,0 +1 @@
+file://INFRA_OWNERS
diff --git a/deps/v8/third_party/inspector_protocol/BUILD.gn b/deps/v8/third_party/inspector_protocol/BUILD.gn
index 974471bf27..28b2ea4fb4 100644
--- a/deps/v8/third_party/inspector_protocol/BUILD.gn
+++ b/deps/v8/third_party/inspector_protocol/BUILD.gn
@@ -9,6 +9,13 @@ static_library("encoding") {
]
}
+static_library("bindings") {
+ sources = [
+ "bindings/bindings.cc",
+ "bindings/bindings.h",
+ ]
+}
+
# encoding_test is part of the unittests, defined in
# test/unittests/BUILD.gn.
@@ -32,3 +39,23 @@ v8_source_set("encoding_test") {
]
testonly = true
}
+
+v8_source_set("bindings_test") {
+ sources = [
+ "bindings/bindings_test.cc",
+ "bindings/bindings_test_helper.h",
+ ]
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+ deps = [
+ ":bindings",
+ "../..:v8_libbase",
+ "../../src/inspector:inspector_string_conversions",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+ testonly = true
+}
+
diff --git a/deps/v8/third_party/inspector_protocol/OWNERS b/deps/v8/third_party/inspector_protocol/OWNERS
index 8d0b6d90cb..74da87a57b 100644
--- a/deps/v8/third_party/inspector_protocol/OWNERS
+++ b/deps/v8/third_party/inspector_protocol/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
alph@chromium.org
caseq@chromium.org
dgozman@chromium.org
diff --git a/deps/v8/third_party/inspector_protocol/README.md b/deps/v8/third_party/inspector_protocol/README.md
index da3f93f3f3..4482df7c89 100644
--- a/deps/v8/third_party/inspector_protocol/README.md
+++ b/deps/v8/third_party/inspector_protocol/README.md
@@ -15,8 +15,8 @@ https://cs.chromium.org/chromium/src/v8/third_party/inspector_protocol/
See also [Contributing to Chrome Devtools Protocol](https://docs.google.com/document/d/1c-COD2kaK__5iMM5SEx-PzNA7HFmgttcYfOHHX0HaOM/edit).
We're working on enabling standalone builds for parts of this package for
-testing and development, please feel free to ignore this for now.
-But, if you're familiar with
+testing and development.
+If you're familiar with
[Chromium's development process](https://www.chromium.org/developers/contributing-code)
and have the depot_tools installed, you may use these commands
to fetch the package (and dependencies) and build and run the tests:
@@ -24,8 +24,9 @@ to fetch the package (and dependencies) and build and run the tests:
fetch inspector_protocol
cd src
gn gen out/Release
- ninja -C out/Release json_parser_test
- out/Release/json_parser_test
+ ninja -C out/Release encoding_test bindings_test
+ out/Release/encoding_test
+ out/Release/bindings_test
You'll probably also need to install g++, since Clang uses this to find the
standard C++ headers. E.g.,
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index ade9e8e52c..622ff72f74 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: fe0467fd105a9ea90fbb091dc2a7b4cdbf539803
+Revision: 373efb7fe33a7ae84038868ed08b9f1bd328b55d
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/test/mjsunit/modules-skip-cycle.js b/deps/v8/third_party/inspector_protocol/bindings/bindings.cc
index b4adbc7ab3..7276805943 100644
--- a/deps/v8/test/mjsunit/modules-skip-cycle.js
+++ b/deps/v8/third_party/inspector_protocol/bindings/bindings.cc
@@ -1,5 +1,5 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
+// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export {foo} from "modules-cycle.js";
+#include "bindings.h"
diff --git a/deps/v8/third_party/inspector_protocol/bindings/bindings.h b/deps/v8/third_party/inspector_protocol/bindings/bindings.h
new file mode 100644
index 0000000000..0fae2032e4
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/bindings/bindings.h
@@ -0,0 +1,81 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_PROTOCOL_BINDINGS_BINDINGS_H_
+#define V8_INSPECTOR_PROTOCOL_BINDINGS_BINDINGS_H_
+
+#include <cassert>
+#include <memory>
+
+namespace v8_inspector_protocol_bindings {
+namespace glue {
+// =============================================================================
+// glue::detail::PtrMaybe, glue::detail::ValueMaybe, templates for optional
+// pointers / values which are used in ../lib/Forward_h.template.
+// =============================================================================
+namespace detail {
+template <typename T>
+class PtrMaybe {
+ public:
+ PtrMaybe() = default;
+ PtrMaybe(std::unique_ptr<T> value) : value_(std::move(value)) {}
+ PtrMaybe(PtrMaybe&& other) noexcept : value_(std::move(other.value_)) {}
+ void operator=(std::unique_ptr<T> value) { value_ = std::move(value); }
+ T* fromJust() const {
+ assert(value_);
+ return value_.get();
+ }
+ T* fromMaybe(T* default_value) const {
+ return value_ ? value_.get() : default_value;
+ }
+ bool isJust() const { return value_ != nullptr; }
+ std::unique_ptr<T> takeJust() {
+ assert(value_);
+ return std::move(value_);
+ }
+
+ private:
+ std::unique_ptr<T> value_;
+};
+
+template <typename T>
+class ValueMaybe {
+ public:
+ ValueMaybe() : is_just_(false), value_() {}
+ ValueMaybe(T value) : is_just_(true), value_(std::move(value)) {}
+ ValueMaybe(ValueMaybe&& other) noexcept
+ : is_just_(other.is_just_), value_(std::move(other.value_)) {}
+ void operator=(T value) {
+ value_ = value;
+ is_just_ = true;
+ }
+ const T& fromJust() const {
+ assert(is_just_);
+ return value_;
+ }
+ const T& fromMaybe(const T& default_value) const {
+ return is_just_ ? value_ : default_value;
+ }
+ bool isJust() const { return is_just_; }
+ // TODO(johannes): |is_just_| isn't reset by this operation -
+ // introduce && to ensure avoiding continued usage of |this|?
+ T takeJust() {
+ assert(is_just_);
+ return std::move(value_);
+ }
+
+ private:
+ bool is_just_;
+ T value_;
+};
+} // namespace detail
+} // namespace glue
+} // namespace v8_inspector_protocol_bindings
+
+#define PROTOCOL_DISALLOW_COPY(ClassName) \
+ private: \
+ ClassName(const ClassName&) = delete; \
+ ClassName& operator=(const ClassName&) = delete
+
+#endif // V8_INSPECTOR_PROTOCOL_BINDINGS_BINDINGS_H_
diff --git a/deps/v8/third_party/inspector_protocol/bindings/bindings_test.cc b/deps/v8/third_party/inspector_protocol/bindings/bindings_test.cc
new file mode 100644
index 0000000000..a39aa4d012
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/bindings/bindings_test.cc
@@ -0,0 +1,44 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "bindings.h"
+
+#include <string>
+#include <vector>
+
+#include "bindings_test_helper.h"
+
+namespace v8_inspector_protocol_bindings {
+namespace glue {
+// =============================================================================
+// glue::detail::PtrMaybe, glue::detail::ValueMaybe, templates for optional
+// pointers / values which are used in ../lib/Forward_h.template.
+// =============================================================================
+TEST(PtrMaybeTest, SmokeTest) {
+ detail::PtrMaybe<std::vector<uint32_t>> example;
+ EXPECT_FALSE(example.isJust());
+ EXPECT_TRUE(nullptr == example.fromMaybe(nullptr));
+ std::unique_ptr<std::vector<uint32_t>> v(new std::vector<uint32_t>);
+ v->push_back(42);
+ v->push_back(21);
+ example = std::move(v);
+ EXPECT_TRUE(example.isJust());
+ EXPECT_THAT(*example.fromJust(), testing::ElementsAre(42, 21));
+ std::unique_ptr<std::vector<uint32_t>> out = example.takeJust();
+ EXPECT_FALSE(example.isJust());
+ EXPECT_THAT(*out, testing::ElementsAre(42, 21));
+}
+
+TEST(PtrValueTest, SmokeTest) {
+ detail::ValueMaybe<int32_t> example;
+ EXPECT_FALSE(example.isJust());
+ EXPECT_EQ(-1, example.fromMaybe(-1));
+ example = 42;
+ EXPECT_TRUE(example.isJust());
+ EXPECT_EQ(42, example.fromJust());
+ int32_t out = example.takeJust();
+ EXPECT_EQ(out, 42);
+}
+} // namespace glue
+} // namespace v8_inspector_protocol_bindings
diff --git a/deps/v8/third_party/inspector_protocol/bindings/bindings_test_helper.h b/deps/v8/third_party/inspector_protocol/bindings/bindings_test_helper.h
new file mode 100644
index 0000000000..f4ccd49a2d
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/bindings/bindings_test_helper.h
@@ -0,0 +1,18 @@
+// Copyright 2019 The V8 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is V8 specific, to make bindings_test.cc work.
+// It is not rolled from the upstream project.
+
+#ifndef V8_INSPECTOR_PROTOCOL_BINDINGS_BINDINGS_TEST_HELPER_H_
+#define V8_INSPECTOR_PROTOCOL_BINDINGS_BINDINGS_TEST_HELPER_H_
+
+#include <string>
+#include <vector>
+
+#include "src/base/logging.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#endif // V8_INSPECTOR_PROTOCOL_BINDINGS_BINDINGS_TEST_HELPER_H_
diff --git a/deps/v8/third_party/inspector_protocol/code_generator.py b/deps/v8/third_party/inspector_protocol/code_generator.py
index 1e12343e05..7c72cc70e4 100755
--- a/deps/v8/third_party/inspector_protocol/code_generator.py
+++ b/deps/v8/third_party/inspector_protocol/code_generator.py
@@ -11,18 +11,18 @@ import functools
import re
import copy
try:
- import json
+ import json
except ImportError:
- import simplejson as json
+ import simplejson as json
import pdl
try:
- unicode
+ unicode
except NameError:
- # Define unicode for Py3
- def unicode(s, *_):
- return s
+ # Define unicode for Py3
+ def unicode(s, *_):
+ return s
# Path handling for libraries and templates
# Paths have to be normalized because Jinja uses the exact template path to
@@ -35,654 +35,695 @@ except NameError:
module_path, module_filename = os.path.split(os.path.realpath(__file__))
def read_config():
- # pylint: disable=W0703
- def json_to_object(data, output_base, config_base):
- def json_object_hook(object_dict):
- items = [(k, os.path.join(config_base, v) if k == "path" else v) for (k, v) in object_dict.items()]
- items = [(k, os.path.join(output_base, v) if k == "output" else v) for (k, v) in items]
- keys, values = list(zip(*items))
- return collections.namedtuple('X', keys)(*values)
- return json.loads(data, object_hook=json_object_hook)
-
- def init_defaults(config_tuple, path, defaults):
- keys = list(config_tuple._fields) # pylint: disable=E1101
- values = [getattr(config_tuple, k) for k in keys]
- for i in range(len(keys)):
- if hasattr(values[i], "_fields"):
- values[i] = init_defaults(values[i], path + "." + keys[i], defaults)
- for optional in defaults:
- if optional.find(path + ".") != 0:
- continue
- optional_key = optional[len(path) + 1:]
- if optional_key.find(".") == -1 and optional_key not in keys:
- keys.append(optional_key)
- values.append(defaults[optional])
- return collections.namedtuple('X', keys)(*values)
-
- try:
- cmdline_parser = argparse.ArgumentParser()
- cmdline_parser.add_argument("--output_base", type=unicode, required=True)
- cmdline_parser.add_argument("--jinja_dir", type=unicode, required=True)
- cmdline_parser.add_argument("--config", type=unicode, required=True)
- cmdline_parser.add_argument("--config_value", default=[], action="append")
- arg_options = cmdline_parser.parse_args()
- jinja_dir = arg_options.jinja_dir
- output_base = arg_options.output_base
- config_file = arg_options.config
- config_base = os.path.dirname(config_file)
- config_values = arg_options.config_value
- except Exception:
- # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
- exc = sys.exc_info()[1]
- sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % exc)
- exit(1)
-
- try:
- config_json_file = open(config_file, "r")
- config_json_string = config_json_file.read()
- config_partial = json_to_object(config_json_string, output_base, config_base)
- config_json_file.close()
- defaults = {
- ".use_snake_file_names": False,
- ".use_title_case_methods": False,
- ".imported": False,
- ".imported.export_macro": "",
- ".imported.export_header": False,
- ".imported.header": False,
- ".imported.package": False,
- ".imported.options": False,
- ".protocol.export_macro": "",
- ".protocol.export_header": False,
- ".protocol.options": False,
- ".protocol.file_name_prefix": "",
- ".exported": False,
- ".exported.export_macro": "",
- ".exported.export_header": False,
- ".lib": False,
- ".lib.export_macro": "",
- ".lib.export_header": False,
- }
- for key_value in config_values:
- parts = key_value.split("=")
- if len(parts) == 2:
- defaults["." + parts[0]] = parts[1]
- return (jinja_dir, config_file, init_defaults(config_partial, "", defaults))
- except Exception:
- # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
- exc = sys.exc_info()[1]
- sys.stderr.write("Failed to parse config file: %s\n\n" % exc)
- exit(1)
+ # pylint: disable=W0703
+ def json_to_object(data, output_base, config_base):
+ def json_object_hook(object_dict):
+ items = [(k, os.path.join(config_base, v) if k == "path" else v)
+ for (k, v) in object_dict.items()]
+ items = [(k, os.path.join(output_base, v) if k == "output" else v)
+ for (k, v) in items]
+ keys, values = list(zip(*items))
+ return collections.namedtuple('X', keys)(*values)
+ return json.loads(data, object_hook=json_object_hook)
+
+ def init_defaults(config_tuple, path, defaults):
+ keys = list(config_tuple._fields) # pylint: disable=E1101
+ values = [getattr(config_tuple, k) for k in keys]
+ for i in range(len(keys)):
+ if hasattr(values[i], "_fields"):
+ values[i] = init_defaults(values[i], path + "." + keys[i], defaults)
+ for optional in defaults:
+ if optional.find(path + ".") != 0:
+ continue
+ optional_key = optional[len(path) + 1:]
+ if optional_key.find(".") == -1 and optional_key not in keys:
+ keys.append(optional_key)
+ values.append(defaults[optional])
+ return collections.namedtuple('X', keys)(*values)
+
+ try:
+ cmdline_parser = argparse.ArgumentParser()
+ cmdline_parser.add_argument("--output_base", type=unicode, required=True)
+ cmdline_parser.add_argument("--jinja_dir", type=unicode, required=True)
+ cmdline_parser.add_argument("--config", type=unicode, required=True)
+ cmdline_parser.add_argument("--config_value", default=[], action="append")
+ cmdline_parser.add_argument(
+ "--inspector_protocol_dir", type=unicode, required=True,
+ help=("directory with code_generator.py and C++ encoding / binding "
+ "libraries, relative to the root of the source tree."))
+ arg_options = cmdline_parser.parse_args()
+ jinja_dir = arg_options.jinja_dir
+ output_base = arg_options.output_base
+ config_file = arg_options.config
+ config_base = os.path.dirname(config_file)
+ config_values = arg_options.config_value
+ inspector_protocol_dir = arg_options.inspector_protocol_dir.lstrip('/')
+ except Exception:
+ # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
+ exc = sys.exc_info()[1]
+ sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % exc)
+ exit(1)
+
+ try:
+ config_json_file = open(config_file, "r")
+ config_json_string = config_json_file.read()
+ config_partial = json_to_object(config_json_string, output_base,
+ config_base)
+ config_json_file.close()
+ defaults = {
+ ".use_snake_file_names": False,
+ ".use_title_case_methods": False,
+ ".imported": False,
+ ".imported.export_macro": "",
+ ".imported.export_header": False,
+ ".imported.header": False,
+ ".imported.package": False,
+ ".imported.options": False,
+ ".protocol.export_macro": "",
+ ".protocol.export_header": False,
+ ".protocol.options": False,
+ ".protocol.file_name_prefix": "",
+ ".exported": False,
+ ".exported.export_macro": "",
+ ".exported.export_header": False,
+ ".lib": False,
+ ".lib.export_macro": "",
+ ".lib.export_header": False,
+ # The encoding lib consists of encoding/encoding.h and
+ # encoding/encoding.cc in its subdirectory, which binaries
+ # must link / depend on.
+ ".encoding_lib.header": os.path.join(inspector_protocol_dir,
+ "encoding/encoding.h"),
+ ".encoding_lib.namespace": "",
+ # Ditto for bindings, see bindings/bindings.h.
+ ".bindings_lib.header": os.path.join(inspector_protocol_dir,
+ "bindings/bindings.h"),
+ ".bindings_lib.namespace": ""
+ }
+ for key_value in config_values:
+ parts = key_value.split("=")
+ if len(parts) == 2:
+ defaults["." + parts[0]] = parts[1]
+ return (jinja_dir, config_file, init_defaults(config_partial, "", defaults))
+ except Exception:
+ # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
+ exc = sys.exc_info()[1]
+ sys.stderr.write("Failed to parse config file: %s\n\n" % exc)
+ exit(1)
# ---- Begin of utilities exposed to generator ----
def to_title_case(name):
- return name[:1].upper() + name[1:]
+ return name[:1].upper() + name[1:]
def dash_to_camelcase(word):
- prefix = ""
- if word[0] == "-":
- prefix = "Negative"
- word = word[1:]
- return prefix + "".join(to_title_case(x) or "-" for x in word.split("-"))
+ prefix = ""
+ if word[0] == "-":
+ prefix = "Negative"
+ word = word[1:]
+ return prefix + "".join(to_title_case(x) or "-" for x in word.split("-"))
def to_snake_case(name):
- return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", name, sys.maxsize).lower()
+ return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", name, sys.maxsize).lower()
def to_method_case(config, name):
- if config.use_title_case_methods:
- return to_title_case(name)
- return name
+ if config.use_title_case_methods:
+ return to_title_case(name)
+ return name
def join_arrays(dict, keys):
- result = []
- for key in keys:
- if key in dict:
- result += dict[key]
- return result
+ result = []
+ for key in keys:
+ if key in dict:
+ result += dict[key]
+ return result
def format_include(config, header, file_name=None):
- if file_name is not None:
- header = header + "/" + file_name + ".h"
- header = "\"" + header + "\"" if header[0] not in "<\"" else header
- if config.use_snake_file_names:
- header = to_snake_case(header)
- return header
+ if file_name is not None:
+ header = header + "/" + file_name + ".h"
+ header = "\"" + header + "\"" if header[0] not in "<\"" else header
+ if config.use_snake_file_names:
+ header = to_snake_case(header)
+ return header
def format_domain_include(config, header, file_name):
- return format_include(config, header, config.protocol.file_name_prefix + file_name)
+ return format_include(config, header,
+ config.protocol.file_name_prefix + file_name)
def to_file_name(config, file_name):
- if config.use_snake_file_names:
- return to_snake_case(file_name).replace(".cpp", ".cc")
- return file_name
+ if config.use_snake_file_names:
+ return to_snake_case(file_name).replace(".cpp", ".cc")
+ return file_name
# ---- End of utilities exposed to generator ----
def initialize_jinja_env(jinja_dir, cache_dir, config):
- # pylint: disable=F0401
- sys.path.insert(1, os.path.abspath(jinja_dir))
- import jinja2
-
- jinja_env = jinja2.Environment(
- loader=jinja2.FileSystemLoader(module_path),
- # Bytecode cache is not concurrency-safe unless pre-cached:
- # if pre-cached this is read-only, but writing creates a race condition.
- bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
- keep_trailing_newline=True, # newline-terminate generated files
- lstrip_blocks=True, # so can indent control flow tags
- trim_blocks=True)
- jinja_env.filters.update({"to_title_case": to_title_case, "dash_to_camelcase": dash_to_camelcase, "to_method_case": functools.partial(to_method_case, config)})
- jinja_env.add_extension("jinja2.ext.loopcontrols")
- return jinja_env
+ # pylint: disable=F0401
+ sys.path.insert(1, os.path.abspath(jinja_dir))
+ import jinja2
+
+ jinja_env = jinja2.Environment(
+ loader=jinja2.FileSystemLoader(module_path),
+ # Bytecode cache is not concurrency-safe unless pre-cached:
+ # if pre-cached this is read-only, but writing creates a race condition.
+ bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
+ keep_trailing_newline=True, # newline-terminate generated files
+ lstrip_blocks=True, # so can indent control flow tags
+ trim_blocks=True)
+ jinja_env.filters.update({
+ "to_title_case": to_title_case,
+ "dash_to_camelcase": dash_to_camelcase,
+ "to_method_case": functools.partial(to_method_case, config)})
+ jinja_env.add_extension("jinja2.ext.loopcontrols")
+ return jinja_env
def create_imported_type_definition(domain_name, type, imported_namespace):
- # pylint: disable=W0622
- return {
- "return_type": "std::unique_ptr<%s::%s::API::%s>" % (imported_namespace, domain_name, type["id"]),
- "pass_type": "std::unique_ptr<%s::%s::API::%s>" % (imported_namespace, domain_name, type["id"]),
- "to_raw_type": "%s.get()",
- "to_pass_type": "std::move(%s)",
- "to_rvalue": "std::move(%s)",
- "type": "std::unique_ptr<%s::%s::API::%s>" % (imported_namespace, domain_name, type["id"]),
- "raw_type": "%s::%s::API::%s" % (imported_namespace, domain_name, type["id"]),
- "raw_pass_type": "%s::%s::API::%s*" % (imported_namespace, domain_name, type["id"]),
- "raw_return_type": "%s::%s::API::%s*" % (imported_namespace, domain_name, type["id"]),
- }
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<%s::%s::API::%s>" % (
+ imported_namespace, domain_name, type["id"]),
+ "pass_type": "std::unique_ptr<%s::%s::API::%s>" % (
+ imported_namespace, domain_name, type["id"]),
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<%s::%s::API::%s>" % (
+ imported_namespace, domain_name, type["id"]),
+ "raw_type": "%s::%s::API::%s" % (
+ imported_namespace, domain_name, type["id"]),
+ "raw_pass_type": "%s::%s::API::%s*" % (
+ imported_namespace, domain_name, type["id"]),
+ "raw_return_type": "%s::%s::API::%s*" % (
+ imported_namespace, domain_name, type["id"]),
+ }
def create_user_type_definition(domain_name, type):
- # pylint: disable=W0622
- return {
- "return_type": "std::unique_ptr<protocol::%s::%s>" % (domain_name, type["id"]),
- "pass_type": "std::unique_ptr<protocol::%s::%s>" % (domain_name, type["id"]),
- "to_raw_type": "%s.get()",
- "to_pass_type": "std::move(%s)",
- "to_rvalue": "std::move(%s)",
- "type": "std::unique_ptr<protocol::%s::%s>" % (domain_name, type["id"]),
- "raw_type": "protocol::%s::%s" % (domain_name, type["id"]),
- "raw_pass_type": "protocol::%s::%s*" % (domain_name, type["id"]),
- "raw_return_type": "protocol::%s::%s*" % (domain_name, type["id"]),
- }
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<protocol::%s::%s>" % (
+ domain_name, type["id"]),
+ "pass_type": "std::unique_ptr<protocol::%s::%s>" % (
+ domain_name, type["id"]),
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<protocol::%s::%s>" % (domain_name, type["id"]),
+ "raw_type": "protocol::%s::%s" % (domain_name, type["id"]),
+ "raw_pass_type": "protocol::%s::%s*" % (domain_name, type["id"]),
+ "raw_return_type": "protocol::%s::%s*" % (domain_name, type["id"]),
+ }
def create_object_type_definition():
- # pylint: disable=W0622
- return {
- "return_type": "std::unique_ptr<protocol::DictionaryValue>",
- "pass_type": "std::unique_ptr<protocol::DictionaryValue>",
- "to_raw_type": "%s.get()",
- "to_pass_type": "std::move(%s)",
- "to_rvalue": "std::move(%s)",
- "type": "std::unique_ptr<protocol::DictionaryValue>",
- "raw_type": "protocol::DictionaryValue",
- "raw_pass_type": "protocol::DictionaryValue*",
- "raw_return_type": "protocol::DictionaryValue*",
- }
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<protocol::DictionaryValue>",
+ "pass_type": "std::unique_ptr<protocol::DictionaryValue>",
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<protocol::DictionaryValue>",
+ "raw_type": "protocol::DictionaryValue",
+ "raw_pass_type": "protocol::DictionaryValue*",
+ "raw_return_type": "protocol::DictionaryValue*",
+ }
def create_any_type_definition():
- # pylint: disable=W0622
- return {
- "return_type": "std::unique_ptr<protocol::Value>",
- "pass_type": "std::unique_ptr<protocol::Value>",
- "to_raw_type": "%s.get()",
- "to_pass_type": "std::move(%s)",
- "to_rvalue": "std::move(%s)",
- "type": "std::unique_ptr<protocol::Value>",
- "raw_type": "protocol::Value",
- "raw_pass_type": "protocol::Value*",
- "raw_return_type": "protocol::Value*",
- }
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<protocol::Value>",
+ "pass_type": "std::unique_ptr<protocol::Value>",
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<protocol::Value>",
+ "raw_type": "protocol::Value",
+ "raw_pass_type": "protocol::Value*",
+ "raw_return_type": "protocol::Value*",
+ }
def create_string_type_definition():
- # pylint: disable=W0622
- return {
- "return_type": "String",
- "pass_type": "const String&",
- "to_pass_type": "%s",
- "to_raw_type": "%s",
- "to_rvalue": "%s",
- "type": "String",
- "raw_type": "String",
- "raw_pass_type": "const String&",
- "raw_return_type": "String",
- }
+ # pylint: disable=W0622
+ return {
+ "return_type": "String",
+ "pass_type": "const String&",
+ "to_pass_type": "%s",
+ "to_raw_type": "%s",
+ "to_rvalue": "%s",
+ "type": "String",
+ "raw_type": "String",
+ "raw_pass_type": "const String&",
+ "raw_return_type": "String",
+ }
def create_binary_type_definition():
- # pylint: disable=W0622
- return {
- "return_type": "Binary",
- "pass_type": "const Binary&",
- "to_pass_type": "%s",
- "to_raw_type": "%s",
- "to_rvalue": "%s",
- "type": "Binary",
- "raw_type": "Binary",
- "raw_pass_type": "const Binary&",
- "raw_return_type": "Binary",
- }
+ # pylint: disable=W0622
+ return {
+ "return_type": "Binary",
+ "pass_type": "const Binary&",
+ "to_pass_type": "%s",
+ "to_raw_type": "%s",
+ "to_rvalue": "%s",
+ "type": "Binary",
+ "raw_type": "Binary",
+ "raw_pass_type": "const Binary&",
+ "raw_return_type": "Binary",
+ }
def create_primitive_type_definition(type):
- # pylint: disable=W0622
- typedefs = {
- "number": "double",
- "integer": "int",
- "boolean": "bool"
- }
- defaults = {
- "number": "0",
- "integer": "0",
- "boolean": "false"
- }
- jsontypes = {
- "number": "TypeDouble",
- "integer": "TypeInteger",
- "boolean": "TypeBoolean",
- }
- return {
- "return_type": typedefs[type],
- "pass_type": typedefs[type],
- "to_pass_type": "%s",
- "to_raw_type": "%s",
- "to_rvalue": "%s",
- "type": typedefs[type],
- "raw_type": typedefs[type],
- "raw_pass_type": typedefs[type],
- "raw_return_type": typedefs[type],
- "default_value": defaults[type]
- }
+ # pylint: disable=W0622
+ typedefs = {
+ "number": "double",
+ "integer": "int",
+ "boolean": "bool"
+ }
+ defaults = {
+ "number": "0",
+ "integer": "0",
+ "boolean": "false"
+ }
+ jsontypes = {
+ "number": "TypeDouble",
+ "integer": "TypeInteger",
+ "boolean": "TypeBoolean",
+ }
+ return {
+ "return_type": typedefs[type],
+ "pass_type": typedefs[type],
+ "to_pass_type": "%s",
+ "to_raw_type": "%s",
+ "to_rvalue": "%s",
+ "type": typedefs[type],
+ "raw_type": typedefs[type],
+ "raw_pass_type": typedefs[type],
+ "raw_return_type": typedefs[type],
+ "default_value": defaults[type]
+ }
def wrap_array_definition(type):
- # pylint: disable=W0622
- return {
- "return_type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
- "pass_type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
- "to_raw_type": "%s.get()",
- "to_pass_type": "std::move(%s)",
- "to_rvalue": "std::move(%s)",
- "type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
- "raw_type": "protocol::Array<%s>" % type["raw_type"],
- "raw_pass_type": "protocol::Array<%s>*" % type["raw_type"],
- "raw_return_type": "protocol::Array<%s>*" % type["raw_type"],
- "out_type": "protocol::Array<%s>&" % type["raw_type"],
- }
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
+ "pass_type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
+ "raw_type": "protocol::Array<%s>" % type["raw_type"],
+ "raw_pass_type": "protocol::Array<%s>*" % type["raw_type"],
+ "raw_return_type": "protocol::Array<%s>*" % type["raw_type"],
+ "out_type": "protocol::Array<%s>&" % type["raw_type"],
+ }
class Protocol(object):
- def __init__(self, config):
- self.config = config
- self.json_api = {"domains": []}
- self.imported_domains = []
- self.exported_domains = []
- self.generate_domains = self.read_protocol_file(config.protocol.path)
-
- if config.protocol.options:
- self.generate_domains = [rule.domain for rule in config.protocol.options]
- self.exported_domains = [rule.domain for rule in config.protocol.options if hasattr(rule, "exported")]
-
- if config.imported:
- self.imported_domains = self.read_protocol_file(config.imported.path)
- if config.imported.options:
- self.imported_domains = [rule.domain for rule in config.imported.options]
-
- self.patch_full_qualified_refs()
- self.create_notification_types()
- self.create_type_definitions()
- self.generate_used_types()
-
-
- def read_protocol_file(self, file_name):
- input_file = open(file_name, "r")
- parsed_json = pdl.loads(input_file.read(), file_name)
- input_file.close()
- version = parsed_json["version"]["major"] + "." + parsed_json["version"]["minor"]
- domains = []
- for domain in parsed_json["domains"]:
- domains.append(domain["domain"])
- domain["version"] = version
- self.json_api["domains"] += parsed_json["domains"]
- return domains
-
-
- def patch_full_qualified_refs(self):
- def patch_full_qualified_refs_in_domain(json, domain_name):
- if isinstance(json, list):
- for item in json:
- patch_full_qualified_refs_in_domain(item, domain_name)
- if not isinstance(json, dict):
- return
- for key in json:
- if key == "type" and json[key] == "string":
- json[key] = domain_name + ".string"
- if key != "$ref":
- patch_full_qualified_refs_in_domain(json[key], domain_name)
- continue
- if json["$ref"].find(".") == -1:
- json["$ref"] = domain_name + "." + json["$ref"]
- return
-
- for domain in self.json_api["domains"]:
- patch_full_qualified_refs_in_domain(domain, domain["domain"])
-
-
- def all_references(self, json):
- refs = set()
- if isinstance(json, list):
- for item in json:
- refs |= self.all_references(item)
- if not isinstance(json, dict):
- return refs
- for key in json:
- if key != "$ref":
- refs |= self.all_references(json[key])
- else:
- refs.add(json["$ref"])
- return refs
-
- def generate_used_types(self):
- all_refs = set()
- for domain in self.json_api["domains"]:
- domain_name = domain["domain"]
- if "commands" in domain:
- for command in domain["commands"]:
- if self.generate_command(domain_name, command["name"]):
- all_refs |= self.all_references(command)
- if "events" in domain:
- for event in domain["events"]:
- if self.generate_event(domain_name, event["name"]):
- all_refs |= self.all_references(event)
- all_refs.add(domain_name + "." + to_title_case(event["name"]) + "Notification")
-
- dependencies = self.generate_type_dependencies()
- queue = set(all_refs)
- while len(queue):
- ref = queue.pop()
- if ref in dependencies:
- queue |= dependencies[ref] - all_refs
- all_refs |= dependencies[ref]
- self.used_types = all_refs
-
-
- def generate_type_dependencies(self):
- dependencies = dict()
- domains_with_types = (x for x in self.json_api["domains"] if "types" in x)
- for domain in domains_with_types:
- domain_name = domain["domain"]
- for type in domain["types"]:
- related_types = self.all_references(type)
- if len(related_types):
- dependencies[domain_name + "." + type["id"]] = related_types
- return dependencies
-
-
- def create_notification_types(self):
- for domain in self.json_api["domains"]:
- if "events" in domain:
- for event in domain["events"]:
- event_type = dict()
- event_type["description"] = "Wrapper for notification params"
- event_type["type"] = "object"
- event_type["id"] = to_title_case(event["name"]) + "Notification"
- if "parameters" in event:
- event_type["properties"] = copy.deepcopy(event["parameters"])
- if "types" not in domain:
- domain["types"] = list()
- domain["types"].append(event_type)
-
-
- def create_type_definitions(self):
- imported_namespace = "::".join(self.config.imported.namespace) if self.config.imported else ""
- self.type_definitions = {}
- self.type_definitions["number"] = create_primitive_type_definition("number")
- self.type_definitions["integer"] = create_primitive_type_definition("integer")
- self.type_definitions["boolean"] = create_primitive_type_definition("boolean")
- self.type_definitions["object"] = create_object_type_definition()
- self.type_definitions["any"] = create_any_type_definition()
- self.type_definitions["binary"] = create_binary_type_definition()
- for domain in self.json_api["domains"]:
- self.type_definitions[domain["domain"] + ".string"] = create_string_type_definition()
- self.type_definitions[domain["domain"] + ".binary"] = create_binary_type_definition()
- if not ("types" in domain):
- continue
- for type in domain["types"]:
- type_name = domain["domain"] + "." + type["id"]
- if type["type"] == "object" and domain["domain"] in self.imported_domains:
- self.type_definitions[type_name] = create_imported_type_definition(domain["domain"], type, imported_namespace)
- elif type["type"] == "object":
- self.type_definitions[type_name] = create_user_type_definition(domain["domain"], type)
- elif type["type"] == "array":
- self.type_definitions[type_name] = self.resolve_type(type)
- elif type["type"] == domain["domain"] + ".string":
- self.type_definitions[type_name] = create_string_type_definition()
- elif type["type"] == domain["domain"] + ".binary":
- self.type_definitions[type_name] = create_binary_type_definition()
- else:
- self.type_definitions[type_name] = create_primitive_type_definition(type["type"])
-
-
- def check_options(self, options, domain, name, include_attr, exclude_attr, default):
- for rule in options:
- if rule.domain != domain:
- continue
- if include_attr and hasattr(rule, include_attr):
- return name in getattr(rule, include_attr)
- if exclude_attr and hasattr(rule, exclude_attr):
- return name not in getattr(rule, exclude_attr)
- return default
- return False
-
-
- # ---- Begin of methods exposed to generator
-
-
- def type_definition(self, name):
- return self.type_definitions[name]
-
-
- def resolve_type(self, prop):
- if "$ref" in prop:
- return self.type_definitions[prop["$ref"]]
- if prop["type"] == "array":
- return wrap_array_definition(self.resolve_type(prop["items"]))
- return self.type_definitions[prop["type"]]
+ def __init__(self, config):
+ self.config = config
+ self.json_api = {"domains": []}
+ self.imported_domains = []
+ self.exported_domains = []
+ self.generate_domains = self.read_protocol_file(config.protocol.path)
- def generate_command(self, domain, command):
- if not self.config.protocol.options:
- return domain in self.generate_domains
- return self.check_options(self.config.protocol.options, domain, command, "include", "exclude", True)
+ if config.protocol.options:
+ self.generate_domains = [rule.domain for rule in config.protocol.options]
+ self.exported_domains = [rule.domain for rule in config.protocol.options
+ if hasattr(rule, "exported")]
-
- def generate_event(self, domain, event):
- if not self.config.protocol.options:
- return domain in self.generate_domains
- return self.check_options(self.config.protocol.options, domain, event, "include_events", "exclude_events", True)
-
-
- def generate_type(self, domain, typename):
- return domain + "." + typename in self.used_types
-
-
- def is_async_command(self, domain, command):
- if not self.config.protocol.options:
- return False
- return self.check_options(self.config.protocol.options, domain, command, "async", None, False)
-
-
- def is_exported(self, domain, name):
- if not self.config.protocol.options:
- return False
- return self.check_options(self.config.protocol.options, domain, name, "exported", None, False)
-
-
- def is_imported(self, domain, name):
- if not self.config.imported:
- return False
- if not self.config.imported.options:
- return domain in self.imported_domains
- return self.check_options(self.config.imported.options, domain, name, "imported", None, False)
-
-
- def is_exported_domain(self, domain):
- return domain in self.exported_domains
-
-
- def generate_disable(self, domain):
- if "commands" not in domain:
- return True
+ if config.imported:
+ self.imported_domains = self.read_protocol_file(config.imported.path)
+ if config.imported.options:
+ self.imported_domains = [rule.domain
+ for rule in config.imported.options]
+
+ self.patch_full_qualified_refs()
+ self.create_notification_types()
+ self.create_type_definitions()
+ self.generate_used_types()
+
+ def read_protocol_file(self, file_name):
+ input_file = open(file_name, "r")
+ parsed_json = pdl.loads(input_file.read(), file_name)
+ input_file.close()
+ version = '%s.%s' % (parsed_json["version"]["major"],
+ parsed_json["version"]["minor"])
+ domains = []
+ for domain in parsed_json["domains"]:
+ domains.append(domain["domain"])
+ domain["version"] = version
+ self.json_api["domains"] += parsed_json["domains"]
+ return domains
+
+ def patch_full_qualified_refs(self):
+ def patch_full_qualified_refs_in_domain(json, domain_name):
+ if isinstance(json, list):
+ for item in json:
+ patch_full_qualified_refs_in_domain(item, domain_name)
+ if not isinstance(json, dict):
+ return
+ for key in json:
+ if key == "type" and json[key] == "string":
+ json[key] = domain_name + ".string"
+ if key != "$ref":
+ patch_full_qualified_refs_in_domain(json[key], domain_name)
+ continue
+ if json["$ref"].find(".") == -1:
+ json["$ref"] = domain_name + "." + json["$ref"]
+ return
+
+ for domain in self.json_api["domains"]:
+ patch_full_qualified_refs_in_domain(domain, domain["domain"])
+
+ def all_references(self, json):
+ refs = set()
+ if isinstance(json, list):
+ for item in json:
+ refs |= self.all_references(item)
+ if not isinstance(json, dict):
+ return refs
+ for key in json:
+ if key != "$ref":
+ refs |= self.all_references(json[key])
+ else:
+ refs.add(json["$ref"])
+ return refs
+
+ def generate_used_types(self):
+ all_refs = set()
+ for domain in self.json_api["domains"]:
+ domain_name = domain["domain"]
+ if "commands" in domain:
for command in domain["commands"]:
- if command["name"] == "disable" and self.generate_command(domain["domain"], "disable"):
- return False
- return True
-
+ if self.generate_command(domain_name, command["name"]):
+ all_refs |= self.all_references(command)
+ if "events" in domain:
+ for event in domain["events"]:
+ if self.generate_event(domain_name, event["name"]):
+ all_refs |= self.all_references(event)
+ all_refs.add('%s.%sNotification' % (domain_name,
+ to_title_case(event["name"])))
+
+ dependencies = self.generate_type_dependencies()
+ queue = set(all_refs)
+ while len(queue):
+ ref = queue.pop()
+ if ref in dependencies:
+ queue |= dependencies[ref] - all_refs
+ all_refs |= dependencies[ref]
+ self.used_types = all_refs
+
+ def generate_type_dependencies(self):
+ dependencies = dict()
+ domains_with_types = (x for x in self.json_api["domains"] if "types" in x)
+ for domain in domains_with_types:
+ domain_name = domain["domain"]
+ for type in domain["types"]:
+ related_types = self.all_references(type)
+ if len(related_types):
+ dependencies[domain_name + "." + type["id"]] = related_types
+ return dependencies
+
+ def create_notification_types(self):
+ for domain in self.json_api["domains"]:
+ if "events" in domain:
+ for event in domain["events"]:
+ event_type = dict()
+ event_type["description"] = "Wrapper for notification params"
+ event_type["type"] = "object"
+ event_type["id"] = to_title_case(event["name"]) + "Notification"
+ if "parameters" in event:
+ event_type["properties"] = copy.deepcopy(event["parameters"])
+ if "types" not in domain:
+ domain["types"] = list()
+ domain["types"].append(event_type)
+
+ def create_type_definitions(self):
+ imported_namespace = ""
+ if self.config.imported:
+ imported_namespace = "::".join(self.config.imported.namespace)
+ self.type_definitions = {}
+ self.type_definitions["number"] = create_primitive_type_definition("number")
+ self.type_definitions["integer"] = create_primitive_type_definition("integer")
+ self.type_definitions["boolean"] = create_primitive_type_definition("boolean")
+ self.type_definitions["object"] = create_object_type_definition()
+ self.type_definitions["any"] = create_any_type_definition()
+ self.type_definitions["binary"] = create_binary_type_definition()
+ for domain in self.json_api["domains"]:
+ self.type_definitions[domain["domain"] + ".string"] = (
+ create_string_type_definition())
+ self.type_definitions[domain["domain"] + ".binary"] = (
+ create_binary_type_definition())
+ if not ("types" in domain):
+ continue
+ for type in domain["types"]:
+ type_name = domain["domain"] + "." + type["id"]
+ if type["type"] == "object" and domain["domain"] in self.imported_domains:
+ self.type_definitions[type_name] = create_imported_type_definition(
+ domain["domain"], type, imported_namespace)
+ elif type["type"] == "object":
+ self.type_definitions[type_name] = create_user_type_definition(
+ domain["domain"], type)
+ elif type["type"] == "array":
+ self.type_definitions[type_name] = self.resolve_type(type)
+ elif type["type"] == domain["domain"] + ".string":
+ self.type_definitions[type_name] = create_string_type_definition()
+ elif type["type"] == domain["domain"] + ".binary":
+ self.type_definitions[type_name] = create_binary_type_definition()
+ else:
+ self.type_definitions[type_name] = create_primitive_type_definition(
+ type["type"])
+
+ def check_options(self, options, domain, name, include_attr, exclude_attr,
+ default):
+ for rule in options:
+ if rule.domain != domain:
+ continue
+ if include_attr and hasattr(rule, include_attr):
+ return name in getattr(rule, include_attr)
+ if exclude_attr and hasattr(rule, exclude_attr):
+ return name not in getattr(rule, exclude_attr)
+ return default
+ return False
+
+
+ # ---- Begin of methods exposed to generator
+
+ def type_definition(self, name):
+ return self.type_definitions[name]
+
+ def resolve_type(self, prop):
+ if "$ref" in prop:
+ return self.type_definitions[prop["$ref"]]
+ if prop["type"] == "array":
+ return wrap_array_definition(self.resolve_type(prop["items"]))
+ return self.type_definitions[prop["type"]]
+
+ def generate_command(self, domain, command):
+ if not self.config.protocol.options:
+ return domain in self.generate_domains
+ return self.check_options(self.config.protocol.options, domain, command,
+ "include", "exclude", True)
+
+ def generate_event(self, domain, event):
+ if not self.config.protocol.options:
+ return domain in self.generate_domains
+ return self.check_options(self.config.protocol.options, domain, event,
+ "include_events", "exclude_events", True)
+
+ def generate_type(self, domain, typename):
+ return domain + "." + typename in self.used_types
+
+ def is_async_command(self, domain, command):
+ if not self.config.protocol.options:
+ return False
+ return self.check_options(self.config.protocol.options, domain, command,
+ "async", None, False)
+
+ def is_exported(self, domain, name):
+ if not self.config.protocol.options:
+ return False
+ return self.check_options(self.config.protocol.options, domain, name,
+ "exported", None, False)
+
+ def is_imported(self, domain, name):
+ if not self.config.imported:
+ return False
+ if not self.config.imported.options:
+ return domain in self.imported_domains
+ return self.check_options(self.config.imported.options, domain, name,
+ "imported", None, False)
+
+ def is_exported_domain(self, domain):
+ return domain in self.exported_domains
+
+ def generate_disable(self, domain):
+ if "commands" not in domain:
+ return True
+ for command in domain["commands"]:
+ if command["name"] == "disable" and self.generate_command(
+ domain["domain"], "disable"):
+ return False
+ return True
- def is_imported_dependency(self, domain):
- return domain in self.generate_domains or domain in self.imported_domains
+ def is_imported_dependency(self, domain):
+ return domain in self.generate_domains or domain in self.imported_domains
def main():
- jinja_dir, config_file, config = read_config()
-
- protocol = Protocol(config)
-
- if not config.exported and len(protocol.exported_domains):
- sys.stderr.write("Domains [%s] are exported, but config is missing export entry\n\n" % ", ".join(protocol.exported_domains))
- exit(1)
+ jinja_dir, config_file, config = read_config()
+
+ protocol = Protocol(config)
+
+ if not config.exported and len(protocol.exported_domains):
+ sys.stderr.write(("Domains [%s] are exported, but config is missing export "
+ "entry\n\n") % ", ".join(protocol.exported_domains))
+ exit(1)
+
+ if not os.path.exists(config.protocol.output):
+ os.mkdir(config.protocol.output)
+ if len(protocol.exported_domains) and not os.path.exists(
+ config.exported.output):
+ os.mkdir(config.exported.output)
+ jinja_env = initialize_jinja_env(jinja_dir, config.protocol.output, config)
+
+ inputs = []
+ inputs.append(__file__)
+ inputs.append(config_file)
+ inputs.append(config.protocol.path)
+ if config.imported:
+ inputs.append(config.imported.path)
+ templates_dir = os.path.join(module_path, "templates")
+ inputs.append(os.path.join(templates_dir, "TypeBuilder_h.template"))
+ inputs.append(os.path.join(templates_dir, "TypeBuilder_cpp.template"))
+ inputs.append(os.path.join(templates_dir, "Exported_h.template"))
+ inputs.append(os.path.join(templates_dir, "Imported_h.template"))
+
+ h_template = jinja_env.get_template("templates/TypeBuilder_h.template")
+ cpp_template = jinja_env.get_template("templates/TypeBuilder_cpp.template")
+ exported_template = jinja_env.get_template("templates/Exported_h.template")
+ imported_template = jinja_env.get_template("templates/Imported_h.template")
+
+ outputs = dict()
+
+ for domain in protocol.json_api["domains"]:
+ class_name = domain["domain"]
+ file_name = config.protocol.file_name_prefix + class_name
+ template_context = {
+ "protocol": protocol,
+ "config": config,
+ "domain": domain,
+ "join_arrays": join_arrays,
+ "format_include": functools.partial(format_include, config),
+ "format_domain_include": functools.partial(format_domain_include, config),
+ }
- if not os.path.exists(config.protocol.output):
- os.mkdir(config.protocol.output)
- if len(protocol.exported_domains) and not os.path.exists(config.exported.output):
- os.mkdir(config.exported.output)
- jinja_env = initialize_jinja_env(jinja_dir, config.protocol.output, config)
+ if domain["domain"] in protocol.generate_domains:
+ outputs[os.path.join(config.protocol.output, to_file_name(
+ config, file_name + ".h"))] = h_template.render(template_context)
+ outputs[os.path.join(config.protocol.output, to_file_name(
+ config, file_name + ".cpp"))] = cpp_template.render(template_context)
+ if domain["domain"] in protocol.exported_domains:
+ outputs[os.path.join(config.exported.output, to_file_name(
+ config, file_name + ".h"))] = exported_template.render(
+ template_context)
+ if domain["domain"] in protocol.imported_domains:
+ outputs[os.path.join(config.protocol.output, to_file_name(
+ config, file_name + ".h"))] = imported_template.render(
+ template_context)
+
+ if config.lib:
+ template_context = {
+ "config": config,
+ "format_include": functools.partial(format_include, config),
+ }
- inputs = []
- inputs.append(__file__)
- inputs.append(config_file)
- inputs.append(config.protocol.path)
- if config.imported:
- inputs.append(config.imported.path)
- templates_dir = os.path.join(module_path, "templates")
- inputs.append(os.path.join(templates_dir, "TypeBuilder_h.template"))
- inputs.append(os.path.join(templates_dir, "TypeBuilder_cpp.template"))
- inputs.append(os.path.join(templates_dir, "Exported_h.template"))
- inputs.append(os.path.join(templates_dir, "Imported_h.template"))
-
- h_template = jinja_env.get_template("templates/TypeBuilder_h.template")
- cpp_template = jinja_env.get_template("templates/TypeBuilder_cpp.template")
- exported_template = jinja_env.get_template("templates/Exported_h.template")
- imported_template = jinja_env.get_template("templates/Imported_h.template")
-
- outputs = dict()
-
- for domain in protocol.json_api["domains"]:
- class_name = domain["domain"]
- file_name = config.protocol.file_name_prefix + class_name
- template_context = {
- "protocol": protocol,
- "config": config,
- "domain": domain,
- "join_arrays": join_arrays,
- "format_include": functools.partial(format_include, config),
- "format_domain_include": functools.partial(format_domain_include, config),
- }
-
- if domain["domain"] in protocol.generate_domains:
- outputs[os.path.join(config.protocol.output, to_file_name(config, file_name + ".h"))] = h_template.render(template_context)
- outputs[os.path.join(config.protocol.output, to_file_name(config, file_name + ".cpp"))] = cpp_template.render(template_context)
- if domain["domain"] in protocol.exported_domains:
- outputs[os.path.join(config.exported.output, to_file_name(config, file_name + ".h"))] = exported_template.render(template_context)
- if domain["domain"] in protocol.imported_domains:
- outputs[os.path.join(config.protocol.output, to_file_name(config, file_name + ".h"))] = imported_template.render(template_context)
-
- if config.lib:
- template_context = {
- "config": config,
- "format_include": functools.partial(format_include, config),
- }
-
- lib_templates_dir = os.path.join(module_path, "lib")
- # Note these should be sorted in the right order.
- # TODO(dgozman): sort them programmatically based on commented includes.
- protocol_h_templates = [
- "ErrorSupport_h.template",
- "Values_h.template",
- "Object_h.template",
- "ValueConversions_h.template",
- "Maybe_h.template",
- "Array_h.template",
- "DispatcherBase_h.template",
- "Parser_h.template",
- "encoding_h.template",
- ]
-
- protocol_cpp_templates = [
- "Protocol_cpp.template",
- "ErrorSupport_cpp.template",
- "Values_cpp.template",
- "Object_cpp.template",
- "DispatcherBase_cpp.template",
- "Parser_cpp.template",
- "encoding_cpp.template",
- ]
-
- forward_h_templates = [
- "Forward_h.template",
- "Allocator_h.template",
- "FrontendChannel_h.template",
- ]
-
- base_string_adapter_h_templates = [
- "base_string_adapter_h.template",
- ]
-
- base_string_adapter_cc_templates = [
- "base_string_adapter_cc.template",
- ]
-
- def generate_lib_file(file_name, template_files):
- parts = []
- for template_file in template_files:
- inputs.append(os.path.join(lib_templates_dir, template_file))
- template = jinja_env.get_template("lib/" + template_file)
- parts.append(template.render(template_context))
- outputs[file_name] = "\n\n".join(parts)
-
- generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "Forward.h")), forward_h_templates)
- generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "Protocol.h")), protocol_h_templates)
- generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "Protocol.cpp")), protocol_cpp_templates)
- generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "base_string_adapter.h")), base_string_adapter_h_templates)
- generate_lib_file(os.path.join(config.lib.output, to_file_name(config, "base_string_adapter.cc")), base_string_adapter_cc_templates)
-
- # Make gyp / make generatos happy, otherwise make rebuilds world.
- inputs_ts = max(map(os.path.getmtime, inputs))
- up_to_date = True
- for output_file in outputs.keys():
- if not os.path.exists(output_file) or os.path.getmtime(output_file) < inputs_ts:
- up_to_date = False
- break
- if up_to_date:
- sys.exit()
-
- for file_name, content in outputs.items():
- out_file = open(file_name, "w")
- out_file.write(content)
- out_file.close()
-
-
-main()
+ lib_templates_dir = os.path.join(module_path, "lib")
+ # Note these should be sorted in the right order.
+ # TODO(dgozman): sort them programmatically based on commented includes.
+ protocol_h_templates = [
+ "ErrorSupport_h.template",
+ "Values_h.template",
+ "Object_h.template",
+ "ValueConversions_h.template",
+ "DispatcherBase_h.template",
+ "Parser_h.template",
+ ]
+
+ protocol_cpp_templates = [
+ "Protocol_cpp.template",
+ "ErrorSupport_cpp.template",
+ "Values_cpp.template",
+ "Object_cpp.template",
+ "DispatcherBase_cpp.template",
+ "Parser_cpp.template",
+ ]
+
+ forward_h_templates = [
+ "Forward_h.template",
+ "FrontendChannel_h.template",
+ ]
+
+ base_string_adapter_h_templates = [
+ "base_string_adapter_h.template",
+ ]
+
+ base_string_adapter_cc_templates = [
+ "base_string_adapter_cc.template",
+ ]
+
+ def generate_lib_file(file_name, template_files):
+ parts = []
+ for template_file in template_files:
+ inputs.append(os.path.join(lib_templates_dir, template_file))
+ template = jinja_env.get_template("lib/" + template_file)
+ parts.append(template.render(template_context))
+ outputs[file_name] = "\n\n".join(parts)
+
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(
+ config, "Forward.h")), forward_h_templates)
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(
+ config, "Protocol.h")), protocol_h_templates)
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(
+ config, "Protocol.cpp")), protocol_cpp_templates)
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(
+ config, "base_string_adapter.h")), base_string_adapter_h_templates)
+ generate_lib_file(os.path.join(config.lib.output, to_file_name(
+ config, "base_string_adapter.cc")), base_string_adapter_cc_templates)
+
+ # Make gyp / make generatos happy, otherwise make rebuilds world.
+ inputs_ts = max(map(os.path.getmtime, inputs))
+ up_to_date = True
+ for output_file in outputs.keys():
+ if (not os.path.exists(output_file)
+ or os.path.getmtime(output_file) < inputs_ts):
+ up_to_date = False
+ break
+ if up_to_date:
+ sys.exit()
+
+ for file_name, content in outputs.items():
+ out_file = open(file_name, "w")
+ out_file.write(content)
+ out_file.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/v8/third_party/inspector_protocol/concatenate_protocols.py b/deps/v8/third_party/inspector_protocol/concatenate_protocols.py
index e9f448efe7..42ecc7f9cf 100755
--- a/deps/v8/third_party/inspector_protocol/concatenate_protocols.py
+++ b/deps/v8/third_party/inspector_protocol/concatenate_protocols.py
@@ -7,33 +7,36 @@ import os.path
import sys
try:
- import json
+ import json
except ImportError:
- import simplejson as json
+ import simplejson as json
import pdl
def main(argv):
- if len(argv) < 1:
- sys.stderr.write("Usage: %s <protocol-1> [<protocol-2> [, <protocol-3>...]] <output-file>\n" % sys.argv[0])
- return 1
-
- domains = []
- version = None
- for protocol in argv[:-1]:
- file_name = os.path.normpath(protocol)
- if not os.path.isfile(file_name):
- sys.stderr.write("Cannot find %s\n" % file_name)
- return 1
- input_file = open(file_name, "r")
- parsed_json = pdl.loads(input_file.read(), file_name)
- domains += parsed_json["domains"]
- version = parsed_json["version"]
-
- output_file = open(argv[-1], "w")
- json.dump({"version": version, "domains": domains}, output_file, indent=4, sort_keys=False, separators=(',', ': '))
- output_file.close()
+ if len(argv) < 1:
+ sys.stderr.write(
+ "Usage: %s <protocol-1> [<protocol-2> [, <protocol-3>...]] "
+ "<output-file>\n" % sys.argv[0])
+ return 1
+
+ domains = []
+ version = None
+ for protocol in argv[:-1]:
+ file_name = os.path.normpath(protocol)
+ if not os.path.isfile(file_name):
+ sys.stderr.write("Cannot find %s\n" % file_name)
+ return 1
+ input_file = open(file_name, "r")
+ parsed_json = pdl.loads(input_file.read(), file_name)
+ domains += parsed_json["domains"]
+ version = parsed_json["version"]
+
+ output_file = open(argv[-1], "w")
+ json.dump({"version": version, "domains": domains}, output_file,
+ indent=4, sort_keys=False, separators=(',', ': '))
+ output_file.close()
if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+ sys.exit(main(sys.argv[1:]))
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding.cc b/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
index 649cc060f5..1da7c45aa3 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
@@ -185,11 +185,10 @@ namespace internals {
// |type| is the major type as specified in RFC 7049 Section 2.1.
// |value| is the payload (e.g. for MajorType::UNSIGNED) or is the size
// (e.g. for BYTE_STRING).
-// If successful, returns the number of bytes read. Otherwise returns -1.
-// TODO(johannes): change return type to size_t and use 0 for error.
-int8_t ReadTokenStart(span<uint8_t> bytes, MajorType* type, uint64_t* value) {
+// If successful, returns the number of bytes read. Otherwise returns 0.
+size_t ReadTokenStart(span<uint8_t> bytes, MajorType* type, uint64_t* value) {
if (bytes.empty())
- return -1;
+ return 0;
uint8_t initial_byte = bytes[0];
*type = MajorType((initial_byte & kMajorTypeMask) >> kMajorTypeBitShift);
@@ -203,32 +202,32 @@ int8_t ReadTokenStart(span<uint8_t> bytes, MajorType* type, uint64_t* value) {
if (additional_information == kAdditionalInformation1Byte) {
// Values 24-255 are encoded with one initial byte, followed by the value.
if (bytes.size() < 2)
- return -1;
+ return 0;
*value = ReadBytesMostSignificantByteFirst<uint8_t>(bytes.subspan(1));
return 2;
}
if (additional_information == kAdditionalInformation2Bytes) {
// Values 256-65535: 1 initial byte + 2 bytes payload.
if (bytes.size() < 1 + sizeof(uint16_t))
- return -1;
+ return 0;
*value = ReadBytesMostSignificantByteFirst<uint16_t>(bytes.subspan(1));
return 3;
}
if (additional_information == kAdditionalInformation4Bytes) {
// 32 bit uint: 1 initial byte + 4 bytes payload.
if (bytes.size() < 1 + sizeof(uint32_t))
- return -1;
+ return 0;
*value = ReadBytesMostSignificantByteFirst<uint32_t>(bytes.subspan(1));
return 5;
}
if (additional_information == kAdditionalInformation8Bytes) {
// 64 bit uint: 1 initial byte + 8 bytes payload.
if (bytes.size() < 1 + sizeof(uint64_t))
- return -1;
+ return 0;
*value = ReadBytesMostSignificantByteFirst<uint64_t>(bytes.subspan(1));
return 9;
}
- return -1;
+ return 0;
}
// Writes the start of a token with |type|. The |value| may indicate the size,
@@ -770,10 +769,10 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
SetToken(CBORTokenTag::NULL_VALUE, 1);
return;
case kExpectedConversionToBase64Tag: { // BINARY
- const int8_t bytes_read = internals::ReadTokenStart(
+ const size_t bytes_read = internals::ReadTokenStart(
bytes_.subspan(status_.pos + 1), &token_start_type_,
&token_start_internal_value_);
- if (bytes_read < 0 || token_start_type_ != MajorType::BYTE_STRING ||
+ if (!bytes_read || token_start_type_ != MajorType::BYTE_STRING ||
token_start_internal_value_ > kMaxValidLength) {
SetError(Error::CBOR_INVALID_BINARY);
return;
@@ -823,47 +822,47 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
return;
}
default: {
- const int8_t token_start_length = internals::ReadTokenStart(
+ const size_t bytes_read = internals::ReadTokenStart(
bytes_.subspan(status_.pos), &token_start_type_,
&token_start_internal_value_);
- const bool success = token_start_length >= 0;
switch (token_start_type_) {
case MajorType::UNSIGNED: // INT32.
// INT32 is a signed int32 (int32 makes sense for the
// inspector_protocol, it's not a CBOR limitation), so we check
// against the signed max, so that the allowable values are
// 0, 1, 2, ... 2^31 - 1.
- if (!success || std::numeric_limits<int32_t>::max() <
- token_start_internal_value_) {
+ if (!bytes_read || std::numeric_limits<int32_t>::max() <
+ token_start_internal_value_) {
SetError(Error::CBOR_INVALID_INT32);
return;
}
- SetToken(CBORTokenTag::INT32, token_start_length);
+ SetToken(CBORTokenTag::INT32, bytes_read);
return;
case MajorType::NEGATIVE: { // INT32.
// INT32 is a signed int32 (int32 makes sense for the
// inspector_protocol, it's not a CBOR limitation); in CBOR, the
// negative values for INT32 are represented as NEGATIVE, that is, -1
// INT32 is represented as 1 << 5 | 0 (major type 1, additional info
- // value 0). The minimal allowed INT32 value in our protocol is
- // std::numeric_limits<int32_t>::min(). We check for it by directly
- // checking the payload against the maximal allowed signed (!) int32
- // value.
- if (!success || token_start_internal_value_ >
- std::numeric_limits<int32_t>::max()) {
+ // value 0).
+ // The represented allowed values range is -1 to -2^31.
+ // They are mapped into the encoded range of 0 to 2^31-1.
+ // We check the payload in token_start_internal_value_ against
+ // that range (2^31-1 is also known as
+ // std::numeric_limits<int32_t>::max()).
+ if (!bytes_read || token_start_internal_value_ >
+ std::numeric_limits<int32_t>::max()) {
SetError(Error::CBOR_INVALID_INT32);
return;
}
- SetToken(CBORTokenTag::INT32, token_start_length);
+ SetToken(CBORTokenTag::INT32, bytes_read);
return;
}
case MajorType::STRING: { // STRING8.
- if (!success || token_start_internal_value_ > kMaxValidLength) {
+ if (!bytes_read || token_start_internal_value_ > kMaxValidLength) {
SetError(Error::CBOR_INVALID_STRING8);
return;
}
- uint64_t token_byte_length =
- token_start_internal_value_ + token_start_length;
+ uint64_t token_byte_length = token_start_internal_value_ + bytes_read;
if (token_byte_length > remaining_bytes) {
SetError(Error::CBOR_INVALID_STRING8);
return;
@@ -875,13 +874,12 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
case MajorType::BYTE_STRING: { // STRING16.
// Length must be divisible by 2 since UTF16 is 2 bytes per
// character, hence the &1 check.
- if (!success || token_start_internal_value_ > kMaxValidLength ||
+ if (!bytes_read || token_start_internal_value_ > kMaxValidLength ||
token_start_internal_value_ & 1) {
SetError(Error::CBOR_INVALID_STRING16);
return;
}
- uint64_t token_byte_length =
- token_start_internal_value_ + token_start_length;
+ uint64_t token_byte_length = token_start_internal_value_ + bytes_read;
if (token_byte_length > remaining_bytes) {
SetError(Error::CBOR_INVALID_STRING16);
return;
@@ -1372,7 +1370,7 @@ class JSONEncoder : public StreamingParserHandler {
// If we have enough bytes in our input, decode the remaining ones
// belonging to this Unicode character into |codepoint|.
- if (ii + num_bytes_left > chars.size())
+ if (ii + num_bytes_left >= chars.size())
continue;
while (num_bytes_left > 0) {
c = chars[++ii];
@@ -1386,7 +1384,7 @@ class JSONEncoder : public StreamingParserHandler {
// Disallow overlong encodings for ascii characters, as these
// would include " and other characters significant to JSON
// string termination / control.
- if (codepoint < 0x7f)
+ if (codepoint <= 0x7f)
continue;
// Invalid in UTF8, and can't be represented in UTF16 anyway.
if (codepoint > 0x10ffff)
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding.h b/deps/v8/third_party/inspector_protocol/encoding/encoding.h
index 90916d42b3..340667f604 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding.h
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding.h
@@ -5,6 +5,7 @@
#ifndef V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_H_
#define V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_H_
+#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
@@ -14,6 +15,19 @@
#include <vector>
namespace v8_inspector_protocol_encoding {
+// This library is designed to be portable. The only allowed dependency
+// are the C/C++ standard libraries, up to C++11. We support both 32 bit
+// and 64 architectures.
+//
+// Types used below:
+// uint8_t: a byte, e.g. for raw bytes or UTF8 characters
+// uint16_t: two bytes, e.g. for UTF16 characters
+// For input parameters:
+// span<uint8_t>: pointer to bytes and length
+// span<uint16_t>: pointer to UTF16 chars and length
+// For output parameters:
+// std::vector<uint8_t> - Owned segment of bytes / utf8 characters and length.
+// std::string - Same, for compatibility, even though char is signed.
// =============================================================================
// span - sequence of bytes
@@ -72,6 +86,22 @@ inline span<uint8_t> SpanFrom(const std::string& v) {
return span<uint8_t>(reinterpret_cast<const uint8_t*>(v.data()), v.size());
}
+// Less than / equality comparison functions for sorting / searching for byte
+// spans. These are similar to absl::string_view's < and == operators.
+inline bool SpanLessThan(span<uint8_t> x, span<uint8_t> y) noexcept {
+ auto min_size = std::min(x.size(), y.size());
+ const int r = min_size == 0 ? 0 : memcmp(x.data(), y.data(), min_size);
+ return (r < 0) || (r == 0 && x.size() < y.size());
+}
+
+inline bool SpanEquals(span<uint8_t> x, span<uint8_t> y) noexcept {
+ auto len = x.size();
+ if (len != y.size())
+ return false;
+ return x.data() == y.data() || len == 0 ||
+ std::memcmp(x.data(), y.data(), len) == 0;
+}
+
// =============================================================================
// Status and Error codes
// =============================================================================
@@ -427,7 +457,7 @@ Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
std::string* cbor);
namespace internals { // Exposed only for writing tests.
-int8_t ReadTokenStart(span<uint8_t> bytes,
+size_t ReadTokenStart(span<uint8_t> bytes,
cbor::MajorType* type,
uint64_t* value);
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc b/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
index 338d1ece10..768e19bb9e 100644
--- a/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
@@ -121,6 +121,28 @@ TEST(SpanFromTest, FromConstCharAndLiteral) {
EXPECT_EQ(3u, SpanFrom("foo").size());
}
+TEST(SpanComparisons, ByteWiseLexicographicalOrder) {
+ // Compare the empty span.
+ EXPECT_FALSE(SpanLessThan(span<uint8_t>(), span<uint8_t>()));
+ EXPECT_TRUE(SpanEquals(span<uint8_t>(), span<uint8_t>()));
+
+ // Compare message with itself.
+ std::string msg = "Hello, world";
+ EXPECT_FALSE(SpanLessThan(SpanFrom(msg), SpanFrom(msg)));
+ EXPECT_TRUE(SpanEquals(SpanFrom(msg), SpanFrom(msg)));
+
+ // Compare message and copy.
+ EXPECT_FALSE(SpanLessThan(SpanFrom(msg), SpanFrom(std::string(msg))));
+ EXPECT_TRUE(SpanEquals(SpanFrom(msg), SpanFrom(std::string(msg))));
+
+ // Compare two messages. |lesser_msg| < |msg| because of the first
+ // byte ('A' < 'H').
+ std::string lesser_msg = "A lesser message.";
+ EXPECT_TRUE(SpanLessThan(SpanFrom(lesser_msg), SpanFrom(msg)));
+ EXPECT_FALSE(SpanLessThan(SpanFrom(msg), SpanFrom(lesser_msg)));
+ EXPECT_FALSE(SpanEquals(SpanFrom(msg), SpanFrom(lesser_msg)));
+}
+
// =============================================================================
// Status and Error codes
// =============================================================================
@@ -235,7 +257,9 @@ TEST(EncodeDecodeInt32Test, RoundtripsInt32Max) {
}
TEST(EncodeDecodeInt32Test, RoundtripsInt32Min) {
- // std::numeric_limits<int32_t> is encoded as a uint32 after the initial byte.
+ // std::numeric_limits<int32_t> is encoded as a uint32 (4 unsigned bytes)
+ // after the initial byte, which effectively carries the sign by
+ // designating the token as NEGATIVE.
std::vector<uint8_t> encoded;
EncodeInt32(std::numeric_limits<int32_t>::min(), &encoded);
// 1 for initial byte, 4 for the uint32.
@@ -248,6 +272,10 @@ TEST(EncodeDecodeInt32Test, RoundtripsInt32Min) {
CBORTokenizer tokenizer(SpanFrom(encoded));
EXPECT_EQ(CBORTokenTag::INT32, tokenizer.TokenTag());
EXPECT_EQ(std::numeric_limits<int32_t>::min(), tokenizer.GetInt32());
+ // It's nice to see how the min int32 value reads in hex:
+ // That is, -1 minus the unsigned payload (0x7fffffff, see above).
+ int32_t expected = -1 - 0x7fffffff;
+ EXPECT_EQ(expected, tokenizer.GetInt32());
tokenizer.Next();
EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
}
@@ -1319,6 +1347,51 @@ void WriteUTF8AsUTF16(StreamingParserHandler* writer, const std::string& utf8) {
writer->HandleString16(SpanFrom(UTF8ToUTF16(SpanFrom(utf8))));
}
+TEST(JsonEncoder, OverlongEncodings) {
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+
+ // We encode 0x7f, which is the DEL ascii character, as a 4 byte UTF8
+ // sequence. This is called an overlong encoding, because only 1 byte
+ // is needed to represent 0x7f as UTF8.
+ std::vector<uint8_t> chars = {
+ 0xf0, // Starts 4 byte utf8 sequence
+ 0x80, // continuation byte
+ 0x81, // continuation byte w/ payload bit 7 set to 1.
+ 0xbf, // continuation byte w/ payload bits 0-6 set to 11111.
+ };
+ writer->HandleString8(SpanFrom(chars));
+ EXPECT_EQ("\"\"", out); // Empty string means that 0x7f was rejected (good).
+}
+
+TEST(JsonEncoder, IncompleteUtf8Sequence) {
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+
+ writer->HandleArrayBegin(); // This emits [, which starts an array.
+
+ { // 🌎 takes four bytes to encode in UTF-8. We test with the first three;
+ // This means we're trying to emit a string that consists solely of an
+ // incomplete UTF-8 sequence. So the string in the JSON output is emtpy.
+ std::string world_utf8 = "🌎";
+ ASSERT_EQ(4u, world_utf8.size());
+ std::vector<uint8_t> chars(world_utf8.begin(), world_utf8.begin() + 3);
+ writer->HandleString8(SpanFrom(chars));
+ EXPECT_EQ("[\"\"", out); // Incomplete sequence rejected: empty string.
+ }
+
+ { // This time, the incomplete sequence is at the end of the string.
+ std::string msg = "Hello, \xF0\x9F\x8C";
+ std::vector<uint8_t> chars(msg.begin(), msg.end());
+ writer->HandleString8(SpanFrom(chars));
+ EXPECT_EQ("[\"\",\"Hello, \"", out); // Incomplete sequence dropped at end.
+ }
+}
+
TEST(JsonStdStringWriterTest, HelloWorld) {
std::string out;
Status status;
@@ -1555,6 +1628,13 @@ TEST_F(JsonParserTest, UsAsciiDelCornerCase) {
"string16: a\x7f\n"
"map end\n",
log_.str());
+
+ // We've seen an implementation of UTF16ToUTF8 which would replace the DEL
+ // character with ' ', so this simple roundtrip tests the routines in
+ // encoding_test_helper.h, to make test failures of the above easier to
+ // diagnose.
+ std::vector<uint16_t> utf16 = UTF8ToUTF16(SpanFrom(json));
+ EXPECT_EQ(json, UTF16ToUTF8(SpanFrom(utf16)));
}
TEST_F(JsonParserTest, Whitespace) {
diff --git a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
index d612fb6aeb..6e83e87d2a 100644
--- a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
+++ b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
@@ -33,17 +33,12 @@ template("inspector_protocol_generate") {
invoker.config_file,
"$inspector_protocol_dir/lib/base_string_adapter_cc.template",
"$inspector_protocol_dir/lib/base_string_adapter_h.template",
- "$inspector_protocol_dir/lib/encoding_h.template",
- "$inspector_protocol_dir/lib/encoding_cpp.template",
- "$inspector_protocol_dir/lib/Allocator_h.template",
- "$inspector_protocol_dir/lib/Array_h.template",
"$inspector_protocol_dir/lib/DispatcherBase_cpp.template",
"$inspector_protocol_dir/lib/DispatcherBase_h.template",
"$inspector_protocol_dir/lib/ErrorSupport_cpp.template",
"$inspector_protocol_dir/lib/ErrorSupport_h.template",
"$inspector_protocol_dir/lib/Forward_h.template",
"$inspector_protocol_dir/lib/FrontendChannel_h.template",
- "$inspector_protocol_dir/lib/Maybe_h.template",
"$inspector_protocol_dir/lib/Object_cpp.template",
"$inspector_protocol_dir/lib/Object_h.template",
"$inspector_protocol_dir/lib/Parser_cpp.template",
@@ -68,6 +63,8 @@ template("inspector_protocol_generate") {
rebase_path(invoker.out_dir, root_build_dir),
"--config",
rebase_path(invoker.config_file, root_build_dir),
+ "--inspector_protocol_dir",
+ "$inspector_protocol_dir",
]
if (defined(invoker.config_values)) {
diff --git a/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi b/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi
deleted file mode 100644
index d614474e69..0000000000
--- a/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2016 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'variables': {
- 'inspector_protocol_files': [
- 'lib/encoding_h.template',
- 'lib/encoding_cpp.template',
- 'lib/Allocator_h.template',
- 'lib/Array_h.template',
- 'lib/DispatcherBase_cpp.template',
- 'lib/DispatcherBase_h.template',
- 'lib/ErrorSupport_cpp.template',
- 'lib/ErrorSupport_h.template',
- 'lib/Forward_h.template',
- 'lib/FrontendChannel_h.template',
- 'lib/Maybe_h.template',
- 'lib/Object_cpp.template',
- 'lib/Object_h.template',
- 'lib/Parser_cpp.template',
- 'lib/Parser_h.template',
- 'lib/Protocol_cpp.template',
- 'lib/ValueConversions_h.template',
- 'lib/Values_cpp.template',
- 'lib/Values_h.template',
- 'templates/Exported_h.template',
- 'templates/Imported_h.template',
- 'templates/TypeBuilder_cpp.template',
- 'templates/TypeBuilder_h.template',
- 'code_generator.py',
- ]
- }
-}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template b/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
deleted file mode 100644
index 15eaaaff02..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
+++ /dev/null
@@ -1,25 +0,0 @@
-// This file is generated by Allocator_h.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef {{"_".join(config.protocol.namespace)}}_Allocator_h
-#define {{"_".join(config.protocol.namespace)}}_Allocator_h
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-enum NotNullTagEnum { NotNullLiteral };
-
-#define PROTOCOL_DISALLOW_COPY(ClassName) \
- private: \
- ClassName(const ClassName&) = delete; \
- ClassName& operator=(const ClassName&) = delete
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-
-#endif // !defined({{"_".join(config.protocol.namespace)}}_Allocator_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Array_h.template b/deps/v8/third_party/inspector_protocol/lib/Array_h.template
deleted file mode 100644
index c420a0f7e9..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/Array_h.template
+++ /dev/null
@@ -1,138 +0,0 @@
-// This file is generated by Array_h.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef {{"_".join(config.protocol.namespace)}}_Array_h
-#define {{"_".join(config.protocol.namespace)}}_Array_h
-
-//#include "ErrorSupport.h"
-//#include "Forward.h"
-//#include "ValueConversions.h"
-//#include "Values.h"
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-template<typename T>
-class Array {
-public:
- static std::unique_ptr<Array<T>> create()
- {
- return std::unique_ptr<Array<T>>(new Array<T>());
- }
-
- static std::unique_ptr<Array<T>> fromValue(protocol::Value* value, ErrorSupport* errors)
- {
- protocol::ListValue* array = ListValue::cast(value);
- if (!array) {
- errors->addError("array expected");
- return nullptr;
- }
- std::unique_ptr<Array<T>> result(new Array<T>());
- errors->push();
- for (size_t i = 0; i < array->size(); ++i) {
- errors->setName(StringUtil::fromInteger(i));
- std::unique_ptr<T> item = ValueConversions<T>::fromValue(array->at(i), errors);
- result->m_vector.push_back(std::move(item));
- }
- errors->pop();
- if (errors->hasErrors())
- return nullptr;
- return result;
- }
-
- void addItem(std::unique_ptr<T> value)
- {
- m_vector.push_back(std::move(value));
- }
-
- size_t length()
- {
- return m_vector.size();
- }
-
- T* get(size_t index)
- {
- return m_vector[index].get();
- }
-
- std::unique_ptr<protocol::ListValue> toValue()
- {
- std::unique_ptr<protocol::ListValue> result = ListValue::create();
- for (auto& item : m_vector)
- result->pushValue(ValueConversions<T>::toValue(item));
- return result;
- }
-
-private:
- std::vector<std::unique_ptr<T>> m_vector;
-};
-
-template<typename T>
-class ArrayBase {
-public:
- static std::unique_ptr<Array<T>> create()
- {
- return std::unique_ptr<Array<T>>(new Array<T>());
- }
-
- static std::unique_ptr<Array<T>> fromValue(protocol::Value* value, ErrorSupport* errors)
- {
- protocol::ListValue* array = ListValue::cast(value);
- if (!array) {
- errors->addError("array expected");
- return nullptr;
- }
- errors->push();
- std::unique_ptr<Array<T>> result(new Array<T>());
- for (size_t i = 0; i < array->size(); ++i) {
- errors->setName(StringUtil::fromInteger(i));
- T item = ValueConversions<T>::fromValue(array->at(i), errors);
- result->m_vector.push_back(item);
- }
- errors->pop();
- if (errors->hasErrors())
- return nullptr;
- return result;
- }
-
- void addItem(const T& value)
- {
- m_vector.push_back(value);
- }
-
- size_t length()
- {
- return m_vector.size();
- }
-
- T get(size_t index)
- {
- return m_vector[index];
- }
-
- std::unique_ptr<protocol::ListValue> toValue()
- {
- std::unique_ptr<protocol::ListValue> result = ListValue::create();
- for (auto& item : m_vector)
- result->pushValue(ValueConversions<T>::toValue(item));
- return result;
- }
-
-private:
- std::vector<T> m_vector;
-};
-
-template<> class Array<String> : public ArrayBase<String> {};
-template<> class Array<int> : public ArrayBase<int> {};
-template<> class Array<double> : public ArrayBase<double> {};
-template<> class Array<bool> : public ArrayBase<bool> {};
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-
-#endif // !defined({{"_".join(config.protocol.namespace)}}_Array_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
index 7d859c4f27..4aa0688adb 100644
--- a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
@@ -25,6 +25,9 @@ public:
kFallThrough = 2,
};
+ // For historical reasons, these error codes correspond to commonly used
+ // XMLRPC codes (e.g. see METHOD_NOT_FOUND in
+ // https://github.com/python/cpython/blob/master/Lib/xmlrpc/client.py).
enum ErrorCode {
kParseError = -32700,
kInvalidRequest = -32600,
diff --git a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
index ff5e685863..7cc47b4f0f 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
@@ -18,17 +18,17 @@
#include <unordered_map>
#include <unordered_set>
+#include "{{config.bindings_lib.header}}"
+
{% for namespace in config.protocol.namespace %}
namespace {{namespace}} {
{% endfor %}
-template<typename T> class Array;
class DictionaryValue;
class DispatchResponse;
class ErrorSupport;
class FundamentalValue;
class ListValue;
-template<typename T> class Maybe;
class Object;
using Response = DispatchResponse;
class SerializedValue;
@@ -36,6 +36,52 @@ class StringValue;
class UberDispatcher;
class Value;
+namespace detail {
+template <typename T>
+struct ArrayTypedef { typedef std::vector<std::unique_ptr<T>> type; };
+
+template <>
+struct ArrayTypedef<String> { typedef std::vector<String> type; };
+
+template <>
+struct ArrayTypedef<int> { typedef std::vector<int> type; };
+
+template <>
+struct ArrayTypedef<double> { typedef std::vector<double> type; };
+
+template <>
+struct ArrayTypedef<bool> { typedef std::vector<bool> type; };
+} // namespace detail
+
+template <typename T>
+using Array = typename detail::ArrayTypedef<T>::type;
+
+namespace detail {
+using {{config.bindings_lib.namespace}}::glue::detail::PtrMaybe;
+using {{config.bindings_lib.namespace}}::glue::detail::ValueMaybe;
+
+template <typename T>
+struct MaybeTypedef { typedef PtrMaybe<T> type; };
+
+template <>
+struct MaybeTypedef<bool> { typedef ValueMaybe<bool> type; };
+
+template <>
+struct MaybeTypedef<int> { typedef ValueMaybe<int> type; };
+
+template <>
+struct MaybeTypedef<double> { typedef ValueMaybe<double> type; };
+
+template <>
+struct MaybeTypedef<String> { typedef ValueMaybe<String> type; };
+
+template <>
+struct MaybeTypedef<Binary> { typedef ValueMaybe<Binary> type; };
+} // namespace detail
+
+template <typename T>
+using Maybe = typename detail::MaybeTypedef<T>::type;
+
{% for namespace in config.protocol.namespace %}
} // namespace {{namespace}}
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
deleted file mode 100644
index 22cfac6b24..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
+++ /dev/null
@@ -1,139 +0,0 @@
-// This file is generated by Maybe_h.template.
-
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef {{"_".join(config.protocol.namespace)}}_Maybe_h
-#define {{"_".join(config.protocol.namespace)}}_Maybe_h
-
-// This macro allows to test for the version of the GNU C++ compiler.
-// Note that this also applies to compilers that masquerade as GCC,
-// for example clang and the Intel C++ compiler for Linux.
-// Use like:
-// #if IP_GNUC_PREREQ(4, 3, 1)
-// ...
-// #endif
-#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
-#define IP_GNUC_PREREQ(major, minor, patchlevel) \
- ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= \
- ((major)*10000 + (minor)*100 + (patchlevel)))
-#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
-#define IP_GNUC_PREREQ(major, minor, patchlevel) \
- ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= \
- ((major)*10000 + (minor)*100 + (patchlevel)))
-#else
-#define IP_GNUC_PREREQ(major, minor, patchlevel) 0
-#endif
-
-#if defined(__mips64)
-#define IP_TARGET_ARCH_MIPS64 1
-#elif defined(__MIPSEB__) || defined(__MIPSEL__)
-#define IP_TARGET_ARCH_MIPS 1
-#endif
-
-// Allowing the use of noexcept by removing the keyword on older compilers that
-// do not support adding noexcept to default members.
-#if ((IP_GNUC_PREREQ(4, 9, 0) && !defined(IP_TARGET_ARCH_MIPS) && \
- !defined(IP_TARGET_ARCH_MIPS64)) || \
- (defined(__clang__) && __cplusplus > 201300L))
-#define IP_NOEXCEPT noexcept
-#else
-#define IP_NOEXCEPT
-#endif
-
-//#include "Forward.h"
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-template<typename T>
-class Maybe {
-public:
- Maybe() : m_value() { }
- Maybe(std::unique_ptr<T> value) : m_value(std::move(value)) { }
- Maybe(Maybe&& other) IP_NOEXCEPT : m_value(std::move(other.m_value)) {}
- void operator=(std::unique_ptr<T> value) { m_value = std::move(value); }
- T* fromJust() const { DCHECK(m_value); return m_value.get(); }
- T* fromMaybe(T* defaultValue) const { return m_value ? m_value.get() : defaultValue; }
- bool isJust() const { return !!m_value; }
- std::unique_ptr<T> takeJust() { DCHECK(m_value); return std::move(m_value); }
-private:
- std::unique_ptr<T> m_value;
-};
-
-template<typename T>
-class MaybeBase {
-public:
- MaybeBase() : m_isJust(false) { }
- MaybeBase(T value) : m_isJust(true), m_value(value) { }
- MaybeBase(MaybeBase&& other) IP_NOEXCEPT
- : m_isJust(other.m_isJust),
- m_value(std::move(other.m_value)) {}
- void operator=(T value) { m_value = value; m_isJust = true; }
- T fromJust() const { DCHECK(m_isJust); return m_value; }
- T fromMaybe(const T& defaultValue) const { return m_isJust ? m_value : defaultValue; }
- bool isJust() const { return m_isJust; }
- T takeJust() { DCHECK(m_isJust); return m_value; }
-
-protected:
- bool m_isJust;
- T m_value;
-};
-
-template<>
-class Maybe<bool> : public MaybeBase<bool> {
-public:
- Maybe() { m_value = false; }
- Maybe(bool value) : MaybeBase(value) { }
- Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
- using MaybeBase::operator=;
-};
-
-template<>
-class Maybe<int> : public MaybeBase<int> {
-public:
- Maybe() { m_value = 0; }
- Maybe(int value) : MaybeBase(value) { }
- Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
- using MaybeBase::operator=;
-};
-
-template<>
-class Maybe<double> : public MaybeBase<double> {
-public:
- Maybe() { m_value = 0; }
- Maybe(double value) : MaybeBase(value) { }
- Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
- using MaybeBase::operator=;
-};
-
-template<>
-class Maybe<String> : public MaybeBase<String> {
-public:
- Maybe() { }
- Maybe(const String& value) : MaybeBase(value) { }
- Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
- using MaybeBase::operator=;
-};
-
-template<>
-class Maybe<Binary> : public MaybeBase<Binary> {
-public:
- Maybe() { }
- Maybe(Binary value) : MaybeBase(value) { }
- Maybe(Maybe&& other) IP_NOEXCEPT : MaybeBase(std::move(other)) {}
- using MaybeBase::operator=;
-};
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-
-#undef IP_GNUC_PREREQ
-#undef IP_TARGET_ARCH_MIPS64
-#undef IP_TARGET_ARCH_MIPS
-#undef IP_NOEXCEPT
-
-#endif // !defined({{"_".join(config.protocol.namespace)}}_Maybe_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
index 2ee5b72454..63baf689c6 100644
--- a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
@@ -128,6 +128,72 @@ struct ValueConversions<Binary> {
}
};
+template<typename T>
+struct ValueConversions<std::vector<std::unique_ptr<T>>> {
+ static std::unique_ptr<std::vector<std::unique_ptr<T>>> fromValue(protocol::Value* value, ErrorSupport* errors) {
+ protocol::ListValue* array = ListValue::cast(value);
+ if (!array) {
+ errors->addError("array expected");
+ return nullptr;
+ }
+ errors->push();
+ std::unique_ptr<std::vector<std::unique_ptr<T>>> result(
+ new std::vector<std::unique_ptr<T>>());
+ result->reserve(array->size());
+ for (size_t i = 0; i < array->size(); ++i) {
+ errors->setName(StringUtil::fromInteger(i));
+ auto item = ValueConversions<T>::fromValue(array->at(i), errors);
+ result->emplace_back(std::move(item));
+ }
+ errors->pop();
+ if (errors->hasErrors())
+ return nullptr;
+ return result;
+ }
+
+ static std::unique_ptr<protocol::ListValue> toValue(std::vector<std::unique_ptr<T>>* v)
+ {
+ std::unique_ptr<protocol::ListValue> result = ListValue::create();
+ result->reserve(v->size());
+ for (auto& item : *v)
+ result->pushValue(ValueConversions<T>::toValue(item.get()));
+ return result;
+ }
+
+};
+
+template<typename T>
+struct ValueConversions<std::vector<T>> {
+ static std::unique_ptr<std::vector<T>> fromValue(protocol::Value* value, ErrorSupport* errors) {
+ protocol::ListValue* array = ListValue::cast(value);
+ if (!array) {
+ errors->addError("array expected");
+ return nullptr;
+ }
+ errors->push();
+ std::unique_ptr<std::vector<T>> result(new std::vector<T>());
+ result->reserve(array->size());
+ for (size_t i = 0; i < array->size(); ++i) {
+ errors->setName(StringUtil::fromInteger(i));
+ auto item = ValueConversions<T>::fromValue(array->at(i), errors);
+ result->emplace_back(std::move(item));
+ }
+ errors->pop();
+ if (errors->hasErrors())
+ return nullptr;
+ return result;
+ }
+
+ static std::unique_ptr<protocol::ListValue> toValue(std::vector<T>* v)
+ {
+ std::unique_ptr<protocol::ListValue> result = ListValue::create();
+ result->reserve(v->size());
+ for (auto& item : *v)
+ result->pushValue(ValueConversions<T>::toValue(item));
+ return result;
+ }
+};
+
template<>
struct ValueConversions<Value> {
static std::unique_ptr<Value> fromValue(protocol::Value* value, ErrorSupport* errors)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
index 7d3b907a26..038992f684 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
@@ -6,6 +6,8 @@
//#include "Values.h"
+#include "{{config.encoding_lib.header}}"
+
{% for namespace in config.protocol.namespace %}
namespace {{namespace}} {
{% endfor %}
@@ -64,6 +66,28 @@ void escapeStringForJSONInternal(const Char* str, unsigned len,
// to this constant.
static constexpr int kStackLimitValues = 1000;
+using {{config.encoding_lib.namespace}}::Error;
+using {{config.encoding_lib.namespace}}::Status;
+using {{config.encoding_lib.namespace}}::span;
+namespace cbor {
+using {{config.encoding_lib.namespace}}::cbor::CBORTokenTag;
+using {{config.encoding_lib.namespace}}::cbor::CBORTokenizer;
+using {{config.encoding_lib.namespace}}::cbor::EncodeBinary;
+using {{config.encoding_lib.namespace}}::cbor::EncodeDouble;
+using {{config.encoding_lib.namespace}}::cbor::EncodeFalse;
+using {{config.encoding_lib.namespace}}::cbor::EncodeFromLatin1;
+using {{config.encoding_lib.namespace}}::cbor::EncodeFromUTF16;
+using {{config.encoding_lib.namespace}}::cbor::EncodeIndefiniteLengthArrayStart;
+using {{config.encoding_lib.namespace}}::cbor::EncodeIndefiniteLengthMapStart;
+using {{config.encoding_lib.namespace}}::cbor::EncodeInt32;
+using {{config.encoding_lib.namespace}}::cbor::EncodeNull;
+using {{config.encoding_lib.namespace}}::cbor::EncodeStop;
+using {{config.encoding_lib.namespace}}::cbor::EncodeString8;
+using {{config.encoding_lib.namespace}}::cbor::EncodeTrue;
+using {{config.encoding_lib.namespace}}::cbor::EnvelopeEncoder;
+using {{config.encoding_lib.namespace}}::cbor::InitialByteForEnvelope;
+} // namespace cbor
+
// Below are three parsing routines for CBOR, which cover enough
// to roundtrip JSON messages.
std::unique_ptr<DictionaryValue> parseMap(int32_t stack_depth, cbor::CBORTokenizer* tokenizer);
@@ -138,7 +162,7 @@ std::unique_ptr<Value> parseValue(
case cbor::CBORTokenTag::STRING16: {
span<uint8_t> wire = tokenizer->GetString16WireRep();
DCHECK_EQ(wire.size() & 1, 0u);
- std::unique_ptr<Value> value = StringValue::create(StringUtil::fromUTF16(
+ std::unique_ptr<Value> value = StringValue::create(StringUtil::fromUTF16LE(
reinterpret_cast<const uint16_t*>(wire.data()), wire.size() / 2));
tokenizer->Next();
return value;
@@ -180,7 +204,7 @@ std::unique_ptr<DictionaryValue> parseMap(
} else if (tokenizer->TokenTag() == cbor::CBORTokenTag::STRING16) {
span<uint8_t> key_span = tokenizer->GetString16WireRep();
if (key_span.size() & 1) return nullptr; // UTF16 is 2 byte multiple.
- key = StringUtil::fromUTF16(
+ key = StringUtil::fromUTF16LE(
reinterpret_cast<const uint16_t*>(key_span.data()),
key_span.size() / 2);
tokenizer->Next();
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_h.template b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
index 4a2e58f4cd..4d6fde07d4 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
@@ -271,6 +271,7 @@ public:
Value* at(size_t index);
size_t size() const { return m_data.size(); }
+ void reserve(size_t capacity) { m_data.reserve(capacity); }
private:
ListValue();
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
index 639b39bb52..4619f32c30 100644
--- a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
@@ -185,8 +185,10 @@ void StringBuilder::reserveCapacity(size_t capacity) {
string_.reserve(capacity);
}
+// In Chromium, we do not support big endian architectures, so no conversion is needed
+// to interpret UTF16LE.
// static
-String StringUtil::fromUTF16(const uint16_t* data, size_t length) {
+String StringUtil::fromUTF16LE(const uint16_t* data, size_t length) {
std::string utf8;
base::UTF16ToUTF8(reinterpret_cast<const base::char16*>(data), length, &utf8);
return utf8;
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
index 8bf3c355c0..6a9ba3867a 100644
--- a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
@@ -100,7 +100,7 @@ class {{config.lib.export_macro}} StringUtil {
return std::string(reinterpret_cast<const char*>(data), length);
}
- static String fromUTF16(const uint16_t* data, size_t length);
+ static String fromUTF16LE(const uint16_t* data, size_t length);
static const uint8_t* CharactersLatin1(const String& s) { return nullptr; }
static const uint8_t* CharactersUTF8(const String& s) {
diff --git a/deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template b/deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template
deleted file mode 100644
index e55dffb5fd..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template
+++ /dev/null
@@ -1,2201 +0,0 @@
-{# This template is generated by gen_cbor_templates.py. #}
-// Generated by lib/encoding_cpp.template.
-
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-#include <algorithm>
-#include <cassert>
-#include <cmath>
-#include <cstring>
-#include <limits>
-#include <stack>
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-// ===== encoding/encoding.cc =====
-
-// =============================================================================
-// Status and Error codes
-// =============================================================================
-
-std::string Status::ToASCIIString() const {
- switch (error) {
- case Error::OK:
- return "OK";
- case Error::JSON_PARSER_UNPROCESSED_INPUT_REMAINS:
- return ToASCIIString("JSON: unprocessed input remains");
- case Error::JSON_PARSER_STACK_LIMIT_EXCEEDED:
- return ToASCIIString("JSON: stack limit exceeded");
- case Error::JSON_PARSER_NO_INPUT:
- return ToASCIIString("JSON: no input");
- case Error::JSON_PARSER_INVALID_TOKEN:
- return ToASCIIString("JSON: invalid token");
- case Error::JSON_PARSER_INVALID_NUMBER:
- return ToASCIIString("JSON: invalid number");
- case Error::JSON_PARSER_INVALID_STRING:
- return ToASCIIString("JSON: invalid string");
- case Error::JSON_PARSER_UNEXPECTED_ARRAY_END:
- return ToASCIIString("JSON: unexpected array end");
- case Error::JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED:
- return ToASCIIString("JSON: comma or array end expected");
- case Error::JSON_PARSER_STRING_LITERAL_EXPECTED:
- return ToASCIIString("JSON: string literal expected");
- case Error::JSON_PARSER_COLON_EXPECTED:
- return ToASCIIString("JSON: colon expected");
- case Error::JSON_PARSER_UNEXPECTED_MAP_END:
- return ToASCIIString("JSON: unexpected map end");
- case Error::JSON_PARSER_COMMA_OR_MAP_END_EXPECTED:
- return ToASCIIString("JSON: comma or map end expected");
- case Error::JSON_PARSER_VALUE_EXPECTED:
- return ToASCIIString("JSON: value expected");
-
- case Error::CBOR_INVALID_INT32:
- return ToASCIIString("CBOR: invalid int32");
- case Error::CBOR_INVALID_DOUBLE:
- return ToASCIIString("CBOR: invalid double");
- case Error::CBOR_INVALID_ENVELOPE:
- return ToASCIIString("CBOR: invalid envelope");
- case Error::CBOR_INVALID_STRING8:
- return ToASCIIString("CBOR: invalid string8");
- case Error::CBOR_INVALID_STRING16:
- return ToASCIIString("CBOR: invalid string16");
- case Error::CBOR_INVALID_BINARY:
- return ToASCIIString("CBOR: invalid binary");
- case Error::CBOR_UNSUPPORTED_VALUE:
- return ToASCIIString("CBOR: unsupported value");
- case Error::CBOR_NO_INPUT:
- return ToASCIIString("CBOR: no input");
- case Error::CBOR_INVALID_START_BYTE:
- return ToASCIIString("CBOR: invalid start byte");
- case Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE:
- return ToASCIIString("CBOR: unexpected eof expected value");
- case Error::CBOR_UNEXPECTED_EOF_IN_ARRAY:
- return ToASCIIString("CBOR: unexpected eof in array");
- case Error::CBOR_UNEXPECTED_EOF_IN_MAP:
- return ToASCIIString("CBOR: unexpected eof in map");
- case Error::CBOR_INVALID_MAP_KEY:
- return ToASCIIString("CBOR: invalid map key");
- case Error::CBOR_STACK_LIMIT_EXCEEDED:
- return ToASCIIString("CBOR: stack limit exceeded");
- case Error::CBOR_TRAILING_JUNK:
- return ToASCIIString("CBOR: trailing junk");
- case Error::CBOR_MAP_START_EXPECTED:
- return ToASCIIString("CBOR: map start expected");
- case Error::CBOR_MAP_STOP_EXPECTED:
- return ToASCIIString("CBOR: map stop expected");
- case Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED:
- return ToASCIIString("CBOR: envelope size limit exceeded");
- }
- // Some compilers can't figure out that we can't get here.
- return "INVALID ERROR CODE";
-}
-
-std::string Status::ToASCIIString(const char* msg) const {
- return std::string(msg) + " at position " + std::to_string(pos);
-}
-
-namespace cbor {
-namespace {
-// Indicates the number of bits the "initial byte" needs to be shifted to the
-// right after applying |kMajorTypeMask| to produce the major type in the
-// lowermost bits.
-static constexpr uint8_t kMajorTypeBitShift = 5u;
-// Mask selecting the low-order 5 bits of the "initial byte", which is where
-// the additional information is encoded.
-static constexpr uint8_t kAdditionalInformationMask = 0x1f;
-// Mask selecting the high-order 3 bits of the "initial byte", which indicates
-// the major type of the encoded value.
-static constexpr uint8_t kMajorTypeMask = 0xe0;
-// Indicates the integer is in the following byte.
-static constexpr uint8_t kAdditionalInformation1Byte = 24u;
-// Indicates the integer is in the next 2 bytes.
-static constexpr uint8_t kAdditionalInformation2Bytes = 25u;
-// Indicates the integer is in the next 4 bytes.
-static constexpr uint8_t kAdditionalInformation4Bytes = 26u;
-// Indicates the integer is in the next 8 bytes.
-static constexpr uint8_t kAdditionalInformation8Bytes = 27u;
-
-// Encodes the initial byte, consisting of the |type| in the first 3 bits
-// followed by 5 bits of |additional_info|.
-constexpr uint8_t EncodeInitialByte(MajorType type, uint8_t additional_info) {
- return (static_cast<uint8_t>(type) << kMajorTypeBitShift) |
- (additional_info & kAdditionalInformationMask);
-}
-
-// TAG 24 indicates that what follows is a byte string which is
-// encoded in CBOR format. We use this as a wrapper for
-// maps and arrays, allowing us to skip them, because the
-// byte string carries its size (byte length).
-// https://tools.ietf.org/html/rfc7049#section-2.4.4.1
-static constexpr uint8_t kInitialByteForEnvelope =
- EncodeInitialByte(MajorType::TAG, 24);
-// The initial byte for a byte string with at most 2^32 bytes
-// of payload. This is used for envelope encoding, even if
-// the byte string is shorter.
-static constexpr uint8_t kInitialByteFor32BitLengthByteString =
- EncodeInitialByte(MajorType::BYTE_STRING, 26);
-
-// See RFC 7049 Section 2.2.1, indefinite length arrays / maps have additional
-// info = 31.
-static constexpr uint8_t kInitialByteIndefiniteLengthArray =
- EncodeInitialByte(MajorType::ARRAY, 31);
-static constexpr uint8_t kInitialByteIndefiniteLengthMap =
- EncodeInitialByte(MajorType::MAP, 31);
-// See RFC 7049 Section 2.3, Table 1; this is used for finishing indefinite
-// length maps / arrays.
-static constexpr uint8_t kStopByte =
- EncodeInitialByte(MajorType::SIMPLE_VALUE, 31);
-
-// See RFC 7049 Section 2.3, Table 2.
-static constexpr uint8_t kEncodedTrue =
- EncodeInitialByte(MajorType::SIMPLE_VALUE, 21);
-static constexpr uint8_t kEncodedFalse =
- EncodeInitialByte(MajorType::SIMPLE_VALUE, 20);
-static constexpr uint8_t kEncodedNull =
- EncodeInitialByte(MajorType::SIMPLE_VALUE, 22);
-static constexpr uint8_t kInitialByteForDouble =
- EncodeInitialByte(MajorType::SIMPLE_VALUE, 27);
-
-// See RFC 7049 Table 3 and Section 2.4.4.2. This is used as a prefix for
-// arbitrary binary data encoded as BYTE_STRING.
-static constexpr uint8_t kExpectedConversionToBase64Tag =
- EncodeInitialByte(MajorType::TAG, 22);
-
-// Writes the bytes for |v| to |out|, starting with the most significant byte.
-// See also: https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
-template <typename T, class C>
-void WriteBytesMostSignificantByteFirst(T v, C* out) {
- for (int shift_bytes = sizeof(T) - 1; shift_bytes >= 0; --shift_bytes)
- out->push_back(0xff & (v >> (shift_bytes * 8)));
-}
-
-// Extracts sizeof(T) bytes from |in| to extract a value of type T
-// (e.g. uint64_t, uint32_t, ...), most significant byte first.
-// See also: https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
-template <typename T>
-T ReadBytesMostSignificantByteFirst(span<uint8_t> in) {
- assert(in.size() >= sizeof(T));
- T result = 0;
- for (size_t shift_bytes = 0; shift_bytes < sizeof(T); ++shift_bytes)
- result |= T(in[sizeof(T) - 1 - shift_bytes]) << (shift_bytes * 8);
- return result;
-}
-} // namespace
-
-namespace internals {
-// Reads the start of a token with definitive size from |bytes|.
-// |type| is the major type as specified in RFC 7049 Section 2.1.
-// |value| is the payload (e.g. for MajorType::UNSIGNED) or is the size
-// (e.g. for BYTE_STRING).
-// If successful, returns the number of bytes read. Otherwise returns -1.
-// TODO(johannes): change return type to size_t and use 0 for error.
-int8_t ReadTokenStart(span<uint8_t> bytes, MajorType* type, uint64_t* value) {
- if (bytes.empty())
- return -1;
- uint8_t initial_byte = bytes[0];
- *type = MajorType((initial_byte & kMajorTypeMask) >> kMajorTypeBitShift);
-
- uint8_t additional_information = initial_byte & kAdditionalInformationMask;
- if (additional_information < 24) {
- // Values 0-23 are encoded directly into the additional info of the
- // initial byte.
- *value = additional_information;
- return 1;
- }
- if (additional_information == kAdditionalInformation1Byte) {
- // Values 24-255 are encoded with one initial byte, followed by the value.
- if (bytes.size() < 2)
- return -1;
- *value = ReadBytesMostSignificantByteFirst<uint8_t>(bytes.subspan(1));
- return 2;
- }
- if (additional_information == kAdditionalInformation2Bytes) {
- // Values 256-65535: 1 initial byte + 2 bytes payload.
- if (bytes.size() < 1 + sizeof(uint16_t))
- return -1;
- *value = ReadBytesMostSignificantByteFirst<uint16_t>(bytes.subspan(1));
- return 3;
- }
- if (additional_information == kAdditionalInformation4Bytes) {
- // 32 bit uint: 1 initial byte + 4 bytes payload.
- if (bytes.size() < 1 + sizeof(uint32_t))
- return -1;
- *value = ReadBytesMostSignificantByteFirst<uint32_t>(bytes.subspan(1));
- return 5;
- }
- if (additional_information == kAdditionalInformation8Bytes) {
- // 64 bit uint: 1 initial byte + 8 bytes payload.
- if (bytes.size() < 1 + sizeof(uint64_t))
- return -1;
- *value = ReadBytesMostSignificantByteFirst<uint64_t>(bytes.subspan(1));
- return 9;
- }
- return -1;
-}
-
-// Writes the start of a token with |type|. The |value| may indicate the size,
-// or it may be the payload if the value is an unsigned integer.
-template <typename C>
-void WriteTokenStartTmpl(MajorType type, uint64_t value, C* encoded) {
- if (value < 24) {
- // Values 0-23 are encoded directly into the additional info of the
- // initial byte.
- encoded->push_back(EncodeInitialByte(type, /*additional_info=*/value));
- return;
- }
- if (value <= std::numeric_limits<uint8_t>::max()) {
- // Values 24-255 are encoded with one initial byte, followed by the value.
- encoded->push_back(EncodeInitialByte(type, kAdditionalInformation1Byte));
- encoded->push_back(value);
- return;
- }
- if (value <= std::numeric_limits<uint16_t>::max()) {
- // Values 256-65535: 1 initial byte + 2 bytes payload.
- encoded->push_back(EncodeInitialByte(type, kAdditionalInformation2Bytes));
- WriteBytesMostSignificantByteFirst<uint16_t>(value, encoded);
- return;
- }
- if (value <= std::numeric_limits<uint32_t>::max()) {
- // 32 bit uint: 1 initial byte + 4 bytes payload.
- encoded->push_back(EncodeInitialByte(type, kAdditionalInformation4Bytes));
- WriteBytesMostSignificantByteFirst<uint32_t>(static_cast<uint32_t>(value),
- encoded);
- return;
- }
- // 64 bit uint: 1 initial byte + 8 bytes payload.
- encoded->push_back(EncodeInitialByte(type, kAdditionalInformation8Bytes));
- WriteBytesMostSignificantByteFirst<uint64_t>(value, encoded);
-}
-void WriteTokenStart(MajorType type,
- uint64_t value,
- std::vector<uint8_t>* encoded) {
- WriteTokenStartTmpl(type, value, encoded);
-}
-void WriteTokenStart(MajorType type, uint64_t value, std::string* encoded) {
- WriteTokenStartTmpl(type, value, encoded);
-}
-} // namespace internals
-
-// =============================================================================
-// Detecting CBOR content
-// =============================================================================
-
-uint8_t InitialByteForEnvelope() {
- return kInitialByteForEnvelope;
-}
-uint8_t InitialByteFor32BitLengthByteString() {
- return kInitialByteFor32BitLengthByteString;
-}
-bool IsCBORMessage(span<uint8_t> msg) {
- return msg.size() >= 6 && msg[0] == InitialByteForEnvelope() &&
- msg[1] == InitialByteFor32BitLengthByteString();
-}
-
-// =============================================================================
-// Encoding invidiual CBOR items
-// =============================================================================
-
-uint8_t EncodeTrue() {
- return kEncodedTrue;
-}
-uint8_t EncodeFalse() {
- return kEncodedFalse;
-}
-uint8_t EncodeNull() {
- return kEncodedNull;
-}
-
-uint8_t EncodeIndefiniteLengthArrayStart() {
- return kInitialByteIndefiniteLengthArray;
-}
-
-uint8_t EncodeIndefiniteLengthMapStart() {
- return kInitialByteIndefiniteLengthMap;
-}
-
-uint8_t EncodeStop() {
- return kStopByte;
-}
-
-template <typename C>
-void EncodeInt32Tmpl(int32_t value, C* out) {
- if (value >= 0) {
- internals::WriteTokenStart(MajorType::UNSIGNED, value, out);
- } else {
- uint64_t representation = static_cast<uint64_t>(-(value + 1));
- internals::WriteTokenStart(MajorType::NEGATIVE, representation, out);
- }
-}
-void EncodeInt32(int32_t value, std::vector<uint8_t>* out) {
- EncodeInt32Tmpl(value, out);
-}
-void EncodeInt32(int32_t value, std::string* out) {
- EncodeInt32Tmpl(value, out);
-}
-
-template <typename C>
-void EncodeString16Tmpl(span<uint16_t> in, C* out) {
- uint64_t byte_length = static_cast<uint64_t>(in.size_bytes());
- internals::WriteTokenStart(MajorType::BYTE_STRING, byte_length, out);
- // When emitting UTF16 characters, we always write the least significant byte
- // first; this is because it's the native representation for X86.
- // TODO(johannes): Implement a more efficient thing here later, e.g.
- // casting *iff* the machine has this byte order.
- // The wire format for UTF16 chars will probably remain the same
- // (least significant byte first) since this way we can have
- // golden files, unittests, etc. that port easily and universally.
- // See also:
- // https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
- for (const uint16_t two_bytes : in) {
- out->push_back(two_bytes);
- out->push_back(two_bytes >> 8);
- }
-}
-void EncodeString16(span<uint16_t> in, std::vector<uint8_t>* out) {
- EncodeString16Tmpl(in, out);
-}
-void EncodeString16(span<uint16_t> in, std::string* out) {
- EncodeString16Tmpl(in, out);
-}
-
-template <typename C>
-void EncodeString8Tmpl(span<uint8_t> in, C* out) {
- internals::WriteTokenStart(MajorType::STRING,
- static_cast<uint64_t>(in.size_bytes()), out);
- out->insert(out->end(), in.begin(), in.end());
-}
-void EncodeString8(span<uint8_t> in, std::vector<uint8_t>* out) {
- EncodeString8Tmpl(in, out);
-}
-void EncodeString8(span<uint8_t> in, std::string* out) {
- EncodeString8Tmpl(in, out);
-}
-
-template <typename C>
-void EncodeFromLatin1Tmpl(span<uint8_t> latin1, C* out) {
- for (size_t ii = 0; ii < latin1.size(); ++ii) {
- if (latin1[ii] <= 127)
- continue;
- // If there's at least one non-ASCII char, convert to UTF8.
- std::vector<uint8_t> utf8(latin1.begin(), latin1.begin() + ii);
- for (; ii < latin1.size(); ++ii) {
- if (latin1[ii] <= 127) {
- utf8.push_back(latin1[ii]);
- } else {
- // 0xC0 means it's a UTF8 sequence with 2 bytes.
- utf8.push_back((latin1[ii] >> 6) | 0xc0);
- utf8.push_back((latin1[ii] | 0x80) & 0xbf);
- }
- }
- EncodeString8(SpanFrom(utf8), out);
- return;
- }
- EncodeString8(latin1, out);
-}
-void EncodeFromLatin1(span<uint8_t> latin1, std::vector<uint8_t>* out) {
- EncodeFromLatin1Tmpl(latin1, out);
-}
-void EncodeFromLatin1(span<uint8_t> latin1, std::string* out) {
- EncodeFromLatin1Tmpl(latin1, out);
-}
-
-template <typename C>
-void EncodeFromUTF16Tmpl(span<uint16_t> utf16, C* out) {
- // If there's at least one non-ASCII char, encode as STRING16 (UTF16).
- for (uint16_t ch : utf16) {
- if (ch <= 127)
- continue;
- EncodeString16(utf16, out);
- return;
- }
- // It's all US-ASCII, strip out every second byte and encode as UTF8.
- internals::WriteTokenStart(MajorType::STRING,
- static_cast<uint64_t>(utf16.size()), out);
- out->insert(out->end(), utf16.begin(), utf16.end());
-}
-void EncodeFromUTF16(span<uint16_t> utf16, std::vector<uint8_t>* out) {
- EncodeFromUTF16Tmpl(utf16, out);
-}
-void EncodeFromUTF16(span<uint16_t> utf16, std::string* out) {
- EncodeFromUTF16Tmpl(utf16, out);
-}
-
-template <typename C>
-void EncodeBinaryTmpl(span<uint8_t> in, C* out) {
- out->push_back(kExpectedConversionToBase64Tag);
- uint64_t byte_length = static_cast<uint64_t>(in.size_bytes());
- internals::WriteTokenStart(MajorType::BYTE_STRING, byte_length, out);
- out->insert(out->end(), in.begin(), in.end());
-}
-void EncodeBinary(span<uint8_t> in, std::vector<uint8_t>* out) {
- EncodeBinaryTmpl(in, out);
-}
-void EncodeBinary(span<uint8_t> in, std::string* out) {
- EncodeBinaryTmpl(in, out);
-}
-
-// A double is encoded with a specific initial byte
-// (kInitialByteForDouble) plus the 64 bits of payload for its value.
-constexpr size_t kEncodedDoubleSize = 1 + sizeof(uint64_t);
-
-// An envelope is encoded with a specific initial byte
-// (kInitialByteForEnvelope), plus the start byte for a BYTE_STRING with a 32
-// bit wide length, plus a 32 bit length for that string.
-constexpr size_t kEncodedEnvelopeHeaderSize = 1 + 1 + sizeof(uint32_t);
-
-template <typename C>
-void EncodeDoubleTmpl(double value, C* out) {
- // The additional_info=27 indicates 64 bits for the double follow.
- // See RFC 7049 Section 2.3, Table 1.
- out->push_back(kInitialByteForDouble);
- union {
- double from_double;
- uint64_t to_uint64;
- } reinterpret;
- reinterpret.from_double = value;
- WriteBytesMostSignificantByteFirst<uint64_t>(reinterpret.to_uint64, out);
-}
-void EncodeDouble(double value, std::vector<uint8_t>* out) {
- EncodeDoubleTmpl(value, out);
-}
-void EncodeDouble(double value, std::string* out) {
- EncodeDoubleTmpl(value, out);
-}
-
-// =============================================================================
-// cbor::EnvelopeEncoder - for wrapping submessages
-// =============================================================================
-
-template <typename C>
-void EncodeStartTmpl(C* out, size_t* byte_size_pos) {
- assert(*byte_size_pos == 0);
- out->push_back(kInitialByteForEnvelope);
- out->push_back(kInitialByteFor32BitLengthByteString);
- *byte_size_pos = out->size();
- out->resize(out->size() + sizeof(uint32_t));
-}
-
-void EnvelopeEncoder::EncodeStart(std::vector<uint8_t>* out) {
- EncodeStartTmpl<std::vector<uint8_t>>(out, &byte_size_pos_);
-}
-
-void EnvelopeEncoder::EncodeStart(std::string* out) {
- EncodeStartTmpl<std::string>(out, &byte_size_pos_);
-}
-
-template <typename C>
-bool EncodeStopTmpl(C* out, size_t* byte_size_pos) {
- assert(*byte_size_pos != 0);
- // The byte size is the size of the payload, that is, all the
- // bytes that were written past the byte size position itself.
- uint64_t byte_size = out->size() - (*byte_size_pos + sizeof(uint32_t));
- // We store exactly 4 bytes, so at most INT32MAX, with most significant
- // byte first.
- if (byte_size > std::numeric_limits<uint32_t>::max())
- return false;
- for (int shift_bytes = sizeof(uint32_t) - 1; shift_bytes >= 0;
- --shift_bytes) {
- (*out)[(*byte_size_pos)++] = 0xff & (byte_size >> (shift_bytes * 8));
- }
- return true;
-}
-
-bool EnvelopeEncoder::EncodeStop(std::vector<uint8_t>* out) {
- return EncodeStopTmpl(out, &byte_size_pos_);
-}
-
-bool EnvelopeEncoder::EncodeStop(std::string* out) {
- return EncodeStopTmpl(out, &byte_size_pos_);
-}
-
-// =============================================================================
-// cbor::NewCBOREncoder - for encoding from a streaming parser
-// =============================================================================
-
-namespace {
-template <typename C>
-class CBOREncoder : public StreamingParserHandler {
- public:
- CBOREncoder(C* out, Status* status) : out_(out), status_(status) {
- *status_ = Status();
- }
-
- void HandleMapBegin() override {
- if (!status_->ok())
- return;
- envelopes_.emplace_back();
- envelopes_.back().EncodeStart(out_);
- out_->push_back(kInitialByteIndefiniteLengthMap);
- }
-
- void HandleMapEnd() override {
- if (!status_->ok())
- return;
- out_->push_back(kStopByte);
- assert(!envelopes_.empty());
- if (!envelopes_.back().EncodeStop(out_)) {
- HandleError(
- Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, out_->size()));
- return;
- }
- envelopes_.pop_back();
- }
-
- void HandleArrayBegin() override {
- if (!status_->ok())
- return;
- envelopes_.emplace_back();
- envelopes_.back().EncodeStart(out_);
- out_->push_back(kInitialByteIndefiniteLengthArray);
- }
-
- void HandleArrayEnd() override {
- if (!status_->ok())
- return;
- out_->push_back(kStopByte);
- assert(!envelopes_.empty());
- if (!envelopes_.back().EncodeStop(out_)) {
- HandleError(
- Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, out_->size()));
- return;
- }
- envelopes_.pop_back();
- }
-
- void HandleString8(span<uint8_t> chars) override {
- if (!status_->ok())
- return;
- EncodeString8(chars, out_);
- }
-
- void HandleString16(span<uint16_t> chars) override {
- if (!status_->ok())
- return;
- EncodeFromUTF16(chars, out_);
- }
-
- void HandleBinary(span<uint8_t> bytes) override {
- if (!status_->ok())
- return;
- EncodeBinary(bytes, out_);
- }
-
- void HandleDouble(double value) override {
- if (!status_->ok())
- return;
- EncodeDouble(value, out_);
- }
-
- void HandleInt32(int32_t value) override {
- if (!status_->ok())
- return;
- EncodeInt32(value, out_);
- }
-
- void HandleBool(bool value) override {
- if (!status_->ok())
- return;
- // See RFC 7049 Section 2.3, Table 2.
- out_->push_back(value ? kEncodedTrue : kEncodedFalse);
- }
-
- void HandleNull() override {
- if (!status_->ok())
- return;
- // See RFC 7049 Section 2.3, Table 2.
- out_->push_back(kEncodedNull);
- }
-
- void HandleError(Status error) override {
- if (!status_->ok())
- return;
- *status_ = error;
- out_->clear();
- }
-
- private:
- C* out_;
- std::vector<EnvelopeEncoder> envelopes_;
- Status* status_;
-};
-} // namespace
-
-std::unique_ptr<StreamingParserHandler> NewCBOREncoder(
- std::vector<uint8_t>* out,
- Status* status) {
- return std::unique_ptr<StreamingParserHandler>(
- new CBOREncoder<std::vector<uint8_t>>(out, status));
-}
-std::unique_ptr<StreamingParserHandler> NewCBOREncoder(std::string* out,
- Status* status) {
- return std::unique_ptr<StreamingParserHandler>(
- new CBOREncoder<std::string>(out, status));
-}
-
-// =============================================================================
-// cbor::CBORTokenizer - for parsing individual CBOR items
-// =============================================================================
-
-CBORTokenizer::CBORTokenizer(span<uint8_t> bytes) : bytes_(bytes) {
- ReadNextToken(/*enter_envelope=*/false);
-}
-CBORTokenizer::~CBORTokenizer() {}
-
-CBORTokenTag CBORTokenizer::TokenTag() const {
- return token_tag_;
-}
-
-void CBORTokenizer::Next() {
- if (token_tag_ == CBORTokenTag::ERROR_VALUE ||
- token_tag_ == CBORTokenTag::DONE)
- return;
- ReadNextToken(/*enter_envelope=*/false);
-}
-
-void CBORTokenizer::EnterEnvelope() {
- assert(token_tag_ == CBORTokenTag::ENVELOPE);
- ReadNextToken(/*enter_envelope=*/true);
-}
-
-Status CBORTokenizer::Status() const {
- return status_;
-}
-
-// The following accessor functions ::GetInt32, ::GetDouble,
-// ::GetString8, ::GetString16WireRep, ::GetBinary, ::GetEnvelopeContents
-// assume that a particular token was recognized in ::ReadNextToken.
-// That's where all the error checking is done. By design,
-// the accessors (assuming the token was recognized) never produce
-// an error.
-
-int32_t CBORTokenizer::GetInt32() const {
- assert(token_tag_ == CBORTokenTag::INT32);
- // The range checks happen in ::ReadNextToken().
- return static_cast<int32_t>(
- token_start_type_ == MajorType::UNSIGNED
- ? token_start_internal_value_
- : -static_cast<int64_t>(token_start_internal_value_) - 1);
-}
-
-double CBORTokenizer::GetDouble() const {
- assert(token_tag_ == CBORTokenTag::DOUBLE);
- union {
- uint64_t from_uint64;
- double to_double;
- } reinterpret;
- reinterpret.from_uint64 = ReadBytesMostSignificantByteFirst<uint64_t>(
- bytes_.subspan(status_.pos + 1));
- return reinterpret.to_double;
-}
-
-span<uint8_t> CBORTokenizer::GetString8() const {
- assert(token_tag_ == CBORTokenTag::STRING8);
- auto length = static_cast<size_t>(token_start_internal_value_);
- return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
-}
-
-span<uint8_t> CBORTokenizer::GetString16WireRep() const {
- assert(token_tag_ == CBORTokenTag::STRING16);
- auto length = static_cast<size_t>(token_start_internal_value_);
- return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
-}
-
-span<uint8_t> CBORTokenizer::GetBinary() const {
- assert(token_tag_ == CBORTokenTag::BINARY);
- auto length = static_cast<size_t>(token_start_internal_value_);
- return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
-}
-
-span<uint8_t> CBORTokenizer::GetEnvelopeContents() const {
- assert(token_tag_ == CBORTokenTag::ENVELOPE);
- auto length = static_cast<size_t>(token_start_internal_value_);
- return bytes_.subspan(status_.pos + kEncodedEnvelopeHeaderSize, length);
-}
-
-// All error checking happens in ::ReadNextToken, so that the accessors
-// can avoid having to carry an error return value.
-//
-// With respect to checking the encoded lengths of strings, arrays, etc:
-// On the wire, CBOR uses 1,2,4, and 8 byte unsigned integers, so
-// we initially read them as uint64_t, usually into token_start_internal_value_.
-//
-// However, since these containers have a representation on the machine,
-// we need to do corresponding size computations on the input byte array,
-// output span (e.g. the payload for a string), etc., and size_t is
-// machine specific (in practice either 32 bit or 64 bit).
-//
-// Further, we must avoid overflowing size_t. Therefore, we use this
-// kMaxValidLength constant to:
-// - Reject values that are larger than the architecture specific
-// max size_t (differs between 32 bit and 64 bit arch).
-// - Reserve at least one bit so that we can check against overflows
-// when adding lengths (array / string length / etc.); we do this by
-// ensuring that the inputs to an addition are <= kMaxValidLength,
-// and then checking whether the sum went past it.
-//
-// See also
-// https://chromium.googlesource.com/chromium/src/+/master/docs/security/integer-semantics.md
-static const uint64_t kMaxValidLength =
- std::min<uint64_t>(std::numeric_limits<uint64_t>::max() >> 2,
- std::numeric_limits<size_t>::max());
-
-void CBORTokenizer::ReadNextToken(bool enter_envelope) {
- if (enter_envelope) {
- status_.pos += kEncodedEnvelopeHeaderSize;
- } else {
- status_.pos =
- status_.pos == Status::npos() ? 0 : status_.pos + token_byte_length_;
- }
- status_.error = Error::OK;
- if (status_.pos >= bytes_.size()) {
- token_tag_ = CBORTokenTag::DONE;
- return;
- }
- const size_t remaining_bytes = bytes_.size() - status_.pos;
- switch (bytes_[status_.pos]) {
- case kStopByte:
- SetToken(CBORTokenTag::STOP, 1);
- return;
- case kInitialByteIndefiniteLengthMap:
- SetToken(CBORTokenTag::MAP_START, 1);
- return;
- case kInitialByteIndefiniteLengthArray:
- SetToken(CBORTokenTag::ARRAY_START, 1);
- return;
- case kEncodedTrue:
- SetToken(CBORTokenTag::TRUE_VALUE, 1);
- return;
- case kEncodedFalse:
- SetToken(CBORTokenTag::FALSE_VALUE, 1);
- return;
- case kEncodedNull:
- SetToken(CBORTokenTag::NULL_VALUE, 1);
- return;
- case kExpectedConversionToBase64Tag: { // BINARY
- const int8_t bytes_read = internals::ReadTokenStart(
- bytes_.subspan(status_.pos + 1), &token_start_type_,
- &token_start_internal_value_);
- if (bytes_read < 0 || token_start_type_ != MajorType::BYTE_STRING ||
- token_start_internal_value_ > kMaxValidLength) {
- SetError(Error::CBOR_INVALID_BINARY);
- return;
- }
- const uint64_t token_byte_length = token_start_internal_value_ +
- /* tag before token start: */ 1 +
- /* token start: */ bytes_read;
- if (token_byte_length > remaining_bytes) {
- SetError(Error::CBOR_INVALID_BINARY);
- return;
- }
- SetToken(CBORTokenTag::BINARY, static_cast<size_t>(token_byte_length));
- return;
- }
- case kInitialByteForDouble: { // DOUBLE
- if (kEncodedDoubleSize > remaining_bytes) {
- SetError(Error::CBOR_INVALID_DOUBLE);
- return;
- }
- SetToken(CBORTokenTag::DOUBLE, kEncodedDoubleSize);
- return;
- }
- case kInitialByteForEnvelope: { // ENVELOPE
- if (kEncodedEnvelopeHeaderSize > remaining_bytes) {
- SetError(Error::CBOR_INVALID_ENVELOPE);
- return;
- }
- // The envelope must be a byte string with 32 bit length.
- if (bytes_[status_.pos + 1] != kInitialByteFor32BitLengthByteString) {
- SetError(Error::CBOR_INVALID_ENVELOPE);
- return;
- }
- // Read the length of the byte string.
- token_start_internal_value_ = ReadBytesMostSignificantByteFirst<uint32_t>(
- bytes_.subspan(status_.pos + 2));
- if (token_start_internal_value_ > kMaxValidLength) {
- SetError(Error::CBOR_INVALID_ENVELOPE);
- return;
- }
- uint64_t token_byte_length =
- token_start_internal_value_ + kEncodedEnvelopeHeaderSize;
- if (token_byte_length > remaining_bytes) {
- SetError(Error::CBOR_INVALID_ENVELOPE);
- return;
- }
- SetToken(CBORTokenTag::ENVELOPE, static_cast<size_t>(token_byte_length));
- return;
- }
- default: {
- const int8_t token_start_length = internals::ReadTokenStart(
- bytes_.subspan(status_.pos), &token_start_type_,
- &token_start_internal_value_);
- const bool success = token_start_length >= 0;
- switch (token_start_type_) {
- case MajorType::UNSIGNED: // INT32.
- // INT32 is a signed int32 (int32 makes sense for the
- // inspector_protocol, it's not a CBOR limitation), so we check
- // against the signed max, so that the allowable values are
- // 0, 1, 2, ... 2^31 - 1.
- if (!success || std::numeric_limits<int32_t>::max() <
- token_start_internal_value_) {
- SetError(Error::CBOR_INVALID_INT32);
- return;
- }
- SetToken(CBORTokenTag::INT32, token_start_length);
- return;
- case MajorType::NEGATIVE: { // INT32.
- // INT32 is a signed int32 (int32 makes sense for the
- // inspector_protocol, it's not a CBOR limitation); in CBOR, the
- // negative values for INT32 are represented as NEGATIVE, that is, -1
- // INT32 is represented as 1 << 5 | 0 (major type 1, additional info
- // value 0). The minimal allowed INT32 value in our protocol is
- // std::numeric_limits<int32_t>::min(). We check for it by directly
- // checking the payload against the maximal allowed signed (!) int32
- // value.
- if (!success || token_start_internal_value_ >
- std::numeric_limits<int32_t>::max()) {
- SetError(Error::CBOR_INVALID_INT32);
- return;
- }
- SetToken(CBORTokenTag::INT32, token_start_length);
- return;
- }
- case MajorType::STRING: { // STRING8.
- if (!success || token_start_internal_value_ > kMaxValidLength) {
- SetError(Error::CBOR_INVALID_STRING8);
- return;
- }
- uint64_t token_byte_length =
- token_start_internal_value_ + token_start_length;
- if (token_byte_length > remaining_bytes) {
- SetError(Error::CBOR_INVALID_STRING8);
- return;
- }
- SetToken(CBORTokenTag::STRING8,
- static_cast<size_t>(token_byte_length));
- return;
- }
- case MajorType::BYTE_STRING: { // STRING16.
- // Length must be divisible by 2 since UTF16 is 2 bytes per
- // character, hence the &1 check.
- if (!success || token_start_internal_value_ > kMaxValidLength ||
- token_start_internal_value_ & 1) {
- SetError(Error::CBOR_INVALID_STRING16);
- return;
- }
- uint64_t token_byte_length =
- token_start_internal_value_ + token_start_length;
- if (token_byte_length > remaining_bytes) {
- SetError(Error::CBOR_INVALID_STRING16);
- return;
- }
- SetToken(CBORTokenTag::STRING16,
- static_cast<size_t>(token_byte_length));
- return;
- }
- case MajorType::ARRAY:
- case MajorType::MAP:
- case MajorType::TAG:
- case MajorType::SIMPLE_VALUE:
- SetError(Error::CBOR_UNSUPPORTED_VALUE);
- return;
- }
- }
- }
-}
-
-void CBORTokenizer::SetToken(CBORTokenTag token_tag, size_t token_byte_length) {
- token_tag_ = token_tag;
- token_byte_length_ = token_byte_length;
-}
-
-void CBORTokenizer::SetError(Error error) {
- token_tag_ = CBORTokenTag::ERROR_VALUE;
- status_.error = error;
-}
-
-// =============================================================================
-// cbor::ParseCBOR - for receiving streaming parser events for CBOR messages
-// =============================================================================
-
-namespace {
-// When parsing CBOR, we limit recursion depth for objects and arrays
-// to this constant.
-static constexpr int kStackLimit = 300;
-
-// Below are three parsing routines for CBOR, which cover enough
-// to roundtrip JSON messages.
-bool ParseMap(int32_t stack_depth,
- CBORTokenizer* tokenizer,
- StreamingParserHandler* out);
-bool ParseArray(int32_t stack_depth,
- CBORTokenizer* tokenizer,
- StreamingParserHandler* out);
-bool ParseValue(int32_t stack_depth,
- CBORTokenizer* tokenizer,
- StreamingParserHandler* out);
-
-void ParseUTF16String(CBORTokenizer* tokenizer, StreamingParserHandler* out) {
- std::vector<uint16_t> value;
- span<uint8_t> rep = tokenizer->GetString16WireRep();
- for (size_t ii = 0; ii < rep.size(); ii += 2)
- value.push_back((rep[ii + 1] << 8) | rep[ii]);
- out->HandleString16(span<uint16_t>(value.data(), value.size()));
- tokenizer->Next();
-}
-
-bool ParseUTF8String(CBORTokenizer* tokenizer, StreamingParserHandler* out) {
- assert(tokenizer->TokenTag() == CBORTokenTag::STRING8);
- out->HandleString8(tokenizer->GetString8());
- tokenizer->Next();
- return true;
-}
-
-bool ParseValue(int32_t stack_depth,
- CBORTokenizer* tokenizer,
- StreamingParserHandler* out) {
- if (stack_depth > kStackLimit) {
- out->HandleError(
- Status{Error::CBOR_STACK_LIMIT_EXCEEDED, tokenizer->Status().pos});
- return false;
- }
- // Skip past the envelope to get to what's inside.
- if (tokenizer->TokenTag() == CBORTokenTag::ENVELOPE)
- tokenizer->EnterEnvelope();
- switch (tokenizer->TokenTag()) {
- case CBORTokenTag::ERROR_VALUE:
- out->HandleError(tokenizer->Status());
- return false;
- case CBORTokenTag::DONE:
- out->HandleError(Status{Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE,
- tokenizer->Status().pos});
- return false;
- case CBORTokenTag::TRUE_VALUE:
- out->HandleBool(true);
- tokenizer->Next();
- return true;
- case CBORTokenTag::FALSE_VALUE:
- out->HandleBool(false);
- tokenizer->Next();
- return true;
- case CBORTokenTag::NULL_VALUE:
- out->HandleNull();
- tokenizer->Next();
- return true;
- case CBORTokenTag::INT32:
- out->HandleInt32(tokenizer->GetInt32());
- tokenizer->Next();
- return true;
- case CBORTokenTag::DOUBLE:
- out->HandleDouble(tokenizer->GetDouble());
- tokenizer->Next();
- return true;
- case CBORTokenTag::STRING8:
- return ParseUTF8String(tokenizer, out);
- case CBORTokenTag::STRING16:
- ParseUTF16String(tokenizer, out);
- return true;
- case CBORTokenTag::BINARY: {
- out->HandleBinary(tokenizer->GetBinary());
- tokenizer->Next();
- return true;
- }
- case CBORTokenTag::MAP_START:
- return ParseMap(stack_depth + 1, tokenizer, out);
- case CBORTokenTag::ARRAY_START:
- return ParseArray(stack_depth + 1, tokenizer, out);
- default:
- out->HandleError(
- Status{Error::CBOR_UNSUPPORTED_VALUE, tokenizer->Status().pos});
- return false;
- }
-}
-
-// |bytes| must start with the indefinite length array byte, so basically,
-// ParseArray may only be called after an indefinite length array has been
-// detected.
-bool ParseArray(int32_t stack_depth,
- CBORTokenizer* tokenizer,
- StreamingParserHandler* out) {
- assert(tokenizer->TokenTag() == CBORTokenTag::ARRAY_START);
- tokenizer->Next();
- out->HandleArrayBegin();
- while (tokenizer->TokenTag() != CBORTokenTag::STOP) {
- if (tokenizer->TokenTag() == CBORTokenTag::DONE) {
- out->HandleError(
- Status{Error::CBOR_UNEXPECTED_EOF_IN_ARRAY, tokenizer->Status().pos});
- return false;
- }
- if (tokenizer->TokenTag() == CBORTokenTag::ERROR_VALUE) {
- out->HandleError(tokenizer->Status());
- return false;
- }
- // Parse value.
- if (!ParseValue(stack_depth, tokenizer, out))
- return false;
- }
- out->HandleArrayEnd();
- tokenizer->Next();
- return true;
-}
-
-// |bytes| must start with the indefinite length array byte, so basically,
-// ParseArray may only be called after an indefinite length array has been
-// detected.
-bool ParseMap(int32_t stack_depth,
- CBORTokenizer* tokenizer,
- StreamingParserHandler* out) {
- assert(tokenizer->TokenTag() == CBORTokenTag::MAP_START);
- out->HandleMapBegin();
- tokenizer->Next();
- while (tokenizer->TokenTag() != CBORTokenTag::STOP) {
- if (tokenizer->TokenTag() == CBORTokenTag::DONE) {
- out->HandleError(
- Status{Error::CBOR_UNEXPECTED_EOF_IN_MAP, tokenizer->Status().pos});
- return false;
- }
- if (tokenizer->TokenTag() == CBORTokenTag::ERROR_VALUE) {
- out->HandleError(tokenizer->Status());
- return false;
- }
- // Parse key.
- if (tokenizer->TokenTag() == CBORTokenTag::STRING8) {
- if (!ParseUTF8String(tokenizer, out))
- return false;
- } else if (tokenizer->TokenTag() == CBORTokenTag::STRING16) {
- ParseUTF16String(tokenizer, out);
- } else {
- out->HandleError(
- Status{Error::CBOR_INVALID_MAP_KEY, tokenizer->Status().pos});
- return false;
- }
- // Parse value.
- if (!ParseValue(stack_depth, tokenizer, out))
- return false;
- }
- out->HandleMapEnd();
- tokenizer->Next();
- return true;
-}
-} // namespace
-
-void ParseCBOR(span<uint8_t> bytes, StreamingParserHandler* out) {
- if (bytes.empty()) {
- out->HandleError(Status{Error::CBOR_NO_INPUT, 0});
- return;
- }
- if (bytes[0] != kInitialByteForEnvelope) {
- out->HandleError(Status{Error::CBOR_INVALID_START_BYTE, 0});
- return;
- }
- CBORTokenizer tokenizer(bytes);
- if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE) {
- out->HandleError(tokenizer.Status());
- return;
- }
- // We checked for the envelope start byte above, so the tokenizer
- // must agree here, since it's not an error.
- assert(tokenizer.TokenTag() == CBORTokenTag::ENVELOPE);
- tokenizer.EnterEnvelope();
- if (tokenizer.TokenTag() != CBORTokenTag::MAP_START) {
- out->HandleError(
- Status{Error::CBOR_MAP_START_EXPECTED, tokenizer.Status().pos});
- return;
- }
- if (!ParseMap(/*stack_depth=*/1, &tokenizer, out))
- return;
- if (tokenizer.TokenTag() == CBORTokenTag::DONE)
- return;
- if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE) {
- out->HandleError(tokenizer.Status());
- return;
- }
- out->HandleError(Status{Error::CBOR_TRAILING_JUNK, tokenizer.Status().pos});
-}
-
-// =============================================================================
-// cbor::AppendString8EntryToMap - for limited in-place editing of messages
-// =============================================================================
-
-template <typename C>
-Status AppendString8EntryToCBORMapTmpl(span<uint8_t> string8_key,
- span<uint8_t> string8_value,
- C* cbor) {
- // Careful below: Don't compare (*cbor)[idx] with a uint8_t, since
- // it could be a char (signed!). Instead, use bytes.
- span<uint8_t> bytes(reinterpret_cast<const uint8_t*>(cbor->data()),
- cbor->size());
- CBORTokenizer tokenizer(bytes);
- if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE)
- return tokenizer.Status();
- if (tokenizer.TokenTag() != CBORTokenTag::ENVELOPE)
- return Status(Error::CBOR_INVALID_ENVELOPE, 0);
- size_t envelope_size = tokenizer.GetEnvelopeContents().size();
- size_t old_size = cbor->size();
- if (old_size != envelope_size + kEncodedEnvelopeHeaderSize)
- return Status(Error::CBOR_INVALID_ENVELOPE, 0);
- if (envelope_size == 0 ||
- (tokenizer.GetEnvelopeContents()[0] != EncodeIndefiniteLengthMapStart()))
- return Status(Error::CBOR_MAP_START_EXPECTED, kEncodedEnvelopeHeaderSize);
- if (bytes[bytes.size() - 1] != EncodeStop())
- return Status(Error::CBOR_MAP_STOP_EXPECTED, cbor->size() - 1);
- cbor->pop_back();
- EncodeString8(string8_key, cbor);
- EncodeString8(string8_value, cbor);
- cbor->push_back(EncodeStop());
- size_t new_envelope_size = envelope_size + (cbor->size() - old_size);
- if (new_envelope_size > std::numeric_limits<uint32_t>::max())
- return Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, 0);
- size_t size_pos = cbor->size() - new_envelope_size - sizeof(uint32_t);
- uint8_t* out = reinterpret_cast<uint8_t*>(&cbor->at(size_pos));
- *(out++) = (new_envelope_size >> 24) & 0xff;
- *(out++) = (new_envelope_size >> 16) & 0xff;
- *(out++) = (new_envelope_size >> 8) & 0xff;
- *(out) = new_envelope_size & 0xff;
- return Status();
-}
-Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
- span<uint8_t> string8_value,
- std::vector<uint8_t>* cbor) {
- return AppendString8EntryToCBORMapTmpl(string8_key, string8_value, cbor);
-}
-Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
- span<uint8_t> string8_value,
- std::string* cbor) {
- return AppendString8EntryToCBORMapTmpl(string8_key, string8_value, cbor);
-}
-} // namespace cbor
-
-namespace json {
-
-// =============================================================================
-// json::NewJSONEncoder - for encoding streaming parser events as JSON
-// =============================================================================
-
-namespace {
-// Prints |value| to |out| with 4 hex digits, most significant chunk first.
-template <typename C>
-void PrintHex(uint16_t value, C* out) {
- for (int ii = 3; ii >= 0; --ii) {
- int four_bits = 0xf & (value >> (4 * ii));
- out->push_back(four_bits + ((four_bits <= 9) ? '0' : ('a' - 10)));
- }
-}
-
-// In the writer below, we maintain a stack of State instances.
-// It is just enough to emit the appropriate delimiters and brackets
-// in JSON.
-enum class Container {
- // Used for the top-level, initial state.
- NONE,
- // Inside a JSON object.
- MAP,
- // Inside a JSON array.
- ARRAY
-};
-class State {
- public:
- explicit State(Container container) : container_(container) {}
- void StartElement(std::vector<uint8_t>* out) { StartElementTmpl(out); }
- void StartElement(std::string* out) { StartElementTmpl(out); }
- Container container() const { return container_; }
-
- private:
- template <typename C>
- void StartElementTmpl(C* out) {
- assert(container_ != Container::NONE || size_ == 0);
- if (size_ != 0) {
- char delim = (!(size_ & 1) || container_ == Container::ARRAY) ? ',' : ':';
- out->push_back(delim);
- }
- ++size_;
- }
-
- Container container_ = Container::NONE;
- int size_ = 0;
-};
-
-constexpr char kBase64Table[] =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "abcdefghijklmnopqrstuvwxyz0123456789+/";
-
-template <typename C>
-void Base64Encode(const span<uint8_t>& in, C* out) {
- // The following three cases are based on the tables in the example
- // section in https://en.wikipedia.org/wiki/Base64. We process three
- // input bytes at a time, emitting 4 output bytes at a time.
- size_t ii = 0;
-
- // While possible, process three input bytes.
- for (; ii + 3 <= in.size(); ii += 3) {
- uint32_t twentyfour_bits = (in[ii] << 16) | (in[ii + 1] << 8) | in[ii + 2];
- out->push_back(kBase64Table[(twentyfour_bits >> 18)]);
- out->push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
- out->push_back(kBase64Table[(twentyfour_bits >> 6) & 0x3f]);
- out->push_back(kBase64Table[twentyfour_bits & 0x3f]);
- }
- if (ii + 2 <= in.size()) { // Process two input bytes.
- uint32_t twentyfour_bits = (in[ii] << 16) | (in[ii + 1] << 8);
- out->push_back(kBase64Table[(twentyfour_bits >> 18)]);
- out->push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
- out->push_back(kBase64Table[(twentyfour_bits >> 6) & 0x3f]);
- out->push_back('='); // Emit padding.
- return;
- }
- if (ii + 1 <= in.size()) { // Process a single input byte.
- uint32_t twentyfour_bits = (in[ii] << 16);
- out->push_back(kBase64Table[(twentyfour_bits >> 18)]);
- out->push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
- out->push_back('='); // Emit padding.
- out->push_back('='); // Emit padding.
- }
-}
-
-// Implements a handler for JSON parser events to emit a JSON string.
-template <typename C>
-class JSONEncoder : public StreamingParserHandler {
- public:
- JSONEncoder(const Platform* platform, C* out, Status* status)
- : platform_(platform), out_(out), status_(status) {
- *status_ = Status();
- state_.emplace(Container::NONE);
- }
-
- void HandleMapBegin() override {
- if (!status_->ok())
- return;
- assert(!state_.empty());
- state_.top().StartElement(out_);
- state_.emplace(Container::MAP);
- Emit('{');
- }
-
- void HandleMapEnd() override {
- if (!status_->ok())
- return;
- assert(state_.size() >= 2 && state_.top().container() == Container::MAP);
- state_.pop();
- Emit('}');
- }
-
- void HandleArrayBegin() override {
- if (!status_->ok())
- return;
- state_.top().StartElement(out_);
- state_.emplace(Container::ARRAY);
- Emit('[');
- }
-
- void HandleArrayEnd() override {
- if (!status_->ok())
- return;
- assert(state_.size() >= 2 && state_.top().container() == Container::ARRAY);
- state_.pop();
- Emit(']');
- }
-
- void HandleString16(span<uint16_t> chars) override {
- if (!status_->ok())
- return;
- state_.top().StartElement(out_);
- Emit('"');
- for (const uint16_t ch : chars) {
- if (ch == '"') {
- Emit("\\\"");
- } else if (ch == '\\') {
- Emit("\\\\");
- } else if (ch == '\b') {
- Emit("\\b");
- } else if (ch == '\f') {
- Emit("\\f");
- } else if (ch == '\n') {
- Emit("\\n");
- } else if (ch == '\r') {
- Emit("\\r");
- } else if (ch == '\t') {
- Emit("\\t");
- } else if (ch >= 32 && ch <= 126) {
- Emit(ch);
- } else {
- Emit("\\u");
- PrintHex(ch, out_);
- }
- }
- Emit('"');
- }
-
- void HandleString8(span<uint8_t> chars) override {
- if (!status_->ok())
- return;
- state_.top().StartElement(out_);
- Emit('"');
- for (size_t ii = 0; ii < chars.size(); ++ii) {
- uint8_t c = chars[ii];
- if (c == '"') {
- Emit("\\\"");
- } else if (c == '\\') {
- Emit("\\\\");
- } else if (c == '\b') {
- Emit("\\b");
- } else if (c == '\f') {
- Emit("\\f");
- } else if (c == '\n') {
- Emit("\\n");
- } else if (c == '\r') {
- Emit("\\r");
- } else if (c == '\t') {
- Emit("\\t");
- } else if (c >= 32 && c <= 126) {
- Emit(c);
- } else if (c < 32) {
- Emit("\\u");
- PrintHex(static_cast<uint16_t>(c), out_);
- } else {
- // Inspect the leading byte to figure out how long the utf8
- // byte sequence is; while doing this initialize |codepoint|
- // with the first few bits.
- // See table in: https://en.wikipedia.org/wiki/UTF-8
- // byte one is 110x xxxx -> 2 byte utf8 sequence
- // byte one is 1110 xxxx -> 3 byte utf8 sequence
- // byte one is 1111 0xxx -> 4 byte utf8 sequence
- uint32_t codepoint;
- int num_bytes_left;
- if ((c & 0xe0) == 0xc0) { // 2 byte utf8 sequence
- num_bytes_left = 1;
- codepoint = c & 0x1f;
- } else if ((c & 0xf0) == 0xe0) { // 3 byte utf8 sequence
- num_bytes_left = 2;
- codepoint = c & 0x0f;
- } else if ((c & 0xf8) == 0xf0) { // 4 byte utf8 sequence
- codepoint = c & 0x07;
- num_bytes_left = 3;
- } else {
- continue; // invalid leading byte
- }
-
- // If we have enough bytes in our input, decode the remaining ones
- // belonging to this Unicode character into |codepoint|.
- if (ii + num_bytes_left > chars.size())
- continue;
- while (num_bytes_left > 0) {
- c = chars[++ii];
- --num_bytes_left;
- // Check the next byte is a continuation byte, that is 10xx xxxx.
- if ((c & 0xc0) != 0x80)
- continue;
- codepoint = (codepoint << 6) | (c & 0x3f);
- }
-
- // Disallow overlong encodings for ascii characters, as these
- // would include " and other characters significant to JSON
- // string termination / control.
- if (codepoint < 0x7f)
- continue;
- // Invalid in UTF8, and can't be represented in UTF16 anyway.
- if (codepoint > 0x10ffff)
- continue;
-
- // So, now we transcode to UTF16,
- // using the math described at https://en.wikipedia.org/wiki/UTF-16,
- // for either one or two 16 bit characters.
- if (codepoint < 0xffff) {
- Emit("\\u");
- PrintHex(static_cast<uint16_t>(codepoint), out_);
- continue;
- }
- codepoint -= 0x10000;
- // high surrogate
- Emit("\\u");
- PrintHex(static_cast<uint16_t>((codepoint >> 10) + 0xd800), out_);
- // low surrogate
- Emit("\\u");
- PrintHex(static_cast<uint16_t>((codepoint & 0x3ff) + 0xdc00), out_);
- }
- }
- Emit('"');
- }
-
- void HandleBinary(span<uint8_t> bytes) override {
- if (!status_->ok())
- return;
- state_.top().StartElement(out_);
- Emit('"');
- Base64Encode(bytes, out_);
- Emit('"');
- }
-
- void HandleDouble(double value) override {
- if (!status_->ok())
- return;
- state_.top().StartElement(out_);
- // JSON cannot represent NaN or Infinity. So, for compatibility,
- // we behave like the JSON object in web browsers: emit 'null'.
- if (!std::isfinite(value)) {
- Emit("null");
- return;
- }
- std::unique_ptr<char[]> str_value = platform_->DToStr(value);
-
- // DToStr may fail to emit a 0 before the decimal dot. E.g. this is
- // the case in base::NumberToString in Chromium (which is based on
- // dmg_fp). So, much like
- // https://cs.chromium.org/chromium/src/base/json/json_writer.cc
- // we probe for this and emit the leading 0 anyway if necessary.
- const char* chars = str_value.get();
- if (chars[0] == '.') {
- Emit('0');
- } else if (chars[0] == '-' && chars[1] == '.') {
- Emit("-0");
- ++chars;
- }
- Emit(chars);
- }
-
- void HandleInt32(int32_t value) override {
- if (!status_->ok())
- return;
- state_.top().StartElement(out_);
- Emit(std::to_string(value));
- }
-
- void HandleBool(bool value) override {
- if (!status_->ok())
- return;
- state_.top().StartElement(out_);
- Emit(value ? "true" : "false");
- }
-
- void HandleNull() override {
- if (!status_->ok())
- return;
- state_.top().StartElement(out_);
- Emit("null");
- }
-
- void HandleError(Status error) override {
- assert(!error.ok());
- *status_ = error;
- out_->clear();
- }
-
- private:
- void Emit(char c) { out_->push_back(c); }
- void Emit(const char* str) {
- out_->insert(out_->end(), str, str + strlen(str));
- }
- void Emit(const std::string& str) {
- out_->insert(out_->end(), str.begin(), str.end());
- }
-
- const Platform* platform_;
- C* out_;
- Status* status_;
- std::stack<State> state_;
-};
-} // namespace
-
-std::unique_ptr<StreamingParserHandler> NewJSONEncoder(
- const Platform* platform,
- std::vector<uint8_t>* out,
- Status* status) {
- return std::unique_ptr<StreamingParserHandler>(
- new JSONEncoder<std::vector<uint8_t>>(platform, out, status));
-}
-std::unique_ptr<StreamingParserHandler> NewJSONEncoder(const Platform* platform,
- std::string* out,
- Status* status) {
- return std::unique_ptr<StreamingParserHandler>(
- new JSONEncoder<std::string>(platform, out, status));
-}
-
-// =============================================================================
-// json::ParseJSON - for receiving streaming parser events for JSON.
-// =============================================================================
-
-namespace {
-const int kStackLimit = 300;
-
-enum Token {
- ObjectBegin,
- ObjectEnd,
- ArrayBegin,
- ArrayEnd,
- StringLiteral,
- Number,
- BoolTrue,
- BoolFalse,
- NullToken,
- ListSeparator,
- ObjectPairSeparator,
- InvalidToken,
- NoInput
-};
-
-const char* const kNullString = "null";
-const char* const kTrueString = "true";
-const char* const kFalseString = "false";
-
-template <typename Char>
-class JsonParser {
- public:
- JsonParser(const Platform* platform, StreamingParserHandler* handler)
- : platform_(platform), handler_(handler) {}
-
- void Parse(const Char* start, size_t length) {
- start_pos_ = start;
- const Char* end = start + length;
- const Char* tokenEnd = nullptr;
- ParseValue(start, end, &tokenEnd, 0);
- if (error_)
- return;
- if (tokenEnd != end) {
- HandleError(Error::JSON_PARSER_UNPROCESSED_INPUT_REMAINS, tokenEnd);
- }
- }
-
- private:
- bool CharsToDouble(const uint16_t* chars, size_t length, double* result) {
- std::string buffer;
- buffer.reserve(length + 1);
- for (size_t ii = 0; ii < length; ++ii) {
- bool is_ascii = !(chars[ii] & ~0x7F);
- if (!is_ascii)
- return false;
- buffer.push_back(static_cast<char>(chars[ii]));
- }
- return platform_->StrToD(buffer.c_str(), result);
- }
-
- bool CharsToDouble(const uint8_t* chars, size_t length, double* result) {
- std::string buffer(reinterpret_cast<const char*>(chars), length);
- return platform_->StrToD(buffer.c_str(), result);
- }
-
- static bool ParseConstToken(const Char* start,
- const Char* end,
- const Char** token_end,
- const char* token) {
- // |token| is \0 terminated, it's one of the constants at top of the file.
- while (start < end && *token != '\0' && *start++ == *token++) {
- }
- if (*token != '\0')
- return false;
- *token_end = start;
- return true;
- }
-
- static bool ReadInt(const Char* start,
- const Char* end,
- const Char** token_end,
- bool allow_leading_zeros) {
- if (start == end)
- return false;
- bool has_leading_zero = '0' == *start;
- int length = 0;
- while (start < end && '0' <= *start && *start <= '9') {
- ++start;
- ++length;
- }
- if (!length)
- return false;
- if (!allow_leading_zeros && length > 1 && has_leading_zero)
- return false;
- *token_end = start;
- return true;
- }
-
- static bool ParseNumberToken(const Char* start,
- const Char* end,
- const Char** token_end) {
- // We just grab the number here. We validate the size in DecodeNumber.
- // According to RFC4627, a valid number is: [minus] int [frac] [exp]
- if (start == end)
- return false;
- Char c = *start;
- if ('-' == c)
- ++start;
-
- if (!ReadInt(start, end, &start, /*allow_leading_zeros=*/false))
- return false;
- if (start == end) {
- *token_end = start;
- return true;
- }
-
- // Optional fraction part
- c = *start;
- if ('.' == c) {
- ++start;
- if (!ReadInt(start, end, &start, /*allow_leading_zeros=*/true))
- return false;
- if (start == end) {
- *token_end = start;
- return true;
- }
- c = *start;
- }
-
- // Optional exponent part
- if ('e' == c || 'E' == c) {
- ++start;
- if (start == end)
- return false;
- c = *start;
- if ('-' == c || '+' == c) {
- ++start;
- if (start == end)
- return false;
- }
- if (!ReadInt(start, end, &start, /*allow_leading_zeros=*/true))
- return false;
- }
-
- *token_end = start;
- return true;
- }
-
- static bool ReadHexDigits(const Char* start,
- const Char* end,
- const Char** token_end,
- int digits) {
- if (end - start < digits)
- return false;
- for (int i = 0; i < digits; ++i) {
- Char c = *start++;
- if (!(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') ||
- ('A' <= c && c <= 'F')))
- return false;
- }
- *token_end = start;
- return true;
- }
-
- static bool ParseStringToken(const Char* start,
- const Char* end,
- const Char** token_end) {
- while (start < end) {
- Char c = *start++;
- if ('\\' == c) {
- if (start == end)
- return false;
- c = *start++;
- // Make sure the escaped char is valid.
- switch (c) {
- case 'x':
- if (!ReadHexDigits(start, end, &start, 2))
- return false;
- break;
- case 'u':
- if (!ReadHexDigits(start, end, &start, 4))
- return false;
- break;
- case '\\':
- case '/':
- case 'b':
- case 'f':
- case 'n':
- case 'r':
- case 't':
- case 'v':
- case '"':
- break;
- default:
- return false;
- }
- } else if ('"' == c) {
- *token_end = start;
- return true;
- }
- }
- return false;
- }
-
- static bool SkipComment(const Char* start,
- const Char* end,
- const Char** comment_end) {
- if (start == end)
- return false;
-
- if (*start != '/' || start + 1 >= end)
- return false;
- ++start;
-
- if (*start == '/') {
- // Single line comment, read to newline.
- for (++start; start < end; ++start) {
- if (*start == '\n' || *start == '\r') {
- *comment_end = start + 1;
- return true;
- }
- }
- *comment_end = end;
- // Comment reaches end-of-input, which is fine.
- return true;
- }
-
- if (*start == '*') {
- Char previous = '\0';
- // Block comment, read until end marker.
- for (++start; start < end; previous = *start++) {
- if (previous == '*' && *start == '/') {
- *comment_end = start + 1;
- return true;
- }
- }
- // Block comment must close before end-of-input.
- return false;
- }
-
- return false;
- }
-
- static bool IsSpaceOrNewLine(Char c) {
- // \v = vertial tab; \f = form feed page break.
- return c == ' ' || c == '\n' || c == '\v' || c == '\f' || c == '\r' ||
- c == '\t';
- }
-
- static void SkipWhitespaceAndComments(const Char* start,
- const Char* end,
- const Char** whitespace_end) {
- while (start < end) {
- if (IsSpaceOrNewLine(*start)) {
- ++start;
- } else if (*start == '/') {
- const Char* comment_end = nullptr;
- if (!SkipComment(start, end, &comment_end))
- break;
- start = comment_end;
- } else {
- break;
- }
- }
- *whitespace_end = start;
- }
-
- static Token ParseToken(const Char* start,
- const Char* end,
- const Char** tokenStart,
- const Char** token_end) {
- SkipWhitespaceAndComments(start, end, tokenStart);
- start = *tokenStart;
-
- if (start == end)
- return NoInput;
-
- switch (*start) {
- case 'n':
- if (ParseConstToken(start, end, token_end, kNullString))
- return NullToken;
- break;
- case 't':
- if (ParseConstToken(start, end, token_end, kTrueString))
- return BoolTrue;
- break;
- case 'f':
- if (ParseConstToken(start, end, token_end, kFalseString))
- return BoolFalse;
- break;
- case '[':
- *token_end = start + 1;
- return ArrayBegin;
- case ']':
- *token_end = start + 1;
- return ArrayEnd;
- case ',':
- *token_end = start + 1;
- return ListSeparator;
- case '{':
- *token_end = start + 1;
- return ObjectBegin;
- case '}':
- *token_end = start + 1;
- return ObjectEnd;
- case ':':
- *token_end = start + 1;
- return ObjectPairSeparator;
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- case '-':
- if (ParseNumberToken(start, end, token_end))
- return Number;
- break;
- case '"':
- if (ParseStringToken(start + 1, end, token_end))
- return StringLiteral;
- break;
- }
- return InvalidToken;
- }
-
- static int HexToInt(Char c) {
- if ('0' <= c && c <= '9')
- return c - '0';
- if ('A' <= c && c <= 'F')
- return c - 'A' + 10;
- if ('a' <= c && c <= 'f')
- return c - 'a' + 10;
- assert(false); // Unreachable.
- return 0;
- }
-
- static bool DecodeString(const Char* start,
- const Char* end,
- std::vector<uint16_t>* output) {
- if (start == end)
- return true;
- if (start > end)
- return false;
- output->reserve(end - start);
- while (start < end) {
- uint16_t c = *start++;
- // If the |Char| we're dealing with is really a byte, then
- // we have utf8 here, and we need to check for multibyte characters
- // and transcode them to utf16 (either one or two utf16 chars).
- if (sizeof(Char) == sizeof(uint8_t) && c > 0x7f) {
- // Inspect the leading byte to figure out how long the utf8
- // byte sequence is; while doing this initialize |codepoint|
- // with the first few bits.
- // See table in: https://en.wikipedia.org/wiki/UTF-8
- // byte one is 110x xxxx -> 2 byte utf8 sequence
- // byte one is 1110 xxxx -> 3 byte utf8 sequence
- // byte one is 1111 0xxx -> 4 byte utf8 sequence
- uint32_t codepoint;
- int num_bytes_left;
- if ((c & 0xe0) == 0xc0) { // 2 byte utf8 sequence
- num_bytes_left = 1;
- codepoint = c & 0x1f;
- } else if ((c & 0xf0) == 0xe0) { // 3 byte utf8 sequence
- num_bytes_left = 2;
- codepoint = c & 0x0f;
- } else if ((c & 0xf8) == 0xf0) { // 4 byte utf8 sequence
- codepoint = c & 0x07;
- num_bytes_left = 3;
- } else {
- return false; // invalid leading byte
- }
-
- // If we have enough bytes in our inpput, decode the remaining ones
- // belonging to this Unicode character into |codepoint|.
- if (start + num_bytes_left > end)
- return false;
- while (num_bytes_left > 0) {
- c = *start++;
- --num_bytes_left;
- // Check the next byte is a continuation byte, that is 10xx xxxx.
- if ((c & 0xc0) != 0x80)
- return false;
- codepoint = (codepoint << 6) | (c & 0x3f);
- }
-
- // Disallow overlong encodings for ascii characters, as these
- // would include " and other characters significant to JSON
- // string termination / control.
- if (codepoint <= 0x7f)
- return false;
- // Invalid in UTF8, and can't be represented in UTF16 anyway.
- if (codepoint > 0x10ffff)
- return false;
-
- // So, now we transcode to UTF16,
- // using the math described at https://en.wikipedia.org/wiki/UTF-16,
- // for either one or two 16 bit characters.
- if (codepoint < 0xffff) {
- output->push_back(codepoint);
- continue;
- }
- codepoint -= 0x10000;
- output->push_back((codepoint >> 10) + 0xd800); // high surrogate
- output->push_back((codepoint & 0x3ff) + 0xdc00); // low surrogate
- continue;
- }
- if ('\\' != c) {
- output->push_back(c);
- continue;
- }
- if (start == end)
- return false;
- c = *start++;
-
- if (c == 'x') {
- // \x is not supported.
- return false;
- }
-
- switch (c) {
- case '"':
- case '/':
- case '\\':
- break;
- case 'b':
- c = '\b';
- break;
- case 'f':
- c = '\f';
- break;
- case 'n':
- c = '\n';
- break;
- case 'r':
- c = '\r';
- break;
- case 't':
- c = '\t';
- break;
- case 'v':
- c = '\v';
- break;
- case 'u':
- c = (HexToInt(*start) << 12) + (HexToInt(*(start + 1)) << 8) +
- (HexToInt(*(start + 2)) << 4) + HexToInt(*(start + 3));
- start += 4;
- break;
- default:
- return false;
- }
- output->push_back(c);
- }
- return true;
- }
-
- void ParseValue(const Char* start,
- const Char* end,
- const Char** value_token_end,
- int depth) {
- if (depth > kStackLimit) {
- HandleError(Error::JSON_PARSER_STACK_LIMIT_EXCEEDED, start);
- return;
- }
- const Char* token_start = nullptr;
- const Char* token_end = nullptr;
- Token token = ParseToken(start, end, &token_start, &token_end);
- switch (token) {
- case NoInput:
- HandleError(Error::JSON_PARSER_NO_INPUT, token_start);
- return;
- case InvalidToken:
- HandleError(Error::JSON_PARSER_INVALID_TOKEN, token_start);
- return;
- case NullToken:
- handler_->HandleNull();
- break;
- case BoolTrue:
- handler_->HandleBool(true);
- break;
- case BoolFalse:
- handler_->HandleBool(false);
- break;
- case Number: {
- double value;
- if (!CharsToDouble(token_start, token_end - token_start, &value)) {
- HandleError(Error::JSON_PARSER_INVALID_NUMBER, token_start);
- return;
- }
- if (value >= std::numeric_limits<int32_t>::min() &&
- value <= std::numeric_limits<int32_t>::max() &&
- static_cast<int32_t>(value) == value)
- handler_->HandleInt32(static_cast<int32_t>(value));
- else
- handler_->HandleDouble(value);
- break;
- }
- case StringLiteral: {
- std::vector<uint16_t> value;
- bool ok = DecodeString(token_start + 1, token_end - 1, &value);
- if (!ok) {
- HandleError(Error::JSON_PARSER_INVALID_STRING, token_start);
- return;
- }
- handler_->HandleString16(span<uint16_t>(value.data(), value.size()));
- break;
- }
- case ArrayBegin: {
- handler_->HandleArrayBegin();
- start = token_end;
- token = ParseToken(start, end, &token_start, &token_end);
- while (token != ArrayEnd) {
- ParseValue(start, end, &token_end, depth + 1);
- if (error_)
- return;
-
- // After a list value, we expect a comma or the end of the list.
- start = token_end;
- token = ParseToken(start, end, &token_start, &token_end);
- if (token == ListSeparator) {
- start = token_end;
- token = ParseToken(start, end, &token_start, &token_end);
- if (token == ArrayEnd) {
- HandleError(Error::JSON_PARSER_UNEXPECTED_ARRAY_END, token_start);
- return;
- }
- } else if (token != ArrayEnd) {
- // Unexpected value after list value. Bail out.
- HandleError(Error::JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED,
- token_start);
- return;
- }
- }
- handler_->HandleArrayEnd();
- break;
- }
- case ObjectBegin: {
- handler_->HandleMapBegin();
- start = token_end;
- token = ParseToken(start, end, &token_start, &token_end);
- while (token != ObjectEnd) {
- if (token != StringLiteral) {
- HandleError(Error::JSON_PARSER_STRING_LITERAL_EXPECTED,
- token_start);
- return;
- }
- std::vector<uint16_t> key;
- if (!DecodeString(token_start + 1, token_end - 1, &key)) {
- HandleError(Error::JSON_PARSER_INVALID_STRING, token_start);
- return;
- }
- handler_->HandleString16(span<uint16_t>(key.data(), key.size()));
- start = token_end;
-
- token = ParseToken(start, end, &token_start, &token_end);
- if (token != ObjectPairSeparator) {
- HandleError(Error::JSON_PARSER_COLON_EXPECTED, token_start);
- return;
- }
- start = token_end;
-
- ParseValue(start, end, &token_end, depth + 1);
- if (error_)
- return;
- start = token_end;
-
- // After a key/value pair, we expect a comma or the end of the
- // object.
- token = ParseToken(start, end, &token_start, &token_end);
- if (token == ListSeparator) {
- start = token_end;
- token = ParseToken(start, end, &token_start, &token_end);
- if (token == ObjectEnd) {
- HandleError(Error::JSON_PARSER_UNEXPECTED_MAP_END, token_start);
- return;
- }
- } else if (token != ObjectEnd) {
- // Unexpected value after last object value. Bail out.
- HandleError(Error::JSON_PARSER_COMMA_OR_MAP_END_EXPECTED,
- token_start);
- return;
- }
- }
- handler_->HandleMapEnd();
- break;
- }
-
- default:
- // We got a token that's not a value.
- HandleError(Error::JSON_PARSER_VALUE_EXPECTED, token_start);
- return;
- }
-
- SkipWhitespaceAndComments(token_end, end, value_token_end);
- }
-
- void HandleError(Error error, const Char* pos) {
- assert(error != Error::OK);
- if (!error_) {
- handler_->HandleError(
- Status{error, static_cast<size_t>(pos - start_pos_)});
- error_ = true;
- }
- }
-
- const Char* start_pos_ = nullptr;
- bool error_ = false;
- const Platform* platform_;
- StreamingParserHandler* handler_;
-};
-} // namespace
-
-void ParseJSON(const Platform& platform,
- span<uint8_t> chars,
- StreamingParserHandler* handler) {
- JsonParser<uint8_t> parser(&platform, handler);
- parser.Parse(chars.data(), chars.size());
-}
-
-void ParseJSON(const Platform& platform,
- span<uint16_t> chars,
- StreamingParserHandler* handler) {
- JsonParser<uint16_t> parser(&platform, handler);
- parser.Parse(chars.data(), chars.size());
-}
-
-// =============================================================================
-// json::ConvertCBORToJSON, json::ConvertJSONToCBOR - for transcoding
-// =============================================================================
-template <typename C>
-Status ConvertCBORToJSONTmpl(const Platform& platform,
- span<uint8_t> cbor,
- C* json) {
- Status status;
- std::unique_ptr<StreamingParserHandler> json_writer =
- NewJSONEncoder(&platform, json, &status);
- cbor::ParseCBOR(cbor, json_writer.get());
- return status;
-}
-
-Status ConvertCBORToJSON(const Platform& platform,
- span<uint8_t> cbor,
- std::vector<uint8_t>* json) {
- return ConvertCBORToJSONTmpl(platform, cbor, json);
-}
-Status ConvertCBORToJSON(const Platform& platform,
- span<uint8_t> cbor,
- std::string* json) {
- return ConvertCBORToJSONTmpl(platform, cbor, json);
-}
-
-template <typename T, typename C>
-Status ConvertJSONToCBORTmpl(const Platform& platform, span<T> json, C* cbor) {
- Status status;
- std::unique_ptr<StreamingParserHandler> encoder =
- cbor::NewCBOREncoder(cbor, &status);
- ParseJSON(platform, json, encoder.get());
- return status;
-}
-Status ConvertJSONToCBOR(const Platform& platform,
- span<uint8_t> json,
- std::string* cbor) {
- return ConvertJSONToCBORTmpl(platform, json, cbor);
-}
-Status ConvertJSONToCBOR(const Platform& platform,
- span<uint16_t> json,
- std::string* cbor) {
- return ConvertJSONToCBORTmpl(platform, json, cbor);
-}
-Status ConvertJSONToCBOR(const Platform& platform,
- span<uint8_t> json,
- std::vector<uint8_t>* cbor) {
- return ConvertJSONToCBORTmpl(platform, json, cbor);
-}
-Status ConvertJSONToCBOR(const Platform& platform,
- span<uint16_t> json,
- std::vector<uint8_t>* cbor) {
- return ConvertJSONToCBORTmpl(platform, json, cbor);
-}
-} // namespace json
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-
diff --git a/deps/v8/third_party/inspector_protocol/lib/encoding_h.template b/deps/v8/third_party/inspector_protocol/lib/encoding_h.template
deleted file mode 100644
index f1a52a1958..0000000000
--- a/deps/v8/third_party/inspector_protocol/lib/encoding_h.template
+++ /dev/null
@@ -1,520 +0,0 @@
-{# This template is generated by gen_cbor_templates.py. #}
-// Generated by lib/encoding_h.template.
-
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef {{"_".join(config.protocol.namespace)}}_encoding_h
-#define {{"_".join(config.protocol.namespace)}}_encoding_h
-
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-#include <limits>
-#include <memory>
-#include <string>
-#include <vector>
-
-{% for namespace in config.protocol.namespace %}
-namespace {{namespace}} {
-{% endfor %}
-
-// ===== encoding/encoding.h =====
-
-
-// =============================================================================
-// span - sequence of bytes
-// =============================================================================
-
-// This template is similar to std::span, which will be included in C++20.
-template <typename T>
-class span {
- public:
- using index_type = size_t;
-
- span() : data_(nullptr), size_(0) {}
- span(const T* data, index_type size) : data_(data), size_(size) {}
-
- const T* data() const { return data_; }
-
- const T* begin() const { return data_; }
- const T* end() const { return data_ + size_; }
-
- const T& operator[](index_type idx) const { return data_[idx]; }
-
- span<T> subspan(index_type offset, index_type count) const {
- return span(data_ + offset, count);
- }
-
- span<T> subspan(index_type offset) const {
- return span(data_ + offset, size_ - offset);
- }
-
- bool empty() const { return size_ == 0; }
-
- index_type size() const { return size_; }
- index_type size_bytes() const { return size_ * sizeof(T); }
-
- private:
- const T* data_;
- index_type size_;
-};
-
-template <typename T>
-span<T> SpanFrom(const std::vector<T>& v) {
- return span<T>(v.data(), v.size());
-}
-
-template <size_t N>
-span<uint8_t> SpanFrom(const char (&str)[N]) {
- return span<uint8_t>(reinterpret_cast<const uint8_t*>(str), N - 1);
-}
-
-inline span<uint8_t> SpanFrom(const char* str) {
- return str ? span<uint8_t>(reinterpret_cast<const uint8_t*>(str), strlen(str))
- : span<uint8_t>();
-}
-
-inline span<uint8_t> SpanFrom(const std::string& v) {
- return span<uint8_t>(reinterpret_cast<const uint8_t*>(v.data()), v.size());
-}
-
-// =============================================================================
-// Status and Error codes
-// =============================================================================
-enum class Error {
- OK = 0,
- // JSON parsing errors - json_parser.{h,cc}.
- JSON_PARSER_UNPROCESSED_INPUT_REMAINS = 0x01,
- JSON_PARSER_STACK_LIMIT_EXCEEDED = 0x02,
- JSON_PARSER_NO_INPUT = 0x03,
- JSON_PARSER_INVALID_TOKEN = 0x04,
- JSON_PARSER_INVALID_NUMBER = 0x05,
- JSON_PARSER_INVALID_STRING = 0x06,
- JSON_PARSER_UNEXPECTED_ARRAY_END = 0x07,
- JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED = 0x08,
- JSON_PARSER_STRING_LITERAL_EXPECTED = 0x09,
- JSON_PARSER_COLON_EXPECTED = 0x0a,
- JSON_PARSER_UNEXPECTED_MAP_END = 0x0b,
- JSON_PARSER_COMMA_OR_MAP_END_EXPECTED = 0x0c,
- JSON_PARSER_VALUE_EXPECTED = 0x0d,
-
- CBOR_INVALID_INT32 = 0x0e,
- CBOR_INVALID_DOUBLE = 0x0f,
- CBOR_INVALID_ENVELOPE = 0x10,
- CBOR_INVALID_STRING8 = 0x11,
- CBOR_INVALID_STRING16 = 0x12,
- CBOR_INVALID_BINARY = 0x13,
- CBOR_UNSUPPORTED_VALUE = 0x14,
- CBOR_NO_INPUT = 0x15,
- CBOR_INVALID_START_BYTE = 0x16,
- CBOR_UNEXPECTED_EOF_EXPECTED_VALUE = 0x17,
- CBOR_UNEXPECTED_EOF_IN_ARRAY = 0x18,
- CBOR_UNEXPECTED_EOF_IN_MAP = 0x19,
- CBOR_INVALID_MAP_KEY = 0x1a,
- CBOR_STACK_LIMIT_EXCEEDED = 0x1b,
- CBOR_TRAILING_JUNK = 0x1c,
- CBOR_MAP_START_EXPECTED = 0x1d,
- CBOR_MAP_STOP_EXPECTED = 0x1e,
- CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x1f,
-};
-
-// A status value with position that can be copied. The default status
-// is OK. Usually, error status values should come with a valid position.
-struct Status {
- static constexpr size_t npos() { return std::numeric_limits<size_t>::max(); }
-
- bool ok() const { return error == Error::OK; }
-
- Error error = Error::OK;
- size_t pos = npos();
- Status(Error error, size_t pos) : error(error), pos(pos) {}
- Status() = default;
-
- // Returns a 7 bit US-ASCII string, either "OK" or an error message
- // that includes the position.
- std::string ToASCIIString() const;
-
- private:
- std::string ToASCIIString(const char* msg) const;
-};
-
-// Handler interface for parser events emitted by a streaming parser.
-// See cbor::NewCBOREncoder, cbor::ParseCBOR, json::NewJSONEncoder,
-// json::ParseJSON.
-class StreamingParserHandler {
- public:
- virtual ~StreamingParserHandler() = default;
- virtual void HandleMapBegin() = 0;
- virtual void HandleMapEnd() = 0;
- virtual void HandleArrayBegin() = 0;
- virtual void HandleArrayEnd() = 0;
- virtual void HandleString8(span<uint8_t> chars) = 0;
- virtual void HandleString16(span<uint16_t> chars) = 0;
- virtual void HandleBinary(span<uint8_t> bytes) = 0;
- virtual void HandleDouble(double value) = 0;
- virtual void HandleInt32(int32_t value) = 0;
- virtual void HandleBool(bool value) = 0;
- virtual void HandleNull() = 0;
-
- // The parser may send one error even after other events have already
- // been received. Client code is reponsible to then discard the
- // already processed events.
- // |error| must be an eror, as in, |error.is_ok()| can't be true.
- virtual void HandleError(Status error) = 0;
-};
-
-namespace cbor {
-// The binary encoding for the inspector protocol follows the CBOR specification
-// (RFC 7049). Additional constraints:
-// - Only indefinite length maps and arrays are supported.
-// - Maps and arrays are wrapped with an envelope, that is, a
-// CBOR tag with value 24 followed by a byte string specifying
-// the byte length of the enclosed map / array. The byte string
-// must use a 32 bit wide length.
-// - At the top level, a message must be an indefinite length map
-// wrapped by an envelope.
-// - Maximal size for messages is 2^32 (4 GB).
-// - For scalars, we support only the int32_t range, encoded as
-// UNSIGNED/NEGATIVE (major types 0 / 1).
-// - UTF16 strings, including with unbalanced surrogate pairs, are encoded
-// as CBOR BYTE_STRING (major type 2). For such strings, the number of
-// bytes encoded must be even.
-// - UTF8 strings (major type 3) are supported.
-// - 7 bit US-ASCII strings must always be encoded as UTF8 strings, never
-// as UTF16 strings.
-// - Arbitrary byte arrays, in the inspector protocol called 'binary',
-// are encoded as BYTE_STRING (major type 2), prefixed with a byte
-// indicating base64 when rendered as JSON.
-
-// =============================================================================
-// Detecting CBOR content
-// =============================================================================
-
-// The first byte for an envelope, which we use for wrapping dictionaries
-// and arrays; and the byte that indicates a byte string with 32 bit length.
-// These two bytes start an envelope, and thereby also any CBOR message
-// produced or consumed by this protocol. See also |EnvelopeEncoder| below.
-uint8_t InitialByteForEnvelope();
-uint8_t InitialByteFor32BitLengthByteString();
-
-// Checks whether |msg| is a cbor message.
-bool IsCBORMessage(span<uint8_t> msg);
-
-// =============================================================================
-// Encoding individual CBOR items
-// =============================================================================
-
-// Some constants for CBOR tokens that only take a single byte on the wire.
-uint8_t EncodeTrue();
-uint8_t EncodeFalse();
-uint8_t EncodeNull();
-uint8_t EncodeIndefiniteLengthArrayStart();
-uint8_t EncodeIndefiniteLengthMapStart();
-uint8_t EncodeStop();
-
-// Encodes |value| as |UNSIGNED| (major type 0) iff >= 0, or |NEGATIVE|
-// (major type 1) iff < 0.
-void EncodeInt32(int32_t value, std::vector<uint8_t>* out);
-void EncodeInt32(int32_t value, std::string* out);
-
-// Encodes a UTF16 string as a BYTE_STRING (major type 2). Each utf16
-// character in |in| is emitted with most significant byte first,
-// appending to |out|.
-void EncodeString16(span<uint16_t> in, std::vector<uint8_t>* out);
-void EncodeString16(span<uint16_t> in, std::string* out);
-
-// Encodes a UTF8 string |in| as STRING (major type 3).
-void EncodeString8(span<uint8_t> in, std::vector<uint8_t>* out);
-void EncodeString8(span<uint8_t> in, std::string* out);
-
-// Encodes the given |latin1| string as STRING8.
-// If any non-ASCII character is present, it will be represented
-// as a 2 byte UTF8 sequence.
-void EncodeFromLatin1(span<uint8_t> latin1, std::vector<uint8_t>* out);
-void EncodeFromLatin1(span<uint8_t> latin1, std::string* out);
-
-// Encodes the given |utf16| string as STRING8 if it's entirely US-ASCII.
-// Otherwise, encodes as STRING16.
-void EncodeFromUTF16(span<uint16_t> utf16, std::vector<uint8_t>* out);
-void EncodeFromUTF16(span<uint16_t> utf16, std::string* out);
-
-// Encodes arbitrary binary data in |in| as a BYTE_STRING (major type 2) with
-// definitive length, prefixed with tag 22 indicating expected conversion to
-// base64 (see RFC 7049, Table 3 and Section 2.4.4.2).
-void EncodeBinary(span<uint8_t> in, std::vector<uint8_t>* out);
-void EncodeBinary(span<uint8_t> in, std::string* out);
-
-// Encodes / decodes a double as Major type 7 (SIMPLE_VALUE),
-// with additional info = 27, followed by 8 bytes in big endian.
-void EncodeDouble(double value, std::vector<uint8_t>* out);
-void EncodeDouble(double value, std::string* out);
-
-// =============================================================================
-// cbor::EnvelopeEncoder - for wrapping submessages
-// =============================================================================
-
-// An envelope indicates the byte length of a wrapped item.
-// We use this for maps and array, which allows the decoder
-// to skip such (nested) values whole sale.
-// It's implemented as a CBOR tag (major type 6) with additional
-// info = 24, followed by a byte string with a 32 bit length value;
-// so the maximal structure that we can wrap is 2^32 bits long.
-// See also: https://tools.ietf.org/html/rfc7049#section-2.4.4.1
-class EnvelopeEncoder {
- public:
- // Emits the envelope start bytes and records the position for the
- // byte size in |byte_size_pos_|. Also emits empty bytes for the
- // byte sisze so that encoding can continue.
- void EncodeStart(std::vector<uint8_t>* out);
- void EncodeStart(std::string* out);
- // This records the current size in |out| at position byte_size_pos_.
- // Returns true iff successful.
- bool EncodeStop(std::vector<uint8_t>* out);
- bool EncodeStop(std::string* out);
-
- private:
- size_t byte_size_pos_ = 0;
-};
-
-// =============================================================================
-// cbor::NewCBOREncoder - for encoding from a streaming parser
-// =============================================================================
-
-// This can be used to convert to CBOR, by passing the return value to a parser
-// that drives it. The handler will encode into |out|, and iff an error occurs
-// it will set |status| to an error and clear |out|. Otherwise, |status.ok()|
-// will be |true|.
-std::unique_ptr<StreamingParserHandler> NewCBOREncoder(
- std::vector<uint8_t>* out,
- Status* status);
-std::unique_ptr<StreamingParserHandler> NewCBOREncoder(std::string* out,
- Status* status);
-
-// =============================================================================
-// cbor::CBORTokenizer - for parsing individual CBOR items
-// =============================================================================
-
-// Tags for the tokens within a CBOR message that CBORTokenizer understands.
-// Note that this is not the same terminology as the CBOR spec (RFC 7049),
-// but rather, our adaptation. For instance, we lump unsigned and signed
-// major type into INT32 here (and disallow values outside the int32_t range).
-enum class CBORTokenTag {
- // Encountered an error in the structure of the message. Consult
- // status() for details.
- ERROR_VALUE,
- // Booleans and NULL.
- TRUE_VALUE,
- FALSE_VALUE,
- NULL_VALUE,
- // An int32_t (signed 32 bit integer).
- INT32,
- // A double (64 bit floating point).
- DOUBLE,
- // A UTF8 string.
- STRING8,
- // A UTF16 string.
- STRING16,
- // A binary string.
- BINARY,
- // Starts an indefinite length map; after the map start we expect
- // alternating keys and values, followed by STOP.
- MAP_START,
- // Starts an indefinite length array; after the array start we
- // expect values, followed by STOP.
- ARRAY_START,
- // Ends a map or an array.
- STOP,
- // An envelope indicator, wrapping a map or array.
- // Internally this carries the byte length of the wrapped
- // map or array. While CBORTokenizer::Next() will read / skip the entire
- // envelope, CBORTokenizer::EnterEnvelope() reads the tokens
- // inside of it.
- ENVELOPE,
- // We've reached the end there is nothing else to read.
- DONE,
-};
-
-// The major types from RFC 7049 Section 2.1.
-enum class MajorType {
- UNSIGNED = 0,
- NEGATIVE = 1,
- BYTE_STRING = 2,
- STRING = 3,
- ARRAY = 4,
- MAP = 5,
- TAG = 6,
- SIMPLE_VALUE = 7
-};
-
-// CBORTokenizer segments a CBOR message, presenting the tokens therein as
-// numbers, strings, etc. This is not a complete CBOR parser, but makes it much
-// easier to implement one (e.g. ParseCBOR, above). It can also be used to parse
-// messages partially.
-class CBORTokenizer {
- public:
- explicit CBORTokenizer(span<uint8_t> bytes);
- ~CBORTokenizer();
-
- // Identifies the current token that we're looking at,
- // or ERROR_VALUE (in which ase ::Status() has details)
- // or DONE (if we're past the last token).
- CBORTokenTag TokenTag() const;
-
- // Advances to the next token.
- void Next();
- // Can only be called if TokenTag() == CBORTokenTag::ENVELOPE.
- // While Next() would skip past the entire envelope / what it's
- // wrapping, EnterEnvelope positions the cursor inside of the envelope,
- // letting the client explore the nested structure.
- void EnterEnvelope();
-
- // If TokenTag() is CBORTokenTag::ERROR_VALUE, then Status().error describes
- // the error more precisely; otherwise it'll be set to Error::OK.
- // In either case, Status().pos is the current position.
- struct Status Status() const;
-
- // The following methods retrieve the token values. They can only
- // be called if TokenTag() matches.
-
- // To be called only if ::TokenTag() == CBORTokenTag::INT32.
- int32_t GetInt32() const;
-
- // To be called only if ::TokenTag() == CBORTokenTag::DOUBLE.
- double GetDouble() const;
-
- // To be called only if ::TokenTag() == CBORTokenTag::STRING8.
- span<uint8_t> GetString8() const;
-
- // Wire representation for STRING16 is low byte first (little endian).
- // To be called only if ::TokenTag() == CBORTokenTag::STRING16.
- span<uint8_t> GetString16WireRep() const;
-
- // To be called only if ::TokenTag() == CBORTokenTag::BINARY.
- span<uint8_t> GetBinary() const;
-
- // To be called only if ::TokenTag() == CBORTokenTag::ENVELOPE.
- span<uint8_t> GetEnvelopeContents() const;
-
- private:
- void ReadNextToken(bool enter_envelope);
- void SetToken(CBORTokenTag token, size_t token_byte_length);
- void SetError(Error error);
-
- span<uint8_t> bytes_;
- CBORTokenTag token_tag_;
- struct Status status_;
- size_t token_byte_length_;
- MajorType token_start_type_;
- uint64_t token_start_internal_value_;
-};
-
-// =============================================================================
-// cbor::ParseCBOR - for receiving streaming parser events for CBOR messages
-// =============================================================================
-
-// Parses a CBOR encoded message from |bytes|, sending events to
-// |out|. If an error occurs, sends |out->HandleError|, and parsing stops.
-// The client is responsible for discarding the already received information in
-// that case.
-void ParseCBOR(span<uint8_t> bytes, StreamingParserHandler* out);
-
-// =============================================================================
-// cbor::AppendString8EntryToMap - for limited in-place editing of messages
-// =============================================================================
-
-// Modifies the |cbor| message by appending a new key/value entry at the end
-// of the map. Patches up the envelope size; Status.ok() iff successful.
-// If not successful, |cbor| may be corrupted after this call.
-Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
- span<uint8_t> string8_value,
- std::vector<uint8_t>* cbor);
-Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
- span<uint8_t> string8_value,
- std::string* cbor);
-
-namespace internals { // Exposed only for writing tests.
-int8_t ReadTokenStart(span<uint8_t> bytes,
- cbor::MajorType* type,
- uint64_t* value);
-
-void WriteTokenStart(cbor::MajorType type,
- uint64_t value,
- std::vector<uint8_t>* encoded);
-void WriteTokenStart(cbor::MajorType type,
- uint64_t value,
- std::string* encoded);
-} // namespace internals
-} // namespace cbor
-
-namespace json {
-// Client code must provide an instance. Implementation should delegate
-// to whatever is appropriate.
-class Platform {
- public:
- virtual ~Platform() = default;
- // Parses |str| into |result|. Returns false iff there are
- // leftover characters or parsing errors.
- virtual bool StrToD(const char* str, double* result) const = 0;
-
- // Prints |value| in a format suitable for JSON.
- virtual std::unique_ptr<char[]> DToStr(double value) const = 0;
-};
-
-// =============================================================================
-// json::NewJSONEncoder - for encoding streaming parser events as JSON
-// =============================================================================
-
-// Returns a handler object which will write ascii characters to |out|.
-// |status->ok()| will be false iff the handler routine HandleError() is called.
-// In that case, we'll stop emitting output.
-// Except for calling the HandleError routine at any time, the client
-// code must call the Handle* methods in an order in which they'd occur
-// in valid JSON; otherwise we may crash (the code uses assert).
-std::unique_ptr<StreamingParserHandler> NewJSONEncoder(
- const Platform* platform,
- std::vector<uint8_t>* out,
- Status* status);
-std::unique_ptr<StreamingParserHandler> NewJSONEncoder(const Platform* platform,
- std::string* out,
- Status* status);
-
-// =============================================================================
-// json::ParseJSON - for receiving streaming parser events for JSON
-// =============================================================================
-
-void ParseJSON(const Platform& platform,
- span<uint8_t> chars,
- StreamingParserHandler* handler);
-void ParseJSON(const Platform& platform,
- span<uint16_t> chars,
- StreamingParserHandler* handler);
-
-// =============================================================================
-// json::ConvertCBORToJSON, json::ConvertJSONToCBOR - for transcoding
-// =============================================================================
-Status ConvertCBORToJSON(const Platform& platform,
- span<uint8_t> cbor,
- std::string* json);
-Status ConvertCBORToJSON(const Platform& platform,
- span<uint8_t> cbor,
- std::vector<uint8_t>* json);
-Status ConvertJSONToCBOR(const Platform& platform,
- span<uint8_t> json,
- std::vector<uint8_t>* cbor);
-Status ConvertJSONToCBOR(const Platform& platform,
- span<uint16_t> json,
- std::vector<uint8_t>* cbor);
-Status ConvertJSONToCBOR(const Platform& platform,
- span<uint8_t> json,
- std::string* cbor);
-Status ConvertJSONToCBOR(const Platform& platform,
- span<uint16_t> json,
- std::string* cbor);
-} // namespace json
-
-{% for namespace in config.protocol.namespace %}
-} // namespace {{namespace}}
-{% endfor %}
-#endif // !defined({{"_".join(config.protocol.namespace)}}_encoding_h)
diff --git a/deps/v8/third_party/inspector_protocol/pdl.py b/deps/v8/third_party/inspector_protocol/pdl.py
index 03d11b39d6..d7733634e5 100644
--- a/deps/v8/third_party/inspector_protocol/pdl.py
+++ b/deps/v8/third_party/inspector_protocol/pdl.py
@@ -12,160 +12,167 @@ import sys
description = ''
-primitiveTypes = ['integer', 'number', 'boolean', 'string', 'object', 'any', 'array', 'binary']
+primitiveTypes = ['integer', 'number', 'boolean', 'string', 'object',
+ 'any', 'array', 'binary']
def assignType(item, type, is_array=False, map_binary_to_string=False):
- if is_array:
- item['type'] = 'array'
- item['items'] = collections.OrderedDict()
- assignType(item['items'], type, False, map_binary_to_string)
- return
-
- if type == 'enum':
- type = 'string'
- if map_binary_to_string and type == 'binary':
- type = 'string'
- if type in primitiveTypes:
- item['type'] = type
- else:
- item['$ref'] = type
+ if is_array:
+ item['type'] = 'array'
+ item['items'] = collections.OrderedDict()
+ assignType(item['items'], type, False, map_binary_to_string)
+ return
+
+ if type == 'enum':
+ type = 'string'
+ if map_binary_to_string and type == 'binary':
+ type = 'string'
+ if type in primitiveTypes:
+ item['type'] = type
+ else:
+ item['$ref'] = type
def createItem(d, experimental, deprecated, name=None):
- result = collections.OrderedDict(d)
- if name:
- result['name'] = name
- global description
- if description:
- result['description'] = description.strip()
- if experimental:
- result['experimental'] = True
- if deprecated:
- result['deprecated'] = True
- return result
+ result = collections.OrderedDict(d)
+ if name:
+ result['name'] = name
+ global description
+ if description:
+ result['description'] = description.strip()
+ if experimental:
+ result['experimental'] = True
+ if deprecated:
+ result['deprecated'] = True
+ return result
def parse(data, file_name, map_binary_to_string=False):
- protocol = collections.OrderedDict()
- protocol['version'] = collections.OrderedDict()
- protocol['domains'] = []
- domain = None
- item = None
- subitems = None
- nukeDescription = False
- global description
- lines = data.split('\n')
- for i in range(0, len(lines)):
- if nukeDescription:
- description = ''
- nukeDescription = False
- line = lines[i]
- trimLine = line.strip()
-
- if trimLine.startswith('#'):
- if len(description):
- description += '\n'
- description += trimLine[2:]
- continue
+ protocol = collections.OrderedDict()
+ protocol['version'] = collections.OrderedDict()
+ protocol['domains'] = []
+ domain = None
+ item = None
+ subitems = None
+ nukeDescription = False
+ global description
+ lines = data.split('\n')
+ for i in range(0, len(lines)):
+ if nukeDescription:
+ description = ''
+ nukeDescription = False
+ line = lines[i]
+ trimLine = line.strip()
+
+ if trimLine.startswith('#'):
+ if len(description):
+ description += '\n'
+ description += trimLine[2:]
+ continue
+ else:
+ nukeDescription = True
+
+ if len(trimLine) == 0:
+ continue
+
+ match = re.compile(
+ r'^(experimental )?(deprecated )?domain (.*)').match(line)
+ if match:
+ domain = createItem({'domain' : match.group(3)}, match.group(1),
+ match.group(2))
+ protocol['domains'].append(domain)
+ continue
+
+ match = re.compile(r'^ depends on ([^\s]+)').match(line)
+ if match:
+ if 'dependencies' not in domain:
+ domain['dependencies'] = []
+ domain['dependencies'].append(match.group(1))
+ continue
+
+ match = re.compile(r'^ (experimental )?(deprecated )?type (.*) '
+ r'extends (array of )?([^\s]+)').match(line)
+ if match:
+ if 'types' not in domain:
+ domain['types'] = []
+ item = createItem({'id': match.group(3)}, match.group(1), match.group(2))
+ assignType(item, match.group(5), match.group(4), map_binary_to_string)
+ domain['types'].append(item)
+ continue
+
+ match = re.compile(
+ r'^ (experimental )?(deprecated )?(command|event) (.*)').match(line)
+ if match:
+ list = []
+ if match.group(3) == 'command':
+ if 'commands' in domain:
+ list = domain['commands']
+ else:
+ list = domain['commands'] = []
+ else:
+ if 'events' in domain:
+ list = domain['events']
else:
- nukeDescription = True
-
- if len(trimLine) == 0:
- continue
-
- match = re.compile(r'^(experimental )?(deprecated )?domain (.*)').match(line)
- if match:
- domain = createItem({'domain' : match.group(3)}, match.group(1), match.group(2))
- protocol['domains'].append(domain)
- continue
-
- match = re.compile(r'^ depends on ([^\s]+)').match(line)
- if match:
- if 'dependencies' not in domain:
- domain['dependencies'] = []
- domain['dependencies'].append(match.group(1))
- continue
-
- match = re.compile(r'^ (experimental )?(deprecated )?type (.*) extends (array of )?([^\s]+)').match(line)
- if match:
- if 'types' not in domain:
- domain['types'] = []
- item = createItem({'id': match.group(3)}, match.group(1), match.group(2))
- assignType(item, match.group(5), match.group(4), map_binary_to_string)
- domain['types'].append(item)
- continue
-
- match = re.compile(r'^ (experimental )?(deprecated )?(command|event) (.*)').match(line)
- if match:
- list = []
- if match.group(3) == 'command':
- if 'commands' in domain:
- list = domain['commands']
- else:
- list = domain['commands'] = []
- else:
- if 'events' in domain:
- list = domain['events']
- else:
- list = domain['events'] = []
-
- item = createItem({}, match.group(1), match.group(2), match.group(4))
- list.append(item)
- continue
-
- match = re.compile(r'^ (experimental )?(deprecated )?(optional )?(array of )?([^\s]+) ([^\s]+)').match(line)
- if match:
- param = createItem({}, match.group(1), match.group(2), match.group(6))
- if match.group(3):
- param['optional'] = True
- assignType(param, match.group(5), match.group(4), map_binary_to_string)
- if match.group(5) == 'enum':
- enumliterals = param['enum'] = []
- subitems.append(param)
- continue
-
- match = re.compile(r'^ (parameters|returns|properties)').match(line)
- if match:
- subitems = item[match.group(1)] = []
- continue
-
- match = re.compile(r'^ enum').match(line)
- if match:
- enumliterals = item['enum'] = []
- continue
-
- match = re.compile(r'^version').match(line)
- if match:
- continue
-
- match = re.compile(r'^ major (\d+)').match(line)
- if match:
- protocol['version']['major'] = match.group(1)
- continue
-
- match = re.compile(r'^ minor (\d+)').match(line)
- if match:
- protocol['version']['minor'] = match.group(1)
- continue
-
- match = re.compile(r'^ redirect ([^\s]+)').match(line)
- if match:
- item['redirect'] = match.group(1)
- continue
-
- match = re.compile(r'^ ( )?[^\s]+$').match(line)
- if match:
- # enum literal
- enumliterals.append(trimLine)
- continue
-
- print('Error in %s:%s, illegal token: \t%s' % (file_name, i, line))
- sys.exit(1)
- return protocol
+ list = domain['events'] = []
+
+ item = createItem({}, match.group(1), match.group(2), match.group(4))
+ list.append(item)
+ continue
+
+ match = re.compile(
+ r'^ (experimental )?(deprecated )?(optional )?'
+ r'(array of )?([^\s]+) ([^\s]+)').match(line)
+ if match:
+ param = createItem({}, match.group(1), match.group(2), match.group(6))
+ if match.group(3):
+ param['optional'] = True
+ assignType(param, match.group(5), match.group(4), map_binary_to_string)
+ if match.group(5) == 'enum':
+ enumliterals = param['enum'] = []
+ subitems.append(param)
+ continue
+
+ match = re.compile(r'^ (parameters|returns|properties)').match(line)
+ if match:
+ subitems = item[match.group(1)] = []
+ continue
+
+ match = re.compile(r'^ enum').match(line)
+ if match:
+ enumliterals = item['enum'] = []
+ continue
+
+ match = re.compile(r'^version').match(line)
+ if match:
+ continue
+
+ match = re.compile(r'^ major (\d+)').match(line)
+ if match:
+ protocol['version']['major'] = match.group(1)
+ continue
+
+ match = re.compile(r'^ minor (\d+)').match(line)
+ if match:
+ protocol['version']['minor'] = match.group(1)
+ continue
+
+ match = re.compile(r'^ redirect ([^\s]+)').match(line)
+ if match:
+ item['redirect'] = match.group(1)
+ continue
+
+ match = re.compile(r'^ ( )?[^\s]+$').match(line)
+ if match:
+ # enum literal
+ enumliterals.append(trimLine)
+ continue
+
+ print('Error in %s:%s, illegal token: \t%s' % (file_name, i, line))
+ sys.exit(1)
+ return protocol
def loads(data, file_name, map_binary_to_string=False):
- if file_name.endswith(".pdl"):
- return parse(data, file_name, map_binary_to_string)
- return json.loads(data)
+ if file_name.endswith(".pdl"):
+ return parse(data, file_name, map_binary_to_string)
+ return json.loads(data)
diff --git a/deps/v8/third_party/inspector_protocol/roll.py b/deps/v8/third_party/inspector_protocol/roll.py
index abe636e270..ba265198ce 100755
--- a/deps/v8/third_party/inspector_protocol/roll.py
+++ b/deps/v8/third_party/inspector_protocol/roll.py
@@ -21,6 +21,9 @@ FILES_TO_SYNC = [
'encoding/encoding.h',
'encoding/encoding.cc',
'encoding/encoding_test.cc',
+ 'bindings/bindings.h',
+ 'bindings/bindings.cc',
+ 'bindings/bindings_test.cc',
'inspector_protocol.gni',
'inspector_protocol.gypi',
'lib/*',
@@ -143,6 +146,12 @@ def main(argv):
contents = contents.replace(
'namespace inspector_protocol_encoding',
'namespace v8_inspector_protocol_encoding')
+ contents = contents.replace(
+ 'INSPECTOR_PROTOCOL_BINDINGS_BINDINGS_H_',
+ 'V8_INSPECTOR_PROTOCOL_BINDINGS_BINDINGS_H_')
+ contents = contents.replace(
+ 'namespace inspector_protocol_bindings',
+ 'namespace v8_inspector_protocol_bindings')
open(os.path.join(dest_dir, f), 'w').write(contents)
shutil.copymode(os.path.join(src_dir, f), os.path.join(dest_dir, f))
for f in to_delete:
diff --git a/deps/v8/third_party/v8/builtins/OWNERS b/deps/v8/third_party/v8/builtins/OWNERS
new file mode 100644
index 0000000000..255508218e
--- /dev/null
+++ b/deps/v8/third_party/v8/builtins/OWNERS
@@ -0,0 +1,3 @@
+jgruber@chromium.org
+szuend@chromium.org
+tebbi@chromium.org
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index c751e4831d..530ed4faf9 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -14,7 +14,7 @@
// https://github.com/python/cpython/blob/master/Objects/listsort.txt
namespace array {
- class SortState {
+ class SortState extends Struct {
Compare(implicit context: Context)(x: Object, y: Object): Number {
const sortCompare: CompareBuiltinFn = this.sortComparePtr;
return sortCompare(context, this.userCmpFn, x, y);
@@ -144,7 +144,7 @@ namespace array {
try {
GotoIfForceSlowPath() otherwise Slow;
- let a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+ const a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
// Copy copy-on-write (COW) arrays.
array::EnsureWriteableFastElements(a);
@@ -230,7 +230,7 @@ namespace array {
transitioning builtin Load<ElementsAccessor: type>(
context: Context, sortState: SortState, index: Smi): Object {
const receiver = sortState.receiver;
- if (!HasProperty_Inline(receiver, index)) return Hole;
+ if (!HasProperty_Inline(receiver, index)) return TheHole;
return GetProperty(receiver, index);
}
@@ -257,7 +257,7 @@ namespace array {
return AllocateHeapNumberWithValue(value);
}
label IfHole {
- return Hole;
+ return TheHole;
}
}
@@ -298,7 +298,7 @@ namespace array {
context: Context, sortState: SortState, index: Smi): Smi {
const receiver = sortState.receiver;
if (!HasProperty_Inline(receiver, index)) return kSuccess;
- DeleteProperty(receiver, index, kSloppy);
+ DeleteProperty(receiver, index, kStrict);
return kSuccess;
}
@@ -308,7 +308,7 @@ namespace array {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
- elements.objects[index] = Hole;
+ elements.objects[index] = TheHole;
return kSuccess;
}
@@ -318,7 +318,7 @@ namespace array {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
- elements.objects[index] = Hole;
+ elements.objects[index] = TheHole;
return kSuccess;
}
@@ -389,8 +389,8 @@ namespace array {
}
CanUseSameAccessor<GenericElementsAccessor>(
- context: Context, receiver: JSReceiver, initialReceiverMap: Object,
- initialReceiverLength: Number): Boolean {
+ _context: Context, _receiver: JSReceiver, _initialReceiverMap: Object,
+ _initialReceiverLength: Number): Boolean {
// Do nothing. We are already on the slow path.
return True;
}
@@ -477,7 +477,7 @@ namespace array {
} else {
let srcIdx: Smi = srcPos;
let dstIdx: Smi = dstPos;
- let to: Smi = srcPos + length;
+ const to: Smi = srcPos + length;
while (srcIdx < to) {
target.objects[dstIdx++] = source.objects[srcIdx++];
@@ -566,7 +566,7 @@ namespace array {
const workArray = sortState.workArray;
- let low: Smi = lowArg + 1;
+ const low: Smi = lowArg + 1;
if (low == high) return 1;
let runLength: Smi = 2;
@@ -631,7 +631,7 @@ namespace array {
const pendingRuns: FixedArray = sortState.pendingRuns;
let baseA: Smi = GetPendingRunBase(pendingRuns, i);
let lengthA: Smi = GetPendingRunLength(pendingRuns, i);
- let baseB: Smi = GetPendingRunBase(pendingRuns, i + 1);
+ const baseB: Smi = GetPendingRunBase(pendingRuns, i + 1);
let lengthB: Smi = GetPendingRunLength(pendingRuns, i + 1);
assert(lengthA > 0 && lengthB > 0);
assert(baseA + lengthA == baseB);
@@ -710,7 +710,7 @@ namespace array {
// a[base + hint + lastOfs] < key <= a[base + hint + offset].
// a[base + length - 1] is highest.
- let maxOfs: Smi = length - hint;
+ const maxOfs: Smi = length - hint;
while (offset < maxOfs) {
const offsetElement = array.objects[base + hint + offset];
order = sortState.Compare(offsetElement, key);
@@ -736,7 +736,7 @@ namespace array {
assert(order >= 0);
// a[base + hint] is lowest.
- let maxOfs: Smi = hint + 1;
+ const maxOfs: Smi = hint + 1;
while (offset < maxOfs) {
const offsetElement = array.objects[base + hint - offset];
order = sortState.Compare(offsetElement, key);
@@ -807,7 +807,7 @@ namespace array {
// a[base + hint - offset] <= key < a[base + hint - lastOfs].
// a[base + hint] is lowest.
- let maxOfs: Smi = hint + 1;
+ const maxOfs: Smi = hint + 1;
while (offset < maxOfs) {
const offsetElement = array.objects[base + hint - offset];
order = sortState.Compare(key, offsetElement);
@@ -832,7 +832,7 @@ namespace array {
// a[base + hint + lastOfs] <= key < a[base + hint + offset].
// a[base + length - 1] is highest.
- let maxOfs: Smi = length - hint;
+ const maxOfs: Smi = length - hint;
while (offset < maxOfs) {
const offsetElement = array.objects[base + hint + offset];
order = sortState.Compare(key, offsetElement);
@@ -920,7 +920,7 @@ namespace array {
while (Int32TrueConstant()) {
assert(lengthA > 1 && lengthB > 0);
- let order = sortState.Compare(
+ const order = sortState.Compare(
workArray.objects[cursorB], tempArray.objects[cursorTemp]);
if (order < 0) {
@@ -1052,7 +1052,7 @@ namespace array {
while (Int32TrueConstant()) {
assert(lengthA > 0 && lengthB > 1);
- let order = sortState.Compare(
+ const order = sortState.Compare(
tempArray.objects[cursorTemp], workArray.objects[cursorA]);
if (order < 0) {
@@ -1222,7 +1222,7 @@ namespace array {
// remains. This is used at the end of the mergesort.
transitioning macro
MergeForceCollapse(context: Context, sortState: SortState) {
- let pendingRuns: FixedArray = sortState.pendingRuns;
+ const pendingRuns: FixedArray = sortState.pendingRuns;
// Reload the stack size becuase MergeAt might change it.
while (GetPendingRunsSize(sortState) > 1) {
@@ -1297,7 +1297,7 @@ namespace array {
for (let i: Smi = 0; i < receiverLength; ++i) {
const element: Object = loadFn(context, sortState, i);
- if (element == Hole) {
+ if (element == TheHole) {
// Do nothing for holes. The result is that elements are
// compacted at the front of the work array.
} else if (element == Undefined) {
@@ -1330,7 +1330,7 @@ namespace array {
// 1. Copy the sorted elements from the workarray to the receiver.
// 2. Add {nOfUndefined} undefineds to the receiver.
// 3. Depending on the backing store either delete properties or
- // set them to the Hole up to {sortState.sortLength}.
+ // set them to the TheHole up to {sortState.sortLength}.
let index: Smi = 0;
for (; index < numberOfNonUndefined; ++index) {
storeFn(context, sortState, index, workArray.objects[index]);
@@ -1369,7 +1369,8 @@ namespace array {
// https://tc39.github.io/ecma262/#sec-array.prototype.sort
transitioning javascript builtin
- ArrayPrototypeSort(context: Context, receiver: Object, ...arguments): Object {
+ ArrayPrototypeSort(js-implicit context: Context, receiver: Object)(
+ ...arguments): Object {
// 1. If comparefn is not undefined and IsCallable(comparefn) is false,
// throw a TypeError exception.
const comparefnObj: Object = arguments[0];
diff --git a/deps/v8/third_party/wasm-api/LICENSE b/deps/v8/third_party/wasm-api/LICENSE
new file mode 100644
index 0000000000..8f71f43fee
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/deps/v8/third_party/wasm-api/OWNERS b/deps/v8/third_party/wasm-api/OWNERS
new file mode 100644
index 0000000000..596440f532
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/OWNERS
@@ -0,0 +1,2 @@
+jkummerow@chromium.org
+mstarzinger@chromium.org
diff --git a/deps/v8/third_party/wasm-api/README.v8 b/deps/v8/third_party/wasm-api/README.v8
new file mode 100644
index 0000000000..ea957620b0
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/README.v8
@@ -0,0 +1,17 @@
+Name: Wasm C/C++ API
+Short Name: wasm-c-api
+URL: https://github.com/WebAssembly/wasm-c-api/
+Version: 0
+Revision: 5c742b048f7766a0c00be3a7af23fb71ba816026
+Date: 2019-03-18
+License: Apache 2.0
+License File: LICENSE
+Security Critical: yes
+
+Description:
+Provides a "black box" API for embedding a Wasm engine in C/C++ applications.
+
+Local modifications:
+None.
+The contents of the upstream "include/" directory are directly in here.
+The upstream "example/" directory is copied as-is.
diff --git a/deps/v8/third_party/wasm-api/example/callback.c b/deps/v8/third_party/wasm-api/example/callback.c
new file mode 100644
index 0000000000..f3b9018594
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/callback.c
@@ -0,0 +1,167 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+// Print a Wasm value
+void wasm_val_print(wasm_val_t val) {
+ switch (val.kind) {
+ case WASM_I32: {
+ printf("%" PRIu32, val.of.i32);
+ } break;
+ case WASM_I64: {
+ printf("%" PRIu64, val.of.i64);
+ } break;
+ case WASM_F32: {
+ printf("%f", val.of.f32);
+ } break;
+ case WASM_F64: {
+ printf("%g", val.of.f64);
+ } break;
+ case WASM_ANYREF:
+ case WASM_FUNCREF: {
+ if (val.of.ref == NULL) {
+ printf("null");
+ } else {
+ printf("ref(%p)", val.of.ref);
+ }
+ } break;
+ }
+}
+
+// A function to be called from Wasm code.
+own wasm_trap_t* print_callback(
+ const wasm_val_t args[], wasm_val_t results[]
+) {
+ printf("Calling back...\n> ");
+ wasm_val_print(args[0]);
+ printf("\n");
+
+ wasm_val_copy(&results[0], &args[0]);
+ return NULL;
+}
+
+
+// A function closure.
+own wasm_trap_t* closure_callback(
+ void* env, const wasm_val_t args[], wasm_val_t results[]
+) {
+ int i = *(int*)env;
+ printf("Calling back closure...\n");
+ printf("> %d\n", i);
+
+ results[0].kind = WASM_I32;
+ results[0].of.i32 = (int32_t)i;
+ return NULL;
+}
+
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("callback.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Create external print functions.
+ printf("Creating callback...\n");
+ own wasm_functype_t* print_type = wasm_functype_new_1_1(wasm_valtype_new_i32(), wasm_valtype_new_i32());
+ own wasm_func_t* print_func = wasm_func_new(store, print_type, print_callback);
+
+ int i = 42;
+ own wasm_functype_t* closure_type = wasm_functype_new_0_1(wasm_valtype_new_i32());
+ own wasm_func_t* closure_func = wasm_func_new_with_env(store, closure_type, closure_callback, &i, NULL);
+
+ wasm_functype_delete(print_type);
+ wasm_functype_delete(closure_type);
+
+ // Instantiate.
+ printf("Instantiating module...\n");
+ const wasm_extern_t* imports[] = {
+ wasm_func_as_extern(print_func), wasm_func_as_extern(closure_func)
+ };
+ own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return 1;
+ }
+
+ wasm_func_delete(print_func);
+ wasm_func_delete(closure_func);
+
+ // Extract export.
+ printf("Extracting export...\n");
+ own wasm_extern_vec_t exports;
+ wasm_instance_exports(instance, &exports);
+ if (exports.size == 0) {
+ printf("> Error accessing exports!\n");
+ return 1;
+ }
+ const wasm_func_t* run_func = wasm_extern_as_func(exports.data[0]);
+ if (run_func == NULL) {
+ printf("> Error accessing export!\n");
+ return 1;
+ }
+
+ wasm_module_delete(module);
+ wasm_instance_delete(instance);
+
+ // Call.
+ printf("Calling export...\n");
+ wasm_val_t args[2];
+ args[0].kind = WASM_I32;
+ args[0].of.i32 = 3;
+ args[1].kind = WASM_I32;
+ args[1].of.i32 = 4;
+ wasm_val_t results[1];
+ if (wasm_func_call(run_func, args, results)) {
+ printf("> Error calling function!\n");
+ return 1;
+ }
+
+ wasm_extern_vec_delete(&exports);
+
+ // Print result.
+ printf("Printing result...\n");
+ printf("> %u\n", results[0].of.i32);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/callback.cc b/deps/v8/third_party/wasm-api/example/callback.cc
new file mode 100644
index 0000000000..a9ee9aa919
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/callback.cc
@@ -0,0 +1,145 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+// Print a Wasm value
+auto operator<<(std::ostream& out, const wasm::Val& val) -> std::ostream& {
+ switch (val.kind()) {
+ case wasm::I32: {
+ out << val.i32();
+ } break;
+ case wasm::I64: {
+ out << val.i64();
+ } break;
+ case wasm::F32: {
+ out << val.f32();
+ } break;
+ case wasm::F64: {
+ out << val.f64();
+ } break;
+ case wasm::ANYREF:
+ case wasm::FUNCREF: {
+ if (val.ref() == nullptr) {
+ out << "null";
+ } else {
+ out << "ref(" << val.ref() << ")";
+ }
+ } break;
+ }
+ return out;
+}
+
+// A function to be called from Wasm code.
+auto print_callback(
+ const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap*> {
+ std::cout << "Calling back..." << std::endl << "> " << args[0] << std::endl;
+ results[0] = args[0].copy();
+ return nullptr;
+}
+
+
+// A function closure.
+auto closure_callback(
+ void* env, const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap*> {
+ auto i = *reinterpret_cast<int*>(env);
+ std::cout << "Calling back closure..." << std::endl;
+ std::cout << "> " << i << std::endl;
+ results[0] = wasm::Val::i32(static_cast<int32_t>(i));
+ return nullptr;
+}
+
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("callback.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Create external print functions.
+ std::cout << "Creating callback..." << std::endl;
+ auto print_type = wasm::FuncType::make(
+ wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32)),
+ wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32))
+ );
+ auto print_func = wasm::Func::make(store, print_type.get(), print_callback);
+
+ // Creating closure.
+ std::cout << "Creating closure..." << std::endl;
+ int i = 42;
+ auto closure_type = wasm::FuncType::make(
+ wasm::vec<wasm::ValType*>::make(),
+ wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32))
+ );
+ auto closure_func = wasm::Func::make(store, closure_type.get(), closure_callback, &i);
+
+ // Instantiate.
+ std::cout << "Instantiating module..." << std::endl;
+ wasm::Extern* imports[] = {print_func.get(), closure_func.get()};
+ auto instance = wasm::Instance::make(store, module.get(), imports);
+ if (!instance) {
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract export.
+ std::cout << "Extracting export..." << std::endl;
+ auto exports = instance->exports();
+ if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
+ std::cout << "> Error accessing export!" << std::endl;
+ return;
+ }
+ auto run_func = exports[0]->func();
+
+ // Call.
+ std::cout << "Calling export..." << std::endl;
+ wasm::Val args[] = {wasm::Val::i32(3), wasm::Val::i32(4)};
+ wasm::Val results[1];
+ if (run_func->call(args, results)) {
+ std::cout << "> Error calling function!" << std::endl;
+ return;
+ }
+
+ // Print result.
+ std::cout << "Printing result..." << std::endl;
+ std::cout << "> " << results[0].i32() << std::endl;
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/callback.wasm b/deps/v8/third_party/wasm-api/example/callback.wasm
new file mode 100644
index 0000000000..7e00b58014
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/callback.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/callback.wat b/deps/v8/third_party/wasm-api/example/callback.wat
new file mode 100644
index 0000000000..d86195f51d
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/callback.wat
@@ -0,0 +1,10 @@
+(module
+ (func $print (import "" "print") (param i32) (result i32))
+ (func $closure (import "" "closure") (result i32))
+ (func (export "run") (param $x i32) (param $y i32) (result i32)
+ (i32.add
+ (call $print (i32.add (local.get $x) (local.get $y)))
+ (call $closure)
+ )
+ )
+)
diff --git a/deps/v8/third_party/wasm-api/example/finalize.c b/deps/v8/third_party/wasm-api/example/finalize.c
new file mode 100644
index 0000000000..6841617262
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/finalize.c
@@ -0,0 +1,75 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+const int iterations = 100000;
+
+void finalize(void* data) {
+ int i = (int)data;
+ if (i % (iterations / 10) == 0) printf("Finalizing #%d...\n", i);
+}
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("finalize.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Instantiate.
+ printf("Instantiating modules...\n");
+ for (int i = 0; i <= iterations; ++i) {
+ if (i % (iterations / 10) == 0) printf("%d\n", i);
+ own wasm_instance_t* instance = wasm_instance_new(store, module, NULL);
+ if (!instance) {
+ printf("> Error instantiating module %d!\n", i);
+ return 1;
+ }
+ void* data = (void*)(intptr_t)i;
+ wasm_instance_set_host_info_with_finalizer(instance, data, &finalize);
+ wasm_instance_delete(instance);
+ }
+
+ wasm_module_delete(module);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/finalize.cc b/deps/v8/third_party/wasm-api/example/finalize.cc
new file mode 100644
index 0000000000..a354a2601d
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/finalize.cc
@@ -0,0 +1,70 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+const int iterations = 100000;
+
+void finalize(void* data) {
+ intptr_t i = reinterpret_cast<intptr_t>(data);
+ if (i % (iterations / 10) == 0) {
+ std::cout << "Finalizing #" << i << "..." << std::endl;
+ }
+}
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("finalize.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Instantiate.
+ std::cout << "Instantiating modules..." << std::endl;
+ for (int i = 0; i <= iterations; ++i) {
+ if (i % (iterations / 10) == 0) std::cout << i << std::endl;
+ auto instance = wasm::Instance::make(store, module.get(), nullptr);
+ if (!instance) {
+ std::cout << "> Error instantiating module " << i << "!" << std::endl;
+ return;
+ }
+ instance->set_host_info(reinterpret_cast<void*>(i), &finalize);
+ }
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/finalize.wasm b/deps/v8/third_party/wasm-api/example/finalize.wasm
new file mode 100644
index 0000000000..74f9c56624
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/finalize.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/finalize.wat b/deps/v8/third_party/wasm-api/example/finalize.wat
new file mode 100644
index 0000000000..6237e734ac
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/finalize.wat
@@ -0,0 +1,5 @@
+(module
+ (func (export "f"))
+ (func (export "g"))
+ (func (export "h"))
+)
diff --git a/deps/v8/third_party/wasm-api/example/global.c b/deps/v8/third_party/wasm-api/example/global.c
new file mode 100644
index 0000000000..b82d86242e
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/global.c
@@ -0,0 +1,222 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+wasm_global_t* get_export_global(const wasm_extern_vec_t* exports, size_t i) {
+ if (exports->size <= i || !wasm_extern_as_global(exports->data[i])) {
+ printf("> Error accessing global export %zu!\n", i);
+ exit(1);
+ }
+ return wasm_extern_as_global(exports->data[i]);
+}
+
+wasm_func_t* get_export_func(const wasm_extern_vec_t* exports, size_t i) {
+ if (exports->size <= i || !wasm_extern_as_func(exports->data[i])) {
+ printf("> Error accessing function export %zu!\n", i);
+ exit(1);
+ }
+ return wasm_extern_as_func(exports->data[i]);
+}
+
+
+#define check(val, type, expected) \
+ if (val.of.type != expected) { \
+ printf("> Error reading value\n"); \
+ exit(1); \
+ }
+
+#define check_global(global, type, expected) \
+ { \
+ wasm_val_t val; \
+ wasm_global_get(global, &val); \
+ check(val, type, expected); \
+ }
+
+#define check_call(func, type, expected) \
+ { \
+ wasm_val_t results[1]; \
+ wasm_func_call(func, NULL, results); \
+ check(results[0], type, expected); \
+ }
+
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("global.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Create external globals.
+ printf("Creating globals...\n");
+ own wasm_globaltype_t* const_f32_type = wasm_globaltype_new(
+ wasm_valtype_new(WASM_F32), WASM_CONST);
+ own wasm_globaltype_t* const_i64_type = wasm_globaltype_new(
+ wasm_valtype_new(WASM_I64), WASM_CONST);
+ own wasm_globaltype_t* var_f32_type = wasm_globaltype_new(
+ wasm_valtype_new(WASM_F32), WASM_VAR);
+ own wasm_globaltype_t* var_i64_type = wasm_globaltype_new(
+ wasm_valtype_new(WASM_I64), WASM_VAR);
+
+ wasm_val_t val_f32_1 = {.kind = WASM_F32, .of = {.f32 = 1}};
+ own wasm_global_t* const_f32_import = wasm_global_new(store, const_f32_type, &val_f32_1);
+ wasm_val_t val_i64_2 = {.kind = WASM_I64, .of = {.i64 = 2}};
+ own wasm_global_t* const_i64_import = wasm_global_new(store, const_i64_type, &val_i64_2);
+ wasm_val_t val_f32_3 = {.kind = WASM_F32, .of = {.f32 = 3}};
+ own wasm_global_t* var_f32_import = wasm_global_new(store, var_f32_type, &val_f32_3);
+ wasm_val_t val_i64_4 = {.kind = WASM_I64, .of = {.i64 = 4}};
+ own wasm_global_t* var_i64_import = wasm_global_new(store, var_i64_type, &val_i64_4);
+
+ wasm_globaltype_delete(const_f32_type);
+ wasm_globaltype_delete(const_i64_type);
+ wasm_globaltype_delete(var_f32_type);
+ wasm_globaltype_delete(var_i64_type);
+
+ // Instantiate.
+ printf("Instantiating module...\n");
+ const wasm_extern_t* imports[] = {
+ wasm_global_as_extern(const_f32_import),
+ wasm_global_as_extern(const_i64_import),
+ wasm_global_as_extern(var_f32_import),
+ wasm_global_as_extern(var_i64_import)
+ };
+ own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return 1;
+ }
+
+ wasm_module_delete(module);
+
+ // Extract export.
+ printf("Extracting exports...\n");
+ own wasm_extern_vec_t exports;
+ wasm_instance_exports(instance, &exports);
+ size_t i = 0;
+ wasm_global_t* const_f32_export = get_export_global(&exports, i++);
+ wasm_global_t* const_i64_export = get_export_global(&exports, i++);
+ wasm_global_t* var_f32_export = get_export_global(&exports, i++);
+ wasm_global_t* var_i64_export = get_export_global(&exports, i++);
+ wasm_func_t* get_const_f32_import = get_export_func(&exports, i++);
+ wasm_func_t* get_const_i64_import = get_export_func(&exports, i++);
+ wasm_func_t* get_var_f32_import = get_export_func(&exports, i++);
+ wasm_func_t* get_var_i64_import = get_export_func(&exports, i++);
+ wasm_func_t* get_const_f32_export = get_export_func(&exports, i++);
+ wasm_func_t* get_const_i64_export = get_export_func(&exports, i++);
+ wasm_func_t* get_var_f32_export = get_export_func(&exports, i++);
+ wasm_func_t* get_var_i64_export = get_export_func(&exports, i++);
+ wasm_func_t* set_var_f32_import = get_export_func(&exports, i++);
+ wasm_func_t* set_var_i64_import = get_export_func(&exports, i++);
+ wasm_func_t* set_var_f32_export = get_export_func(&exports, i++);
+ wasm_func_t* set_var_i64_export = get_export_func(&exports, i++);
+
+ // Interact.
+ printf("Accessing globals...\n");
+
+ // Check initial values.
+ check_global(const_f32_import, f32, 1);
+ check_global(const_i64_import, i64, 2);
+ check_global(var_f32_import, f32, 3);
+ check_global(var_i64_import, i64, 4);
+ check_global(const_f32_export, f32, 5);
+ check_global(const_i64_export, i64, 6);
+ check_global(var_f32_export, f32, 7);
+ check_global(var_i64_export, i64, 8);
+
+ check_call(get_const_f32_import, f32, 1);
+ check_call(get_const_i64_import, i64, 2);
+ check_call(get_var_f32_import, f32, 3);
+ check_call(get_var_i64_import, i64, 4);
+ check_call(get_const_f32_export, f32, 5);
+ check_call(get_const_i64_export, i64, 6);
+ check_call(get_var_f32_export, f32, 7);
+ check_call(get_var_i64_export, i64, 8);
+
+ // Modify variables through API and check again.
+ wasm_val_t val33 = {.kind = WASM_F32, .of = {.f32 = 33}};
+ wasm_global_set(var_f32_import, &val33);
+ wasm_val_t val34 = {.kind = WASM_I64, .of = {.i64 = 34}};
+ wasm_global_set(var_i64_import, &val34);
+ wasm_val_t val37 = {.kind = WASM_F32, .of = {.f32 = 37}};
+ wasm_global_set(var_f32_export, &val37);
+ wasm_val_t val38 = {.kind = WASM_I64, .of = {.i64 = 38}};
+ wasm_global_set(var_i64_export, &val38);
+
+ check_global(var_f32_import, f32, 33);
+ check_global(var_i64_import, i64, 34);
+ check_global(var_f32_export, f32, 37);
+ check_global(var_i64_export, i64, 38);
+
+ check_call(get_var_f32_import, f32, 33);
+ check_call(get_var_i64_import, i64, 34);
+ check_call(get_var_f32_export, f32, 37);
+ check_call(get_var_i64_export, i64, 38);
+
+ // Modify variables through calls and check again.
+ wasm_val_t args73[] = { {.kind = WASM_F32, .of = {.f32 = 73}} };
+ wasm_func_call(set_var_f32_import, args73, NULL);
+ wasm_val_t args74[] = { {.kind = WASM_I64, .of = {.i64 = 74}} };
+ wasm_func_call(set_var_i64_import, args74, NULL);
+ wasm_val_t args77[] = { {.kind = WASM_F32, .of = {.f32 = 77}} };
+ wasm_func_call(set_var_f32_export, args77, NULL);
+ wasm_val_t args78[] = { {.kind = WASM_I64, .of = {.i64 = 78}} };
+ wasm_func_call(set_var_i64_export, args78, NULL);
+
+ check_global(var_f32_import, f32, 73);
+ check_global(var_i64_import, i64, 74);
+ check_global(var_f32_export, f32, 77);
+ check_global(var_i64_export, i64, 78);
+
+ check_call(get_var_f32_import, f32, 73);
+ check_call(get_var_i64_import, i64, 74);
+ check_call(get_var_f32_export, f32, 77);
+ check_call(get_var_i64_export, i64, 78);
+
+ wasm_global_delete(const_f32_import);
+ wasm_global_delete(const_i64_import);
+ wasm_global_delete(var_f32_import);
+ wasm_global_delete(var_i64_import);
+ wasm_extern_vec_delete(&exports);
+ wasm_instance_delete(instance);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/global.cc b/deps/v8/third_party/wasm-api/example/global.cc
new file mode 100644
index 0000000000..75a2513c82
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/global.cc
@@ -0,0 +1,193 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+auto get_export_global(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Global* {
+ if (exports.size() <= i || !exports[i]->global()) {
+ std::cout << "> Error accessing global export " << i << "!" << std::endl;
+ exit(1);
+ }
+ return exports[i]->global();
+}
+
+auto get_export_func(const wasm::vec<wasm::Extern*>& exports, size_t i) -> const wasm::Func* {
+ if (exports.size() <= i || !exports[i]->func()) {
+ std::cout << "> Error accessing function export " << i << "!" << std::endl;
+ exit(1);
+ }
+ return exports[i]->func();
+}
+
+template<class T, class U>
+void check(T actual, U expected) {
+ if (actual != expected) {
+ std::cout << "> Error reading value, expected " << expected << ", got " << actual << std::endl;
+ exit(1);
+ }
+}
+
+auto call(const wasm::Func* func) -> wasm::Val {
+ wasm::Val results[1];
+ if (func->call(nullptr, results)) {
+ std::cout << "> Error calling function!" << std::endl;
+ exit(1);
+ }
+ return results[0].copy();
+}
+
+void call(const wasm::Func* func, wasm::Val&& arg) {
+ wasm::Val args[1] = {std::move(arg)};
+ if (func->call(args)) {
+ std::cout << "> Error calling function!" << std::endl;
+ exit(1);
+ }
+}
+
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("global.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Create external globals.
+ std::cout << "Creating globals..." << std::endl;
+ auto const_f32_type = wasm::GlobalType::make(
+ wasm::ValType::make(wasm::F32), wasm::CONST);
+ auto const_i64_type = wasm::GlobalType::make(
+ wasm::ValType::make(wasm::I64), wasm::CONST);
+ auto var_f32_type = wasm::GlobalType::make(
+ wasm::ValType::make(wasm::F32), wasm::VAR);
+ auto var_i64_type = wasm::GlobalType::make(
+ wasm::ValType::make(wasm::I64), wasm::VAR);
+ auto const_f32_import = wasm::Global::make(store, const_f32_type.get(), wasm::Val::f32(1));
+ auto const_i64_import = wasm::Global::make(store, const_i64_type.get(), wasm::Val::i64(2));
+ auto var_f32_import = wasm::Global::make(store, var_f32_type.get(), wasm::Val::f32(3));
+ auto var_i64_import = wasm::Global::make(store, var_i64_type.get(), wasm::Val::i64(4));
+
+ // Instantiate.
+ std::cout << "Instantiating module..." << std::endl;
+ wasm::Extern* imports[] = {
+ const_f32_import.get(), const_i64_import.get(),
+ var_f32_import.get(), var_i64_import.get()
+ };
+ auto instance = wasm::Instance::make(store, module.get(), imports);
+ if (!instance) {
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract export.
+ std::cout << "Extracting exports..." << std::endl;
+ auto exports = instance->exports();
+ size_t i = 0;
+ auto const_f32_export = get_export_global(exports, i++);
+ auto const_i64_export = get_export_global(exports, i++);
+ auto var_f32_export = get_export_global(exports, i++);
+ auto var_i64_export = get_export_global(exports, i++);
+ auto get_const_f32_import = get_export_func(exports, i++);
+ auto get_const_i64_import = get_export_func(exports, i++);
+ auto get_var_f32_import = get_export_func(exports, i++);
+ auto get_var_i64_import = get_export_func(exports, i++);
+ auto get_const_f32_export = get_export_func(exports, i++);
+ auto get_const_i64_export = get_export_func(exports, i++);
+ auto get_var_f32_export = get_export_func(exports, i++);
+ auto get_var_i64_export = get_export_func(exports, i++);
+ auto set_var_f32_import = get_export_func(exports, i++);
+ auto set_var_i64_import = get_export_func(exports, i++);
+ auto set_var_f32_export = get_export_func(exports, i++);
+ auto set_var_i64_export = get_export_func(exports, i++);
+
+ // Interact.
+ std::cout << "Accessing globals..." << std::endl;
+
+ // Check initial values.
+ check(const_f32_import->get().f32(), 1);
+ check(const_i64_import->get().i64(), 2);
+ check(var_f32_import->get().f32(), 3);
+ check(var_i64_import->get().i64(), 4);
+ check(const_f32_export->get().f32(), 5);
+ check(const_i64_export->get().i64(), 6);
+ check(var_f32_export->get().f32(), 7);
+ check(var_i64_export->get().i64(), 8);
+
+ check(call(get_const_f32_import).f32(), 1);
+ check(call(get_const_i64_import).i64(), 2);
+ check(call(get_var_f32_import).f32(), 3);
+ check(call(get_var_i64_import).i64(), 4);
+ check(call(get_const_f32_export).f32(), 5);
+ check(call(get_const_i64_export).i64(), 6);
+ check(call(get_var_f32_export).f32(), 7);
+ check(call(get_var_i64_export).i64(), 8);
+
+ // Modify variables through API and check again.
+ var_f32_import->set(wasm::Val::f32(33));
+ var_i64_import->set(wasm::Val::i64(34));
+ var_f32_export->set(wasm::Val::f32(37));
+ var_i64_export->set(wasm::Val::i64(38));
+
+ check(var_f32_import->get().f32(), 33);
+ check(var_i64_import->get().i64(), 34);
+ check(var_f32_export->get().f32(), 37);
+ check(var_i64_export->get().i64(), 38);
+
+ check(call(get_var_f32_import).f32(), 33);
+ check(call(get_var_i64_import).i64(), 34);
+ check(call(get_var_f32_export).f32(), 37);
+ check(call(get_var_i64_export).i64(), 38);
+
+ // Modify variables through calls and check again.
+ call(set_var_f32_import, wasm::Val::f32(73));
+ call(set_var_i64_import, wasm::Val::i64(74));
+ call(set_var_f32_export, wasm::Val::f32(77));
+ call(set_var_i64_export, wasm::Val::i64(78));
+
+ check(var_f32_import->get().f32(), 73);
+ check(var_i64_import->get().i64(), 74);
+ check(var_f32_export->get().f32(), 77);
+ check(var_i64_export->get().i64(), 78);
+
+ check(call(get_var_f32_import).f32(), 73);
+ check(call(get_var_i64_import).i64(), 74);
+ check(call(get_var_f32_export).f32(), 77);
+ check(call(get_var_i64_export).i64(), 78);
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/global.wasm b/deps/v8/third_party/wasm-api/example/global.wasm
new file mode 100644
index 0000000000..0e76863278
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/global.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/global.wat b/deps/v8/third_party/wasm-api/example/global.wat
new file mode 100644
index 0000000000..dea085772b
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/global.wat
@@ -0,0 +1,27 @@
+(module
+ (global $f32_import (import "" "const f32") f32)
+ (global $i64_import (import "" "const i64") i64)
+ (global $mut_f32_import (import "" "var f32") (mut f32))
+ (global $mut_i64_import (import "" "var i64") (mut i64))
+
+ (global $f32_export (export "const f32") f32 (f32.const 5))
+ (global $i64_export (export "const i64") i64 (i64.const 6))
+ (global $mut_f32_export (export "var f32") (mut f32) (f32.const 7))
+ (global $mut_i64_export (export "var i64") (mut i64) (i64.const 8))
+
+ (func (export "get const f32 import") (result f32) (global.get $f32_import))
+ (func (export "get const i64 import") (result i64) (global.get $i64_import))
+ (func (export "get var f32 import") (result f32) (global.get $mut_f32_import))
+ (func (export "get var i64 import") (result i64) (global.get $mut_i64_import))
+
+ (func (export "get const f32 export") (result f32) (global.get $f32_export))
+ (func (export "get const i64 export") (result i64) (global.get $i64_export))
+ (func (export "get var f32 export") (result f32) (global.get $mut_f32_export))
+ (func (export "get var i64 export") (result i64) (global.get $mut_i64_export))
+
+ (func (export "set var f32 import") (param f32) (global.set $mut_f32_import (local.get 0)))
+ (func (export "set var i64 import") (param i64) (global.set $mut_i64_import (local.get 0)))
+
+ (func (export "set var f32 export") (param f32) (global.set $mut_f32_export (local.get 0)))
+ (func (export "set var f64 export") (param i64) (global.set $mut_i64_export (local.get 0)))
+)
diff --git a/deps/v8/third_party/wasm-api/example/hello.c b/deps/v8/third_party/wasm-api/example/hello.c
new file mode 100644
index 0000000000..b1c8c5fee5
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/hello.c
@@ -0,0 +1,107 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+// A function to be called from Wasm code.
+own wasm_trap_t* hello_callback(
+ const wasm_val_t args[], wasm_val_t results[]
+) {
+ printf("Calling back...\n");
+ printf("> Hello World!\n");
+ return NULL;
+}
+
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("hello.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Create external print functions.
+ printf("Creating callback...\n");
+ own wasm_functype_t* hello_type = wasm_functype_new_0_0();
+ own wasm_func_t* hello_func =
+ wasm_func_new(store, hello_type, hello_callback);
+
+ wasm_functype_delete(hello_type);
+
+ // Instantiate.
+ printf("Instantiating module...\n");
+ const wasm_extern_t* imports[] = { wasm_func_as_extern(hello_func) };
+ own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return 1;
+ }
+
+ wasm_func_delete(hello_func);
+
+ // Extract export.
+ printf("Extracting export...\n");
+ own wasm_extern_vec_t exports;
+ wasm_instance_exports(instance, &exports);
+ if (exports.size == 0) {
+ printf("> Error accessing exports!\n");
+ return 1;
+ }
+ const wasm_func_t* run_func = wasm_extern_as_func(exports.data[0]);
+ if (run_func == NULL) {
+ printf("> Error accessing export!\n");
+ return 1;
+ }
+
+ wasm_module_delete(module);
+ wasm_instance_delete(instance);
+
+ // Call.
+ printf("Calling export...\n");
+ if (wasm_func_call(run_func, NULL, NULL)) {
+ printf("> Error calling function!\n");
+ return 1;
+ }
+
+ wasm_extern_vec_delete(&exports);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/hello.cc b/deps/v8/third_party/wasm-api/example/hello.cc
new file mode 100644
index 0000000000..4956be885f
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/hello.cc
@@ -0,0 +1,91 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+// A function to be called from Wasm code.
+auto hello_callback(
+ const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap*> {
+ std::cout << "Calling back..." << std::endl;
+ std::cout << "> Hello world!" << std::endl;
+ return nullptr;
+}
+
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("hello.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Create external print functions.
+ std::cout << "Creating callback..." << std::endl;
+ auto hello_type = wasm::FuncType::make(
+ wasm::vec<wasm::ValType*>::make(), wasm::vec<wasm::ValType*>::make()
+ );
+ auto hello_func = wasm::Func::make(store, hello_type.get(), hello_callback);
+
+ // Instantiate.
+ std::cout << "Instantiating module..." << std::endl;
+ wasm::Extern* imports[] = {hello_func.get()};
+ auto instance = wasm::Instance::make(store, module.get(), imports);
+ if (!instance) {
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract export.
+ std::cout << "Extracting export..." << std::endl;
+ auto exports = instance->exports();
+ if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
+ std::cout << "> Error accessing export!" << std::endl;
+ return;
+ }
+ auto run_func = exports[0]->func();
+
+ // Call.
+ std::cout << "Calling export..." << std::endl;
+ if (run_func->call()) {
+ std::cout << "> Error calling function!" << std::endl;
+ return;
+ }
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/hello.wasm b/deps/v8/third_party/wasm-api/example/hello.wasm
new file mode 100644
index 0000000000..2207c03eea
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/hello.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/hello.wat b/deps/v8/third_party/wasm-api/example/hello.wat
new file mode 100644
index 0000000000..1c56c55822
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/hello.wat
@@ -0,0 +1,4 @@
+(module
+ (func $hello (import "" "hello"))
+ (func (export "run") (call $hello))
+)
diff --git a/deps/v8/third_party/wasm-api/example/memory.c b/deps/v8/third_party/wasm-api/example/memory.c
new file mode 100644
index 0000000000..64b0f86b51
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/memory.c
@@ -0,0 +1,217 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+
+wasm_memory_t* get_export_memory(const wasm_extern_vec_t* exports, size_t i) {
+ if (exports->size <= i || !wasm_extern_as_memory(exports->data[i])) {
+ printf("> Error accessing memory export %zu!\n", i);
+ exit(1);
+ }
+ return wasm_extern_as_memory(exports->data[i]);
+}
+
+wasm_func_t* get_export_func(const wasm_extern_vec_t* exports, size_t i) {
+ if (exports->size <= i || !wasm_extern_as_func(exports->data[i])) {
+ printf("> Error accessing function export %zu!\n", i);
+ exit(1);
+ }
+ return wasm_extern_as_func(exports->data[i]);
+}
+
+
+void check(bool success) {
+ if (!success) {
+ printf("> Error, expected success\n");
+ exit(1);
+ }
+}
+
+void check_call(wasm_func_t* func, wasm_val_t args[], int32_t expected) {
+ wasm_val_t results[1];
+ if (wasm_func_call(func, args, results) || results[0].of.i32 != expected) {
+ printf("> Error on result\n");
+ exit(1);
+ }
+}
+
+void check_call0(wasm_func_t* func, int32_t expected) {
+ check_call(func, NULL, expected);
+}
+
+void check_call1(wasm_func_t* func, int32_t arg, int32_t expected) {
+ wasm_val_t args[] = { {.kind = WASM_I32, .of = {.i32 = arg}} };
+ check_call(func, args, expected);
+}
+
+void check_call2(wasm_func_t* func, int32_t arg1, int32_t arg2, int32_t expected) {
+ wasm_val_t args[2] = {
+ {.kind = WASM_I32, .of = {.i32 = arg1}},
+ {.kind = WASM_I32, .of = {.i32 = arg2}}
+ };
+ check_call(func, args, expected);
+}
+
+void check_ok(wasm_func_t* func, wasm_val_t args[]) {
+ if (wasm_func_call(func, args, NULL)) {
+ printf("> Error on result, expected empty\n");
+ exit(1);
+ }
+}
+
+void check_ok2(wasm_func_t* func, int32_t arg1, int32_t arg2) {
+ wasm_val_t args[2] = {
+ {.kind = WASM_I32, .of = {.i32 = arg1}},
+ {.kind = WASM_I32, .of = {.i32 = arg2}}
+ };
+ check_ok(func, args);
+}
+
+void check_trap(wasm_func_t* func, wasm_val_t args[]) {
+ wasm_val_t results[1];
+ own wasm_trap_t* trap = wasm_func_call(func, args, results);
+ if (! trap) {
+ printf("> Error on result, expected trap\n");
+ exit(1);
+ }
+ wasm_trap_delete(trap);
+}
+
+void check_trap1(wasm_func_t* func, int32_t arg) {
+ wasm_val_t args[1] = { {.kind = WASM_I32, .of = {.i32 = arg}} };
+ check_trap(func, args);
+}
+
+void check_trap2(wasm_func_t* func, int32_t arg1, int32_t arg2) {
+ wasm_val_t args[2] = {
+ {.kind = WASM_I32, .of = {.i32 = arg1}},
+ {.kind = WASM_I32, .of = {.i32 = arg2}}
+ };
+ check_trap(func, args);
+}
+
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("memory.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Instantiate.
+ printf("Instantiating module...\n");
+ own wasm_instance_t* instance = wasm_instance_new(store, module, NULL);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return 1;
+ }
+
+ // Extract export.
+ printf("Extracting exports...\n");
+ own wasm_extern_vec_t exports;
+ wasm_instance_exports(instance, &exports);
+ size_t i = 0;
+ wasm_memory_t* memory = get_export_memory(&exports, i++);
+ wasm_func_t* size_func = get_export_func(&exports, i++);
+ wasm_func_t* load_func = get_export_func(&exports, i++);
+ wasm_func_t* store_func = get_export_func(&exports, i++);
+
+ wasm_module_delete(module);
+
+ // Check initial memory.
+ printf("Checking memory...\n");
+ check(wasm_memory_size(memory) == 2);
+ check(wasm_memory_data_size(memory) == 0x20000);
+ check(wasm_memory_data(memory)[0] == 0);
+ check(wasm_memory_data(memory)[0x1000] == 1);
+ check(wasm_memory_data(memory)[0x1003] == 4);
+
+ check_call0(size_func, 2);
+ check_call1(load_func, 0, 0);
+ check_call1(load_func, 0x1000, 1);
+ check_call1(load_func, 0x1003, 4);
+ check_call1(load_func, 0x1ffff, 0);
+ check_trap1(load_func, 0x20000);
+
+ // Mutate memory.
+ printf("Mutating memory...\n");
+ wasm_memory_data(memory)[0x1003] = 5;
+ check_ok2(store_func, 0x1002, 6);
+ check_trap2(store_func, 0x20000, 0);
+
+ check(wasm_memory_data(memory)[0x1002] == 6);
+ check(wasm_memory_data(memory)[0x1003] == 5);
+ check_call1(load_func, 0x1002, 6);
+ check_call1(load_func, 0x1003, 5);
+
+ // Grow memory.
+ printf("Growing memory...\n");
+ check(wasm_memory_grow(memory, 1));
+ check(wasm_memory_size(memory) == 3);
+ check(wasm_memory_data_size(memory) == 0x30000);
+
+ check_call1(load_func, 0x20000, 0);
+ check_ok2(store_func, 0x20000, 0);
+ check_trap1(load_func, 0x30000);
+ check_trap2(store_func, 0x30000, 0);
+
+ check(! wasm_memory_grow(memory, 1));
+ check(wasm_memory_grow(memory, 0));
+
+ wasm_extern_vec_delete(&exports);
+ wasm_instance_delete(instance);
+
+ // Create stand-alone memory.
+ // TODO(wasm+): Once Wasm allows multiple memories, turn this into import.
+ printf("Creating stand-alone memory...\n");
+ wasm_limits_t limits = {5, 5};
+ own wasm_memorytype_t* memorytype = wasm_memorytype_new(&limits);
+ own wasm_memory_t* memory2 = wasm_memory_new(store, memorytype);
+ check(wasm_memory_size(memory2) == 5);
+ check(! wasm_memory_grow(memory2, 1));
+ check(wasm_memory_grow(memory2, 0));
+
+ wasm_memorytype_delete(memorytype);
+ wasm_memory_delete(memory2);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/memory.cc b/deps/v8/third_party/wasm-api/example/memory.cc
new file mode 100644
index 0000000000..fb50565c85
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/memory.cc
@@ -0,0 +1,169 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+auto get_export_memory(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Memory* {
+ if (exports.size() <= i || !exports[i]->memory()) {
+ std::cout << "> Error accessing memory export " << i << "!" << std::endl;
+ exit(1);
+ }
+ return exports[i]->memory();
+}
+
+auto get_export_func(const wasm::vec<wasm::Extern*>& exports, size_t i) -> const wasm::Func* {
+ if (exports.size() <= i || !exports[i]->func()) {
+ std::cout << "> Error accessing function export " << i << "!" << std::endl;
+ exit(1);
+ }
+ return exports[i]->func();
+}
+
+template<class T, class U>
+void check(T actual, U expected) {
+ if (actual != expected) {
+ std::cout << "> Error on result, expected " << expected << ", got " << actual << std::endl;
+ exit(1);
+ }
+}
+
+template<class... Args>
+void check_ok(const wasm::Func* func, Args... xs) {
+ wasm::Val args[] = {wasm::Val::i32(xs)...};
+ if (func->call(args)) {
+ std::cout << "> Error on result, expected return" << std::endl;
+ exit(1);
+ }
+}
+
+template<class... Args>
+void check_trap(const wasm::Func* func, Args... xs) {
+ wasm::Val args[] = {wasm::Val::i32(xs)...};
+ if (! func->call(args)) {
+ std::cout << "> Error on result, expected trap" << std::endl;
+ exit(1);
+ }
+}
+
+template<class... Args>
+auto call(const wasm::Func* func, Args... xs) -> int32_t {
+ wasm::Val args[] = {wasm::Val::i32(xs)...};
+ wasm::Val results[1];
+ if (func->call(args, results)) {
+ std::cout << "> Error on result, expected return" << std::endl;
+ exit(1);
+ }
+ return results[0].i32();
+}
+
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("memory.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Instantiate.
+ std::cout << "Instantiating module..." << std::endl;
+ auto instance = wasm::Instance::make(store, module.get(), nullptr);
+ if (!instance) {
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract export.
+ std::cout << "Extracting exports..." << std::endl;
+ auto exports = instance->exports();
+ size_t i = 0;
+ auto memory = get_export_memory(exports, i++);
+ auto size_func = get_export_func(exports, i++);
+ auto load_func = get_export_func(exports, i++);
+ auto store_func = get_export_func(exports, i++);
+
+ // Check initial memory.
+ std::cout << "Checking memory..." << std::endl;
+ check(memory->size(), 2u);
+ check(memory->data_size(), 0x20000u);
+ check(memory->data()[0], 0);
+ check(memory->data()[0x1000], 1);
+ check(memory->data()[0x1003], 4);
+
+ check(call(size_func), 2);
+ check(call(load_func, 0), 0);
+ check(call(load_func, 0x1000), 1);
+ check(call(load_func, 0x1003), 4);
+ check(call(load_func, 0x1ffff), 0);
+ check_trap(load_func, 0x20000);
+
+ // Mutate memory.
+ std::cout << "Mutating memory..." << std::endl;
+ memory->data()[0x1003] = 5;
+ check_ok(store_func, 0x1002, 6);
+ check_trap(store_func, 0x20000, 0);
+
+ check(memory->data()[0x1002], 6);
+ check(memory->data()[0x1003], 5);
+ check(call(load_func, 0x1002), 6);
+ check(call(load_func, 0x1003), 5);
+
+ // Grow memory.
+ std::cout << "Growing memory..." << std::endl;
+ check(memory->grow(1), true);
+ check(memory->size(), 3u);
+ check(memory->data_size(), 0x30000u);
+
+ check(call(load_func, 0x20000), 0);
+ check_ok(store_func, 0x20000, 0);
+ check_trap(load_func, 0x30000);
+ check_trap(store_func, 0x30000, 0);
+
+ check(memory->grow(1), false);
+ check(memory->grow(0), true);
+
+ // Create stand-alone memory.
+ // TODO(wasm+): Once Wasm allows multiple memories, turn this into import.
+ std::cout << "Creating stand-alone memory..." << std::endl;
+ auto memorytype = wasm::MemoryType::make(wasm::Limits(5, 5));
+ auto memory2 = wasm::Memory::make(store, memorytype.get());
+ check(memory2->size(), 5u);
+ check(memory2->grow(1), false);
+ check(memory2->grow(0), true);
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/memory.wasm b/deps/v8/third_party/wasm-api/example/memory.wasm
new file mode 100644
index 0000000000..6f6518b187
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/memory.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/memory.wat b/deps/v8/third_party/wasm-api/example/memory.wat
new file mode 100644
index 0000000000..4cf43e2c7d
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/memory.wat
@@ -0,0 +1,11 @@
+(module
+ (memory (export "memory") 2 3)
+
+ (func (export "size") (result i32) (memory.size))
+ (func (export "load") (param i32) (result i32) (i32.load8_s (local.get 0)))
+ (func (export "store") (param i32 i32)
+ (i32.store8 (local.get 0) (local.get 1))
+ )
+
+ (data (i32.const 0x1000) "\01\02\03\04")
+)
diff --git a/deps/v8/third_party/wasm-api/example/reflect.c b/deps/v8/third_party/wasm-api/example/reflect.c
new file mode 100644
index 0000000000..a210a85c15
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/reflect.c
@@ -0,0 +1,164 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+void print_mutability(wasm_mutability_t mut) {
+ switch (mut) {
+ case WASM_VAR: printf("var"); break;
+ case WASM_CONST: printf("const"); break;
+ }
+}
+
+void print_limits(const wasm_limits_t* limits) {
+ printf("%ud", limits->min);
+ if (limits->max < wasm_limits_max_default) printf(" %ud", limits->max);
+}
+
+void print_valtype(const wasm_valtype_t* type) {
+ switch (wasm_valtype_kind(type)) {
+ case WASM_I32: printf("i32"); break;
+ case WASM_I64: printf("i64"); break;
+ case WASM_F32: printf("f32"); break;
+ case WASM_F64: printf("f64"); break;
+ case WASM_ANYREF: printf("anyref"); break;
+ case WASM_FUNCREF: printf("funcref"); break;
+ }
+}
+
+void print_valtypes(const wasm_valtype_vec_t* types) {
+ bool first = true;
+ for (size_t i = 0; i < types->size; ++i) {
+ if (first) {
+ first = false;
+ } else {
+ printf(" ");
+ }
+ print_valtype(types->data[i]);
+ }
+}
+
+void print_externtype(const wasm_externtype_t* type) {
+ switch (wasm_externtype_kind(type)) {
+ case WASM_EXTERN_FUNC: {
+ const wasm_functype_t* functype = wasm_externtype_as_functype_const(type);
+ printf("func ");
+ print_valtypes(wasm_functype_params(functype));
+ printf(" -> ");
+ print_valtypes(wasm_functype_results(functype));
+ } break;
+ case WASM_EXTERN_GLOBAL: {
+ const wasm_globaltype_t* globaltype = wasm_externtype_as_globaltype_const(type);
+ printf("global ");
+ print_mutability(wasm_globaltype_mutability(globaltype));
+ printf(" ");
+ print_valtype(wasm_globaltype_content(globaltype));
+ } break;
+ case WASM_EXTERN_TABLE: {
+ const wasm_tabletype_t* tabletype = wasm_externtype_as_tabletype_const(type);
+ printf("table ");
+ print_limits(wasm_tabletype_limits(tabletype));
+ printf(" ");
+ print_valtype(wasm_tabletype_element(tabletype));
+ } break;
+ case WASM_EXTERN_MEMORY: {
+ const wasm_memorytype_t* memorytype = wasm_externtype_as_memorytype_const(type);
+ printf("memory ");
+ print_limits(wasm_memorytype_limits(memorytype));
+ } break;
+ }
+}
+
+void print_name(const wasm_name_t* name) {
+ printf("\"%.*s\"", (int)name->size, name->data);
+}
+
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("reflect.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Instantiate.
+ printf("Instantiating module...\n");
+ own wasm_instance_t* instance = wasm_instance_new(store, module, NULL);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return 1;
+ }
+
+ // Extract export.
+ printf("Extracting export...\n");
+ own wasm_exporttype_vec_t export_types;
+ own wasm_extern_vec_t exports;
+ wasm_module_exports(module, &export_types);
+ wasm_instance_exports(instance, &exports);
+ assert(exports.size == export_types.size);
+
+ for (size_t i = 0; i < exports.size; ++i) {
+ assert(wasm_extern_kind(exports.data[i]) == wasm_externtype_kind(wasm_exporttype_type(export_types.data[i])));
+ printf("> export %zu ", i);
+ print_name(wasm_exporttype_name(export_types.data[i]));
+ printf("\n");
+ printf(">> initial: ");
+ print_externtype(wasm_exporttype_type(export_types.data[i]));
+ printf("\n");
+ printf(">> current: ");
+ own wasm_externtype_t* current = wasm_extern_type(exports.data[i]);
+ print_externtype(current);
+ wasm_externtype_delete(current);
+ printf("\n");
+ if (wasm_extern_kind(exports.data[i]) == WASM_EXTERN_FUNC) {
+ wasm_func_t* func = wasm_extern_as_func(exports.data[i]);
+ printf(">> in-arity: %zu", wasm_func_param_arity(func));
+ printf(", out-arity: %zu\n", wasm_func_result_arity(func));
+ }
+ }
+
+ wasm_module_delete(module);
+ wasm_instance_delete(instance);
+ wasm_extern_vec_delete(&exports);
+ wasm_exporttype_vec_delete(&export_types);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/reflect.cc b/deps/v8/third_party/wasm-api/example/reflect.cc
new file mode 100644
index 0000000000..c04b9e4d4e
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/reflect.cc
@@ -0,0 +1,138 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+auto operator<<(std::ostream& out, wasm::Mutability mut) -> std::ostream& {
+ switch (mut) {
+ case wasm::VAR: return out << "var";
+ case wasm::CONST: return out << "const";
+ }
+ return out;
+}
+
+auto operator<<(std::ostream& out, wasm::Limits limits) -> std::ostream& {
+ out << limits.min;
+ if (limits.max < wasm::Limits(0).max) out << " " << limits.max;
+ return out;
+}
+
+auto operator<<(std::ostream& out, const wasm::ValType& type) -> std::ostream& {
+ switch (type.kind()) {
+ case wasm::I32: return out << "i32";
+ case wasm::I64: return out << "i64";
+ case wasm::F32: return out << "f32";
+ case wasm::F64: return out << "f64";
+ case wasm::ANYREF: return out << "anyref";
+ case wasm::FUNCREF: return out << "funcref";
+ }
+ return out;
+}
+
+auto operator<<(std::ostream& out, const wasm::vec<wasm::ValType*>& types) -> std::ostream& {
+ bool first = true;
+ for (size_t i = 0; i < types.size(); ++i) {
+ if (first) {
+ first = false;
+ } else {
+ out << " ";
+ }
+ out << *types[i].get();
+ }
+ return out;
+}
+
+auto operator<<(std::ostream& out, const wasm::ExternType& type) -> std::ostream& {
+ switch (type.kind()) {
+ case wasm::EXTERN_FUNC: {
+ out << "func " << type.func()->params() << " -> " << type.func()->results();
+ } break;
+ case wasm::EXTERN_GLOBAL: {
+ out << "global " << type.global()->mutability() << " " << *type.global()->content();
+ } break;
+ case wasm::EXTERN_TABLE: {
+ out << "table " << type.table()->limits() << " " << *type.table()->element();
+ } break;
+ case wasm::EXTERN_MEMORY: {
+ out << "memory " << type.memory()->limits();
+ } break;
+ }
+ return out;
+}
+
+auto operator<<(std::ostream& out, const wasm::Name& name) -> std::ostream& {
+ out << "\"" << std::string(name.get(), name.size()) << "\"";
+ return out;
+}
+
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("reflect.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Instantiate.
+ std::cout << "Instantiating module..." << std::endl;
+ auto instance = wasm::Instance::make(store, module.get(), nullptr);
+ if (!instance) {
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract exports.
+ std::cout << "Extracting export..." << std::endl;
+ auto export_types = module->exports();
+ auto exports = instance->exports();
+ assert(exports.size() == export_types.size());
+
+ for (size_t i = 0; i < exports.size(); ++i) {
+ assert(exports[i]->kind() == export_types[i]->type()->kind());
+ std::cout << "> export " << i << " " << export_types[i]->name() << std::endl;
+ std::cout << ">> initial: " << *export_types[i]->type() << std::endl;
+ std::cout << ">> current: " << *exports[i]->type() << std::endl;
+ if (exports[i]->kind() == wasm::EXTERN_FUNC) {
+ auto func = exports[i]->func();
+ std::cout << ">> in-arity: " << func->param_arity();
+ std::cout << ", out-arity: " << func->result_arity() << std::endl;
+ }
+ }
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/reflect.wasm b/deps/v8/third_party/wasm-api/example/reflect.wasm
new file mode 100644
index 0000000000..15a68fe8f7
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/reflect.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/reflect.wat b/deps/v8/third_party/wasm-api/example/reflect.wat
new file mode 100644
index 0000000000..261dfd3c33
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/reflect.wat
@@ -0,0 +1,6 @@
+(module
+ (func (export "func") (param i32 f64 f32) (result i32) (unreachable))
+ (global (export "global") f64 (f64.const 0))
+ (table (export "table") 0 50 anyfunc)
+ (memory (export "memory") 1)
+)
diff --git a/deps/v8/third_party/wasm-api/example/serialize.c b/deps/v8/third_party/wasm-api/example/serialize.c
new file mode 100644
index 0000000000..8c7efc2ee9
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/serialize.c
@@ -0,0 +1,122 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+// A function to be called from Wasm code.
+own wasm_trap_t* hello_callback(const wasm_val_t args[], wasm_val_t results[]) {
+ printf("Calling back...\n");
+ printf("> Hello World!\n");
+ return NULL;
+}
+
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("serialize.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Serialize module.
+ printf("Serializing module...\n");
+ own wasm_byte_vec_t serialized;
+ wasm_module_serialize(module, &serialized);
+
+ wasm_module_delete(module);
+
+ // Deserialize module.
+ printf("Deserializing module...\n");
+ own wasm_module_t* deserialized = wasm_module_deserialize(store, &serialized);
+ if (!deserialized) {
+ printf("> Error deserializing module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&serialized);
+
+ // Create external print functions.
+ printf("Creating callback...\n");
+ own wasm_functype_t* hello_type = wasm_functype_new_0_0();
+ own wasm_func_t* hello_func =
+ wasm_func_new(store, hello_type, hello_callback);
+
+ wasm_functype_delete(hello_type);
+
+ // Instantiate.
+ printf("Instantiating deserialized module...\n");
+ const wasm_extern_t* imports[] = { wasm_func_as_extern(hello_func) };
+ own wasm_instance_t* instance = wasm_instance_new(store, deserialized, imports);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return 1;
+ }
+
+ wasm_func_delete(hello_func);
+
+ // Extract export.
+ printf("Extracting export...\n");
+ own wasm_extern_vec_t exports;
+ wasm_instance_exports(instance, &exports);
+ if (exports.size == 0) {
+ printf("> Error accessing exports!\n");
+ return 1;
+ }
+ const wasm_func_t* run_func = wasm_extern_as_func(exports.data[0]);
+ if (run_func == NULL) {
+ printf("> Error accessing export!\n");
+ return 1;
+ }
+
+ wasm_module_delete(deserialized);
+ wasm_instance_delete(instance);
+
+ // Call.
+ printf("Calling export...\n");
+ if (wasm_func_call(run_func, NULL, NULL)) {
+ printf("> Error calling function!\n");
+ return 1;
+ }
+
+ wasm_extern_vec_delete(&exports);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/serialize.cc b/deps/v8/third_party/wasm-api/example/serialize.cc
new file mode 100644
index 0000000000..895ef396e0
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/serialize.cc
@@ -0,0 +1,103 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+// A function to be called from Wasm code.
+auto hello_callback(
+ const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap*> {
+ std::cout << "Calling back..." << std::endl;
+ std::cout << "> Hello world!" << std::endl;
+ return nullptr;
+}
+
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("serialize.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Serialize module.
+ std::cout << "Serializing module..." << std::endl;
+ auto serialized = module->serialize();
+
+ // Deserialize module.
+ std::cout << "Deserializing module..." << std::endl;
+ auto deserialized = wasm::Module::deserialize(store, serialized);
+ if (!deserialized) {
+ std::cout << "> Error deserializing module!" << std::endl;
+ return;
+ }
+
+ // Create external print functions.
+ std::cout << "Creating callback..." << std::endl;
+ auto hello_type = wasm::FuncType::make(
+ wasm::vec<wasm::ValType*>::make(), wasm::vec<wasm::ValType*>::make()
+ );
+ auto hello_func = wasm::Func::make(store, hello_type.get(), hello_callback);
+
+ // Instantiate.
+ std::cout << "Instantiating deserialized module..." << std::endl;
+ wasm::Extern* imports[] = {hello_func.get()};
+ auto instance = wasm::Instance::make(store, deserialized.get(), imports);
+ if (!instance) {
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract export.
+ std::cout << "Extracting export..." << std::endl;
+ auto exports = instance->exports();
+ if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
+ std::cout << "> Error accessing export!" << std::endl;
+ return;
+ }
+ auto run_func = exports[0]->func();
+
+ // Call.
+ std::cout << "Calling export..." << std::endl;
+ if (! run_func->call()) {
+ std::cout << "> Error calling function!" << std::endl;
+ return;
+ }
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/serialize.wasm b/deps/v8/third_party/wasm-api/example/serialize.wasm
new file mode 100644
index 0000000000..2207c03eea
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/serialize.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/serialize.wat b/deps/v8/third_party/wasm-api/example/serialize.wat
new file mode 100644
index 0000000000..1c56c55822
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/serialize.wat
@@ -0,0 +1,4 @@
+(module
+ (func $hello (import "" "hello"))
+ (func (export "run") (call $hello))
+)
diff --git a/deps/v8/third_party/wasm-api/example/table.c b/deps/v8/third_party/wasm-api/example/table.c
new file mode 100644
index 0000000000..8fec71f23f
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/table.c
@@ -0,0 +1,208 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+// A function to be called from Wasm code.
+own wasm_trap_t* neg_callback(
+ const wasm_val_t args[], wasm_val_t results[]
+) {
+ printf("Calling back...\n");
+ results[0].kind = WASM_I32;
+ results[0].of.i32 = -args[0].of.i32;
+ return NULL;
+}
+
+
+wasm_table_t* get_export_table(const wasm_extern_vec_t* exports, size_t i) {
+ if (exports->size <= i || !wasm_extern_as_table(exports->data[i])) {
+ printf("> Error accessing table export %zu!\n", i);
+ exit(1);
+ }
+ return wasm_extern_as_table(exports->data[i]);
+}
+
+wasm_func_t* get_export_func(const wasm_extern_vec_t* exports, size_t i) {
+ if (exports->size <= i || !wasm_extern_as_func(exports->data[i])) {
+ printf("> Error accessing function export %zu!\n", i);
+ exit(1);
+ }
+ return wasm_extern_as_func(exports->data[i]);
+}
+
+
+void check(bool success) {
+ if (!success) {
+ printf("> Error, expected success\n");
+ exit(1);
+ }
+}
+
+void check_table(wasm_table_t* table, int32_t i, bool expect_set) {
+ own wasm_ref_t* ref = wasm_table_get(table, i);
+ check((ref != NULL) == expect_set);
+ if (ref) wasm_ref_delete(ref);
+}
+
+void check_call(wasm_func_t* func, int32_t arg1, int32_t arg2, int32_t expected) {
+ wasm_val_t args[2] = {
+ {.kind = WASM_I32, .of = {.i32 = arg1}},
+ {.kind = WASM_I32, .of = {.i32 = arg2}}
+ };
+ wasm_val_t results[1];
+ if (wasm_func_call(func, args, results) || results[0].of.i32 != expected) {
+ printf("> Error on result\n");
+ exit(1);
+ }
+}
+
+void check_trap(wasm_func_t* func, int32_t arg1, int32_t arg2) {
+ wasm_val_t args[2] = {
+ {.kind = WASM_I32, .of = {.i32 = arg1}},
+ {.kind = WASM_I32, .of = {.i32 = arg2}}
+ };
+ own wasm_trap_t* trap = wasm_func_call(func, args, NULL);
+ if (! trap) {
+ printf("> Error on result, expected trap\n");
+ exit(1);
+ }
+ wasm_trap_delete(trap);
+}
+
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("table.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Instantiate.
+ printf("Instantiating module...\n");
+ own wasm_instance_t* instance = wasm_instance_new(store, module, NULL);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return 1;
+ }
+
+ // Extract export.
+ printf("Extracting exports...\n");
+ own wasm_extern_vec_t exports;
+ wasm_instance_exports(instance, &exports);
+ size_t i = 0;
+ wasm_table_t* table = get_export_table(&exports, i++);
+ wasm_func_t* call_indirect = get_export_func(&exports, i++);
+ wasm_func_t* f = get_export_func(&exports, i++);
+ wasm_func_t* g = get_export_func(&exports, i++);
+
+ wasm_module_delete(module);
+
+ // Create external function.
+ printf("Creating callback...\n");
+ own wasm_functype_t* neg_type = wasm_functype_new_1_1(wasm_valtype_new_i32(), wasm_valtype_new_i32());
+ own wasm_func_t* h = wasm_func_new(store, neg_type, neg_callback);
+
+ wasm_functype_delete(neg_type);
+
+ // Check initial table.
+ printf("Checking table...\n");
+ check(wasm_table_size(table) == 2);
+ check_table(table, 0, false);
+ check_table(table, 1, true);
+ check_trap(call_indirect, 0, 0);
+ check_call(call_indirect, 7, 1, 7);
+ check_trap(call_indirect, 0, 2);
+
+ // Mutate table.
+ printf("Mutating table...\n");
+ check(wasm_table_set(table, 0, wasm_func_as_ref(g)));
+ check(wasm_table_set(table, 1, NULL));
+ check(! wasm_table_set(table, 2, wasm_func_as_ref(f)));
+ check_table(table, 0, true);
+ check_table(table, 1, false);
+ check_call(call_indirect, 7, 0, 666);
+ check_trap(call_indirect, 0, 1);
+ check_trap(call_indirect, 0, 2);
+
+ // Grow table.
+ printf("Growing table...\n");
+ check(wasm_table_grow(table, 3, NULL));
+ check(wasm_table_size(table) == 5);
+ check(wasm_table_set(table, 2, wasm_func_as_ref(f)));
+ check(wasm_table_set(table, 3, wasm_func_as_ref(h)));
+ check(! wasm_table_set(table, 5, NULL));
+ check_table(table, 2, true);
+ check_table(table, 3, true);
+ check_table(table, 4, false);
+ check_call(call_indirect, 5, 2, 5);
+ check_call(call_indirect, 6, 3, -6);
+ check_trap(call_indirect, 0, 4);
+ check_trap(call_indirect, 0, 5);
+
+ check(wasm_table_grow(table, 2, wasm_func_as_ref(f)));
+ check(wasm_table_size(table) == 7);
+ check_table(table, 5, true);
+ check_table(table, 6, true);
+
+ check(! wasm_table_grow(table, 5, NULL));
+ check(wasm_table_grow(table, 3, NULL));
+ check(wasm_table_grow(table, 0, NULL));
+
+ wasm_func_delete(h);
+ wasm_extern_vec_delete(&exports);
+ wasm_instance_delete(instance);
+
+ // Create stand-alone table.
+ // TODO(wasm+): Once Wasm allows multiple tables, turn this into import.
+ printf("Creating stand-alone table...\n");
+ wasm_limits_t limits = {5, 5};
+ own wasm_tabletype_t* tabletype =
+ wasm_tabletype_new(wasm_valtype_new(WASM_FUNCREF), &limits);
+ own wasm_table_t* table2 = wasm_table_new(store, tabletype, NULL);
+ check(wasm_table_size(table2) == 5);
+ check(! wasm_table_grow(table2, 1, NULL));
+ check(wasm_table_grow(table2, 0, NULL));
+
+ wasm_tabletype_delete(tabletype);
+ wasm_table_delete(table2);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/table.cc b/deps/v8/third_party/wasm-api/example/table.cc
new file mode 100644
index 0000000000..cb669cdb16
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/table.cc
@@ -0,0 +1,189 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+
+// A function to be called from Wasm code.
+auto neg_callback(
+ const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap*> {
+ std::cout << "Calling back..." << std::endl;
+ results[0] = wasm::Val(-args[0].i32());
+ return nullptr;
+}
+
+
+auto get_export_table(wasm::vec<wasm::Extern*>& exports, size_t i) -> wasm::Table* {
+ if (exports.size() <= i || !exports[i]->table()) {
+ std::cout << "> Error accessing table export " << i << "!" << std::endl;
+ exit(1);
+ }
+ return exports[i]->table();
+}
+
+auto get_export_func(const wasm::vec<wasm::Extern*>& exports, size_t i) -> const wasm::Func* {
+ if (exports.size() <= i || !exports[i]->func()) {
+ std::cout << "> Error accessing function export " << i << "!" << std::endl;
+ exit(1);
+ }
+ return exports[i]->func();
+}
+
+template<class T, class U>
+void check(T actual, U expected) {
+ if (actual != expected) {
+ std::cout << "> Error on result, expected " << expected << ", got " << actual << std::endl;
+ exit(1);
+ }
+}
+
+void check(bool success) {
+ if (! success) {
+ std::cout << "> Error, expected success" << std::endl;
+ exit(1);
+ }
+}
+
+auto call(
+ const wasm::Func* func, wasm::Val&& arg1, wasm::Val&& arg2
+) -> wasm::Val {
+ wasm::Val args[2] = {std::move(arg1), std::move(arg2)};
+ wasm::Val results[1];
+ if (func->call(args, results)) {
+ std::cout << "> Error on result, expected return" << std::endl;
+ exit(1);
+ }
+ return results[0].copy();
+}
+
+void check_trap(const wasm::Func* func, wasm::Val&& arg1, wasm::Val&& arg2) {
+ wasm::Val args[2] = {std::move(arg1), std::move(arg2)};
+ wasm::Val results[1];
+ if (! func->call(args, results)) {
+ std::cout << "> Error on result, expected trap" << std::endl;
+ exit(1);
+ }
+}
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("table.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Instantiate.
+ std::cout << "Instantiating module..." << std::endl;
+ auto instance = wasm::Instance::make(store, module.get(), nullptr);
+ if (!instance) {
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract export.
+ std::cout << "Extracting exports..." << std::endl;
+ auto exports = instance->exports();
+ size_t i = 0;
+ auto table = get_export_table(exports, i++);
+ auto call_indirect = get_export_func(exports, i++);
+ auto f = get_export_func(exports, i++);
+ auto g = get_export_func(exports, i++);
+
+ // Create external function.
+ std::cout << "Creating callback..." << std::endl;
+ auto neg_type = wasm::FuncType::make(
+ wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32)),
+ wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32))
+ );
+ auto h = wasm::Func::make(store, neg_type.get(), neg_callback);
+
+ // Check initial table.
+ std::cout << "Checking table..." << std::endl;
+ check(table->size(), 2u);
+ check(table->get(0) == nullptr);
+ check(table->get(1) != nullptr);
+ check_trap(call_indirect, wasm::Val::i32(0), wasm::Val::i32(0));
+ check(call(call_indirect, wasm::Val::i32(7), wasm::Val::i32(1)).i32(), 7);
+ check_trap(call_indirect, wasm::Val::i32(0), wasm::Val::i32(2));
+
+ // Mutate table.
+ std::cout << "Mutating table..." << std::endl;
+ check(table->set(0, g));
+ check(table->set(1, nullptr));
+ check(! table->set(2, f));
+ check(table->get(0) != nullptr);
+ check(table->get(1) == nullptr);
+ check(call(call_indirect, wasm::Val::i32(7), wasm::Val::i32(0)).i32(), 666);
+ check_trap(call_indirect, wasm::Val::i32(0), wasm::Val::i32(1));
+ check_trap(call_indirect, wasm::Val::i32(0), wasm::Val::i32(2));
+
+ // Grow table.
+ std::cout << "Growing table..." << std::endl;
+ check(table->grow(3));
+ check(table->size(), 5u);
+ check(table->set(2, f));
+ check(table->set(3, h.get()));
+ check(! table->set(5, nullptr));
+ check(table->get(2) != nullptr);
+ check(table->get(3) != nullptr);
+ check(table->get(4) == nullptr);
+ check(call(call_indirect, wasm::Val::i32(5), wasm::Val::i32(2)).i32(), 5);
+ check(call(call_indirect, wasm::Val::i32(6), wasm::Val::i32(3)).i32(), -6);
+ check_trap(call_indirect, wasm::Val::i32(0), wasm::Val::i32(4));
+ check_trap(call_indirect, wasm::Val::i32(0), wasm::Val::i32(5));
+
+ check(table->grow(2, f));
+ check(table->size(), 7u);
+ check(table->get(5) != nullptr);
+ check(table->get(6) != nullptr);
+
+ check(! table->grow(5));
+ check(table->grow(3));
+ check(table->grow(0));
+
+ // Create stand-alone table.
+ // TODO(wasm+): Once Wasm allows multiple tables, turn this into import.
+ std::cout << "Creating stand-alone table..." << std::endl;
+ auto tabletype = wasm::TableType::make(
+ wasm::ValType::make(wasm::FUNCREF), wasm::Limits(5, 5));
+ auto table2 = wasm::Table::make(store, tabletype.get());
+ check(table2->size() == 5);
+ check(! table2->grow(1));
+ check(table2->grow(0));
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/table.wasm b/deps/v8/third_party/wasm-api/example/table.wasm
new file mode 100644
index 0000000000..cdc0d8c35d
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/table.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/table.wat b/deps/v8/third_party/wasm-api/example/table.wat
new file mode 100644
index 0000000000..d3e3a945aa
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/table.wat
@@ -0,0 +1,12 @@
+(module
+ (table (export "table") 2 10 funcref)
+
+ (func (export "call_indirect") (param i32 i32) (result i32)
+ (call_indirect (param i32) (result i32) (local.get 0) (local.get 1))
+ )
+
+ (func $f (export "f") (param i32) (result i32) (local.get 0))
+ (func (export "g") (param i32) (result i32) (i32.const 666))
+
+ (elem (i32.const 1) $f)
+)
diff --git a/deps/v8/third_party/wasm-api/example/threads.c b/deps/v8/third_party/wasm-api/example/threads.c
new file mode 100644
index 0000000000..2f5b0f3c1f
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/threads.c
@@ -0,0 +1,152 @@
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#include "wasm.h"
+
+#define own
+
+const int N_THREADS = 10;
+const int N_REPS = 3;
+
+// A function to be called from Wasm code.
+own wasm_trap_t* callback(const wasm_val_t args[], wasm_val_t results[]) {
+ assert(args[0].kind == WASM_I32);
+ printf("> Thread %d running\n", args[0].of.i32);
+ return NULL;
+}
+
+
+typedef struct {
+ wasm_engine_t* engine;
+ wasm_shared_module_t* module;
+ int id;
+} thread_args;
+
+void* run(void* args_abs) {
+ thread_args* args = (thread_args*)args_abs;
+
+  // Recreate store and module.
+ own wasm_store_t* store = wasm_store_new(args->engine);
+ own wasm_module_t* module = wasm_module_obtain(store, args->module);
+
+ // Run the example N times.
+ for (int i = 0; i < N_REPS; ++i) {
+ usleep(100000);
+
+ // Create imports.
+ own wasm_functype_t* func_type = wasm_functype_new_1_0(wasm_valtype_new_i32());
+ own wasm_func_t* func = wasm_func_new(store, func_type, callback);
+ wasm_functype_delete(func_type);
+
+ wasm_val_t val = {.kind = WASM_I32, .of = {.i32 = (int32_t)args->id}};
+ own wasm_globaltype_t* global_type =
+ wasm_globaltype_new(wasm_valtype_new_i32(), WASM_CONST);
+ own wasm_global_t* global = wasm_global_new(store, global_type, &val);
+ wasm_globaltype_delete(global_type);
+
+ // Instantiate.
+ const wasm_extern_t* imports[] = {
+ wasm_func_as_extern(func), wasm_global_as_extern(global),
+ };
+ own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return NULL;
+ }
+
+ wasm_func_delete(func);
+ wasm_global_delete(global);
+
+ // Extract export.
+ own wasm_extern_vec_t exports;
+ wasm_instance_exports(instance, &exports);
+ if (exports.size == 0) {
+ printf("> Error accessing exports!\n");
+ return NULL;
+ }
+ const wasm_func_t *run_func = wasm_extern_as_func(exports.data[0]);
+ if (run_func == NULL) {
+ printf("> Error accessing export!\n");
+ return NULL;
+ }
+
+ wasm_instance_delete(instance);
+
+ // Call.
+ if (wasm_func_call(run_func, NULL, NULL)) {
+ printf("> Error calling function!\n");
+ return NULL;
+ }
+
+ wasm_extern_vec_delete(&exports);
+ }
+
+ wasm_module_delete(module);
+ wasm_store_delete(store);
+
+ free(args_abs);
+
+ return NULL;
+}
+
+int main(int argc, const char *argv[]) {
+ // Initialize.
+ wasm_engine_t* engine = wasm_engine_new();
+
+ // Load binary.
+ FILE* file = fopen("threads.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile and share.
+ own wasm_store_t* store = wasm_store_new(engine);
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ own wasm_shared_module_t* shared = wasm_module_share(module);
+
+ wasm_module_delete(module);
+ wasm_store_delete(store);
+
+ // Spawn threads.
+ pthread_t threads[N_THREADS];
+ for (int i = 0; i < N_THREADS; i++) {
+ thread_args* args = malloc(sizeof(thread_args));
+ args->id = i;
+ args->engine = engine;
+ args->module = shared;
+ printf("Initializing thread %d...\n", i);
+ pthread_create(&threads[i], NULL, &run, args);
+ }
+
+ for (int i = 0; i < N_THREADS; i++) {
+ printf("Waiting for thread: %d\n", i);
+ pthread_join(threads[i], NULL);
+ }
+
+ wasm_shared_module_delete(shared);
+ wasm_engine_delete(engine);
+
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/threads.cc b/deps/v8/third_party/wasm-api/example/threads.cc
new file mode 100644
index 0000000000..48b4fcd486
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/threads.cc
@@ -0,0 +1,124 @@
+#include <iostream>
+#include <fstream>
+#include <thread>
+#include <mutex>
+
+#include "wasm.hh"
+
+const int N_THREADS = 10;
+const int N_REPS = 3;
+
+// A function to be called from Wasm code.
+auto callback(
+ void* env, const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap*> {
+ assert(args[0].kind() == wasm::I32);
+  std::lock_guard<std::mutex> lock(*reinterpret_cast<std::mutex*>(env));
+ std::cout << "Thread " << args[0].i32() << " running..." << std::endl;
+ std::cout.flush();
+ return nullptr;
+}
+
+
+void run(
+ wasm::Engine* engine, const wasm::Shared<wasm::Module>* shared,
+ std::mutex* mutex, int id
+) {
+ // Create store.
+ auto store_ = wasm::Store::make(engine);
+ auto store = store_.get();
+
+ // Obtain.
+ auto module = wasm::Module::obtain(store, shared);
+ if (!module) {
+ std::lock_guard<std::mutex> lock(*mutex);
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Run the example N times.
+ for (int i = 0; i < N_REPS; ++i) {
+ std::this_thread::sleep_for(std::chrono::nanoseconds(100000));
+
+ // Create imports.
+ auto func_type = wasm::FuncType::make(
+ wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32)),
+ wasm::vec<wasm::ValType*>::make()
+ );
+ auto func = wasm::Func::make(store, func_type.get(), callback, mutex);
+
+ auto global_type = wasm::GlobalType::make(
+ wasm::ValType::make(wasm::I32), wasm::CONST);
+ auto global = wasm::Global::make(
+ store, global_type.get(), wasm::Val::i32(i));
+
+ // Instantiate.
+ wasm::Extern* imports[] = {func.get(), global.get()};
+ auto instance = wasm::Instance::make(store, module.get(), imports);
+ if (!instance) {
+ std::lock_guard<std::mutex> lock(*mutex);
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract export.
+ auto exports = instance->exports();
+ if (exports.size() == 0 || exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func()) {
+ std::lock_guard<std::mutex> lock(*mutex);
+ std::cout << "> Error accessing export!" << std::endl;
+ return;
+ }
+ auto run_func = exports[0]->func();
+
+ // Call.
+ run_func->call();
+ }
+}
+
+int main(int argc, const char *argv[]) {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("threads.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return 1;
+ }
+
+ // Compile and share.
+ std::cout << "Compiling and sharing module..." << std::endl;
+ auto store = wasm::Store::make(engine.get());
+ auto module = wasm::Module::make(store.get(), binary);
+ auto shared = module->share();
+
+ // Spawn threads.
+ std::cout << "Spawning threads..." << std::endl;
+ std::mutex mutex;
+ std::thread threads[N_THREADS];
+ for (int i = 0; i < N_THREADS; ++i) {
+ {
+ std::lock_guard<std::mutex> lock(mutex);
+ std::cout << "Initializing thread " << i << "..." << std::endl;
+ }
+ threads[i] = std::thread(run, engine.get(), shared.get(), &mutex, i);
+ }
+
+ for (int i = 0; i < N_THREADS; ++i) {
+ {
+ std::lock_guard<std::mutex> lock(mutex);
+ std::cout << "Waiting for thread " << i << "..." << std::endl;
+ }
+ threads[i].join();
+ }
+
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/threads.wasm b/deps/v8/third_party/wasm-api/example/threads.wasm
new file mode 100644
index 0000000000..9a5c19d0ac
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/threads.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/threads.wat b/deps/v8/third_party/wasm-api/example/threads.wat
new file mode 100644
index 0000000000..29a3bbcc1a
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/threads.wat
@@ -0,0 +1,5 @@
+(module
+ (func $message (import "" "hello") (param i32))
+ (global $id (import "" "id") i32)
+ (func (export "run") (call $message (global.get $id)))
+)
diff --git a/deps/v8/third_party/wasm-api/example/trap.c b/deps/v8/third_party/wasm-api/example/trap.c
new file mode 100644
index 0000000000..74620dce3b
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/trap.c
@@ -0,0 +1,121 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "wasm.h"
+
+#define own
+
+// A function to be called from Wasm code.
+own wasm_trap_t* fail_callback(
+ void* env, const wasm_val_t args[], wasm_val_t results[]
+) {
+ printf("Calling back...\n");
+ own wasm_name_t message;
+ wasm_name_new_from_string(&message, "callback abort");
+ own wasm_trap_t* trap = wasm_trap_new((wasm_store_t*)env, &message);
+ wasm_name_delete(&message);
+ return trap;
+}
+
+
+int main(int argc, const char* argv[]) {
+ // Initialize.
+ printf("Initializing...\n");
+ wasm_engine_t* engine = wasm_engine_new();
+ wasm_store_t* store = wasm_store_new(engine);
+
+ // Load binary.
+ printf("Loading binary...\n");
+ FILE* file = fopen("trap.wasm", "r");
+ if (!file) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fseek(file, 0L, SEEK_END);
+ size_t file_size = ftell(file);
+ fseek(file, 0L, SEEK_SET);
+ wasm_byte_vec_t binary;
+ wasm_byte_vec_new_uninitialized(&binary, file_size);
+ if (fread(binary.data, file_size, 1, file) != 1) {
+ printf("> Error loading module!\n");
+ return 1;
+ }
+ fclose(file);
+
+ // Compile.
+ printf("Compiling module...\n");
+ own wasm_module_t* module = wasm_module_new(store, &binary);
+ if (!module) {
+ printf("> Error compiling module!\n");
+ return 1;
+ }
+
+ wasm_byte_vec_delete(&binary);
+
+ // Create external print functions.
+ printf("Creating callback...\n");
+ own wasm_functype_t* fail_type = wasm_functype_new_0_1(wasm_valtype_new_i32());
+ own wasm_func_t* fail_func = wasm_func_new_with_env(store, fail_type, fail_callback, store, NULL);
+
+ wasm_functype_delete(fail_type);
+
+ // Instantiate.
+ printf("Instantiating module...\n");
+ const wasm_extern_t* imports[] = { wasm_func_as_extern(fail_func) };
+ own wasm_instance_t* instance = wasm_instance_new(store, module, imports);
+ if (!instance) {
+ printf("> Error instantiating module!\n");
+ return 1;
+ }
+
+ wasm_func_delete(fail_func);
+
+ // Extract export.
+ printf("Extracting exports...\n");
+ own wasm_extern_vec_t exports;
+ wasm_instance_exports(instance, &exports);
+ if (exports.size < 2) {
+ printf("> Error accessing exports!\n");
+ return 1;
+ }
+
+ wasm_module_delete(module);
+ wasm_instance_delete(instance);
+
+ // Call.
+ for (int i = 0; i < 2; ++i) {
+ const wasm_func_t* func = wasm_extern_as_func(exports.data[i]);
+ if (func == NULL) {
+ printf("> Error accessing export!\n");
+ return 1;
+ }
+
+ printf("Calling export %d...\n", i);
+ own wasm_trap_t* trap = wasm_func_call(func, NULL, NULL);
+ if (! trap) {
+ printf("> Error calling function!\n");
+ return 1;
+ }
+
+ printf("Printing message...\n");
+ own wasm_name_t message;
+ wasm_trap_message(trap, &message);
+ printf("> %s\n", message.data);
+
+ wasm_trap_delete(trap);
+ wasm_name_delete(&message);
+ }
+
+ wasm_extern_vec_delete(&exports);
+
+ // Shut down.
+ printf("Shutting down...\n");
+ wasm_store_delete(store);
+ wasm_engine_delete(engine);
+
+ // All done.
+ printf("Done.\n");
+ return 0;
+}
diff --git a/deps/v8/third_party/wasm-api/example/trap.cc b/deps/v8/third_party/wasm-api/example/trap.cc
new file mode 100644
index 0000000000..3311621724
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/trap.cc
@@ -0,0 +1,100 @@
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <string>
+#include <cinttypes>
+
+#include "wasm.hh"
+
+// A function to be called from Wasm code.
+auto fail_callback(
+ void* env, const wasm::Val args[], wasm::Val results[]
+) -> wasm::own<wasm::Trap*> {
+ std::cout << "Calling back..." << std::endl;
+ auto store = reinterpret_cast<wasm::Store*>(env);
+ auto message = wasm::Name::make(std::string("callback abort"));
+ return wasm::Trap::make(store, message);
+}
+
+
+void run() {
+ // Initialize.
+ std::cout << "Initializing..." << std::endl;
+ auto engine = wasm::Engine::make();
+ auto store_ = wasm::Store::make(engine.get());
+ auto store = store_.get();
+
+ // Load binary.
+ std::cout << "Loading binary..." << std::endl;
+ std::ifstream file("trap.wasm");
+ file.seekg(0, std::ios_base::end);
+ auto file_size = file.tellg();
+ file.seekg(0);
+ auto binary = wasm::vec<byte_t>::make_uninitialized(file_size);
+ file.read(binary.get(), file_size);
+ file.close();
+ if (file.fail()) {
+ std::cout << "> Error loading module!" << std::endl;
+ return;
+ }
+
+ // Compile.
+ std::cout << "Compiling module..." << std::endl;
+ auto module = wasm::Module::make(store, binary);
+ if (!module) {
+ std::cout << "> Error compiling module!" << std::endl;
+ return;
+ }
+
+ // Create external print functions.
+ std::cout << "Creating callback..." << std::endl;
+ auto fail_type = wasm::FuncType::make(
+ wasm::vec<wasm::ValType*>::make(),
+ wasm::vec<wasm::ValType*>::make(wasm::ValType::make(wasm::I32))
+ );
+ auto fail_func =
+ wasm::Func::make(store, fail_type.get(), fail_callback, store);
+
+ // Instantiate.
+ std::cout << "Instantiating module..." << std::endl;
+ wasm::Extern* imports[] = {fail_func.get()};
+ auto instance = wasm::Instance::make(store, module.get(), imports);
+ if (!instance) {
+ std::cout << "> Error instantiating module!" << std::endl;
+ return;
+ }
+
+ // Extract export.
+ std::cout << "Extracting exports..." << std::endl;
+ auto exports = instance->exports();
+ if (exports.size() < 2 ||
+ exports[0]->kind() != wasm::EXTERN_FUNC || !exports[0]->func() ||
+ exports[1]->kind() != wasm::EXTERN_FUNC || !exports[1]->func()) {
+ std::cout << "> Error accessing exports!" << std::endl;
+ return;
+ }
+
+ // Call.
+ for (size_t i = 0; i < 2; ++i) {
+ std::cout << "Calling export " << i << "..." << std::endl;
+ auto trap = exports[i]->func()->call();
+ if (!trap) {
+ std::cout << "> Error calling function!" << std::endl;
+ return;
+ }
+
+ std::cout << "Printing message..." << std::endl;
+ std::cout << "> " << trap->message().get() << std::endl;
+ }
+
+ // Shut down.
+ std::cout << "Shutting down..." << std::endl;
+}
+
+
+int main(int argc, const char* argv[]) {
+ run();
+ std::cout << "Done." << std::endl;
+ return 0;
+}
+
diff --git a/deps/v8/third_party/wasm-api/example/trap.wasm b/deps/v8/third_party/wasm-api/example/trap.wasm
new file mode 100644
index 0000000000..eeed14c897
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/trap.wasm
Binary files differ
diff --git a/deps/v8/third_party/wasm-api/example/trap.wat b/deps/v8/third_party/wasm-api/example/trap.wat
new file mode 100644
index 0000000000..dfd20fb12a
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/example/trap.wat
@@ -0,0 +1,5 @@
+(module
+ (func $callback (import "" "callback") (result i32))
+ (func (export "callback") (result i32) (call $callback))
+ (func (export "unreachable") (result i32) (unreachable) (i32.const 1))
+)
diff --git a/deps/v8/third_party/wasm-api/wasm.h b/deps/v8/third_party/wasm-api/wasm.h
new file mode 100644
index 0000000000..bb66c042d9
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/wasm.h
@@ -0,0 +1,677 @@
+// WebAssembly C API
+
+#ifndef __WASM_H
+#define __WASM_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// Auxiliaries
+
+// Machine types
+
+inline void assertions() {
+ static_assert(sizeof(float) == sizeof(uint32_t), "incompatible float type");
+ static_assert(sizeof(double) == sizeof(uint64_t), "incompatible double type");
+ static_assert(sizeof(intptr_t) == sizeof(uint32_t) ||
+ sizeof(intptr_t) == sizeof(uint64_t),
+ "incompatible pointer type");
+}
+
+typedef char byte_t;
+typedef float float32_t;
+typedef double float64_t;
+
+
+// Ownership
+
+#define own
+
+// The qualifier `own` is used to indicate ownership of data in this API.
+// It is intended to be interpreted similar to a `const` qualifier:
+//
+// - `own wasm_xxx_t*` owns the pointed-to data
+// - `own wasm_xxx_t` distributes to all fields of a struct or union `xxx`
+// - `own wasm_xxx_vec_t` owns the vector as well as its elements(!)
+// - an `own` function parameter passes ownership from caller to callee
+// - an `own` function result passes ownership from callee to caller
+// - an exception are `own` pointer parameters named `out`, which are copy-back
+// output parameters passing back ownership from callee to caller
+//
+// Own data is created by `wasm_xxx_new` functions and some others.
+// It must be released with the corresponding `wasm_xxx_delete` function.
+//
+// Deleting a reference does not necessarily delete the underlying object,
+// it merely indicates that this owner no longer uses it.
+//
+// For vectors, `const wasm_xxx_vec_t` is used informally to indicate that
+// neither the vector nor its elements should be modified.
+// TODO: introduce proper `wasm_xxx_const_vec_t`?
+
+
+#define WASM_DECLARE_OWN(name) \
+ typedef struct wasm_##name##_t wasm_##name##_t; \
+ \
+ void wasm_##name##_delete(own wasm_##name##_t*);
+
+
+// Vectors
+
+#define WASM_DECLARE_VEC(name, ptr_or_none) \
+ typedef struct wasm_##name##_vec_t { \
+ size_t size; \
+ wasm_##name##_t ptr_or_none* data; \
+ } wasm_##name##_vec_t; \
+ \
+ void wasm_##name##_vec_new_empty(own wasm_##name##_vec_t* out); \
+ void wasm_##name##_vec_new_uninitialized( \
+ own wasm_##name##_vec_t* out, size_t); \
+ void wasm_##name##_vec_new( \
+ own wasm_##name##_vec_t* out, \
+ size_t, own wasm_##name##_t ptr_or_none const[]); \
+ void wasm_##name##_vec_copy( \
+ own wasm_##name##_vec_t* out, wasm_##name##_vec_t*); \
+ void wasm_##name##_vec_delete(own wasm_##name##_vec_t*);
+
+
+// Byte vectors
+
+typedef byte_t wasm_byte_t;
+WASM_DECLARE_VEC(byte, )
+
+typedef wasm_byte_vec_t wasm_name_t;
+
+#define wasm_name wasm_byte_vec
+#define wasm_name_new wasm_byte_vec_new
+#define wasm_name_new_empty wasm_byte_vec_new_empty
+#define wasm_name_new_uninitialized wasm_byte_vec_new_uninitialized
+#define wasm_name_copy wasm_byte_vec_copy
+#define wasm_name_delete wasm_byte_vec_delete
+
+static inline void wasm_name_new_from_string(
+ own wasm_name_t* out, const char* s
+) {
+ wasm_name_new(out, strlen(s) + 1, s);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Environment
+
+// Configuration
+
+WASM_DECLARE_OWN(config)
+
+own wasm_config_t* wasm_config_new();
+
+// Embedders may provide custom functions for manipulating configs.
+
+
+// Engine
+
+WASM_DECLARE_OWN(engine)
+
+own wasm_engine_t* wasm_engine_new();
+own wasm_engine_t* wasm_engine_new_with_config(own wasm_config_t*);
+
+
+// Store
+
+WASM_DECLARE_OWN(store)
+
+own wasm_store_t* wasm_store_new(wasm_engine_t*);
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Type Representations
+
+// Type attributes
+
+typedef enum wasm_mutability_t {
+ WASM_CONST,
+ WASM_VAR
+} wasm_mutability_t;
+
+typedef struct wasm_limits_t {
+ uint32_t min;
+ uint32_t max;
+} wasm_limits_t;
+
+static const uint32_t wasm_limits_max_default = 0xffffffff;
+
+
+// Generic
+
+#define WASM_DECLARE_TYPE(name) \
+ WASM_DECLARE_OWN(name) \
+ WASM_DECLARE_VEC(name, *) \
+ \
+ own wasm_##name##_t* wasm_##name##_copy(wasm_##name##_t*);
+
+
+// Value Types
+
+WASM_DECLARE_TYPE(valtype)
+
+typedef enum wasm_valkind_t {
+ WASM_I32,
+ WASM_I64,
+ WASM_F32,
+ WASM_F64,
+ WASM_ANYREF,
+ WASM_FUNCREF
+} wasm_valkind_t;
+
+own wasm_valtype_t* wasm_valtype_new(wasm_valkind_t);
+
+wasm_valkind_t wasm_valtype_kind(const wasm_valtype_t*);
+
+static inline bool wasm_valkind_is_num(wasm_valkind_t k) {
+ return k < WASM_ANYREF;
+}
+static inline bool wasm_valkind_is_ref(wasm_valkind_t k) {
+ return k >= WASM_ANYREF;
+}
+
+static inline bool wasm_valtype_is_num(const wasm_valtype_t* t) {
+ return wasm_valkind_is_num(wasm_valtype_kind(t));
+}
+static inline bool wasm_valtype_is_ref(const wasm_valtype_t* t) {
+ return wasm_valkind_is_ref(wasm_valtype_kind(t));
+}
+
+
+// Function Types
+
+WASM_DECLARE_TYPE(functype)
+
+own wasm_functype_t* wasm_functype_new(
+ own wasm_valtype_vec_t* params, own wasm_valtype_vec_t* results);
+
+const wasm_valtype_vec_t* wasm_functype_params(const wasm_functype_t*);
+const wasm_valtype_vec_t* wasm_functype_results(const wasm_functype_t*);
+
+
+// Global Types
+
+WASM_DECLARE_TYPE(globaltype)
+
+own wasm_globaltype_t* wasm_globaltype_new(
+ own wasm_valtype_t*, wasm_mutability_t);
+
+const wasm_valtype_t* wasm_globaltype_content(const wasm_globaltype_t*);
+wasm_mutability_t wasm_globaltype_mutability(const wasm_globaltype_t*);
+
+
+// Table Types
+
+WASM_DECLARE_TYPE(tabletype)
+
+own wasm_tabletype_t* wasm_tabletype_new(
+ own wasm_valtype_t*, const wasm_limits_t*);
+
+const wasm_valtype_t* wasm_tabletype_element(const wasm_tabletype_t*);
+const wasm_limits_t* wasm_tabletype_limits(const wasm_tabletype_t*);
+
+
+// Memory Types
+
+WASM_DECLARE_TYPE(memorytype)
+
+own wasm_memorytype_t* wasm_memorytype_new(const wasm_limits_t*);
+
+const wasm_limits_t* wasm_memorytype_limits(const wasm_memorytype_t*);
+
+
+// Extern Types
+
+WASM_DECLARE_TYPE(externtype)
+
+typedef enum wasm_externkind_t {
+ WASM_EXTERN_FUNC,
+ WASM_EXTERN_GLOBAL,
+ WASM_EXTERN_TABLE,
+ WASM_EXTERN_MEMORY
+} wasm_externkind_t;
+
+wasm_externkind_t wasm_externtype_kind(const wasm_externtype_t*);
+
+wasm_externtype_t* wasm_functype_as_externtype(wasm_functype_t*);
+wasm_externtype_t* wasm_globaltype_as_externtype(wasm_globaltype_t*);
+wasm_externtype_t* wasm_tabletype_as_externtype(wasm_tabletype_t*);
+wasm_externtype_t* wasm_memorytype_as_externtype(wasm_memorytype_t*);
+
+wasm_functype_t* wasm_externtype_as_functype(wasm_externtype_t*);
+wasm_globaltype_t* wasm_externtype_as_globaltype(wasm_externtype_t*);
+wasm_tabletype_t* wasm_externtype_as_tabletype(wasm_externtype_t*);
+wasm_memorytype_t* wasm_externtype_as_memorytype(wasm_externtype_t*);
+
+const wasm_externtype_t* wasm_functype_as_externtype_const(const wasm_functype_t*);
+const wasm_externtype_t* wasm_globaltype_as_externtype_const(const wasm_globaltype_t*);
+const wasm_externtype_t* wasm_tabletype_as_externtype_const(const wasm_tabletype_t*);
+const wasm_externtype_t* wasm_memorytype_as_externtype_const(const wasm_memorytype_t*);
+
+const wasm_functype_t* wasm_externtype_as_functype_const(const wasm_externtype_t*);
+const wasm_globaltype_t* wasm_externtype_as_globaltype_const(const wasm_externtype_t*);
+const wasm_tabletype_t* wasm_externtype_as_tabletype_const(const wasm_externtype_t*);
+const wasm_memorytype_t* wasm_externtype_as_memorytype_const(const wasm_externtype_t*);
+
+
+// Import Types
+
+WASM_DECLARE_TYPE(importtype)
+
+own wasm_importtype_t* wasm_importtype_new(
+ own wasm_name_t* module, own wasm_name_t* name, own wasm_externtype_t*);
+
+const wasm_name_t* wasm_importtype_module(const wasm_importtype_t*);
+const wasm_name_t* wasm_importtype_name(const wasm_importtype_t*);
+const wasm_externtype_t* wasm_importtype_type(const wasm_importtype_t*);
+
+
+// Export Types
+
+WASM_DECLARE_TYPE(exporttype)
+
+own wasm_exporttype_t* wasm_exporttype_new(
+ own wasm_name_t*, own wasm_externtype_t*);
+
+const wasm_name_t* wasm_exporttype_name(const wasm_exporttype_t*);
+const wasm_externtype_t* wasm_exporttype_type(const wasm_exporttype_t*);
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Objects
+
+// Values
+
+struct wasm_ref_t;
+
+typedef struct wasm_val_t {
+ wasm_valkind_t kind;
+ union {
+ int32_t i32;
+ int64_t i64;
+ float32_t f32;
+ float64_t f64;
+ struct wasm_ref_t* ref;
+ } of;
+} wasm_val_t;
+
+void wasm_val_delete(own wasm_val_t* v);
+void wasm_val_copy(own wasm_val_t* out, const wasm_val_t*);
+
+WASM_DECLARE_VEC(val, )
+
+
+// References
+
+#define WASM_DECLARE_REF_BASE(name) \
+ WASM_DECLARE_OWN(name) \
+ \
+ own wasm_##name##_t* wasm_##name##_copy(const wasm_##name##_t*); \
+ \
+ void* wasm_##name##_get_host_info(const wasm_##name##_t*); \
+ void wasm_##name##_set_host_info(wasm_##name##_t*, void*); \
+ void wasm_##name##_set_host_info_with_finalizer( \
+ wasm_##name##_t*, void*, void (*)(void*));
+
+#define WASM_DECLARE_REF(name) \
+ WASM_DECLARE_REF_BASE(name) \
+ \
+ wasm_ref_t* wasm_##name##_as_ref(wasm_##name##_t*); \
+ wasm_##name##_t* wasm_ref_as_##name(wasm_ref_t*); \
+ const wasm_ref_t* wasm_##name##_as_ref_const(const wasm_##name##_t*); \
+ const wasm_##name##_t* wasm_ref_as_##name##_const(const wasm_ref_t*);
+
+#define WASM_DECLARE_SHARABLE_REF(name) \
+ WASM_DECLARE_REF(name) \
+ WASM_DECLARE_OWN(shared_##name) \
+ \
+ own wasm_shared_##name##_t* wasm_##name##_share(const wasm_##name##_t*); \
+ own wasm_##name##_t* wasm_##name##_obtain(wasm_store_t*, const wasm_shared_##name##_t*);
+
+
+WASM_DECLARE_REF_BASE(ref)
+
+
+// Traps
+
+typedef wasm_name_t wasm_message_t; // null terminated
+
+WASM_DECLARE_REF(trap)
+
+own wasm_trap_t* wasm_trap_new(wasm_store_t* store, const wasm_message_t*);
+
+void wasm_trap_message(const wasm_trap_t*, own wasm_message_t* out);
+
+
+// Foreign Objects
+
+WASM_DECLARE_REF(foreign)
+
+own wasm_foreign_t* wasm_foreign_new(wasm_store_t*);
+
+
+// Modules
+
+WASM_DECLARE_SHARABLE_REF(module)
+
+own wasm_module_t* wasm_module_new(
+ wasm_store_t*, const wasm_byte_vec_t* binary);
+
+bool wasm_module_validate(wasm_store_t*, const wasm_byte_vec_t* binary);
+
+void wasm_module_imports(const wasm_module_t*, own wasm_importtype_vec_t* out);
+void wasm_module_exports(const wasm_module_t*, own wasm_exporttype_vec_t* out);
+
+void wasm_module_serialize(const wasm_module_t*, own wasm_byte_vec_t* out);
+own wasm_module_t* wasm_module_deserialize(wasm_store_t*, const wasm_byte_vec_t*);
+
+
+// Function Instances
+
+WASM_DECLARE_REF(func)
+
+typedef own wasm_trap_t* (*wasm_func_callback_t)(
+ const wasm_val_t args[], wasm_val_t results[]);
+typedef own wasm_trap_t* (*wasm_func_callback_with_env_t)(
+ void* env, const wasm_val_t args[], wasm_val_t results[]);
+
+own wasm_func_t* wasm_func_new(
+ wasm_store_t*, const wasm_functype_t*, wasm_func_callback_t);
+own wasm_func_t* wasm_func_new_with_env(
+ wasm_store_t*, const wasm_functype_t* type, wasm_func_callback_with_env_t,
+ void* env, void (*finalizer)(void*));
+
+own wasm_functype_t* wasm_func_type(const wasm_func_t*);
+size_t wasm_func_param_arity(const wasm_func_t*);
+size_t wasm_func_result_arity(const wasm_func_t*);
+
+own wasm_trap_t* wasm_func_call(
+ const wasm_func_t*, const wasm_val_t args[], wasm_val_t results[]);
+
+
+// Global Instances
+
+WASM_DECLARE_REF(global)
+
+own wasm_global_t* wasm_global_new(
+ wasm_store_t*, const wasm_globaltype_t*, const wasm_val_t*);
+
+own wasm_globaltype_t* wasm_global_type(const wasm_global_t*);
+
+void wasm_global_get(const wasm_global_t*, own wasm_val_t* out);
+void wasm_global_set(wasm_global_t*, const wasm_val_t*);
+
+
+// Table Instances
+
+WASM_DECLARE_REF(table)
+
+typedef uint32_t wasm_table_size_t;
+
+own wasm_table_t* wasm_table_new(
+ wasm_store_t*, const wasm_tabletype_t*, wasm_ref_t* init);
+
+own wasm_tabletype_t* wasm_table_type(const wasm_table_t*);
+
+own wasm_ref_t* wasm_table_get(const wasm_table_t*, wasm_table_size_t index);
+bool wasm_table_set(wasm_table_t*, wasm_table_size_t index, wasm_ref_t*);
+
+wasm_table_size_t wasm_table_size(const wasm_table_t*);
+bool wasm_table_grow(wasm_table_t*, wasm_table_size_t delta, wasm_ref_t* init);
+
+
+// Memory Instances
+
+WASM_DECLARE_REF(memory)
+
+typedef uint32_t wasm_memory_pages_t;
+
+static const size_t MEMORY_PAGE_SIZE = 0x10000;
+
+own wasm_memory_t* wasm_memory_new(wasm_store_t*, const wasm_memorytype_t*);
+
+own wasm_memorytype_t* wasm_memory_type(const wasm_memory_t*);
+
+byte_t* wasm_memory_data(wasm_memory_t*);
+size_t wasm_memory_data_size(const wasm_memory_t*);
+
+wasm_memory_pages_t wasm_memory_size(const wasm_memory_t*);
+bool wasm_memory_grow(wasm_memory_t*, wasm_memory_pages_t delta);
+
+
+// Externals
+
+WASM_DECLARE_REF(extern)
+WASM_DECLARE_VEC(extern, *)
+
+wasm_externkind_t wasm_extern_kind(const wasm_extern_t*);
+own wasm_externtype_t* wasm_extern_type(const wasm_extern_t*);
+
+wasm_extern_t* wasm_func_as_extern(wasm_func_t*);
+wasm_extern_t* wasm_global_as_extern(wasm_global_t*);
+wasm_extern_t* wasm_table_as_extern(wasm_table_t*);
+wasm_extern_t* wasm_memory_as_extern(wasm_memory_t*);
+
+wasm_func_t* wasm_extern_as_func(wasm_extern_t*);
+wasm_global_t* wasm_extern_as_global(wasm_extern_t*);
+wasm_table_t* wasm_extern_as_table(wasm_extern_t*);
+wasm_memory_t* wasm_extern_as_memory(wasm_extern_t*);
+
+const wasm_extern_t* wasm_func_as_extern_const(const wasm_func_t*);
+const wasm_extern_t* wasm_global_as_extern_const(const wasm_global_t*);
+const wasm_extern_t* wasm_table_as_extern_const(const wasm_table_t*);
+const wasm_extern_t* wasm_memory_as_extern_const(const wasm_memory_t*);
+
+const wasm_func_t* wasm_extern_as_func_const(const wasm_extern_t*);
+const wasm_global_t* wasm_extern_as_global_const(const wasm_extern_t*);
+const wasm_table_t* wasm_extern_as_table_const(const wasm_extern_t*);
+const wasm_memory_t* wasm_extern_as_memory_const(const wasm_extern_t*);
+
+
+// Module Instances
+
+WASM_DECLARE_REF(instance)
+
+own wasm_instance_t* wasm_instance_new(
+ wasm_store_t*, const wasm_module_t*, const wasm_extern_t* const imports[]);
+
+void wasm_instance_exports(const wasm_instance_t*, own wasm_extern_vec_t* out);
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Convenience
+
+// Value Type construction short-hands
+
+static inline own wasm_valtype_t* wasm_valtype_new_i32() {
+ return wasm_valtype_new(WASM_I32);
+}
+static inline own wasm_valtype_t* wasm_valtype_new_i64() {
+ return wasm_valtype_new(WASM_I64);
+}
+static inline own wasm_valtype_t* wasm_valtype_new_f32() {
+ return wasm_valtype_new(WASM_F32);
+}
+static inline own wasm_valtype_t* wasm_valtype_new_f64() {
+ return wasm_valtype_new(WASM_F64);
+}
+
+static inline own wasm_valtype_t* wasm_valtype_new_anyref() {
+ return wasm_valtype_new(WASM_ANYREF);
+}
+static inline own wasm_valtype_t* wasm_valtype_new_funcref() {
+ return wasm_valtype_new(WASM_FUNCREF);
+}
+
+
+// Function Types construction short-hands
+
+static inline own wasm_functype_t* wasm_functype_new_0_0() {
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new_empty(&params);
+ wasm_valtype_vec_new_empty(&results);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_1_0(
+ own wasm_valtype_t* p
+) {
+ wasm_valtype_t* ps[1] = {p};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 1, ps);
+ wasm_valtype_vec_new_empty(&results);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_2_0(
+ own wasm_valtype_t* p1, own wasm_valtype_t* p2
+) {
+ wasm_valtype_t* ps[2] = {p1, p2};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 2, ps);
+ wasm_valtype_vec_new_empty(&results);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_3_0(
+ own wasm_valtype_t* p1, own wasm_valtype_t* p2, own wasm_valtype_t* p3
+) {
+ wasm_valtype_t* ps[3] = {p1, p2, p3};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 3, ps);
+ wasm_valtype_vec_new_empty(&results);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_0_1(
+ own wasm_valtype_t* r
+) {
+ wasm_valtype_t* rs[1] = {r};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new_empty(&params);
+ wasm_valtype_vec_new(&results, 1, rs);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_1_1(
+ own wasm_valtype_t* p, own wasm_valtype_t* r
+) {
+ wasm_valtype_t* ps[1] = {p};
+ wasm_valtype_t* rs[1] = {r};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 1, ps);
+ wasm_valtype_vec_new(&results, 1, rs);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_2_1(
+ own wasm_valtype_t* p1, own wasm_valtype_t* p2, own wasm_valtype_t* r
+) {
+ wasm_valtype_t* ps[2] = {p1, p2};
+ wasm_valtype_t* rs[1] = {r};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 2, ps);
+ wasm_valtype_vec_new(&results, 1, rs);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_3_1(
+ own wasm_valtype_t* p1, own wasm_valtype_t* p2, own wasm_valtype_t* p3,
+ own wasm_valtype_t* r
+) {
+ wasm_valtype_t* ps[3] = {p1, p2, p3};
+ wasm_valtype_t* rs[1] = {r};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 3, ps);
+ wasm_valtype_vec_new(&results, 1, rs);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_0_2(
+ own wasm_valtype_t* r1, own wasm_valtype_t* r2
+) {
+ wasm_valtype_t* rs[2] = {r1, r2};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new_empty(&params);
+ wasm_valtype_vec_new(&results, 2, rs);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_1_2(
+ own wasm_valtype_t* p, own wasm_valtype_t* r1, own wasm_valtype_t* r2
+) {
+ wasm_valtype_t* ps[1] = {p};
+ wasm_valtype_t* rs[2] = {r1, r2};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 1, ps);
+ wasm_valtype_vec_new(&results, 2, rs);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_2_2(
+ own wasm_valtype_t* p1, own wasm_valtype_t* p2,
+ own wasm_valtype_t* r1, own wasm_valtype_t* r2
+) {
+ wasm_valtype_t* ps[2] = {p1, p2};
+ wasm_valtype_t* rs[2] = {r1, r2};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 2, ps);
+ wasm_valtype_vec_new(&results, 2, rs);
+ return wasm_functype_new(&params, &results);
+}
+
+static inline own wasm_functype_t* wasm_functype_new_3_2(
+ own wasm_valtype_t* p1, own wasm_valtype_t* p2, own wasm_valtype_t* p3,
+ own wasm_valtype_t* r1, own wasm_valtype_t* r2
+) {
+ wasm_valtype_t* ps[3] = {p1, p2, p3};
+ wasm_valtype_t* rs[2] = {r1, r2};
+ wasm_valtype_vec_t params, results;
+ wasm_valtype_vec_new(&params, 3, ps);
+ wasm_valtype_vec_new(&results, 2, rs);
+ return wasm_functype_new(&params, &results);
+}
+
+
+// Value construction short-hands
+
+static inline void wasm_val_init_ptr(own wasm_val_t* out, void* p) {
+#if UINTPTR_MAX == UINT32_MAX
+ out->kind = WASM_I32;
+ out->of.i32 = (intptr_t)p;
+#elif UINTPTR_MAX == UINT64_MAX
+ out->kind = WASM_I64;
+ out->of.i64 = (intptr_t)p;
+#endif
+}
+
+static inline void* wasm_val_ptr(const wasm_val_t* val) {
+#if UINTPTR_MAX == UINT32_MAX
+ return (void*)(intptr_t)val->of.i32;
+#elif UINTPTR_MAX == UINT64_MAX
+ return (void*)(intptr_t)val->of.i64;
+#endif
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+#undef own
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif  // #ifndef __WASM_H
diff --git a/deps/v8/third_party/wasm-api/wasm.hh b/deps/v8/third_party/wasm-api/wasm.hh
new file mode 100644
index 0000000000..c153d4b9df
--- /dev/null
+++ b/deps/v8/third_party/wasm-api/wasm.hh
@@ -0,0 +1,770 @@
+// WebAssembly C++ API
+
+#ifndef __WASM_HH
+#define __WASM_HH
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <limits>
+#include <string>
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Auxiliaries
+
+// Machine types
+
+static_assert(sizeof(float) == sizeof(int32_t), "incompatible float type");
+static_assert(sizeof(double) == sizeof(int64_t), "incompatible double type");
+static_assert(sizeof(intptr_t) == sizeof(int32_t) ||
+ sizeof(intptr_t) == sizeof(int64_t), "incompatible pointer type");
+
+using byte_t = char;
+using float32_t = float;
+using float64_t = double;
+
+
+namespace wasm {
+
+// Ownership
+
+template<class T> struct owner { using type = T; };
+template<class T> struct owner<T*> { using type = std::unique_ptr<T>; };
+
+template<class T>
+using own = typename owner<T>::type;
+
+template<class T>
+auto make_own(T x) -> own<T> { return own<T>(std::move(x)); }
+
+
+// Vectors
+
+template<class T>
+struct vec_traits {
+ static void construct(size_t size, T data[]) {}
+ static void destruct(size_t size, T data[]) {}
+ static void move(size_t size, T* data, T init[]) {
+ for (size_t i = 0; i < size; ++i) data[i] = std::move(init[i]);
+ }
+ static void copy(size_t size, T data[], const T init[]) {
+ for (size_t i = 0; i < size; ++i) data[i] = init[i];
+ }
+
+ using proxy = T&;
+};
+
+template<class T>
+struct vec_traits<T*> {
+ static void construct(size_t size, T* data[]) {
+ for (size_t i = 0; i < size; ++i) data[i] = nullptr;
+ }
+ static void destruct(size_t size, T* data[]) {
+ for (size_t i = 0; i < size; ++i) {
+ if (data[i]) delete data[i];
+ }
+ }
+ static void move(size_t size, T* data[], own<T*> init[]) {
+ for (size_t i = 0; i < size; ++i) data[i] = init[i].release();
+ }
+ static void copy(size_t size, T* data[], const T* const init[]) {
+ for (size_t i = 0; i < size; ++i) {
+ if (init[i]) data[i] = init[i]->copy().release();
+ }
+ }
+
+ class proxy {
+ T*& elem_;
+ public:
+ proxy(T*& elem) : elem_(elem) {}
+ operator T*() { return elem_; }
+ operator const T*() const { return elem_; }
+ auto operator=(own<T*>&& elem) -> proxy& {
+ reset(std::move(elem));
+ return *this;
+ }
+ void reset(own<T*>&& val = own<T*>()) {
+ if (elem_) delete elem_;
+ elem_ = val.release();
+ }
+ auto release() -> T* {
+ auto elem = elem_;
+ elem_ = nullptr;
+ return elem;
+ }
+ auto move() -> own<T*> { return make_own(release()); }
+ auto get() -> T* { return elem_; }
+ auto get() const -> const T* { return elem_; }
+ auto operator->() -> T* { return elem_; }
+ auto operator->() const -> const T* { return elem_; }
+ };
+};
+
+
+template<class T>
+class vec {
+ static const size_t invalid_size = SIZE_MAX;
+
+ size_t size_;
+ std::unique_ptr<T[]> data_;
+
+#ifdef DEBUG
+ void make_data();
+ void free_data();
+#else
+ void make_data() {}
+ void free_data() {}
+#endif
+
+ vec(size_t size) : vec(size, size ? new(std::nothrow) T[size] : nullptr) {
+ make_data();
+ }
+
+ vec(size_t size, T* data) : size_(size), data_(data) {
+ assert(!!size_ == !!data_ || size_ == invalid_size);
+ }
+
+public:
+ template<class U>
+ vec(vec<U>&& that) : vec(that.size_, that.data_.release()) {}
+
+ ~vec() {
+ if (data_) vec_traits<T>::destruct(size_, data_.get());
+ free_data();
+ }
+
+ operator bool() const {
+ return bool(size_ != invalid_size);
+ }
+
+ auto size() const -> size_t {
+ return size_;
+ }
+
+ auto get() const -> const T* {
+ return data_.get();
+ }
+
+ auto get() -> T* {
+ return data_.get();
+ }
+
+ auto release() -> T* {
+ return data_.release();
+ }
+
+ void reset() {
+ if (data_) vec_traits<T>::destruct(size_, data_.get());
+ free_data();
+ size_ = 0;
+ data_.reset();
+ }
+
+ void reset(vec& that) {
+ reset();
+ size_ = that.size_;
+ data_.reset(that.data_.release());
+ }
+
+ auto operator=(vec&& that) -> vec& {
+ reset(that);
+ return *this;
+ }
+
+ auto operator[](size_t i) -> typename vec_traits<T>::proxy {
+ assert(i < size_);
+ return typename vec_traits<T>::proxy(data_[i]);
+ }
+
+ auto operator[](size_t i) const -> const typename vec_traits<T>::proxy {
+ assert(i < size_);
+ return typename vec_traits<T>::proxy(data_[i]);
+ }
+
+ auto copy() const -> vec {
+ auto v = vec(size_);
+ if (v) vec_traits<T>::copy(size_, v.data_.get(), data_.get());
+ return v;
+ }
+
+ static auto make_uninitialized(size_t size = 0) -> vec {
+ auto v = vec(size);
+ if (v) vec_traits<T>::construct(size, v.data_.get());
+ return v;
+ }
+
+ static auto make(size_t size, own<T> init[]) -> vec {
+ auto v = vec(size);
+ if (v) vec_traits<T>::move(size, v.data_.get(), init);
+ return v;
+ }
+
+ static auto make(std::string s) -> vec<char> {
+ auto v = vec(s.length() + 1);
+ if (v) std::strcpy(v.get(), s.data());
+ return v;
+ }
+
+ static auto make() -> vec {
+ return vec(0);
+ }
+
+ template<class... Ts>
+ static auto make(Ts&&... args) -> vec {
+ own<T> data[] = { make_own(std::move(args))... };
+ return make(sizeof...(Ts), data);
+ }
+
+ static auto adopt(size_t size, T data[]) -> vec {
+ return vec(size, data);
+ }
+
+ static auto invalid() -> vec {
+ return vec(invalid_size, nullptr);
+ }
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Environment
+
+// Configuration
+
+class Config {
+public:
+ Config() = delete;
+ ~Config();
+ void operator delete(void*);
+
+ static auto make() -> own<Config*>;
+
+ // Implementations may provide custom methods for manipulating Configs.
+};
+
+
+// Engine
+
+class Engine {
+public:
+ Engine() = delete;
+ ~Engine();
+ void operator delete(void*);
+
+ static auto make(own<Config*>&& = Config::make()) -> own<Engine*>;
+};
+
+
+// Store
+
+class Store {
+public:
+ Store() = delete;
+ ~Store();
+ void operator delete(void*);
+
+ static auto make(Engine*) -> own<Store*>;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Type Representations
+
+// Type attributes
+
+enum Mutability { CONST, VAR };
+
+struct Limits {
+ uint32_t min;
+ uint32_t max;
+
+ Limits(uint32_t min, uint32_t max = std::numeric_limits<uint32_t>::max()) :
+ min(min), max(max) {}
+};
+
+
+// Value Types
+
+enum ValKind { I32, I64, F32, F64, ANYREF, FUNCREF };
+
+inline bool is_num(ValKind k) { return k < ANYREF; }
+inline bool is_ref(ValKind k) { return k >= ANYREF; }
+
+
+class ValType {
+public:
+ ValType() = delete;
+ ~ValType();
+ void operator delete(void*);
+
+ static auto make(ValKind) -> own<ValType*>;
+ auto copy() const -> own<ValType*>;
+
+ auto kind() const -> ValKind;
+ auto is_num() const -> bool { return wasm::is_num(kind()); }
+ auto is_ref() const -> bool { return wasm::is_ref(kind()); }
+};
+
+
+// External Types
+
+enum ExternKind {
+ EXTERN_FUNC, EXTERN_GLOBAL, EXTERN_TABLE, EXTERN_MEMORY
+};
+
+class FuncType;
+class GlobalType;
+class TableType;
+class MemoryType;
+
+class ExternType {
+public:
+ ExternType() = delete;
+ ~ExternType();
+ void operator delete(void*);
+
+ auto copy() const-> own<ExternType*>;
+
+ auto kind() const -> ExternKind;
+
+ auto func() -> FuncType*;
+ auto global() -> GlobalType*;
+ auto table() -> TableType*;
+ auto memory() -> MemoryType*;
+
+ auto func() const -> const FuncType*;
+ auto global() const -> const GlobalType*;
+ auto table() const -> const TableType*;
+ auto memory() const -> const MemoryType*;
+};
+
+
+// Function Types
+
+enum class arrow { ARROW };
+
+class FuncType : public ExternType {
+public:
+ FuncType() = delete;
+ ~FuncType();
+
+ static auto make(
+ vec<ValType*>&& params = vec<ValType*>::make(),
+ vec<ValType*>&& results = vec<ValType*>::make()
+ ) -> own<FuncType*>;
+
+ auto copy() const -> own<FuncType*>;
+
+ auto params() const -> const vec<ValType*>&;
+ auto results() const -> const vec<ValType*>&;
+};
+
+
+// Global Types
+
+class GlobalType : public ExternType {
+public:
+ GlobalType() = delete;
+ ~GlobalType();
+
+ static auto make(own<ValType*>&&, Mutability) -> own<GlobalType*>;
+ auto copy() const -> own<GlobalType*>;
+
+ auto content() const -> const ValType*;
+ auto mutability() const -> Mutability;
+};
+
+
+// Table Types
+
+class TableType : public ExternType {
+public:
+ TableType() = delete;
+ ~TableType();
+
+ static auto make(own<ValType*>&&, Limits) -> own<TableType*>;
+ auto copy() const -> own<TableType*>;
+
+ auto element() const -> const ValType*;
+ auto limits() const -> const Limits&;
+};
+
+
+// Memory Types
+
+class MemoryType : public ExternType {
+public:
+ MemoryType() = delete;
+ ~MemoryType();
+
+ static auto make(Limits) -> own<MemoryType*>;
+ auto copy() const -> own<MemoryType*>;
+
+ auto limits() const -> const Limits&;
+};
+
+
+// Import Types
+
+using Name = vec<byte_t>;
+
+class ImportType {
+public:
+ ImportType() = delete;
+ ~ImportType();
+ void operator delete(void*);
+
+ static auto make(Name&& module, Name&& name, own<ExternType*>&&) ->
+ own<ImportType*>;
+ auto copy() const -> own<ImportType*>;
+
+ auto module() const -> const Name&;
+ auto name() const -> const Name&;
+ auto type() const -> const ExternType*;
+};
+
+
+// Export Types
+
+class ExportType {
+public:
+ ExportType() = delete;
+ ~ExportType();
+ void operator delete(void*);
+
+ static auto make(Name&&, own<ExternType*>&&) -> own<ExportType*>;
+ auto copy() const -> own<ExportType*>;
+
+ auto name() const -> const Name&;
+ auto type() const -> const ExternType*;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Runtime Objects
+
+// References
+
+class Ref {
+public:
+ Ref() = delete;
+ ~Ref();
+ void operator delete(void*);
+
+ auto copy() const -> own<Ref*>;
+
+ auto get_host_info() const -> void*;
+ void set_host_info(void* info, void (*finalizer)(void*) = nullptr);
+};
+
+
+// Values
+
+class Val {
+ ValKind kind_;
+ union impl {
+ int32_t i32;
+ int64_t i64;
+ float32_t f32;
+ float64_t f64;
+ Ref* ref;
+ } impl_;
+
+ Val(ValKind kind, impl impl) : kind_(kind), impl_(impl) {}
+
+public:
+ Val() : kind_(ANYREF) { impl_.ref = nullptr; }
+ Val(int32_t i) : kind_(I32) { impl_.i32 = i; }
+ Val(int64_t i) : kind_(I64) { impl_.i64 = i; }
+ Val(float32_t z) : kind_(F32) { impl_.f32 = z; }
+ Val(float64_t z) : kind_(F64) { impl_.f64 = z; }
+ Val(own<Ref*>&& r) : kind_(ANYREF) { impl_.ref = r.release(); }
+
+ Val(Val&& that) : kind_(that.kind_), impl_(that.impl_) {
+ if (is_ref()) that.impl_.ref = nullptr;
+ }
+
+ ~Val() {
+ reset();
+ }
+
+ auto is_num() const -> bool { return wasm::is_num(kind_); }
+ auto is_ref() const -> bool { return wasm::is_ref(kind_); }
+
+ static auto i32(int32_t x) -> Val { return Val(x); }
+ static auto i64(int64_t x) -> Val { return Val(x); }
+ static auto f32(float32_t x) -> Val { return Val(x); }
+ static auto f64(float64_t x) -> Val { return Val(x); }
+ static auto ref(own<Ref*>&& x) -> Val { return Val(std::move(x)); }
+ template<class T> inline static auto make(T x) -> Val;
+ template<class T> inline static auto make(own<T>&& x) -> Val;
+
+ void reset() {
+ if (is_ref() && impl_.ref) {
+ delete impl_.ref;
+ impl_.ref = nullptr;
+ }
+ }
+
+ void reset(Val& that) {
+ reset();
+ kind_ = that.kind_;
+ impl_ = that.impl_;
+ if (is_ref()) that.impl_.ref = nullptr;
+ }
+
+ auto operator=(Val&& that) -> Val& {
+ reset(that);
+ return *this;
+ }
+
+ auto kind() const -> ValKind { return kind_; }
+ auto i32() const -> int32_t { assert(kind_ == I32); return impl_.i32; }
+ auto i64() const -> int64_t { assert(kind_ == I64); return impl_.i64; }
+ auto f32() const -> float32_t { assert(kind_ == F32); return impl_.f32; }
+ auto f64() const -> float64_t { assert(kind_ == F64); return impl_.f64; }
+ auto ref() const -> Ref* { assert(is_ref()); return impl_.ref; }
+ template<class T> inline auto get() const -> T;
+
+  // Transfer ownership of the contained reference to the caller.
+  // Must only be called when is_ref(); afterwards this Val holds nullptr,
+  // so ~Val()/reset() will not delete the released object.
+  auto release_ref() -> own<Ref*> {
+    assert(is_ref());
+    auto ref = impl_.ref;
+    impl_.ref = nullptr;  // was `ref = nullptr`: nulled the local, leaving
+                          // impl_.ref owned twice (double delete) and
+                          // returning an empty own<Ref*>
+    return own<Ref*>(ref);
+  }
+
+ auto copy() const -> Val {
+ if (is_ref() && impl_.ref != nullptr) {
+ impl impl;
+ impl.ref = impl_.ref->copy().release();
+ return Val(kind_, impl);
+ } else {
+ return Val(kind_, impl_);
+ }
+ }
+};
+
+
+template<> inline auto Val::make<int32_t>(int32_t x) -> Val { return Val(x); }
+template<> inline auto Val::make<int64_t>(int64_t x) -> Val { return Val(x); }
+template<> inline auto Val::make<float32_t>(float32_t x) -> Val { return Val(x); }
+template<> inline auto Val::make<float64_t>(float64_t x) -> Val { return Val(x); }
+template<> inline auto Val::make<Ref*>(own<Ref*>&& x) -> Val {
+ return Val(std::move(x));
+}
+
+template<> inline auto Val::make<uint32_t>(uint32_t x) -> Val {
+ return Val(static_cast<int32_t>(x));
+}
+template<> inline auto Val::make<uint64_t>(uint64_t x) -> Val {
+ return Val(static_cast<int64_t>(x));
+}
+
+template<> inline auto Val::get<int32_t>() const -> int32_t { return i32(); }
+template<> inline auto Val::get<int64_t>() const -> int64_t { return i64(); }
+template<> inline auto Val::get<float32_t>() const -> float32_t { return f32(); }
+template<> inline auto Val::get<float64_t>() const -> float64_t { return f64(); }
+template<> inline auto Val::get<Ref*>() const -> Ref* { return ref(); }
+
+template<> inline auto Val::get<uint32_t>() const -> uint32_t {
+ return static_cast<uint32_t>(i32());
+}
+template<> inline auto Val::get<uint64_t>() const -> uint64_t {
+ return static_cast<uint64_t>(i64());
+}
+
+
+// Traps
+
+using Message = vec<byte_t>; // null terminated
+
+class Trap : public Ref {
+public:
+ Trap() = delete;
+ ~Trap();
+
+ static auto make(Store*, const Message& msg) -> own<Trap*>;
+ auto copy() const -> own<Trap*>;
+
+ auto message() const -> Message;
+};
+
+
+// Shared objects
+
+template<class T>
+class Shared {
+public:
+ Shared() = delete;
+ ~Shared();
+ void operator delete(void*);
+};
+
+
+// Modules
+
+class Module : public Ref {
+public:
+ Module() = delete;
+ ~Module();
+
+ static auto validate(Store*, const vec<byte_t>& binary) -> bool;
+ static auto make(Store*, const vec<byte_t>& binary) -> own<Module*>;
+ auto copy() const -> own<Module*>;
+
+ auto imports() const -> vec<ImportType*>;
+ auto exports() const -> vec<ExportType*>;
+
+ auto share() const -> own<Shared<Module>*>;
+ static auto obtain(Store*, const Shared<Module>*) -> own<Module*>;
+
+ auto serialize() const -> vec<byte_t>;
+ static auto deserialize(Store*, const vec<byte_t>&) -> own<Module*>;
+};
+
+
+// Foreign Objects
+
+class Foreign : public Ref {
+public:
+ Foreign() = delete;
+ ~Foreign();
+
+ static auto make(Store*) -> own<Foreign*>;
+ auto copy() const -> own<Foreign*>;
+};
+
+
+// Externals
+
+class Func;
+class Global;
+class Table;
+class Memory;
+
+class Extern : public Ref {
+public:
+ Extern() = delete;
+ ~Extern();
+
+ auto copy() const -> own<Extern*>;
+
+ auto kind() const -> ExternKind;
+ auto type() const -> own<ExternType*>;
+
+ auto func() -> Func*;
+ auto global() -> Global*;
+ auto table() -> Table*;
+ auto memory() -> Memory*;
+
+ auto func() const -> const Func*;
+ auto global() const -> const Global*;
+ auto table() const -> const Table*;
+ auto memory() const -> const Memory*;
+};
+
+
+// Function Instances
+
+class Func : public Extern {
+public:
+ Func() = delete;
+ ~Func();
+
+ using callback = auto (*)(const Val[], Val[]) -> own<Trap*>;
+ using callback_with_env = auto (*)(void*, const Val[], Val[]) -> own<Trap*>;
+
+ static auto make(Store*, const FuncType*, callback) -> own<Func*>;
+ static auto make(Store*, const FuncType*, callback_with_env,
+ void*, void (*finalizer)(void*) = nullptr) -> own<Func*>;
+ auto copy() const -> own<Func*>;
+
+ auto type() const -> own<FuncType*>;
+ auto param_arity() const -> size_t;
+ auto result_arity() const -> size_t;
+
+ auto call(const Val[] = nullptr, Val[] = nullptr) const -> own<Trap*>;
+};
+
+
+// Global Instances
+
+class Global : public Extern {
+public:
+ Global() = delete;
+ ~Global();
+
+ static auto make(Store*, const GlobalType*, const Val&) -> own<Global*>;
+ auto copy() const -> own<Global*>;
+
+ auto type() const -> own<GlobalType*>;
+ auto get() const -> Val;
+ void set(const Val&);
+};
+
+
+// Table Instances
+
+class Table : public Extern {
+public:
+ Table() = delete;
+ ~Table();
+
+ using size_t = uint32_t;
+
+ static auto make(
+ Store*, const TableType*, const Ref* init = nullptr) -> own<Table*>;
+ auto copy() const -> own<Table*>;
+
+ auto type() const -> own<TableType*>;
+ auto get(size_t index) const -> own<Ref*>;
+ auto set(size_t index, const Ref*) -> bool;
+ auto size() const -> size_t;
+ auto grow(size_t delta, const Ref* init = nullptr) -> bool;
+};
+
+
+// Memory Instances
+
+class Memory : public Extern {
+public:
+ Memory() = delete;
+ ~Memory();
+
+ static auto make(Store*, const MemoryType*) -> own<Memory*>;
+ auto copy() const -> own<Memory*>;
+
+ using pages_t = uint32_t;
+
+ static const size_t page_size = 0x10000;
+
+ auto type() const -> own<MemoryType*>;
+ auto data() const -> byte_t*;
+ auto data_size() const -> size_t;
+ auto size() const -> pages_t;
+ auto grow(pages_t delta) -> bool;
+};
+
+
+// Module Instances
+
+class Instance : public Ref {
+public:
+ Instance() = delete;
+ ~Instance();
+
+ static auto make(
+ Store*, const Module*, const Extern* const[]) -> own<Instance*>;
+ auto copy() const -> own<Instance*>;
+
+ auto exports() const -> vec<Extern*>;
+};
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace wasm
+
+#endif // #ifdef __WASM_HH
diff --git a/deps/v8/tools/OWNERS b/deps/v8/tools/OWNERS
index bdb1d555a4..bd9cea5b3e 100644
--- a/deps/v8/tools/OWNERS
+++ b/deps/v8/tools/OWNERS
@@ -1 +1,2 @@
-file://INFRA_OWNERS
+file://COMMON_OWNERS
+
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 70db89b5da..7c5bd4a6f5 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -545,10 +545,9 @@ def create_total_page_stats(domains, args):
# Add a new "Total" page containing the summed up metrics.
domains['Total'] = total
+# Generate Raw JSON file.
-# Generate JSON file.
-
-def do_json(args):
+def _read_logs(args):
versions = {}
for path in args.logdirs:
if os.path.isdir(path):
@@ -562,6 +561,36 @@ def do_json(args):
if domain not in versions[version]: versions[version][domain] = {}
read_stats(os.path.join(root, filename),
versions[version][domain], args)
+
+ return versions
+
+def do_raw_json(args):
+ versions = _read_logs(args)
+
+ for version, domains in versions.items():
+ if args.aggregate:
+ create_total_page_stats(domains, args)
+ for domain, entries in domains.items():
+ raw_entries = []
+ for name, value in entries.items():
+ # We don't want the calculated sum in the JSON file.
+ if name == "Sum": continue
+ raw_entries.append({
+ 'name': name,
+ 'duration': value['time_list'],
+ 'count': value['count_list'],
+ })
+
+ domains[domain] = raw_entries
+
+ print(json.dumps(versions, separators=(',', ':')))
+
+
+# Generate JSON file.
+
+def do_json(args):
+ versions = _read_logs(args)
+
for version, domains in versions.items():
if args.aggregate:
create_total_page_stats(domains, args)
@@ -706,6 +735,20 @@ def main():
help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
"Additionally creates a Total page with all entries.")
+ # Command: raw-json.
+ subparsers["raw-json"] = subparser_adder.add_parser(
+ "raw-json", help="Collect raw results from 'run' command into " \
+ "a single json file.")
+ subparsers["raw-json"].set_defaults(
+ func=do_raw_json, error=subparsers["json"].error)
+ subparsers["raw-json"].add_argument(
+ "logdirs", type=str, metavar="<logdir>", nargs="*",
+ help="specify directories with log files to parse")
+ subparsers["raw-json"].add_argument(
+ "--aggregate", dest="aggregate", action="store_true", default=False,
+ help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
+ "Additionally creates a Total page with all entries.")
+
# Command: help.
subparsers["help"] = subparser_adder.add_parser(
"help", help="help information")
diff --git a/deps/v8/tools/clusterfuzz/OWNERS b/deps/v8/tools/clusterfuzz/OWNERS
index 50b5741785..bdb1d555a4 100644
--- a/deps/v8/tools/clusterfuzz/OWNERS
+++ b/deps/v8/tools/clusterfuzz/OWNERS
@@ -1,3 +1 @@
-set noparent
-
file://INFRA_OWNERS
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index dae84cbbb1..fe94bb9ecc 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
index fa3d672f00..636f4c9d9e 100644
--- a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -11,7 +11,7 @@
# Flags of x64,ignition:
--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 159fea9496..55f76e8bc6 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -49,6 +49,7 @@ CONFIGS = dict(
'--liftoff',
'--no-wasm-tier-up',
'--no-use-ic',
+ '--no-lazy-feedback-allocation',
],
ignition_turbo=[],
ignition_turbo_no_ic=[
@@ -58,11 +59,13 @@ CONFIGS = dict(
'--always-opt',
'--no-liftoff',
'--no-wasm-tier-up',
+ '--no-lazy-feedback-allocation'
],
ignition_turbo_opt_eager=[
'--always-opt',
'--no-lazy',
'--no-lazy-inner-functions',
+ '--no-lazy-feedback-allocation',
],
jitless=[
'--jitless',
@@ -73,6 +76,7 @@ CONFIGS = dict(
slow_path_opt=[
'--always-opt',
'--force-slow-path',
+ '--no-lazy-feedback-allocation',
],
trusted=[
'--no-untrusted-code-mitigations',
@@ -80,25 +84,10 @@ CONFIGS = dict(
trusted_opt=[
'--always-opt',
'--no-untrusted-code-mitigations',
+ '--no-lazy-feedback-allocation',
],
)
-# Additional flag experiments. List of tuples like
-# (<likelihood to use flags in [0,1)>, <flag>).
-ADDITIONAL_FLAGS = [
- (0.1, '--stress-marking=100'),
- (0.1, '--stress-scavenge=100'),
- (0.1, '--stress-compaction-random'),
- (0.1, '--random-gc-interval=2000'),
- (0.2, '--noanalyze-environment-liveness'),
- (0.1, '--stress-delay-tasks'),
- (0.01, '--thread-pool-size=1'),
- (0.01, '--thread-pool-size=2'),
- (0.01, '--thread-pool-size=4'),
- (0.01, '--thread-pool-size=8'),
- (0.1, '--interrupt-budget=1000'),
-]
-
# Timeout in seconds for one d8 run.
TIMEOUT = 3
@@ -188,6 +177,12 @@ def parse_args():
parser.add_argument(
'--second-config', help='second configuration', default='ignition_turbo')
parser.add_argument(
+ '--first-config-extra-flags', action='append', default=[],
+ help='Additional flags to pass to the run of the first configuration')
+ parser.add_argument(
+ '--second-config-extra-flags', action='append', default=[],
+ help='Additional flags to pass to the run of the second configuration')
+ parser.add_argument(
'--first-d8', default='d8',
help='optional path to first d8 executable, '
'default: bundled in the same directory as this script')
@@ -305,7 +300,6 @@ def print_difference(
def main():
options = parse_args()
- rng = random.Random(options.random_seed)
# Suppressions are architecture and configuration specific.
suppress = v8_suppressions.get_suppression(
@@ -323,13 +317,10 @@ def main():
# Set up runtime arguments.
common_flags = FLAGS + ['--random-seed', str(options.random_seed)]
- first_config_flags = common_flags + CONFIGS[options.first_config]
- second_config_flags = common_flags + CONFIGS[options.second_config]
-
- # Add additional flags to second config based on experiment percentages.
- for p, flag in ADDITIONAL_FLAGS:
- if rng.random() < p:
- second_config_flags.append(flag)
+ first_config_flags = (common_flags + CONFIGS[options.first_config] +
+ options.first_config_extra_flags)
+ second_config_flags = (common_flags + CONFIGS[options.second_config] +
+ options.second_config_extra_flags)
def run_d8(d8, config_flags, config_label=None, testcase=options.testcase):
preamble = PREAMBLE[:]
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index b13d3d7677..43b65e850b 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -39,10 +39,11 @@ class ConfigTest(unittest.TestCase):
self.assertEqual(
[
'--first-config=ignition',
- '--second-config=ignition_turbo_no_ic',
+ '--second-config=ignition_turbo',
'--second-d8=d8',
+ '--second-config-extra-flags=--stress-scavenge=100',
],
- v8_fuzz_config.Config('foo', Rng()).choose_foozzie_flags(),
+ v8_fuzz_config.Config('foo', Rng(), 42).choose_foozzie_flags(),
)
@@ -138,7 +139,10 @@ class SystemTest(unittest.TestCase):
with open(os.path.join(TEST_DATA, 'failure_output.txt')) as f:
expected_output = f.read()
with self.assertRaises(subprocess.CalledProcessError) as ctx:
- run_foozzie('test_d8_1.py', 'test_d8_3.py', '--skip-sanity-checks')
+ run_foozzie('test_d8_1.py', 'test_d8_3.py', '--skip-sanity-checks',
+ '--first-config-extra-flags=--flag1',
+ '--first-config-extra-flags=--flag2=0',
+ '--second-config-extra-flags=--flag3')
e = ctx.exception
self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
self.assertEquals(expected_output, cut_verbose_output(e.output))
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
index 1cd353225b..0dcacf216b 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -11,11 +11,11 @@ FOOZZIE_EXPERIMENTS = [
[10, 'ignition', 'jitless', 'd8'],
[10, 'ignition', 'slow_path', 'd8'],
[5, 'ignition', 'slow_path_opt', 'd8'],
- [10, 'ignition', 'ignition_turbo', 'd8'],
- [10, 'ignition_no_ic', 'ignition_turbo', 'd8'],
- [10, 'ignition', 'ignition_turbo_no_ic', 'd8'],
- [10, 'ignition', 'ignition_turbo_opt', 'd8'],
- [10, 'ignition_no_ic', 'ignition_turbo_opt', 'd8'],
+ [26, 'ignition', 'ignition_turbo', 'd8'],
+ [2, 'ignition_no_ic', 'ignition_turbo', 'd8'],
+ [2, 'ignition', 'ignition_turbo_no_ic', 'd8'],
+ [18, 'ignition', 'ignition_turbo_opt', 'd8'],
+ [2, 'ignition_no_ic', 'ignition_turbo_opt', 'd8'],
[5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
[5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
[5, 'ignition', 'ignition', 'clang_x86/d8'],
@@ -23,16 +23,56 @@ FOOZZIE_EXPERIMENTS = [
[5, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
]
+# Additional flag experiments. List of tuples like
+# (<likelihood to use flags in [0,1)>, <flag>).
+ADDITIONAL_FLAGS = [
+ (0.1, '--stress-marking=100'),
+ (0.1, '--stress-scavenge=100'),
+ (0.1, '--stress-compaction-random'),
+ (0.1, '--random-gc-interval=2000'),
+ (0.2, '--noanalyze-environment-liveness'),
+ (0.1, '--stress-delay-tasks'),
+ (0.01, '--thread-pool-size=1'),
+ (0.01, '--thread-pool-size=2'),
+ (0.01, '--thread-pool-size=4'),
+ (0.01, '--thread-pool-size=8'),
+ (0.1, '--interrupt-budget=1000'),
+]
+
class Config(object):
- def __init__(self, name, rng=None):
+ def __init__(self, name, rng=None, random_seed=None):
+ """
+ Args:
+ name: Name of the used fuzzer.
+ rng: Random number generator for generating experiments.
+ random_seed: Random-seed used for d8 throughout one fuzz session.
+ TODO(machenbach): Remove random_seed after a grace period of a couple of
+ days. We only have it to keep bisection stable. Afterwards we can just
+ use rng.
+ """
self.name = name
self.rng = rng or random.Random()
+ self.random_seed = random_seed
def choose_foozzie_flags(self):
"""Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.
Returns: List of flags to pass to v8_foozzie.py fuzz harness.
"""
+ # TODO(machenbach): Temporarily use same RNG state for all test cases in one
+ # fuzz session. See also TODO above.
+ if self.random_seed is not None:
+ flags_rng = random.Random(self.random_seed)
+ else:
+ flags_rng = random.Random()
+
+ # Add additional flags to second config based on experiment percentages.
+ extra_flags = []
+ for p, flag in ADDITIONAL_FLAGS:
+ if flags_rng.random() < p:
+ extra_flags.append('--second-config-extra-flags=%s' % flag)
+
+ # Calculate flags determining the experiment.
acc = 0
threshold = self.rng.random() * 100
for prob, first_config, second_config, second_d8 in FOOZZIE_EXPERIMENTS:
@@ -42,5 +82,5 @@ class Config(object):
'--first-config=' + first_config,
'--second-config=' + second_config,
'--second-d8=' + second_d8,
- ]
+ ] + extra_flags
assert False
diff --git a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
index 1b682432ce..f2cb8935a2 100644
--- a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
+++ b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
@@ -19,3 +19,14 @@ print("https://crbug.com/935800");
}
print(Object.getOwnPropertyNames(foo().bar));
})();
+
+print("https://crbug.com/985154");
+(function () {
+ "use strict";
+ function foo() {
+ "use asm";
+ function baz() {}
+ return {bar: baz};
+ }
+ print(Object.getOwnPropertyNames(foo().bar));
+})();
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index 2ef4472207..0434a64ff5 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -26,6 +26,7 @@ group("v8_run_gcmole") {
"../../third_party/icu/source/",
"../../third_party/wasm-api/wasm.h",
"../../third_party/wasm-api/wasm.hh",
+ "../../third_party/inspector_protocol/",
"$target_gen_dir/../../",
"$target_gen_dir/../../torque-generated/",
]
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index a91554c3fa..ad7847df31 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -65,6 +65,15 @@ Print the current JavaScript stack trace
Usage: jst
end
+# Print TurboFan graph node.
+define pn
+call _v8_internal_Node_Print((void*)($arg0))
+end
+document pn
+Print a v8 TurboFan graph node
+Usage: pn node_address
+end
+
# Skip the JavaScript stack.
define jss
set $js_entry_sp=v8::internal::Isolate::Current()->thread_local_top()->js_entry_sp_
@@ -191,25 +200,6 @@ def add_debug_file_directory(dir):
"set debug-file-directory %s" % ":".join(current_dirs), to_string=True)
-def load_libcxx_pretty_printers(src_dir):
- libcxx_pretty_printers = os.path.join(src_dir, 'third_party',
- 'libcxx-pretty-printers')
- if not os.path.isdir(libcxx_pretty_printers):
- return
- sys.path.insert(1, libcxx_pretty_printers)
- from printers import register_libcxx_printers
- register_libcxx_printers(None)
-
-
-def load_gdb_chrome(src_dir):
- tools_gdb = os.path.join(src_dir, 'tools', 'gdb')
-
- sys.path.insert(1, tools_gdb)
- import gdb_chrome
-
- gdb.execute('source %s' % os.path.join(tools_gdb, 'viewg.gdb'))
-
-
def newobj_handler(event):
global compile_dirs
compile_dir = os.path.dirname(event.new_objfile.filename)
@@ -227,24 +217,10 @@ def newobj_handler(event):
# https://crbug.com/603286#c35
add_debug_file_directory(compile_dir)
- git = subprocess.Popen(
- ['git', '-C', compile_dir, 'rev-parse', '--show-toplevel'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- src_dir, _ = git.communicate()
- if git.returncode:
- return
- src_dir = str(src_dir).rstrip()
-
- load_libcxx_pretty_printers(src_dir)
-
- load_gdb_chrome(src_dir)
-
-
# Event hook for newly loaded objfiles.
# https://sourceware.org/gdb/onlinedocs/gdb/Events-In-Python.html
gdb.events.new_objfile.connect(newobj_handler)
-gdb.execute("set environment CHROMIUM_GDBINIT_SOURCED=1")
+gdb.execute("set environment V8_GDBINIT_SOURCED=1")
end
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 02b0873598..2323e8ab9c 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -85,10 +85,6 @@ consts_misc = [
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
- { 'name': 'SystemPointerSize', 'value': 'kSystemPointerSize' },
- { 'name': 'SystemPointerSizeLog2', 'value': 'kSystemPointerSizeLog2' },
- { 'name': 'TaggedSize', 'value': 'kTaggedSize' },
- { 'name': 'TaggedSizeLog2', 'value': 'kTaggedSizeLog2' },
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
@@ -165,8 +161,6 @@ consts_misc = [
'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
{ 'name': 'bit_field3_number_of_own_descriptors_shift',
'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
- { 'name': 'class_Map__instance_descriptors_offset',
- 'value': 'Map::kInstanceDescriptorsOffset' },
{ 'name': 'off_fp_context_or_frame_type',
'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'},
@@ -250,7 +244,6 @@ extras_accessors = [
'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
'JSArrayBufferView, byte_offset, size_t, kByteOffsetOffset',
- 'JSTypedArray, external_pointer, uintptr_t, kExternalPointerOffset',
'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_size_in_words, char, kInstanceSizeInWordsOffset',
'Map, inobject_properties_start_or_constructor_function_index, char, kInObjectPropertiesStartOrConstructorFunctionIndexOffset',
@@ -261,8 +254,6 @@ extras_accessors = [
'Map, prototype, Object, kPrototypeOffset',
'Oddball, kind_offset, int, kKindOffset',
'HeapNumber, value, double, kValueOffset',
- 'ConsString, first, String, kFirstOffset',
- 'ConsString, second, String, kSecondOffset',
'ExternalString, resource, Object, kResourceOffset',
'SeqOneByteString, chars, char, kHeaderSize',
'SeqTwoByteString, chars, char, kHeaderSize',
@@ -285,7 +276,7 @@ extras_accessors = [
#
expected_classes = [
'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
- 'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
+ 'JSObject', 'JSRegExp', 'JSPrimitiveWrapper', 'Map', 'Oddball', 'Script',
'SeqOneByteString', 'SharedFunctionInfo', 'ScopeInfo', 'JSPromise'
];
@@ -377,6 +368,7 @@ def load_objects_from_file(objfilename, checktypes):
in_insttype = False;
typestr = '';
+ uncommented_file = ''
#
# Iterate the header file line-by-line to collect type and class
@@ -399,21 +391,26 @@ def load_objects_from_file(objfilename, checktypes):
typestr += line;
continue;
- match = re.match(r'class(?:\s+V8_EXPORT(?:_PRIVATE)?)?'
- r'\s+(\w[^:]*)'
- r'(?:: public (\w[^{]*))?\s*{\s*',
- line);
-
- if (match):
- klass = match.group(1).strip();
- pklass = match.group(2);
- if (pklass):
- # Strip potential template arguments from parent
- # class.
- match = re.match(r'(\w+)(<.*>)?', pklass.strip());
- pklass = match.group(1).strip();
+ uncommented_file += '\n' + line
- klasses[klass] = { 'parent': pklass };
+ for match in re.finditer(r'\nclass(?:\s+V8_EXPORT(?:_PRIVATE)?)?'
+ r'\s+(\w[^:;]*)'
+ r'(?:: public (\w[^{]*))?\s*{\s*',
+ uncommented_file):
+ klass = match.group(1).strip();
+ pklass = match.group(2);
+ if (pklass):
+ # Check for generated Torque class.
+ gen_match = re.match(
+ r'TorqueGenerated\w+\s*<\s*\w+,\s*(\w+)\s*>',
+ pklass)
+ if (gen_match):
+ pklass = gen_match.group(1)
+ # Strip potential template arguments from parent
+ # class.
+ match = re.match(r'(\w+)(<.*>)?', pklass.strip());
+ pklass = match.group(1).strip();
+ klasses[klass] = { 'parent': pklass };
#
# Process the instance type declaration.
@@ -640,7 +637,9 @@ def emit_config():
out.write('/* class type information */\n');
consts = [];
- for typename in sorted(typeclasses):
+ keys = typeclasses.keys();
+ keys.sort();
+ for typename in keys:
klass = typeclasses[typename];
consts.append({
'name': 'type_%s__%s' % (klass, typename),
@@ -651,7 +650,9 @@ def emit_config():
out.write('/* class hierarchy information */\n');
consts = [];
- for klassname in sorted(klasses):
+ keys = klasses.keys();
+ keys.sort();
+ for klassname in keys:
pklass = klasses[klassname]['parent'];
bklass = get_base_class(klassname);
if (bklass != 'Object'):
diff --git a/deps/v8/tools/get_landmines.py b/deps/v8/tools/get_landmines.py
index 33e0829ac8..bf8efa595e 100755
--- a/deps/v8/tools/get_landmines.py
+++ b/deps/v8/tools/get_landmines.py
@@ -47,6 +47,7 @@ def print_landmines(): # pylint: disable=invalid-name
print('Clober again to fix windows build problems.')
print('Clobber to possibly resolve failure on win-32 bot.')
print('Clobber for http://crbug.com/668958.')
+ print('Clobber to possibly resolve build failure on Misc V8 Linux gcc.')
build_get_landmines.print_landmines()
return 0
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 6d06d00418..773622de76 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -586,7 +586,10 @@ MINIDUMP_RAW_SYSTEM_INFO = Descriptor([
MD_CPU_ARCHITECTURE_X86 = 0
MD_CPU_ARCHITECTURE_ARM = 5
-MD_CPU_ARCHITECTURE_ARM64 = 0x8003
+# Breakpad used a custom value of 0x8003 here; Crashpad uses the new
+# standardized value 12.
+MD_CPU_ARCHITECTURE_ARM64 = 12
+MD_CPU_ARCHITECTURE_ARM64_BREAKPAD_LEGACY = 0x8003
MD_CPU_ARCHITECTURE_AMD64 = 9
OBJDUMP_BIN = None
@@ -647,6 +650,8 @@ class MinidumpReader(object):
system_info = MINIDUMP_RAW_SYSTEM_INFO.Read(
self.minidump, d.location.rva)
self.arch = system_info.processor_architecture
+ if self.arch == MD_CPU_ARCHITECTURE_ARM64_BREAKPAD_LEGACY:
+ self.arch = MD_CPU_ARCHITECTURE_ARM64
assert self.arch in [MD_CPU_ARCHITECTURE_AMD64,
MD_CPU_ARCHITECTURE_ARM,
MD_CPU_ARCHITECTURE_ARM64,
diff --git a/deps/v8/tools/heap-stats/categories.js b/deps/v8/tools/heap-stats/categories.js
index b94a534896..e02571b4f8 100644
--- a/deps/v8/tools/heap-stats/categories.js
+++ b/deps/v8/tools/heap-stats/categories.js
@@ -66,6 +66,7 @@ const CATEGORIES = new Map([
'JS_MAP_VALUE_ITERATOR_TYPE',
'JS_MESSAGE_OBJECT_TYPE',
'JS_OBJECT_TYPE',
+ 'JS_PRIMITIVE_WRAPPER_TYPE',
'JS_PROMISE_TYPE',
'JS_PROXY_TYPE',
'JS_REGEXP_TYPE',
@@ -75,7 +76,6 @@ const CATEGORIES = new Map([
'JS_STRING_ITERATOR_TYPE',
'JS_TO_WASM_FUNCTION',
'JS_TYPED_ARRAY_TYPE',
- 'JS_VALUE_TYPE',
'JS_WEAK_MAP_TYPE',
'MUTABLE_HEAP_NUMBER_TYPE',
'NATIVE_CONTEXT_TYPE',
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index b79a380796..f66f82a8f3 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -407,7 +407,7 @@ class MetaBuildWrapper(object):
if self.platform == 'darwin':
os_dim = ('os', 'Mac-10.12')
elif self.platform.startswith('linux'):
- os_dim = ('os', 'Ubuntu-14.04')
+ os_dim = ('os', 'Ubuntu-16.04')
elif self.platform == 'win32':
os_dim = ('os', 'Windows-10')
else:
diff --git a/deps/v8/tools/node/fetch_deps.py b/deps/v8/tools/node/fetch_deps.py
index c87e7a5e0c..ee5b629e2b 100755
--- a/deps/v8/tools/node/fetch_deps.py
+++ b/deps/v8/tools/node/fetch_deps.py
@@ -34,7 +34,6 @@ GCLIENT_SOLUTION = [
"v8/test/test262/data" : None,
"v8/test/test262/harness" : None,
"v8/third_party/android_ndk" : None,
- "v8/third_party/android_tools" : None,
"v8/third_party/android_sdk" : None,
"v8/third_party/catapult" : None,
"v8/third_party/colorama/src" : None,
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 419cc47847..a98dcae91a 100644
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -266,6 +266,7 @@ class ResultTracker(object):
mean = numpy.mean(results)
mean_stderr = numpy.std(results) / numpy.sqrt(len(results))
logging.debug(' Mean: %.2f, mean_stderr: %.2f', mean, mean_stderr)
+ logging.info('>>> Confidence level is %.2f', mean / (1000.0 * mean_stderr))
return confidence_level * mean_stderr < mean / 1000.0
def __str__(self): # pragma: no cover
@@ -928,16 +929,16 @@ def Main(argv):
'--filter=JSTests/TypedArrays/ will run only TypedArray '
'benchmarks from the JSTests suite.',
default='')
- parser.add_argument('--confidence-level', type=int,
+ parser.add_argument('--confidence-level', type=float,
help='Repeatedly runs each benchmark until specified '
'confidence level is reached. The value is interpreted '
'as the number of standard deviations from the mean that '
'all values must lie within. Typical values are 1, 2 and '
- '3 and correspond to 68%, 95% and 99.7% probability that '
- 'the measured value is within 0.1% of the true value. '
- 'Larger values result in more retries and thus longer '
- 'runtime, but also provide more reliable results. Also '
- 'see --max-total-duration flag.')
+ '3 and correspond to 68%%, 95%% and 99.7%% probability '
+ 'that the measured value is within 0.1%% of the true '
+ 'value. Larger values result in more retries and thus '
+ 'longer runtime, but also provide more reliable results. '
+ 'Also see --max-total-duration flag.')
parser.add_argument('--max-total-duration', type=int, default=7140, # 1h 59m
help='Max total duration in seconds allowed for retries '
'across all tests. This is especially useful in '
@@ -1088,8 +1089,11 @@ def Main(argv):
break
attempts_left -= 1
- have_failed_tests = True
- if attempts_left:
+ if not attempts_left:
+ logging.info('>>> Suite %s failed after %d retries',
+ runnable_name, runnable.retry_count + 1)
+ have_failed_tests = True
+ else:
logging.info('>>> Retrying suite: %s', runnable_name)
except MaxTotalDurationReachedError:
have_failed_tests = True
diff --git a/deps/v8/tools/testrunner/OWNERS b/deps/v8/tools/testrunner/OWNERS
index 50b5741785..bdb1d555a4 100644
--- a/deps/v8/tools/testrunner/OWNERS
+++ b/deps/v8/tools/testrunner/OWNERS
@@ -1,3 +1 @@
-set noparent
-
file://INFRA_OWNERS
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 525a198156..7f9b43435f 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -120,8 +120,9 @@ class ModeConfig(object):
self.execution_mode = execution_mode
-DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
-RELEASE_FLAGS = ["--nohard-abort"]
+DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap",
+ "--testing-d8-test-runner"]
+RELEASE_FLAGS = ["--nohard-abort", "--testing-d8-test-runner"]
MODES = {
"debug": ModeConfig(
flags=DEBUG_FLAGS,
@@ -348,9 +349,6 @@ class BaseTestRunner(object):
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -561,6 +559,9 @@ class BaseTestRunner(object):
asan_options.append('detect_leaks=1')
else:
asan_options.append('detect_leaks=0')
+ if utils.GuessOS() == 'windows':
+ # https://crbug.com/967663
+ asan_options.append('detect_stack_use_after_return=0')
os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
if self.build_config.cfi_vptr:
@@ -790,9 +791,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
self.framework_name,
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 1bfc0317fe..864d7346fc 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -154,11 +154,18 @@ class GenericTestLoader(TestLoader):
return [self.test_root]
@property
- def extension(self):
- return ""
+ def extensions(self):
+ return []
+
+ def __find_extension(self, filename):
+ for extension in self.extensions:
+ if filename.endswith(extension):
+ return extension
+
+ return False
def _should_filter_by_name(self, filename):
- if not filename.endswith(self.extension):
+ if not self.__find_extension(filename):
return True
for suffix in self.excluded_suffixes:
@@ -171,10 +178,11 @@ class GenericTestLoader(TestLoader):
return False
def _filename_to_testname(self, filename):
- if not self.extension:
+ extension = self.__find_extension(filename)
+ if not extension:
return filename
- return filename[:-len(self.extension)]
+ return filename[:-len(extension)]
def _to_relpath(self, abspath, test_root):
return os.path.relpath(abspath, test_root)
@@ -197,8 +205,8 @@ class GenericTestLoader(TestLoader):
class JSTestLoader(GenericTestLoader):
@property
- def extension(self):
- return ".js"
+ def extensions(self):
+ return [".js", ".mjs"]
class TestGenerator(object):
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index dc92db6099..4b0cf1553b 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -4,6 +4,7 @@
# Use this to run several variants of the tests.
ALL_VARIANT_FLAGS = {
+ "assert_types": [["--assert-types"]],
"code_serializer": [["--cache=code"]],
"default": [[]],
"future": [["--future"]],
diff --git a/deps/v8/tools/testrunner/outproc/message.py b/deps/v8/tools/testrunner/outproc/message.py
index bbfc1cdf7e..f196cfd614 100644
--- a/deps/v8/tools/testrunner/outproc/message.py
+++ b/deps/v8/tools/testrunner/outproc/message.py
@@ -32,8 +32,15 @@ class OutProc(base.OutProc):
if len(expected_lines) != len(actual_lines):
return True
+ # Try .js first, and fall back to .mjs.
+ # TODO(v8:9406): clean this up by never separating the path from
+ # the extension in the first place.
+ base_path = self._basepath + '.js'
+ if not os.path.exists(base_path):
+ base_path = self._basepath + '.mjs'
+
env = {
- 'basename': os.path.basename(self._basepath + '.js'),
+ 'basename': os.path.basename(base_path),
}
for (expected, actual) in itertools.izip_longest(
expected_lines, actual_lines, fillvalue=''):
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index bc79c015bd..51e78608cb 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -9,7 +9,6 @@ from __future__ import print_function
from functools import reduce
import os
-import re
import sys
# Adds testrunner to the path hence it has to be imported at the beggining.
@@ -21,10 +20,8 @@ from testrunner.objects import predictable
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
-from testrunner.testproc.progress import ResultsTracker
from testrunner.testproc.seed import SeedProc
from testrunner.testproc.variant import VariantProc
-from testrunner.utils import random_utils
ARCH_GUESS = utils.DefaultArch()
@@ -43,7 +40,7 @@ VARIANT_ALIASES = {
'dev': VARIANTS,
# Additional variants, run on all bots.
'more': MORE_VARIANTS,
- # Shortcut for the two above ('more' first - it has the longer running tests).
+ # Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
'extra': ['nooptimization', 'future', 'no_wasm_traps'],
@@ -66,10 +63,10 @@ PREDICTABLE_WRAPPER = os.path.join(
class StandardTestRunner(base_runner.BaseTestRunner):
def __init__(self, *args, **kwargs):
- super(StandardTestRunner, self).__init__(*args, **kwargs)
+ super(StandardTestRunner, self).__init__(*args, **kwargs)
- self.sancov_dir = None
- self._variants = None
+ self.sancov_dir = None
+ self._variants = None
@property
def framework_name(self):
@@ -156,7 +153,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
parser.add_option('--report', default=False, action='store_true',
help='Print a summary of the tests to be run')
-
def _process_options(self, options):
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
@@ -224,7 +220,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
self._variants = self._parse_variants(options.variants)
def CheckTestMode(name, option): # pragma: no cover
- if not option in ['run', 'skip', 'dontcare']:
+ if option not in ['run', 'skip', 'dontcare']:
print('Unknown %s mode %s' % (name, option))
raise base_runner.TestRunnerError()
CheckTestMode('slow test', options.slow_tests)
@@ -319,7 +315,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
self._prepare_procs(procs)
- loader.load_initial_tests(initial_batch_size=options.j*2)
+ loader.load_initial_tests(initial_batch_size=options.j * 2)
# This starts up worker processes and blocks until all tests are
# processed.
@@ -328,7 +324,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
for indicator in indicators:
indicator.finished()
-
if tests.test_count_estimate:
percentage = float(results.total) / tests.test_count_estimate * 100
else:
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 3ba10f9528..aad6740c1c 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -13,7 +13,6 @@ import sys
import time
from . import base
-from ..local import junit_output
# Base dir of the build products for Release and Debug.
@@ -282,45 +281,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index ddb6d029f6..34c2249fcc 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -635,23 +635,44 @@ CppEntriesProvider.prototype.parseVmSymbols = function(
libName, libStart, libEnd, libASLRSlide, processorFunc) {
this.loadSymbols(libName);
- var prevEntry;
+ var lastUnknownSize;
+ var lastAdded;
+
+ function inRange(funcInfo, start, end) {
+ return funcInfo.start >= start && funcInfo.end <= end;
+ }
function addEntry(funcInfo) {
// Several functions can be mapped onto the same address. To avoid
// creating zero-sized entries, skip such duplicates.
// Also double-check that function belongs to the library address space.
- if (prevEntry && !prevEntry.end &&
- prevEntry.start < funcInfo.start &&
- prevEntry.start >= libStart && funcInfo.start <= libEnd) {
- processorFunc(prevEntry.name, prevEntry.start, funcInfo.start);
+
+ if (lastUnknownSize &&
+ lastUnknownSize.start < funcInfo.start) {
+ // Try to update lastUnknownSize based on new entries start position.
+ lastUnknownSize.end = funcInfo.start;
+ if ((!lastAdded || !inRange(lastUnknownSize, lastAdded.start,
+ lastAdded.end)) &&
+ inRange(lastUnknownSize, libStart, libEnd)) {
+ processorFunc(lastUnknownSize.name, lastUnknownSize.start,
+ lastUnknownSize.end);
+ lastAdded = lastUnknownSize;
+ }
}
- if (funcInfo.end &&
- (!prevEntry || prevEntry.start != funcInfo.start) &&
- funcInfo.start >= libStart && funcInfo.end <= libEnd) {
- processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
+ lastUnknownSize = undefined;
+
+ if (funcInfo.end) {
+ // Skip duplicates that have the same start address as the last added.
+ if ((!lastAdded || lastAdded.start != funcInfo.start) &&
+ inRange(funcInfo, libStart, libEnd)) {
+ processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
+ lastAdded = funcInfo;
+ }
+ } else {
+ // If a funcInfo doesn't have an end, try to match it up with then next
+ // entry.
+ lastUnknownSize = funcInfo;
}
- prevEntry = funcInfo;
}
while (true) {
@@ -701,7 +722,10 @@ UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
libName = this.apkEmbeddedLibrary;
}
- libName = this.targetRootFS + libName;
+ if (this.targetRootFS) {
+ libName = libName.substring(libName.lastIndexOf('/') + 1);
+ libName = this.targetRootFS + libName;
+ }
try {
this.symbols = [
os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 51b588f90b..2150d7e0cc 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -51,6 +51,7 @@ def preprocess(input):
r'\1_OtheSaLi', input)
input = re.sub(r'@if\(', r'@iF(', input)
input = re.sub(r'@export', r'@eXpOrT', input)
+ input = re.sub(r'js-implicit[ \n]+', r'jS_iMpLiCiT_', input)
# Special handing of '%' for intrinsics, turn the percent
# into a unicode character so that it gets treated as part of the
@@ -88,6 +89,8 @@ def postprocess(output):
output = re.sub(r'@iF\(', r'@if(', output)
output = re.sub(r'@eXpOrT',
r"@export", output)
+ output = re.sub(r'jS_iMpLiCiT_',
+ r"js-implicit ", output)
while True:
old = output
@@ -117,14 +120,14 @@ def process(filename, lint, should_format):
print("error code " + str(rc) + " running clang-format. Exiting...")
sys.exit(rc);
- if lint:
- if (output != original_input):
+ if (output != original_input):
+ if lint:
print(filename + ' requires formatting', file=sys.stderr)
- if should_format:
- output_file = open(filename, 'w')
- output_file.write(output);
- output_file.close()
+ if should_format:
+ output_file = open(filename, 'w')
+ output_file.write(output);
+ output_file.close()
def print_usage():
print('format-torque -i file1[, file2[, ...]]')
diff --git a/deps/v8/tools/torque/vim-torque/syntax/torque.vim b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
index 1a4ce987c7..592e870820 100644
--- a/deps/v8/tools/torque/vim-torque/syntax/torque.vim
+++ b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
@@ -20,14 +20,14 @@ syn region torqueComment start="/\*" end="\*/" contains=@Spell
syn region torqueStringS start=+'+ skip=+\\\\\|\\'+ end=+'\|$+
syn keyword torqueAssert assert check debug unreachable
-syn keyword torqueAtom True False Undefined Hole Null
+syn keyword torqueAtom True False Undefined TheHole Null
syn keyword torqueBoolean true false
syn keyword torqueBranch break continue goto
syn keyword torqueConditional if else typeswitch otherwise
syn match torqueConstant /\v<[A-Z][A-Z0-9_]+>/
syn match torqueConstant /\v<k[A-Z][A-Za-z0-9]*>/
syn keyword torqueFunction macro builtin runtime intrinsic
-syn keyword torqueKeyword cast convert from_constexpr min max unsafe_cast
+syn keyword torqueKeyword cast convert from_constexpr min max unsafe_cast js-implicit implicit
syn keyword torqueLabel case
syn keyword torqueMatching try label catch
syn keyword torqueModifier extern javascript constexpr transitioning transient weak export
diff --git a/deps/v8/tools/torque/vscode-torque/.npmrc b/deps/v8/tools/torque/vscode-torque/.npmrc
deleted file mode 100644
index 43c97e719a..0000000000
--- a/deps/v8/tools/torque/vscode-torque/.npmrc
+++ /dev/null
@@ -1 +0,0 @@
-package-lock=false
diff --git a/deps/v8/tools/torque/vscode-torque/README.md b/deps/v8/tools/torque/vscode-torque/README.md
deleted file mode 100644
index fc4efa69ff..0000000000
--- a/deps/v8/tools/torque/vscode-torque/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Torque support
-
-This extension adds language support for [the Torque language used in V8](https://v8.dev/docs/torque).
-
-## Installation
-
-Since the extension is currently not published to the marketplace, the easiest way to
-install the extension is to symlink it to your local extension directory:
-
-```
-ln -s $V8/tools/torque/vscode-torque $HOME/.vscode/extensions/vscode-torque
-```
-
-Additionally, for advanced language server features, the extension needs to be built
-locally (the syntax highlighting does not require this step). The following needs to be run
-everytime the extension is updated:
-
-```
-cd $V8/tools/torque/vscode-torque
-npm install
-```
-
-### Language server
-
-The language server is not built by default. To build the language server manually:
-
-```
-autoninja -C <output dir> torque-language-server
-```
-
-The default directory where the extension looks for the executable is "out/x64.release",
-but the absolute path to the executable can be configured with the `torque.ls.executable`
-setting.
diff --git a/deps/v8/tools/torque/vscode-torque/language-configuration.json b/deps/v8/tools/torque/vscode-torque/language-configuration.json
deleted file mode 100644
index d43e282cc7..0000000000
--- a/deps/v8/tools/torque/vscode-torque/language-configuration.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "comments": {
- "lineComment": "//",
- "blockComment": [ "/*", "*/" ]
- },
- "brackets": [
- ["{", "}"],
- ["[", "]"],
- ["(", ")"]
- ],
- "autoClosingPairs": [
- ["{", "}"],
- ["[", "]"],
- ["(", ")"],
- ["\"", "\""],
- ["'", "'"]
- ],
- "surroundingPairs": [
- ["{", "}"],
- ["[", "]"],
- ["(", ")"],
- ["\"", "\""],
- ["'", "'"]
- ]
-} \ No newline at end of file
diff --git a/deps/v8/tools/torque/vscode-torque/out/extension.js b/deps/v8/tools/torque/vscode-torque/out/extension.js
deleted file mode 100644
index 8cfae5dea4..0000000000
--- a/deps/v8/tools/torque/vscode-torque/out/extension.js
+++ /dev/null
@@ -1,99 +0,0 @@
-"use strict";
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
- return new (P || (P = Promise))(function (resolve, reject) {
- function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
- function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
- function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); }
- step((generator = generator.apply(thisArg, _arguments || [])).next());
- });
-};
-Object.defineProperty(exports, "__esModule", { value: true });
-// The file out/extension.js gets automatically created from
-// src/extension.ts. out/extension.js should not be modified manually.
-const path = require("path");
-const vscode_1 = require("vscode");
-const vscode_languageclient_1 = require("vscode-languageclient");
-const vscode_languageclient_2 = require("vscode-languageclient");
-let client;
-let outputChannel;
-class TorqueErrorHandler {
- constructor(config) {
- this.config = config;
- }
- error(error, message, count) {
- outputChannel.appendLine("TorqueErrorHandler: ");
- outputChannel.append(error.toString());
- outputChannel.append(message.toString());
- return vscode_languageclient_1.ErrorAction.Continue;
- }
- closed() {
- return vscode_languageclient_1.CloseAction.DoNotRestart;
- }
-}
-function activate(context) {
- return __awaiter(this, void 0, void 0, function* () {
- // Create a status bar item that displays the current status of the language server.
- const statusBarItem = vscode_1.window.createStatusBarItem(vscode_1.StatusBarAlignment.Left, 0);
- statusBarItem.text = "torque-ls: <unknown>";
- statusBarItem.show();
- const torqueConfiguration = vscode_1.workspace.getConfiguration("torque.ls");
- let serverExecutable = torqueConfiguration.get("executable");
- if (serverExecutable == null) {
- serverExecutable = path.join(vscode_1.workspace.rootPath, "out", "x64.release", "torque-language-server");
- }
- let serverArguments = [];
- const loggingEnabled = torqueConfiguration.get("logging");
- if (loggingEnabled) {
- const logfile = torqueConfiguration.get("logfile");
- serverArguments = ["-l", logfile];
- }
- const serverOptions = { command: serverExecutable, args: serverArguments };
- outputChannel = vscode_1.window.createOutputChannel("Torque Language Server");
- const clientOptions = {
- diagnosticCollectionName: "torque",
- documentSelector: [{ scheme: "file", language: "torque" }],
- errorHandler: new TorqueErrorHandler(vscode_1.workspace.getConfiguration("torque")),
- initializationFailedHandler: (e) => {
- outputChannel.appendLine(e);
- return false;
- },
- outputChannel,
- revealOutputChannelOn: vscode_languageclient_1.RevealOutputChannelOn.Info,
- };
- // Create the language client and start the client.
- client = new vscode_languageclient_2.LanguageClient("torque", "Torque Language Server", serverOptions, clientOptions);
- client.trace = vscode_languageclient_1.Trace.Verbose;
- // Update the status bar according to the client state.
- client.onDidChangeState((event) => {
- if (event.newState === vscode_languageclient_1.State.Running) {
- statusBarItem.text = "torque-ls: Running";
- }
- else if (event.newState === vscode_languageclient_1.State.Starting) {
- statusBarItem.text = "torque-ls: Starting";
- }
- else {
- statusBarItem.text = "torque-ls: Stopped";
- }
- });
- // This will start client and server.
- client.start();
- yield client.onReady();
- // The server needs an initial list of all the Torque files
- // in the workspace, send them over.
- vscode_1.workspace.findFiles("**/*.tq").then((urls) => {
- client.sendNotification("torque/fileList", { files: urls.map((url) => url.toString()) });
- });
- });
-}
-exports.activate = activate;
-function deactivate() {
- if (!client) {
- return undefined;
- }
- return client.stop();
-}
-exports.deactivate = deactivate;
-//# sourceMappingURL=extension.js.map \ No newline at end of file
diff --git a/deps/v8/tools/torque/vscode-torque/package.json b/deps/v8/tools/torque/vscode-torque/package.json
deleted file mode 100644
index 16c8095f86..0000000000
--- a/deps/v8/tools/torque/vscode-torque/package.json
+++ /dev/null
@@ -1,90 +0,0 @@
-{
- "name": "vscode-torque",
- "displayName": "Torque language support",
- "description": "Syntax highlighting and language server for the V8 Torque programming language",
- "version": "0.0.1",
- "publisher": "szuend",
- "engines": {
- "vscode": "^1.31.0"
- },
- "categories": [
- "Programming Languages"
- ],
- "activationEvents": [
- "onLanguage:torque",
- "workspaceContains:**/*.tq"
- ],
- "main": "./out/extension",
- "contributes": {
- "configuration": {
- "type": "object",
- "title": "Torque",
- "properties": {
- "torque.ls.executable": {
- "type": [
- "string",
- null
- ],
- "default": null,
- "description": "Path to the torque language server executable (absolute)"
- },
- "torque.ls.logging": {
- "type": "boolean",
- "default": false,
- "description": "Enable language server diagnostics output to log file"
- },
- "torque.ls.logfile": {
- "type": "string",
- "default": "torque-log.txt",
- "description": "Target file for language server logging output"
- },
- "torque.trace.server": {
- "type": "string",
- "enum": [
- "off",
- "messages",
- "verbose"
- ],
- "default": "off",
- "description": "Trace the communication with the Torque language server from VSCode."
- }
- }
- },
- "languages": [
- {
- "id": "torque",
- "aliases": [
- "Torque",
- "torque"
- ],
- "extensions": [
- ".tq"
- ],
- "configuration": "./language-configuration.json"
- }
- ],
- "grammars": [
- {
- "language": "torque",
- "scopeName": "source.torque",
- "path": "./syntaxes/torque.tmLanguage.json"
- }
- ]
- },
- "dependencies": {
- "vscode-languageclient": "^5.2.1"
- },
- "devDependencies": {
- "@types/node": "^8.0.0",
- "vscode": "^1.1.21",
- "tslint": "^5.11.0",
- "typescript": "^3.1.3"
- },
- "scripts": {
- "update-vscode": "vscode-install",
- "postinstall": "vscode-install",
- "vscode:prepublish": "npm run update-vscode && npm run compile",
- "compile": "tsc -b",
- "watch": "tsc -b -w"
- }
-}
diff --git a/deps/v8/tools/torque/vscode-torque/src/extension.ts b/deps/v8/tools/torque/vscode-torque/src/extension.ts
deleted file mode 100644
index 7caff1e6dd..0000000000
--- a/deps/v8/tools/torque/vscode-torque/src/extension.ts
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// The file out/extension.js gets automatically created from
-// src/extension.ts. out/extension.js should not be modified manually.
-
-import * as path from "path";
-import { ExtensionContext, OutputChannel, StatusBarAlignment,
- window, workspace, WorkspaceConfiguration } from "vscode";
-import { CloseAction, ErrorAction, ErrorHandler, Message,
- RevealOutputChannelOn, State, Trace } from "vscode-languageclient";
-
-import {
- LanguageClient,
- LanguageClientOptions,
- ServerOptions,
-} from "vscode-languageclient";
-
-let client: LanguageClient;
-let outputChannel: OutputChannel;
-
-class TorqueErrorHandler implements ErrorHandler {
- constructor(readonly config: WorkspaceConfiguration) {}
-
- public error(error: Error, message: Message, count: number): ErrorAction {
- outputChannel.appendLine("TorqueErrorHandler: ");
- outputChannel.append(error.toString());
- outputChannel.append(message.toString());
- return ErrorAction.Continue;
- }
-
- public closed(): CloseAction {
- return CloseAction.DoNotRestart;
- }
-}
-
-export async function activate(context: ExtensionContext) {
- // Create a status bar item that displays the current status of the language server.
- const statusBarItem = window.createStatusBarItem(StatusBarAlignment.Left, 0);
- statusBarItem.text = "torque-ls: <unknown>";
- statusBarItem.show();
-
- const torqueConfiguration = workspace.getConfiguration("torque.ls");
- let serverExecutable: string | null = torqueConfiguration.get("executable");
- if (serverExecutable == null) {
- serverExecutable = path.join(workspace.rootPath, "out", "x64.release", "torque-language-server");
- }
-
- let serverArguments = [];
- const loggingEnabled: boolean = torqueConfiguration.get("logging");
- if (loggingEnabled) {
- const logfile = torqueConfiguration.get("logfile");
- serverArguments = ["-l", logfile];
- }
-
- const serverOptions: ServerOptions = { command: serverExecutable, args: serverArguments };
-
- outputChannel = window.createOutputChannel("Torque Language Server");
-
- const clientOptions: LanguageClientOptions = {
- diagnosticCollectionName: "torque",
- documentSelector: [{ scheme: "file", language: "torque" }],
- errorHandler: new TorqueErrorHandler(workspace.getConfiguration("torque")),
- initializationFailedHandler: (e) => {
- outputChannel.appendLine(e);
- return false;
- },
- outputChannel,
- revealOutputChannelOn: RevealOutputChannelOn.Info,
- };
-
- // Create the language client and start the client.
- client = new LanguageClient("torque", "Torque Language Server", serverOptions, clientOptions);
- client.trace = Trace.Verbose;
-
- // Update the status bar according to the client state.
- client.onDidChangeState((event) => {
- if (event.newState === State.Running) {
- statusBarItem.text = "torque-ls: Running";
- } else if (event.newState === State.Starting) {
- statusBarItem.text = "torque-ls: Starting";
- } else {
- statusBarItem.text = "torque-ls: Stopped";
- }
- });
-
- // This will start client and server.
- client.start();
-
- await client.onReady();
-
- // The server needs an initial list of all the Torque files
- // in the workspace, send them over.
- workspace.findFiles("**/*.tq").then((urls) => {
- client.sendNotification("torque/fileList",
- { files: urls.map((url) => url.toString())});
- });
-}
-
-export function deactivate(): Thenable<void> | undefined {
- if (!client) { return undefined; }
- return client.stop();
-}
diff --git a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
deleted file mode 100644
index dea5be517b..0000000000
--- a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
+++ /dev/null
@@ -1,177 +0,0 @@
-{
- "$schema": "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json",
- "name": "Torque",
- "patterns": [
- {
- "name": "comment.line.double-slash.torque",
- "begin": "//",
- "end": "$"
- },
- {
- "name": "comment.block.torque",
- "begin": "/\\*",
- "end": "\\*/"
- },
- {
- "name": "support.function.torque",
- "match": "\\b(assert|check|debug|unreachable|Cast|Convert|FromConstexpr|UnsafeCast)\\b"
- },
- {
- "name": "constant.other.torque",
- "match": "\\b(true|True|false|False|Undefined|Hole|Null|k[A-Z][A-Za-z0-9]+)\\b"
- },
- {
- "begin": "\\b<(?=[A-Za-z][0-9A-Za-z_|, ]*>)",
- "end": ">",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "begin": "\\b(?=(macro|runtime|builtin)\\b)",
- "end": ";|\\{",
- "patterns": [
- {
- "begin": "\\(",
- "end": "\\)",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "match": "(([A-Za-z][0-9A-Za-z_]*):\\s*)?([A-Za-z][0-9A-Za-z_]*)",
- "captures":{
- "3": {"name": "support.type.torque"}
- }
- }
- ]
- },
- {
- "include": "#common"
- }
- ]
- },
- {
- "begin": "\\b(type)\\b",
- "end": ";",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- }
- },
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "\\b([A-Za-z][0-9A-Za-z_]*)\\b"
- }
- ]
- },
- {
- "name": "keyword.control.torque",
- "match": "#include"
- },
- {
- "include": "#common"
- }
- ],
- "repository": {
- "common": {
- "patterns": [
- {
- "match": "\\b(extends)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "name": "keyword.control.torque",
- "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|label|catch)\\b"
- },
- {
- "name": "keyword.other.torque",
- "match": "\\b(constexpr|macro|builtin|runtime|intrinsic|javascript|implicit|deferred|label|labels|tail|let|generates|weak|extern|const|typeswitch|case|transient|transitioning|operator|namespace|export)\\b"
- },
- {
- "name": "keyword.operator.torque",
- "match": "\\b(=|\\*=)\\b"
- },
- {
- "match": "\\b(class|new)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "match": "\\b(struct)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "name": "string.quoted.double.torque",
- "begin": "\"",
- "end": "\"",
- "patterns": [
- {
- "name": "constant.character.escape.torque",
- "match": "\\\\."
- }
- ]
- },
- {
- "name": "string.quoted.single.torque",
- "begin": "'",
- "end": "'",
- "patterns": [
- {
- "name": "constant.character.escape.torque",
- "match": "\\\\."
- }
- ]
- },
- {
- "begin": ":(\\s*)?",
- "end": "(?=(generates|[^0-9A-Za-z_| ]))",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "name": "support.function.torque",
- "match": "\\b[A-Za-z0-9_]+\\b(?=(<[ ,:A-Za-z0-9_]+>)?\\()"
- }
- ]
- }
- },
- "scopeName": "source.torque"
-}
diff --git a/deps/v8/tools/torque/vscode-torque/tsconfig.json b/deps/v8/tools/torque/vscode-torque/tsconfig.json
deleted file mode 100644
index e1b012eed7..0000000000
--- a/deps/v8/tools/torque/vscode-torque/tsconfig.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "compilerOptions": {
- "module": "commonjs",
- "target": "es6",
- "outDir": "out",
- "rootDir": "src",
- "lib": [ "es6" ],
- "sourceMap": true
- },
- "include": [
- "src"
- ],
- "exclude": [
- "node_modules",
- ".vscode-test"
- ]
-} \ No newline at end of file
diff --git a/deps/v8/tools/torque/vscode-torque/tslint.json b/deps/v8/tools/torque/vscode-torque/tslint.json
deleted file mode 100644
index eaa124644b..0000000000
--- a/deps/v8/tools/torque/vscode-torque/tslint.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "defaultSeverity": "error",
- "extends": [
- "tslint:recommended"
- ],
- "jsRules": {},
- "rules": {
- "indent": [true, "spaces", 2]
- },
- "rulesDirectory": []
-} \ No newline at end of file
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index 7c450f7bc1..2c9c382d02 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -66,6 +66,15 @@ def main():
'try server; see its waterfall for more info')
parser.add_argument('-v', '--verbose', action='store_true',
help='Print debug information')
+ parser.add_argument('-c', '--confidence-level', type=float,
+ help='Repeatedly runs each benchmark until specified '
+ 'confidence level is reached. The value is interpreted '
+ 'as the number of standard deviations from the mean that '
+ 'all values must lie within. Typical values are 1, 2 and '
+ '3 and correspond to 68%%, 95%% and 99.7%% probability '
+ 'that the measured value is within 0.1%% of the true '
+ 'value. Larger values result in more retries and thus '
+ 'longer runtime, but also provide more reliable results.')
for option in sorted(BOTS):
parser.add_argument(
option, dest='bots', action='append_const', const=BOTS[option],
@@ -98,11 +107,14 @@ def main():
cmd = ['git cl try', '-B', 'luci.v8-internal.try']
cmd += ['-b %s' % bot for bot in options.bots]
- if options.revision: cmd += ['-r %s' % options.revision]
+ if options.revision:
+ cmd.append('-r %s' % options.revision)
benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
- cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
+ cmd.append('-p \'testfilter=[%s]\'' % ','.join(benchmarks))
if options.extra_flags:
- cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
+ cmd.append('-p \'extra_flags="%s"\'' % options.extra_flags)
+ if options.confidence_level:
+ cmd.append('-p confidence_level=%f' % options.confidence_level)
if options.verbose:
cmd.append('-vv')
print('Running %s' % ' '.join(cmd))
diff --git a/deps/v8/tools/turbolizer/OWNERS b/deps/v8/tools/turbolizer/OWNERS
index fc52961eff..b7694bd267 100644
--- a/deps/v8/tools/turbolizer/OWNERS
+++ b/deps/v8/tools/turbolizer/OWNERS
@@ -1 +1,2 @@
danno@chromium.org
+sigurds@chromium.org
diff --git a/deps/v8/tools/turbolizer/info-view.html b/deps/v8/tools/turbolizer/info-view.html
index 7c714c3f9f..b523e655aa 100644
--- a/deps/v8/tools/turbolizer/info-view.html
+++ b/deps/v8/tools/turbolizer/info-view.html
@@ -65,11 +65,11 @@
<td>Select effect output node</td>
</tr>
<tr>
- <td>o</td>
+ <td>i</td>
<td>Reveal node's input nodes</td>
</tr>
<tr>
- <td>i</td>
+ <td>o</td>
<td>Reveal node's output nodes</td>
</tr>
<tr>
diff --git a/deps/v8/tools/turbolizer/src/graph-view.ts b/deps/v8/tools/turbolizer/src/graph-view.ts
index c46413c400..07e0d7f1eb 100644
--- a/deps/v8/tools/turbolizer/src/graph-view.ts
+++ b/deps/v8/tools/turbolizer/src/graph-view.ts
@@ -242,17 +242,17 @@ export class GraphView extends PhaseView {
partial(this.layoutAction, this)));
this.toolbox.appendChild(createImgInput("show-all", "show all nodes",
partial(this.showAllAction, this)));
- this.toolbox.appendChild(createImgInput("show-control", "show all nodes",
+ this.toolbox.appendChild(createImgInput("show-control", "show only control nodes",
partial(this.showControlAction, this)));
- this.toolbox.appendChild(createImgInput("toggle-hide-dead", "show only live nodes",
+ this.toolbox.appendChild(createImgInput("toggle-hide-dead", "toggle hide dead nodes",
partial(this.toggleHideDead, this)));
- this.toolbox.appendChild(createImgInput("hide-unselected", "show only live nodes",
+ this.toolbox.appendChild(createImgInput("hide-unselected", "hide unselected",
partial(this.hideUnselectedAction, this)));
- this.toolbox.appendChild(createImgInput("hide-selected", "show only live nodes",
+ this.toolbox.appendChild(createImgInput("hide-selected", "hide selected",
partial(this.hideSelectedAction, this)));
- this.toolbox.appendChild(createImgInput("zoom-selection", "show only live nodes",
+ this.toolbox.appendChild(createImgInput("zoom-selection", "zoom selection",
partial(this.zoomSelectionAction, this)));
- this.toolbox.appendChild(createImgInput("toggle-types", "show only live nodes",
+ this.toolbox.appendChild(createImgInput("toggle-types", "toggle types",
partial(this.toggleTypesAction, this)));
this.phaseName = data.name;
@@ -411,7 +411,11 @@ export class GraphView extends PhaseView {
toggleHideDead(view: GraphView) {
view.state.hideDead = !view.state.hideDead;
- if (view.state.hideDead) view.hideDead();
+ if (view.state.hideDead) {
+ view.hideDead();
+ } else {
+ view.showDead();
+ }
const element = document.getElementById('toggle-hide-dead');
element.classList.toggle('button-input-toggled', view.state.hideDead);
}
@@ -426,6 +430,15 @@ export class GraphView extends PhaseView {
this.updateGraphVisibility();
}
+ showDead() {
+ for (const n of this.graph.nodes()) {
+ if (!n.isLive()) {
+ n.visible = true;
+ }
+ }
+ this.updateGraphVisibility();
+ }
+
hideUnselectedAction(view: GraphView) {
for (const n of view.graph.nodes()) {
if (!view.state.selection.isSelected(n)) {
diff --git a/deps/v8/tools/turbolizer/src/sequence-view.ts b/deps/v8/tools/turbolizer/src/sequence-view.ts
index b0a85f1bd3..e7691c688f 100644
--- a/deps/v8/tools/turbolizer/src/sequence-view.ts
+++ b/deps/v8/tools/turbolizer/src/sequence-view.ts
@@ -107,10 +107,11 @@ export class SequenceView extends TextView {
// Print gap moves.
const gapEl = createElement("div", "gap", "gap");
- instContentsEl.appendChild(gapEl);
+ let hasGaps = false;
for (const gap of instruction.gaps) {
const moves = createElement("div", ["comma-sep-list", "gap-move"]);
for (const move of gap) {
+ hasGaps = true;
const moveEl = createElement("div", "move");
const destinationEl = elementForOperand(move[0], searchInfo);
moveEl.appendChild(destinationEl);
@@ -122,6 +123,9 @@ export class SequenceView extends TextView {
}
gapEl.appendChild(moves);
}
+ if (hasGaps) {
+ instContentsEl.appendChild(gapEl);
+ }
const instEl = createElement("div", "instruction");
instContentsEl.appendChild(instEl);
@@ -137,8 +141,12 @@ export class SequenceView extends TextView {
instEl.appendChild(assignEl);
}
- const text = instruction.opcode + instruction.flags;
- const instLabel = createElement("div", "node-label", text);
+ let text = instruction.opcode + instruction.flags;
+ const instLabel = createElement("div", "node-label", text)
+ if (instruction.opcode == "ArchNop" && instruction.outputs.length == 1 && instruction.outputs[0].tooltip) {
+ instLabel.innerText = instruction.outputs[0].tooltip;
+ }
+
searchInfo.push(text);
view.addHtmlElementForNodeId(text, instLabel);
instEl.appendChild(instLabel);
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index bba3f04e96..31fac89ec8 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -4,7 +4,7 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"exit_code": 1,
"expected": [
@@ -14,7 +14,8 @@
"--test",
"strawberries",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"framework_name": "standard_runner",
"name": "sweet/strawberries",
@@ -22,13 +23,13 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
"target_name": "d8_mocked.py",
"variant": "default",
"variant_flags": []
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"exit_code": 1,
"expected": [
@@ -38,7 +39,8 @@
"--test",
"strawberries",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"framework_name": "standard_runner",
"name": "sweet/strawberries",
@@ -46,13 +48,13 @@
"result": "FAIL",
"run": 2,
"stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
"target_name": "d8_mocked.py",
"variant": "default",
"variant_flags": []
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"exit_code": 1,
"expected": [
@@ -62,7 +64,8 @@
"--test",
"strawberries",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"framework_name": "standard_runner",
"name": "sweet/strawberries",
@@ -70,7 +73,7 @@
"result": "FAIL",
"run": 3,
"stderr": "",
- "stdout": "--test strawberries --random-seed=123 --nohard-abort\n",
+ "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
"target_name": "d8_mocked.py",
"variant": "default",
"variant_flags": []
@@ -78,37 +81,40 @@
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"flags": [
"--test",
"strawberries",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"flags": [
"--test",
"strawberries",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"marked_slow": true,
"name": "sweet/strawberries"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"flags": [
"--test",
"strawberries",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"marked_slow": true,
"name": "sweet/strawberries"
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
index bbbb90f4ac..fd17972798 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -4,7 +4,7 @@
"mode": "release",
"results": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"exit_code": 1,
"expected": [
@@ -13,7 +13,8 @@
"flags": [
"bananaflakes",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"framework_name": "standard_runner",
"name": "sweet/bananaflakes",
@@ -21,13 +22,13 @@
"result": "FAIL",
"run": 1,
"stderr": "",
- "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
"target_name": "d8_mocked.py",
"variant": "default",
"variant_flags": []
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"exit_code": 0,
"expected": [
@@ -36,7 +37,8 @@
"flags": [
"bananaflakes",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"framework_name": "standard_runner",
"name": "sweet/bananaflakes",
@@ -44,7 +46,7 @@
"result": "PASS",
"run": 2,
"stderr": "",
- "stdout": "bananaflakes --random-seed=123 --nohard-abort\n",
+ "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
"target_name": "d8_mocked.py",
"variant": "default",
"variant_flags": []
@@ -52,23 +54,25 @@
],
"slowest_tests": [
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"flags": [
"bananaflakes",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"marked_slow": false,
"name": "sweet/bananaflakes"
},
{
- "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort",
+ "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
"duration": 1,
"flags": [
"bananaflakes",
"--random-seed=123",
- "--nohard-abort"
+ "--nohard-abort",
+ "--testing-d8-test-runner"
],
"marked_slow": false,
"name": "sweet/bananaflakes"
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index 7237000695..346fc9aad5 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -65,7 +65,6 @@ LINT_RULES = """
-build/include_what_you_use
-readability/fn_size
-readability/multiline_comment
--runtime/references
-whitespace/comments
""".split()
@@ -521,7 +520,7 @@ class SourceProcessor(SourceFileProcessor):
print("%s does not end with a single new line." % name)
result = False
# Sanitize flags for fuzzer.
- if ".js" in name and ("mjsunit" in name or "debugger" in name):
+ if (".js" in name or ".mjs" in name) and ("mjsunit" in name or "debugger" in name):
match = FLAGS_LINE.search(contents)
if match:
print("%s Flags should use '-' (not '_')" % name)
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 0165e0f1dd..c6c98c04c3 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -44,33 +44,33 @@ INSTANCE_TYPES = {
80: "ACCESSOR_PAIR_TYPE",
81: "ALIASED_ARGUMENTS_ENTRY_TYPE",
82: "ALLOCATION_MEMENTO_TYPE",
- 83: "ASM_WASM_DATA_TYPE",
- 84: "ASYNC_GENERATOR_REQUEST_TYPE",
- 85: "CLASS_POSITIONS_TYPE",
- 86: "DEBUG_INFO_TYPE",
- 87: "ENUM_CACHE_TYPE",
- 88: "FUNCTION_TEMPLATE_INFO_TYPE",
- 89: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 90: "INTERCEPTOR_INFO_TYPE",
- 91: "INTERPRETER_DATA_TYPE",
- 92: "MODULE_INFO_ENTRY_TYPE",
- 93: "MODULE_TYPE",
- 94: "OBJECT_TEMPLATE_INFO_TYPE",
- 95: "PROMISE_CAPABILITY_TYPE",
- 96: "PROMISE_REACTION_TYPE",
- 97: "PROTOTYPE_INFO_TYPE",
- 98: "SCRIPT_TYPE",
- 99: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
+ 83: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+ 84: "ASM_WASM_DATA_TYPE",
+ 85: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 86: "CLASS_POSITIONS_TYPE",
+ 87: "DEBUG_INFO_TYPE",
+ 88: "ENUM_CACHE_TYPE",
+ 89: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 90: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 91: "INTERCEPTOR_INFO_TYPE",
+ 92: "INTERPRETER_DATA_TYPE",
+ 93: "OBJECT_TEMPLATE_INFO_TYPE",
+ 94: "PROMISE_CAPABILITY_TYPE",
+ 95: "PROMISE_REACTION_TYPE",
+ 96: "PROTOTYPE_INFO_TYPE",
+ 97: "SCRIPT_TYPE",
+ 98: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
+ 99: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
100: "STACK_FRAME_INFO_TYPE",
101: "STACK_TRACE_FRAME_TYPE",
102: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
103: "TUPLE2_TYPE",
104: "TUPLE3_TYPE",
- 105: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
- 106: "WASM_CAPI_FUNCTION_DATA_TYPE",
- 107: "WASM_DEBUG_INFO_TYPE",
- 108: "WASM_EXCEPTION_TAG_TYPE",
- 109: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 105: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 106: "WASM_DEBUG_INFO_TYPE",
+ 107: "WASM_EXCEPTION_TAG_TYPE",
+ 108: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 109: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
110: "WASM_JS_FUNCTION_DATA_TYPE",
111: "CALLABLE_TASK_TYPE",
112: "CALLBACK_TASK_TYPE",
@@ -78,60 +78,66 @@ INSTANCE_TYPES = {
114: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
115: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
116: "FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE",
- 117: "ALLOCATION_SITE_TYPE",
- 118: "EMBEDDER_DATA_ARRAY_TYPE",
- 119: "FIXED_ARRAY_TYPE",
- 120: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 121: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 122: "HASH_TABLE_TYPE",
- 123: "ORDERED_HASH_MAP_TYPE",
- 124: "ORDERED_HASH_SET_TYPE",
- 125: "ORDERED_NAME_DICTIONARY_TYPE",
- 126: "NAME_DICTIONARY_TYPE",
- 127: "GLOBAL_DICTIONARY_TYPE",
- 128: "NUMBER_DICTIONARY_TYPE",
- 129: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 130: "STRING_TABLE_TYPE",
- 131: "EPHEMERON_HASH_TABLE_TYPE",
- 132: "SCOPE_INFO_TYPE",
- 133: "SCRIPT_CONTEXT_TABLE_TYPE",
- 134: "AWAIT_CONTEXT_TYPE",
- 135: "BLOCK_CONTEXT_TYPE",
- 136: "CATCH_CONTEXT_TYPE",
- 137: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 138: "EVAL_CONTEXT_TYPE",
- 139: "FUNCTION_CONTEXT_TYPE",
- 140: "MODULE_CONTEXT_TYPE",
- 141: "NATIVE_CONTEXT_TYPE",
- 142: "SCRIPT_CONTEXT_TYPE",
- 143: "WITH_CONTEXT_TYPE",
- 144: "WEAK_FIXED_ARRAY_TYPE",
- 145: "TRANSITION_ARRAY_TYPE",
- 146: "CALL_HANDLER_INFO_TYPE",
- 147: "CELL_TYPE",
- 148: "CODE_DATA_CONTAINER_TYPE",
- 149: "DESCRIPTOR_ARRAY_TYPE",
- 150: "FEEDBACK_CELL_TYPE",
- 151: "FEEDBACK_VECTOR_TYPE",
- 152: "LOAD_HANDLER_TYPE",
- 153: "PREPARSE_DATA_TYPE",
- 154: "PROPERTY_ARRAY_TYPE",
- 155: "PROPERTY_CELL_TYPE",
- 156: "SHARED_FUNCTION_INFO_TYPE",
- 157: "SMALL_ORDERED_HASH_MAP_TYPE",
- 158: "SMALL_ORDERED_HASH_SET_TYPE",
- 159: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 160: "STORE_HANDLER_TYPE",
- 161: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 162: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 163: "WEAK_ARRAY_LIST_TYPE",
- 164: "WEAK_CELL_TYPE",
+ 117: "INTERNAL_CLASS_TYPE",
+ 118: "SMI_PAIR_TYPE",
+ 119: "SMI_BOX_TYPE",
+ 120: "SORT_STATE_TYPE",
+ 121: "SOURCE_TEXT_MODULE_TYPE",
+ 122: "SYNTHETIC_MODULE_TYPE",
+ 123: "ALLOCATION_SITE_TYPE",
+ 124: "EMBEDDER_DATA_ARRAY_TYPE",
+ 125: "FIXED_ARRAY_TYPE",
+ 126: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 128: "HASH_TABLE_TYPE",
+ 129: "ORDERED_HASH_MAP_TYPE",
+ 130: "ORDERED_HASH_SET_TYPE",
+ 131: "ORDERED_NAME_DICTIONARY_TYPE",
+ 132: "NAME_DICTIONARY_TYPE",
+ 133: "GLOBAL_DICTIONARY_TYPE",
+ 134: "NUMBER_DICTIONARY_TYPE",
+ 135: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 136: "STRING_TABLE_TYPE",
+ 137: "EPHEMERON_HASH_TABLE_TYPE",
+ 138: "SCOPE_INFO_TYPE",
+ 139: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 140: "AWAIT_CONTEXT_TYPE",
+ 141: "BLOCK_CONTEXT_TYPE",
+ 142: "CATCH_CONTEXT_TYPE",
+ 143: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 144: "EVAL_CONTEXT_TYPE",
+ 145: "FUNCTION_CONTEXT_TYPE",
+ 146: "MODULE_CONTEXT_TYPE",
+ 147: "NATIVE_CONTEXT_TYPE",
+ 148: "SCRIPT_CONTEXT_TYPE",
+ 149: "WITH_CONTEXT_TYPE",
+ 150: "WEAK_FIXED_ARRAY_TYPE",
+ 151: "TRANSITION_ARRAY_TYPE",
+ 152: "CALL_HANDLER_INFO_TYPE",
+ 153: "CELL_TYPE",
+ 154: "CODE_DATA_CONTAINER_TYPE",
+ 155: "DESCRIPTOR_ARRAY_TYPE",
+ 156: "FEEDBACK_CELL_TYPE",
+ 157: "FEEDBACK_VECTOR_TYPE",
+ 158: "LOAD_HANDLER_TYPE",
+ 159: "PREPARSE_DATA_TYPE",
+ 160: "PROPERTY_ARRAY_TYPE",
+ 161: "PROPERTY_CELL_TYPE",
+ 162: "SHARED_FUNCTION_INFO_TYPE",
+ 163: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 164: "SMALL_ORDERED_HASH_SET_TYPE",
+ 165: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 166: "STORE_HANDLER_TYPE",
+ 167: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 168: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 169: "WEAK_ARRAY_LIST_TYPE",
+ 170: "WEAK_CELL_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
1027: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
- 1041: "JS_VALUE_TYPE",
+ 1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1056: "JS_API_OBJECT_TYPE",
1057: "JS_OBJECT_TYPE",
1058: "JS_ARGUMENTS_TYPE",
@@ -186,232 +192,237 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x00139): (74, "FreeSpaceMap"),
- ("read_only_space", 0x00189): (68, "MetaMap"),
- ("read_only_space", 0x00209): (67, "NullMap"),
- ("read_only_space", 0x00271): (149, "DescriptorArrayMap"),
- ("read_only_space", 0x002d1): (144, "WeakFixedArrayMap"),
- ("read_only_space", 0x00321): (77, "OnePointerFillerMap"),
- ("read_only_space", 0x00371): (77, "TwoPointerFillerMap"),
- ("read_only_space", 0x003f1): (67, "UninitializedMap"),
- ("read_only_space", 0x00461): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x00501): (67, "UndefinedMap"),
- ("read_only_space", 0x00561): (65, "HeapNumberMap"),
- ("read_only_space", 0x005e1): (67, "TheHoleMap"),
- ("read_only_space", 0x00689): (67, "BooleanMap"),
- ("read_only_space", 0x00761): (72, "ByteArrayMap"),
- ("read_only_space", 0x007b1): (119, "FixedArrayMap"),
- ("read_only_space", 0x00801): (119, "FixedCOWArrayMap"),
- ("read_only_space", 0x00851): (122, "HashTableMap"),
- ("read_only_space", 0x008a1): (64, "SymbolMap"),
- ("read_only_space", 0x008f1): (40, "OneByteStringMap"),
- ("read_only_space", 0x00941): (132, "ScopeInfoMap"),
- ("read_only_space", 0x00991): (156, "SharedFunctionInfoMap"),
- ("read_only_space", 0x009e1): (69, "CodeMap"),
- ("read_only_space", 0x00a31): (139, "FunctionContextMap"),
- ("read_only_space", 0x00a81): (147, "CellMap"),
- ("read_only_space", 0x00ad1): (155, "GlobalPropertyCellMap"),
- ("read_only_space", 0x00b21): (71, "ForeignMap"),
- ("read_only_space", 0x00b71): (145, "TransitionArrayMap"),
- ("read_only_space", 0x00bc1): (151, "FeedbackVectorMap"),
- ("read_only_space", 0x00c61): (67, "ArgumentsMarkerMap"),
- ("read_only_space", 0x00d01): (67, "ExceptionMap"),
- ("read_only_space", 0x00da1): (67, "TerminationExceptionMap"),
- ("read_only_space", 0x00e49): (67, "OptimizedOutMap"),
- ("read_only_space", 0x00ee9): (67, "StaleRegisterMap"),
- ("read_only_space", 0x00f59): (141, "NativeContextMap"),
- ("read_only_space", 0x00fa9): (140, "ModuleContextMap"),
- ("read_only_space", 0x00ff9): (138, "EvalContextMap"),
- ("read_only_space", 0x01049): (142, "ScriptContextMap"),
- ("read_only_space", 0x01099): (134, "AwaitContextMap"),
- ("read_only_space", 0x010e9): (135, "BlockContextMap"),
- ("read_only_space", 0x01139): (136, "CatchContextMap"),
- ("read_only_space", 0x01189): (143, "WithContextMap"),
- ("read_only_space", 0x011d9): (137, "DebugEvaluateContextMap"),
- ("read_only_space", 0x01229): (133, "ScriptContextTableMap"),
- ("read_only_space", 0x01279): (121, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x012c9): (76, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x01319): (119, "ArrayListMap"),
- ("read_only_space", 0x01369): (66, "BigIntMap"),
- ("read_only_space", 0x013b9): (120, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x01409): (73, "BytecodeArrayMap"),
- ("read_only_space", 0x01459): (148, "CodeDataContainerMap"),
- ("read_only_space", 0x014a9): (75, "FixedDoubleArrayMap"),
- ("read_only_space", 0x014f9): (127, "GlobalDictionaryMap"),
- ("read_only_space", 0x01549): (150, "ManyClosuresCellMap"),
- ("read_only_space", 0x01599): (119, "ModuleInfoMap"),
- ("read_only_space", 0x015e9): (70, "MutableHeapNumberMap"),
- ("read_only_space", 0x01639): (126, "NameDictionaryMap"),
- ("read_only_space", 0x01689): (150, "NoClosuresCellMap"),
- ("read_only_space", 0x016d9): (128, "NumberDictionaryMap"),
- ("read_only_space", 0x01729): (150, "OneClosureCellMap"),
- ("read_only_space", 0x01779): (123, "OrderedHashMapMap"),
- ("read_only_space", 0x017c9): (124, "OrderedHashSetMap"),
- ("read_only_space", 0x01819): (125, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x01869): (153, "PreparseDataMap"),
- ("read_only_space", 0x018b9): (154, "PropertyArrayMap"),
- ("read_only_space", 0x01909): (146, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x01959): (146, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x019a9): (146, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x019f9): (129, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x01a49): (119, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x01a99): (157, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x01ae9): (158, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x01b39): (159, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x01b89): (130, "StringTableMap"),
- ("read_only_space", 0x01bd9): (161, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x01c29): (162, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x01c79): (163, "WeakArrayListMap"),
- ("read_only_space", 0x01cc9): (131, "EphemeronHashTableMap"),
- ("read_only_space", 0x01d19): (118, "EmbedderDataArrayMap"),
- ("read_only_space", 0x01d69): (164, "WeakCellMap"),
- ("read_only_space", 0x01db9): (58, "NativeSourceStringMap"),
- ("read_only_space", 0x01e09): (32, "StringMap"),
- ("read_only_space", 0x01e59): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x01ea9): (33, "ConsStringMap"),
- ("read_only_space", 0x01ef9): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x01f49): (37, "ThinStringMap"),
- ("read_only_space", 0x01f99): (35, "SlicedStringMap"),
- ("read_only_space", 0x01fe9): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x02039): (34, "ExternalStringMap"),
- ("read_only_space", 0x02089): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x020d9): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x02129): (0, "InternalizedStringMap"),
- ("read_only_space", 0x02179): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x021c9): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x02219): (18, "UncachedExternalInternalizedStringMap"),
- ("read_only_space", 0x02269): (26, "UncachedExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x022b9): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x02309): (67, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x02371): (87, "EnumCacheMap"),
- ("read_only_space", 0x02411): (105, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x02601): (90, "InterceptorInfoMap"),
- ("read_only_space", 0x04d99): (78, "AccessCheckInfoMap"),
- ("read_only_space", 0x04de9): (79, "AccessorInfoMap"),
- ("read_only_space", 0x04e39): (80, "AccessorPairMap"),
- ("read_only_space", 0x04e89): (81, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x04ed9): (82, "AllocationMementoMap"),
- ("read_only_space", 0x04f29): (83, "AsmWasmDataMap"),
- ("read_only_space", 0x04f79): (84, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x04fc9): (85, "ClassPositionsMap"),
- ("read_only_space", 0x05019): (86, "DebugInfoMap"),
- ("read_only_space", 0x05069): (88, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x050b9): (89, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x05109): (91, "InterpreterDataMap"),
- ("read_only_space", 0x05159): (92, "ModuleInfoEntryMap"),
- ("read_only_space", 0x051a9): (93, "ModuleMap"),
- ("read_only_space", 0x051f9): (94, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x05249): (95, "PromiseCapabilityMap"),
- ("read_only_space", 0x05299): (96, "PromiseReactionMap"),
- ("read_only_space", 0x052e9): (97, "PrototypeInfoMap"),
- ("read_only_space", 0x05339): (98, "ScriptMap"),
- ("read_only_space", 0x05389): (99, "SourcePositionTableWithFrameCacheMap"),
- ("read_only_space", 0x053d9): (100, "StackFrameInfoMap"),
- ("read_only_space", 0x05429): (101, "StackTraceFrameMap"),
- ("read_only_space", 0x05479): (102, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x054c9): (103, "Tuple2Map"),
- ("read_only_space", 0x05519): (104, "Tuple3Map"),
- ("read_only_space", 0x05569): (106, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x055b9): (107, "WasmDebugInfoMap"),
- ("read_only_space", 0x05609): (108, "WasmExceptionTagMap"),
- ("read_only_space", 0x05659): (109, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x056a9): (110, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x056f9): (111, "CallableTaskMap"),
- ("read_only_space", 0x05749): (112, "CallbackTaskMap"),
- ("read_only_space", 0x05799): (113, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x057e9): (114, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x05839): (115, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x05889): (116, "FinalizationGroupCleanupJobTaskMap"),
- ("read_only_space", 0x058d9): (117, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05929): (117, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05979): (152, "LoadHandler1Map"),
- ("read_only_space", 0x059c9): (152, "LoadHandler2Map"),
- ("read_only_space", 0x05a19): (152, "LoadHandler3Map"),
- ("read_only_space", 0x05a69): (160, "StoreHandler0Map"),
- ("read_only_space", 0x05ab9): (160, "StoreHandler1Map"),
- ("read_only_space", 0x05b09): (160, "StoreHandler2Map"),
- ("read_only_space", 0x05b59): (160, "StoreHandler3Map"),
- ("map_space", 0x00139): (1057, "ExternalMap"),
- ("map_space", 0x00189): (1073, "JSMessageObjectMap"),
+ ("read_only_space", 0x00111): (74, "FreeSpaceMap"),
+ ("read_only_space", 0x00161): (68, "MetaMap"),
+ ("read_only_space", 0x001e1): (67, "NullMap"),
+ ("read_only_space", 0x00249): (155, "DescriptorArrayMap"),
+ ("read_only_space", 0x002a9): (150, "WeakFixedArrayMap"),
+ ("read_only_space", 0x002f9): (77, "OnePointerFillerMap"),
+ ("read_only_space", 0x00349): (77, "TwoPointerFillerMap"),
+ ("read_only_space", 0x003c9): (67, "UninitializedMap"),
+ ("read_only_space", 0x00439): (8, "OneByteInternalizedStringMap"),
+ ("read_only_space", 0x004d9): (67, "UndefinedMap"),
+ ("read_only_space", 0x00539): (65, "HeapNumberMap"),
+ ("read_only_space", 0x005b9): (67, "TheHoleMap"),
+ ("read_only_space", 0x00661): (67, "BooleanMap"),
+ ("read_only_space", 0x00739): (72, "ByteArrayMap"),
+ ("read_only_space", 0x00789): (125, "FixedArrayMap"),
+ ("read_only_space", 0x007d9): (125, "FixedCOWArrayMap"),
+ ("read_only_space", 0x00829): (128, "HashTableMap"),
+ ("read_only_space", 0x00879): (64, "SymbolMap"),
+ ("read_only_space", 0x008c9): (40, "OneByteStringMap"),
+ ("read_only_space", 0x00919): (138, "ScopeInfoMap"),
+ ("read_only_space", 0x00969): (162, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x009b9): (69, "CodeMap"),
+ ("read_only_space", 0x00a09): (145, "FunctionContextMap"),
+ ("read_only_space", 0x00a59): (153, "CellMap"),
+ ("read_only_space", 0x00aa9): (161, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x00af9): (71, "ForeignMap"),
+ ("read_only_space", 0x00b49): (151, "TransitionArrayMap"),
+ ("read_only_space", 0x00b99): (157, "FeedbackVectorMap"),
+ ("read_only_space", 0x00c39): (67, "ArgumentsMarkerMap"),
+ ("read_only_space", 0x00cd9): (67, "ExceptionMap"),
+ ("read_only_space", 0x00d79): (67, "TerminationExceptionMap"),
+ ("read_only_space", 0x00e21): (67, "OptimizedOutMap"),
+ ("read_only_space", 0x00ec1): (67, "StaleRegisterMap"),
+ ("read_only_space", 0x00f31): (147, "NativeContextMap"),
+ ("read_only_space", 0x00f81): (146, "ModuleContextMap"),
+ ("read_only_space", 0x00fd1): (144, "EvalContextMap"),
+ ("read_only_space", 0x01021): (148, "ScriptContextMap"),
+ ("read_only_space", 0x01071): (140, "AwaitContextMap"),
+ ("read_only_space", 0x010c1): (141, "BlockContextMap"),
+ ("read_only_space", 0x01111): (142, "CatchContextMap"),
+ ("read_only_space", 0x01161): (149, "WithContextMap"),
+ ("read_only_space", 0x011b1): (143, "DebugEvaluateContextMap"),
+ ("read_only_space", 0x01201): (139, "ScriptContextTableMap"),
+ ("read_only_space", 0x01251): (127, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x012a1): (76, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x012f1): (125, "ArrayListMap"),
+ ("read_only_space", 0x01341): (66, "BigIntMap"),
+ ("read_only_space", 0x01391): (126, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x013e1): (73, "BytecodeArrayMap"),
+ ("read_only_space", 0x01431): (154, "CodeDataContainerMap"),
+ ("read_only_space", 0x01481): (75, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x014d1): (133, "GlobalDictionaryMap"),
+ ("read_only_space", 0x01521): (156, "ManyClosuresCellMap"),
+ ("read_only_space", 0x01571): (125, "ModuleInfoMap"),
+ ("read_only_space", 0x015c1): (70, "MutableHeapNumberMap"),
+ ("read_only_space", 0x01611): (132, "NameDictionaryMap"),
+ ("read_only_space", 0x01661): (156, "NoClosuresCellMap"),
+ ("read_only_space", 0x016b1): (134, "NumberDictionaryMap"),
+ ("read_only_space", 0x01701): (156, "OneClosureCellMap"),
+ ("read_only_space", 0x01751): (129, "OrderedHashMapMap"),
+ ("read_only_space", 0x017a1): (130, "OrderedHashSetMap"),
+ ("read_only_space", 0x017f1): (131, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x01841): (159, "PreparseDataMap"),
+ ("read_only_space", 0x01891): (160, "PropertyArrayMap"),
+ ("read_only_space", 0x018e1): (152, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x01931): (152, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x01981): (152, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x019d1): (135, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x01a21): (125, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x01a71): (163, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x01ac1): (164, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x01b11): (165, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x01b61): (121, "SourceTextModuleMap"),
+ ("read_only_space", 0x01bb1): (136, "StringTableMap"),
+ ("read_only_space", 0x01c01): (122, "SyntheticModuleMap"),
+ ("read_only_space", 0x01c51): (167, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x01ca1): (168, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x01cf1): (169, "WeakArrayListMap"),
+ ("read_only_space", 0x01d41): (137, "EphemeronHashTableMap"),
+ ("read_only_space", 0x01d91): (124, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x01de1): (170, "WeakCellMap"),
+ ("read_only_space", 0x01e31): (58, "NativeSourceStringMap"),
+ ("read_only_space", 0x01e81): (32, "StringMap"),
+ ("read_only_space", 0x01ed1): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x01f21): (33, "ConsStringMap"),
+ ("read_only_space", 0x01f71): (45, "ThinOneByteStringMap"),
+ ("read_only_space", 0x01fc1): (37, "ThinStringMap"),
+ ("read_only_space", 0x02011): (35, "SlicedStringMap"),
+ ("read_only_space", 0x02061): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x020b1): (34, "ExternalStringMap"),
+ ("read_only_space", 0x02101): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x02151): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x021a1): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x021f1): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x02241): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x02291): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x022e1): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x02331): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x02381): (67, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x023e9): (88, "EnumCacheMap"),
+ ("read_only_space", 0x02489): (83, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02679): (91, "InterceptorInfoMap"),
+ ("read_only_space", 0x04e59): (78, "AccessCheckInfoMap"),
+ ("read_only_space", 0x04ea9): (79, "AccessorInfoMap"),
+ ("read_only_space", 0x04ef9): (80, "AccessorPairMap"),
+ ("read_only_space", 0x04f49): (81, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x04f99): (82, "AllocationMementoMap"),
+ ("read_only_space", 0x04fe9): (84, "AsmWasmDataMap"),
+ ("read_only_space", 0x05039): (85, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x05089): (86, "ClassPositionsMap"),
+ ("read_only_space", 0x050d9): (87, "DebugInfoMap"),
+ ("read_only_space", 0x05129): (89, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x05179): (90, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x051c9): (92, "InterpreterDataMap"),
+ ("read_only_space", 0x05219): (93, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x05269): (94, "PromiseCapabilityMap"),
+ ("read_only_space", 0x052b9): (95, "PromiseReactionMap"),
+ ("read_only_space", 0x05309): (96, "PrototypeInfoMap"),
+ ("read_only_space", 0x05359): (97, "ScriptMap"),
+ ("read_only_space", 0x053a9): (98, "SourcePositionTableWithFrameCacheMap"),
+ ("read_only_space", 0x053f9): (99, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x05449): (100, "StackFrameInfoMap"),
+ ("read_only_space", 0x05499): (101, "StackTraceFrameMap"),
+ ("read_only_space", 0x054e9): (102, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x05539): (103, "Tuple2Map"),
+ ("read_only_space", 0x05589): (104, "Tuple3Map"),
+ ("read_only_space", 0x055d9): (105, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x05629): (106, "WasmDebugInfoMap"),
+ ("read_only_space", 0x05679): (107, "WasmExceptionTagMap"),
+ ("read_only_space", 0x056c9): (108, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x05719): (109, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x05769): (110, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x057b9): (111, "CallableTaskMap"),
+ ("read_only_space", 0x05809): (112, "CallbackTaskMap"),
+ ("read_only_space", 0x05859): (113, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x058a9): (114, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x058f9): (115, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x05949): (116, "FinalizationGroupCleanupJobTaskMap"),
+ ("read_only_space", 0x05999): (117, "InternalClassMap"),
+ ("read_only_space", 0x059e9): (118, "SmiPairMap"),
+ ("read_only_space", 0x05a39): (119, "SmiBoxMap"),
+ ("read_only_space", 0x05a89): (120, "SortStateMap"),
+ ("read_only_space", 0x05ad9): (123, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05b29): (123, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05b79): (158, "LoadHandler1Map"),
+ ("read_only_space", 0x05bc9): (158, "LoadHandler2Map"),
+ ("read_only_space", 0x05c19): (158, "LoadHandler3Map"),
+ ("read_only_space", 0x05c69): (166, "StoreHandler0Map"),
+ ("read_only_space", 0x05cb9): (166, "StoreHandler1Map"),
+ ("read_only_space", 0x05d09): (166, "StoreHandler2Map"),
+ ("read_only_space", 0x05d59): (166, "StoreHandler3Map"),
+ ("map_space", 0x00111): (1057, "ExternalMap"),
+ ("map_space", 0x00161): (1073, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("read_only_space", 0x001d9): "NullValue",
- ("read_only_space", 0x00259): "EmptyDescriptorArray",
- ("read_only_space", 0x002c1): "EmptyWeakFixedArray",
- ("read_only_space", 0x003c1): "UninitializedValue",
- ("read_only_space", 0x004d1): "UndefinedValue",
- ("read_only_space", 0x00551): "NanValue",
- ("read_only_space", 0x005b1): "TheHoleValue",
- ("read_only_space", 0x00649): "HoleNanValue",
- ("read_only_space", 0x00659): "TrueValue",
- ("read_only_space", 0x00709): "FalseValue",
- ("read_only_space", 0x00751): "empty_string",
- ("read_only_space", 0x00c11): "EmptyScopeInfo",
- ("read_only_space", 0x00c21): "EmptyFixedArray",
- ("read_only_space", 0x00c31): "ArgumentsMarker",
- ("read_only_space", 0x00cd1): "Exception",
- ("read_only_space", 0x00d71): "TerminationException",
- ("read_only_space", 0x00e19): "OptimizedOut",
- ("read_only_space", 0x00eb9): "StaleRegister",
- ("read_only_space", 0x02359): "EmptyEnumCache",
- ("read_only_space", 0x023c1): "EmptyPropertyArray",
- ("read_only_space", 0x023d1): "EmptyByteArray",
- ("read_only_space", 0x023e1): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x023f9): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x02461): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x02471): "EmptySloppyArgumentsElements",
- ("read_only_space", 0x02491): "EmptySlowElementDictionary",
- ("read_only_space", 0x024d9): "EmptyOrderedHashMap",
- ("read_only_space", 0x02501): "EmptyOrderedHashSet",
- ("read_only_space", 0x02529): "EmptyFeedbackMetadata",
- ("read_only_space", 0x02539): "EmptyPropertyCell",
- ("read_only_space", 0x02561): "EmptyPropertyDictionary",
- ("read_only_space", 0x025b1): "NoOpInterceptorInfo",
- ("read_only_space", 0x02651): "EmptyWeakArrayList",
- ("read_only_space", 0x02669): "InfinityValue",
- ("read_only_space", 0x02679): "MinusZeroValue",
- ("read_only_space", 0x02689): "MinusInfinityValue",
- ("read_only_space", 0x02699): "SelfReferenceMarker",
- ("read_only_space", 0x026f1): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x02709): "TrampolineTrivialCodeDataContainer",
- ("read_only_space", 0x02721): "TrampolinePromiseRejectionCodeDataContainer",
- ("read_only_space", 0x02739): "HashSeed",
- ("old_space", 0x00139): "ArgumentsIteratorAccessor",
- ("old_space", 0x001a9): "ArrayLengthAccessor",
- ("old_space", 0x00219): "BoundFunctionLengthAccessor",
- ("old_space", 0x00289): "BoundFunctionNameAccessor",
- ("old_space", 0x002f9): "ErrorStackAccessor",
- ("old_space", 0x00369): "FunctionArgumentsAccessor",
- ("old_space", 0x003d9): "FunctionCallerAccessor",
- ("old_space", 0x00449): "FunctionNameAccessor",
- ("old_space", 0x004b9): "FunctionLengthAccessor",
- ("old_space", 0x00529): "FunctionPrototypeAccessor",
- ("old_space", 0x00599): "StringLengthAccessor",
- ("old_space", 0x00609): "InvalidPrototypeValidityCell",
- ("old_space", 0x00619): "EmptyScript",
- ("old_space", 0x00699): "ManyClosuresCell",
- ("old_space", 0x006b1): "ArrayConstructorProtector",
- ("old_space", 0x006c1): "NoElementsProtector",
- ("old_space", 0x006e9): "IsConcatSpreadableProtector",
- ("old_space", 0x006f9): "ArraySpeciesProtector",
- ("old_space", 0x00721): "TypedArraySpeciesProtector",
- ("old_space", 0x00749): "RegExpSpeciesProtector",
- ("old_space", 0x00771): "PromiseSpeciesProtector",
- ("old_space", 0x00799): "StringLengthProtector",
- ("old_space", 0x007a9): "ArrayIteratorProtector",
- ("old_space", 0x007d1): "ArrayBufferDetachingProtector",
- ("old_space", 0x007f9): "PromiseHookProtector",
- ("old_space", 0x00821): "PromiseResolveProtector",
- ("old_space", 0x00831): "MapIteratorProtector",
- ("old_space", 0x00859): "PromiseThenProtector",
- ("old_space", 0x00881): "SetIteratorProtector",
- ("old_space", 0x008a9): "StringIteratorProtector",
- ("old_space", 0x008d1): "SingleCharacterStringCache",
- ("old_space", 0x010e1): "StringSplitCache",
- ("old_space", 0x018f1): "RegExpMultipleCache",
- ("old_space", 0x02101): "BuiltinsConstantsTable",
+ ("read_only_space", 0x001b1): "NullValue",
+ ("read_only_space", 0x00231): "EmptyDescriptorArray",
+ ("read_only_space", 0x00299): "EmptyWeakFixedArray",
+ ("read_only_space", 0x00399): "UninitializedValue",
+ ("read_only_space", 0x004a9): "UndefinedValue",
+ ("read_only_space", 0x00529): "NanValue",
+ ("read_only_space", 0x00589): "TheHoleValue",
+ ("read_only_space", 0x00621): "HoleNanValue",
+ ("read_only_space", 0x00631): "TrueValue",
+ ("read_only_space", 0x006e1): "FalseValue",
+ ("read_only_space", 0x00729): "empty_string",
+ ("read_only_space", 0x00be9): "EmptyScopeInfo",
+ ("read_only_space", 0x00bf9): "EmptyFixedArray",
+ ("read_only_space", 0x00c09): "ArgumentsMarker",
+ ("read_only_space", 0x00ca9): "Exception",
+ ("read_only_space", 0x00d49): "TerminationException",
+ ("read_only_space", 0x00df1): "OptimizedOut",
+ ("read_only_space", 0x00e91): "StaleRegister",
+ ("read_only_space", 0x023d1): "EmptyEnumCache",
+ ("read_only_space", 0x02439): "EmptyPropertyArray",
+ ("read_only_space", 0x02449): "EmptyByteArray",
+ ("read_only_space", 0x02459): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x02471): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x024d9): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x024e9): "EmptySloppyArgumentsElements",
+ ("read_only_space", 0x02509): "EmptySlowElementDictionary",
+ ("read_only_space", 0x02551): "EmptyOrderedHashMap",
+ ("read_only_space", 0x02579): "EmptyOrderedHashSet",
+ ("read_only_space", 0x025a1): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x025b1): "EmptyPropertyCell",
+ ("read_only_space", 0x025d9): "EmptyPropertyDictionary",
+ ("read_only_space", 0x02629): "NoOpInterceptorInfo",
+ ("read_only_space", 0x026c9): "EmptyWeakArrayList",
+ ("read_only_space", 0x026e1): "InfinityValue",
+ ("read_only_space", 0x026f1): "MinusZeroValue",
+ ("read_only_space", 0x02701): "MinusInfinityValue",
+ ("read_only_space", 0x02711): "SelfReferenceMarker",
+ ("read_only_space", 0x02769): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x02781): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x02799): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x027b1): "HashSeed",
+ ("old_space", 0x00111): "ArgumentsIteratorAccessor",
+ ("old_space", 0x00181): "ArrayLengthAccessor",
+ ("old_space", 0x001f1): "BoundFunctionLengthAccessor",
+ ("old_space", 0x00261): "BoundFunctionNameAccessor",
+ ("old_space", 0x002d1): "ErrorStackAccessor",
+ ("old_space", 0x00341): "FunctionArgumentsAccessor",
+ ("old_space", 0x003b1): "FunctionCallerAccessor",
+ ("old_space", 0x00421): "FunctionNameAccessor",
+ ("old_space", 0x00491): "FunctionLengthAccessor",
+ ("old_space", 0x00501): "FunctionPrototypeAccessor",
+ ("old_space", 0x00571): "StringLengthAccessor",
+ ("old_space", 0x005e1): "InvalidPrototypeValidityCell",
+ ("old_space", 0x005f1): "EmptyScript",
+ ("old_space", 0x00671): "ManyClosuresCell",
+ ("old_space", 0x00689): "ArrayConstructorProtector",
+ ("old_space", 0x00699): "NoElementsProtector",
+ ("old_space", 0x006c1): "IsConcatSpreadableProtector",
+ ("old_space", 0x006d1): "ArraySpeciesProtector",
+ ("old_space", 0x006f9): "TypedArraySpeciesProtector",
+ ("old_space", 0x00721): "PromiseSpeciesProtector",
+ ("old_space", 0x00749): "StringLengthProtector",
+ ("old_space", 0x00759): "ArrayIteratorProtector",
+ ("old_space", 0x00781): "ArrayBufferDetachingProtector",
+ ("old_space", 0x007a9): "PromiseHookProtector",
+ ("old_space", 0x007d1): "PromiseResolveProtector",
+ ("old_space", 0x007e1): "MapIteratorProtector",
+ ("old_space", 0x00809): "PromiseThenProtector",
+ ("old_space", 0x00831): "SetIteratorProtector",
+ ("old_space", 0x00859): "StringIteratorProtector",
+ ("old_space", 0x00881): "SingleCharacterStringCache",
+ ("old_space", 0x01091): "StringSplitCache",
+ ("old_space", 0x018a1): "RegExpMultipleCache",
+ ("old_space", 0x020b1): "BuiltinsConstantsTable",
}
# List of known V8 Frame Markers.
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
index 9189c814b9..d029ffe604 100755
--- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -13,43 +13,101 @@ set -u
# non-zero status, or zero if no command exited with a non-zero status
set -o pipefail
+log_and_run() {
+ echo ">>" $*
+ if ! $*; then
+ echo "sub-command failed: $*"
+ exit
+ fi
+}
+
+###############################################################################
+# Setup directories.
+###############################################################################
+
TOOLS_WASM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
V8_DIR="${TOOLS_WASM_DIR}/../.."
SPEC_TEST_DIR=${V8_DIR}/test/wasm-spec-tests
+TMP_DIR=${SPEC_TEST_DIR}/tmp
-cd ${V8_DIR}
+log_and_run cd ${V8_DIR}
-rm -rf ${SPEC_TEST_DIR}/tests
-mkdir ${SPEC_TEST_DIR}/tests
+log_and_run rm -rf ${SPEC_TEST_DIR}/tests
+log_and_run mkdir ${SPEC_TEST_DIR}/tests
-rm -rf ${SPEC_TEST_DIR}/tmp
-mkdir ${SPEC_TEST_DIR}/tmp
+log_and_run mkdir ${SPEC_TEST_DIR}/tests/proposals
-./tools/dev/gm.py x64.release d8
+log_and_run rm -rf ${TMP_DIR}
+log_and_run mkdir ${TMP_DIR}
-cd ${V8_DIR}/test/wasm-js/data/interpreter
+###############################################################################
+# Generate the spec tests.
+###############################################################################
+log_and_run cd ${V8_DIR}/test/wasm-js/data/interpreter
# The next step requires that ocaml is installed. See the README.md in
# ${V8_DIR}/test/wasm-js/data/interpreter/.
-make clean all
+log_and_run make clean opt
+
+log_and_run cd ${V8_DIR}/test/wasm-js/data/test/core
+log_and_run cp *.wast ${SPEC_TEST_DIR}/tests/
+
+log_and_run ./run.py --wasm ${V8_DIR}/test/wasm-js/data/interpreter/wasm --out ${TMP_DIR}
+log_and_run cp ${TMP_DIR}/*.js ${SPEC_TEST_DIR}/tests/
-cd ${V8_DIR}/test/wasm-js/data/test/core
+###############################################################################
+# Generate the proposal tests.
+###############################################################################
+repos='bulk-memory-operations reference-types'
-./run.py --wasm ${V8_DIR}/test/wasm-js/data/interpreter/wasm --js ${V8_DIR}/out/x64.release/d8 --out ${SPEC_TEST_DIR}/tmp
-cp ${SPEC_TEST_DIR}/tmp/*.js ${SPEC_TEST_DIR}/tests/
-rm -rf ${SPEC_TEST_DIR}/tmp
+for repo in ${repos}; do
+ echo "Process ${repo}"
+ log_and_run cd ${TMP_DIR}
+ log_and_run git clone https://github.com/WebAssembly/${repo}
+ # Compile the spec interpreter to generate the .js test cases later.
+ log_and_run cd ${repo}/interpreter
+ log_and_run make clean opt
+ log_and_run cd ../test/core
+ log_and_run mkdir ${SPEC_TEST_DIR}/tests/proposals/${repo}
+
+ # Iterate over all proposal tests. Those which differ from the spec tests are
+ # copied to the output directory and converted to .js tests.
+ for abs_filename in ${TMP_DIR}/${repo}/test/core/*.wast; do
+ rel_filename="$(basename -- $abs_filename)"
+ test_name=${rel_filename%.wast}
+ spec_filename=${V8_DIR}/test/wasm-js/data/test/core/${rel_filename}
+ if [ ! -f "$spec_filename" ] || ! cmp -s $abs_filename $spec_filename ; then
+ log_and_run cp ${rel_filename} ${SPEC_TEST_DIR}/tests/proposals/${repo}/
+ log_and_run ./run.py --wasm ../../interpreter/wasm ${rel_filename} --out _build 2> /dev/null
+ fi
+ done
+ log_and_run cp _build/*.js ${SPEC_TEST_DIR}/tests/proposals/${repo}/
+done
+
+###############################################################################
+# Report and cleanup.
+###############################################################################
cd ${SPEC_TEST_DIR}
echo
echo "The following files will get uploaded:"
-ls tests
+ls -R tests
echo
-# For the following command you first have to authenticate with google cloud
-# storage. For that you have to execute
-#
-# > gsutil.py config
-#
-# When the script asks you for your project-id, use 0.
-upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
+log_and_run rm -rf ${TMP_DIR}
+
+###############################################################################
+# Upload all spec tests.
+###############################################################################
+
+echo "****************************************************************************"
+echo "* For the following command you first have to authenticate with google cloud"
+echo "* storage. For that you have to execute"
+echo "*"
+echo "* > gsutil.py config"
+echo "*"
+echo "* When the script asks you for your project-id, use 0."
+echo "****************************************************************************"
+log_and_run cd ${SPEC_TEST_DIR}
+log_and_run upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 5f663412eb..9a80a32344 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,4 +7,4 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles.......
+The bartender starts to shake the bottles..........
diff --git a/deps/v8/tools/windbg.js b/deps/v8/tools/windbg.js
index a82c753772..3df14f4a2e 100644
--- a/deps/v8/tools/windbg.js
+++ b/deps/v8/tools/windbg.js
@@ -20,23 +20,37 @@ function help() {
print(" !job(address_or_taggedint)");
print(" prints object at the address, e.g. !job(0x235cb869f9)");
print(" !jobs(start_address, count)");
- print(" prints 'count' objects from a continuous range of Object pointers");
- print(" e.g. !jobs(0x5f7270, 42)");
+ print(" prints 'count' objects from a continuous range of Object");
+ print(" pointers, e.g. !jobs(0x5f7270, 42)");
print(" !jst() or !jst");
print(" prints javascript stack (output goes into the console)");
print(" !jsbp() or !jsbp");
- print(" sets bp in v8::internal::Execution::Call (begin user's script)");
+ print(" sets bp in v8::internal::Execution::Call");
print("");
print("--------------------------------------------------------------------");
- print(" to run any function from this script (live or postmortem):");
+ print(" Managed heap");
+ print("--------------------------------------------------------------------");
+ print(" !set_iso(isolate_address)");
+ print(" call this function before using !mem or other heap routines");
+ print(" !mem or !mem(\"space1[ space2 ...]\")");
+ print(" prints memory chunks from the 'space' owned by the heap in the");
+ print(" isolate set by !set_iso; valid values for 'space' are:");
+ print(" new, old, map, code, lo [large], nlo [newlarge], ro [readonly]");
+ print(" if no 'space' specified prints memory chunks for all spaces,");
+ print(" e.g. !mem(\"code\"), !mem(\"ro new old\")");
+ print(" !where(address)");
+ print(" prints name of the space and address of the MemoryChunk the");
+ print(" 'address' is from, e.g. !where(0x235cb869f9)");
+ print("");
+ print("--------------------------------------------------------------------");
+ print(" To run any function from this script (live or postmortem):");
print("");
print(" dx @$scriptContents.function_name(args)");
print(" e.g. dx @$scriptContents.pointer_size()");
- print(" e.g. dx @$scriptContents.module_name('v8_test')");
+ print(" e.g. dx @$scriptContents.module_name(\"v8_for_test\")");
print("--------------------------------------------------------------------");
}
-
/*=============================================================================
Output
=============================================================================*/
@@ -222,6 +236,170 @@ function set_user_js_bp() {
ctl.ExecuteCommand(`bp ${module_name()}!v8::internal::Execution::Call`)
}
+
+/*=============================================================================
+ Managed heap related functions (live and post-mortem debugging)
+=============================================================================*/
+let isolate_address = 0;
+function set_isolate_address(addr) {
+ isolate_address = addr;
+}
+
+/*-----------------------------------------------------------------------------
+ Memory in each Space is organized into a linked list of memory chunks
+-----------------------------------------------------------------------------*/
+const NEVER_EVACUATE = 1 << 7; // see src\heap\spaces.h
+
+function print_memory_chunk_list(space_type, front, top, age_mark) {
+ let alloc_pos = top ? ` (allocating at: ${top})` : "";
+ let age_mark_pos = age_mark ? ` (age_mark at: ${top})` : "";
+ print(`${space_type}${alloc_pos}${age_mark_pos}:`);
+ if (front.isNull) {
+ print("<empty>\n");
+ return;
+ }
+
+ let cur = front;
+ while (!cur.isNull) {
+ let imm = cur.flags_ & NEVER_EVACUATE ? "*" : " ";
+ let addr = `0x${cur.address.toString(16)}`;
+ let area =
+ `0x${cur.area_start_.toString(16)} - 0x${cur.area_end_.toString(16)}`;
+ let dt = `dt ${addr} ${module_name()}!v8::internal::MemoryChunk`;
+ print(`${imm} ${addr}:\t ${area} (0x${cur.size_.toString(16)}) : ${dt}`);
+ cur = cur.list_node_.next_;
+ }
+ print("");
+}
+
+const space_tags =
+ ['old', 'new_to', 'new_from', 'ro', 'map', 'code', 'lo', 'nlo'];
+
+function get_chunks_space(space_tag, front, chunks) {
+ let cur = front;
+ while (!cur.isNull) {
+ chunks.push({
+ 'address':cur.address,
+ 'area_start_':cur.area_start_,
+ 'area_end_':cur.area_end_,
+ 'space':space_tag});
+ cur = cur.list_node_.next_;
+ }
+}
+
+function get_chunks() {
+ let iso = cast(isolate_address, "v8::internal::Isolate");
+ let h = iso.heap_;
+
+ let chunks = [];
+ get_chunks_space('old', h.old_space_.memory_chunk_list_.front_, chunks);
+ get_chunks_space('new_to',
+ h.new_space_.to_space_.memory_chunk_list_.front_, chunks);
+ get_chunks_space('new_from',
+ h.new_space_.from_space_.memory_chunk_list_.front_, chunks);
+ get_chunks_space('ro', h.read_only_space_.memory_chunk_list_.front_, chunks);
+ get_chunks_space('map', h.map_space_.memory_chunk_list_.front_, chunks);
+ get_chunks_space('code', h.code_space_.memory_chunk_list_.front_, chunks);
+ get_chunks_space('lo', h.lo_space_.memory_chunk_list_.front_, chunks);
+ get_chunks_space('nlo', h.new_lo_space_.memory_chunk_list_.front_, chunks);
+
+ return chunks;
+}
+
+function find_chunk(address) {
+ // if 'address' is greater than Number.MAX_SAFE_INTEGER, comparison ops on it
+ // throw "Error: 64 bit value loses precision on conversion to number"
+ try {
+ let chunks = get_chunks(isolate_address);
+ for (let c of chunks) {
+ let chunk = cast(c.address, "v8::internal::MemoryChunk");
+ if (address >= chunk.area_start_ && address < chunk.area_end_) {
+ return c;
+ }
+ }
+ }
+ catch (e) { }
+ return undefined;
+}
+
+/*-----------------------------------------------------------------------------
+ Print memory chunks from spaces in the current Heap
+ 'isolate_address' should be an int (so in hex must include '0x' prefix).
+ 'space': space separated string containing "all", "old", "new", "map",
+ "code", "ro [readonly]", "lo [large]", "nlo [newlarge]"
+-----------------------------------------------------------------------------*/
+function print_memory(space = "all") {
+ if (isolate_address == 0) {
+ print("Please call !set_iso(isolate_address) first.");
+ return;
+ }
+
+ let iso = cast(isolate_address, "v8::internal::Isolate");
+ let h = iso.heap_;
+ print(`Heap at ${h.targetLocation}`);
+
+ let st = space.toLowerCase().split(" ");
+
+ print("Im address:\t object area start - end (size)");
+ if (st.includes("all") || st.includes("old")) {
+ print_memory_chunk_list("OldSpace",
+ h.old_space_.memory_chunk_list_.front_,
+ h.old_space_.allocation_info_.top_);
+ }
+ if (st.includes("all") || st.includes("new")) {
+ // new space doesn't use the chunk list from its base class but from
+ // the to/from semi-spaces it points to
+ print_memory_chunk_list("NewSpace_To",
+ h.new_space_.to_space_.memory_chunk_list_.front_,
+ h.new_space_.allocation_info_.top_,
+ h.new_space_.to_space_.age_mark_);
+ print_memory_chunk_list("NewSpace_From",
+ h.new_space_.from_space_.memory_chunk_list_.front_);
+ }
+ if (st.includes("all") || st.includes("map")) {
+ print_memory_chunk_list("MapSpace",
+ h.map_space_.memory_chunk_list_.front_,
+ h.map_space_.allocation_info_.top_);
+ }
+ if (st.includes("all") || st.includes("code")) {
+ print_memory_chunk_list("CodeSpace",
+ h.code_space_.memory_chunk_list_.front_,
+ h.code_space_.allocation_info_.top_);
+ }
+ if (st.includes("all") || st.includes("large") || st.includes("lo")) {
+ print_memory_chunk_list("LargeObjectSpace",
+ h.lo_space_.memory_chunk_list_.front_);
+ }
+ if (st.includes("all") || st.includes("newlarge") || st.includes("nlo")) {
+ print_memory_chunk_list("NewLargeObjectSpace",
+ h.new_lo_space_.memory_chunk_list_.front_);
+ }
+ if (st.includes("all") || st.includes("readonly") || st.includes("ro")) {
+ print_memory_chunk_list("ReadOnlySpace",
+ h.read_only_space_.memory_chunk_list_.front_);
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ 'isolate_address' and 'address' should be ints (so in hex must include '0x'
+ prefix).
+-----------------------------------------------------------------------------*/
+function print_owning_space(address) {
+ if (isolate_address == 0) {
+ print("Please call !set_iso(isolate_address) first.");
+ return;
+ }
+
+ let c = find_chunk(address);
+ let addr = `0x${address.toString(16)}`;
+ if (c) {
+ print(`${addr} is in ${c.space} (chunk: 0x${c.address.toString(16)})`);
+ }
+ else {
+ print(`Address ${addr} is not in managed heap`);
+ }
+}
+
/*=============================================================================
Initialize short aliased names for the most common commands
=============================================================================*/
@@ -233,6 +411,10 @@ function initializeScript() {
new host.functionAlias(print_objects_array, "jobs"),
new host.functionAlias(print_js_stack, "jst"),
+ new host.functionAlias(set_isolate_address, "set_iso"),
+ new host.functionAlias(print_memory, "mem"),
+ new host.functionAlias(print_owning_space, "where"),
+
new host.functionAlias(set_user_js_bp, "jsbp"),
]
}